repo_name (string, length 6-100) | path (string, length 4-294) | copies (string, length 1-5) | size (string, length 4-6) | content (string, length 606-896k) | license (string, 15 classes)
---|---|---|---|---|---|
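Each row below pairs a source file's repository name, path, duplicate count, size in bytes, raw content, and license. As a rough sketch (the dataset identifier is not given in this dump, so the name below is a placeholder), rows with this schema could be streamed with the Hugging Face `datasets` library:

    # Hypothetical loading sketch -- replace the placeholder with the real dataset id.
    from datasets import load_dataset

    rows = load_dataset("some-org/some-code-dataset", split="train", streaming=True)
    for row in rows:
        # Column names follow the header above.
        print(row["repo_name"], row["path"], row["license"], len(row["content"]))
        break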
praekelt/vumi-go
|
go/billing/migrations/0009_auto__chg_field_messagecost_tag_pool__add_index_messagecost_message_di.py
|
1
|
10898
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'MessageCost.tag_pool'
db.alter_column(u'billing_messagecost', 'tag_pool_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['billing.TagPool'], null=True))
# Adding index on 'MessageCost', fields ['message_direction']
db.create_index(u'billing_messagecost', ['message_direction'])
# Adding unique constraint on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.create_unique(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
# Adding index on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.create_index(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
def backwards(self, orm):
# Removing index on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.delete_index(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
# Removing unique constraint on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.delete_unique(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
# Removing index on 'MessageCost', fields ['message_direction']
db.delete_index(u'billing_messagecost', ['message_direction'])
# User chose to not deal with backwards NULL issues for 'MessageCost.tag_pool'
raise RuntimeError("Cannot reverse this migration. 'MessageCost.tag_pool' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'MessageCost.tag_pool'
db.alter_column(u'billing_messagecost', 'tag_pool_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['billing.TagPool']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.gouser': {
'Meta': {'object_name': 'GoUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'billing.account': {
'Meta': {'object_name': 'Account'},
'account_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'alert_credit_balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '20', 'decimal_places': '6'}),
'alert_threshold': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2'}),
'credit_balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '20', 'decimal_places': '6'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.GoUser']"})
},
u'billing.lineitem': {
'Meta': {'object_name': 'LineItem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message_direction': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'statement': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.Statement']"}),
'tag_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'tag_pool_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'total_cost': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'billing.messagecost': {
'Meta': {'unique_together': "[['account', 'tag_pool', 'message_direction']]", 'object_name': 'MessageCost', 'index_together': "[['account', 'tag_pool', 'message_direction']]"},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.Account']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'markup_percent': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2'}),
'message_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '3'}),
'message_direction': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'session_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '3'}),
'tag_pool': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.TagPool']", 'null': 'True', 'blank': 'True'})
},
u'billing.statement': {
'Meta': {'object_name': 'Statement'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.Account']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'to_date': ('django.db.models.fields.DateField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'billing.tagpool': {
'Meta': {'object_name': 'TagPool'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'billing.transaction': {
'Meta': {'object_name': 'Transaction'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'credit_amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '20', 'decimal_places': '6'}),
'credit_factor': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'markup_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'message_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '3'}),
'message_direction': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'session_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '3'}),
'session_created': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Pending'", 'max_length': '20'}),
'tag_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'tag_pool_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['billing']
|
bsd-3-clause
|
kneufeld/crossbarexamples
|
rest/subscriber/publish.py
|
12
|
2204
|
###############################################################################
##
## Copyright (C) 2014, Tavendo GmbH and/or collaborators. All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
##
## 2. Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
##
###############################################################################
import crossbarconnect
if __name__ == '__main__':

    ## create a new Crossbar.io push client (once)
    ##
    client = crossbarconnect.Client("http://127.0.0.1:8080/publish")

    ## publish an event without payload
    ##
    client.publish("com.myapp.topic1")

    ## publish an event with (positional) payload and get publication ID
    ##
    event_id = client.publish("com.myapp.topic1", "Hello, world!", 23)
    print("event published with ID {0}".format(event_id))

    ## publish 5 events with complex payload
    ##
    for i in range(5):
        client.publish("com.myapp.topic1", i, sq=i * i, msg="Hello, world!")
|
apache-2.0
|
endolith/scipy
|
scipy/stats/mstats.py
|
18
|
2260
|
"""
===================================================================
Statistical functions for masked arrays (:mod:`scipy.stats.mstats`)
===================================================================
.. currentmodule:: scipy.stats.mstats
This module contains a large number of statistical functions that can
be used with masked arrays.
Most of these functions are similar to those in `scipy.stats` but might
have small differences in the API or in the algorithm used. Since this
is a relatively new package, some API changes are still possible.
Summary statistics
==================
.. autosummary::
:toctree: generated/
describe
gmean
hmean
kurtosis
mode
mquantiles
hdmedian
hdquantiles
hdquantiles_sd
idealfourths
plotting_positions
meppf
moment
skew
tmean
tvar
tmin
tmax
tsem
variation
find_repeats
sem
trimmed_mean
trimmed_mean_ci
trimmed_std
trimmed_var
Frequency statistics
====================
.. autosummary::
:toctree: generated/
scoreatpercentile
Correlation functions
=====================
.. autosummary::
:toctree: generated/
f_oneway
pearsonr
spearmanr
pointbiserialr
kendalltau
kendalltau_seasonal
linregress
siegelslopes
theilslopes
sen_seasonal_slopes
Statistical tests
=================
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_onesamp
ttest_ind
ttest_rel
chisquare
kstest
ks_2samp
ks_1samp
ks_twosamp
mannwhitneyu
rankdata
kruskal
kruskalwallis
friedmanchisquare
brunnermunzel
skewtest
kurtosistest
normaltest
Transformations
===============
.. autosummary::
:toctree: generated/
obrientransform
trim
trima
trimmed_stde
trimr
trimtail
trimboth
winsorize
zmap
zscore
Other
=====
.. autosummary::
:toctree: generated/
argstoarray
count_tied_groups
msign
compare_medians_ms
median_cihs
mjci
mquantiles_cimj
rsh
"""
from .mstats_basic import *
from .mstats_extras import *
# Functions that support masked array input in stats but need to be kept in the
# mstats namespace for backwards compatibility:
from scipy.stats import gmean, hmean, zmap, zscore, chisquare
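# --- Illustrative sketch (not part of the original module) ------------------
# A minimal demonstration of the masked-array statistics documented above:
# masked entries are excluded from the computation.
if __name__ == "__main__":
    import numpy as np
    from scipy.stats import mstats

    # 999.0 marks a missing value; the mask excludes it from the statistics.
    data = np.ma.masked_array([1.0, 2.0, 4.0, 999.0],
                              mask=[False, False, False, True])
    print(mstats.gmean(data))      # geometric mean of the unmasked values
    print(mstats.describe(data))   # summary statistics on the masked array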
|
bsd-3-clause
|
tamac-io/jenkins-job-builder
|
jenkins_jobs/modules/base.py
|
2
|
3134
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Base class for a jenkins_jobs module
import xml.etree.ElementTree as XML
def add_nonblank_xml_subelement(parent, tag, value):
    """
    Adds an XML SubElement with the name tag to parent if value is a
    non-empty string
    """
    if value is not None and value != '':
        XML.SubElement(parent, tag).text = value


class Base(object):
    """
    A base class for a Jenkins Job Builder Module.

    The module is initialized before any YAML is parsed.

    :arg ModuleRegistry registry: the global module registry.
    """

    #: The sequence number for the module.  Modules are invoked in the
    #: order of their sequence number in order to produce consistently
    #: ordered XML output.
    sequence = 10

    #: The component type for components of this module.  This will be
    #: used to look for macros (they are defined singularly, and should
    #: not be plural).
    #: Set both component_type and component_list_type to None if module
    #: doesn't have components.
    component_type = None

    #: The component list type will be used to look up possible
    #: implementations of the component type via entry points (entry
    #: points provide a list of components, so it should be plural).
    #: Set both component_type and component_list_type to None if module
    #: doesn't have components.
    component_list_type = None

    def __init__(self, registry):
        self.registry = registry

    def handle_data(self, job_data):
        """This method is called before any XML is generated.  By
        overriding this method, a module may arbitrarily modify a data
        structure which will probably be the JJB YamlParser's intermediate
        data representation.  If it has changed the data structure at all,
        it must return ``True``, otherwise, it must return ``False``.

        :arg dict job_data: the intermediate representation of job data
            loaded from JJB Yaml files without variables interpolation or
            other yaml expansions.

        :rtype: boolean
        """
        return False

    def gen_xml(self, xml_parent, data):
        """Update the XML element tree based on YAML data.  Override
        this method to add elements to the XML output.  Create new
        Element objects and add them to the xml_parent.  The YAML data
        structure must not be modified.

        :arg YAMLParser parser: the global YAML Parser
        :arg Element xml_parent: the parent XML element
        :arg dict data: the YAML data structure
        """
        pass
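# --- Illustrative sketch (not part of the original module) ------------------
# A hypothetical module subclassing Base, showing how gen_xml and
# add_nonblank_xml_subelement are meant to be used together.  The tag and
# YAML key names below are made up for the example.
class ExampleNotifier(Base):
    sequence = 50
    component_type = None
    component_list_type = None

    def gen_xml(self, xml_parent, data):
        # Read a value from the job's YAML data and emit it as XML only
        # when it is a non-empty string.
        recipients = data.get('example-recipients', '')
        add_nonblank_xml_subelement(xml_parent, 'recipients', recipients)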
|
apache-2.0
|
jounex/hue
|
desktop/core/ext-py/Django-1.6.10/tests/fixtures/tests.py
|
50
|
32310
|
from __future__ import absolute_import
import warnings
from django.contrib.sites.models import Site
from django.core import management
from django.db import connection, IntegrityError
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from django.utils import six
from .models import Article, Book, Spy, Tag, Visa
class TestCaseFixtureLoadingTests(TestCase):
fixtures = ['fixture1.json', 'fixture2.json']
def testClassFixtures(self):
"Check that test case has installed 3 fixture objects"
self.assertEqual(Article.objects.count(), 3)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
class DumpDataAssertMixin(object):
def _dumpdata_assert(self, args, output, format='json', natural_keys=False,
use_base_manager=False, exclude_list=[], primary_keys=''):
new_io = six.StringIO()
management.call_command('dumpdata', *args, **{'format': format,
'stdout': new_io,
'stderr': new_io,
'use_natural_keys': natural_keys,
'use_base_manager': use_base_manager,
'exclude': exclude_list,
'primary_keys': primary_keys})
command_output = new_io.getvalue().strip()
if format == "json":
self.assertJSONEqual(command_output, output)
elif format == "xml":
self.assertXMLEqual(command_output, output)
else:
self.assertEqual(command_output, output)
class FixtureLoadingTests(DumpDataAssertMixin, TestCase):
def test_initial_data(self):
# syncdb introduces 1 initial data object from initial_data.json.
self.assertQuerysetEqual(Book.objects.all(), [
'<Book: Achieving self-awareness of Python programs>'
])
def test_loading_and_dumping(self):
Site.objects.all().delete()
# Load fixture 1. Single JSON file, with two objects.
management.call_command('loaddata', 'fixture1.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]')
# Try just dumping the contents of fixtures.Category
self._dumpdata_assert(['fixtures.Category'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]')
# ...and just fixtures.Article
self._dumpdata_assert(['fixtures.Article'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# ...and both
self._dumpdata_assert(['fixtures.Category', 'fixtures.Article'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# Specify a specific model twice
self._dumpdata_assert(['fixtures.Article', 'fixtures.Article'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# Specify a dump that specifies Article both explicitly and implicitly
self._dumpdata_assert(['fixtures.Article', 'fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]')
# Same again, but specify in the reverse order
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]')
# Specify one model from one application, and an entire other application.
self._dumpdata_assert(['fixtures.Category', 'sites'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]')
# Load fixture 2. JSON file imported by default. Overwrites some existing objects
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
# Load fixture 3, XML format.
management.call_command('loaddata', 'fixture3.xml', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture6.json', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "law">',
], ordered=False)
# Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture7.xml', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "legal">',
'<Tag: <Article: Django conquers world!> tagged "django">',
'<Tag: <Article: Django conquers world!> tagged "world domination">',
], ordered=False)
# Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture8.json', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user>',
'<Visa: Prince >'
], ordered=False)
# Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture9.xml', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user, Can delete user>',
'<Visa: Artist formerly known as "Prince" Can change user>'
], ordered=False)
self.assertQuerysetEqual(Book.objects.all(), [
'<Book: Achieving self-awareness of Python programs>',
'<Book: Music for all ages by Artist formerly known as "Prince" and Django Reinhardt>'
])
# Loading a fixture that doesn't exist emits a warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
management.call_command('loaddata', 'unknown.json', verbosity=0)
self.assertEqual(len(w), 1)
self.assertTrue(w[0].message, "No fixture named 'unknown' found.")
# An attempt to load a nonexistent 'initial_data' fixture isn't an error
with warnings.catch_warnings(record=True) as w:
management.call_command('loaddata', 'initial_data.json', verbosity=0)
self.assertEqual(len(w), 0)
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# By default, you get raw keys on dumpdata
self._dumpdata_assert(['fixtures.book'], '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [3, 1]}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]')
# But you can get natural keys if you ask for them and they are available
self._dumpdata_assert(['fixtures.book'], '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]', natural_keys=True)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker on TV is great!", "pub_date": "2006-06-16T11:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}, {"pk": 4, "model": "fixtures.article", "fields": {"headline": "Django conquers world!", "pub_date": "2006-06-16T15:00:00"}}, {"pk": 5, "model": "fixtures.article", "fields": {"headline": "XML identified as leading cause of cancer", "pub_date": "2006-06-16T16:00:00"}}, {"pk": 1, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": 3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "legal", "tagged_id": 3}}, {"pk": 3, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "django", "tagged_id": 4}}, {"pk": 4, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "world domination", "tagged_id": 4}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", "fields": {"name": "Artist formerly known as \\"Prince\\""}}, {"pk": 1, "model": "fixtures.visa", "fields": {"person": ["Django Reinhardt"], "permissions": [["add_user", "auth", "user"], ["change_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 2, "model": "fixtures.visa", "fields": {"person": ["Stephane Grappelli"], "permissions": [["add_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 3, "model": "fixtures.visa", "fields": {"person": ["Artist formerly known as \\"Prince\\""], "permissions": [["change_user", "auth", "user"]]}}, {"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]', natural_keys=True)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(['fixtures'], """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0"><object pk="1" model="fixtures.category"><field type="CharField" name="title">News Stories</field><field type="TextField" name="description">Latest news stories</field></object><object pk="2" model="fixtures.article"><field type="CharField" name="headline">Poker on TV is great!</field><field type="DateTimeField" name="pub_date">2006-06-16T11:00:00</field></object><object pk="3" model="fixtures.article"><field type="CharField" name="headline">Copyright is fine the way it is</field><field type="DateTimeField" name="pub_date">2006-06-16T14:00:00</field></object><object pk="4" model="fixtures.article"><field type="CharField" name="headline">Django conquers world!</field><field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field></object><object pk="5" model="fixtures.article"><field type="CharField" name="headline">XML identified as leading cause of cancer</field><field type="DateTimeField" name="pub_date">2006-06-16T16:00:00</field></object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">legal</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="3" model="fixtures.tag"><field type="CharField" name="name">django</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="4" model="fixtures.tag"><field type="CharField" name="name">world domination</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">Artist formerly known as "Prince"</field></object><object pk="1" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Django Reinhardt</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>change_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="2" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Stephane Grappelli</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="3" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Artist formerly 
known as "Prince"</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>change_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="1" model="fixtures.book"><field type="CharField" name="name">Music for all ages</field><field to="fixtures.person" name="authors" rel="ManyToManyRel"><object><natural>Artist formerly known as "Prince"</natural></object><object><natural>Django Reinhardt</natural></object></field></object><object pk="10" model="fixtures.book"><field type="CharField" name="name">Achieving self-awareness of Python programs</field><field to="fixtures.person" name="authors" rel="ManyToManyRel"></field></object></django-objects>""", format='xml', natural_keys=True)
def test_dumpdata_with_excludes(self):
# Load fixture1 which has a site, two articles, and a category
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1.json', verbosity=0)
# Excluding fixtures app should only leave sites
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]',
exclude_list=['fixtures'])
# Excluding fixtures.Article/Book should leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, {"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book'])
# Excluding fixtures and fixtures.Article/Book should be a no-op
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, {"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book'])
# Excluding sites and fixtures.Article/Book should only leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book', 'sites'])
# Excluding a bogus app should throw an error
with six.assertRaisesRegex(self, management.CommandError,
"Unknown app in excludes: foo_app"):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['foo_app'])
# Excluding a bogus model should throw an error
with six.assertRaisesRegex(self, management.CommandError,
"Unknown model in excludes: fixtures.FooModel"):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['fixtures.FooModel'])
def test_dumpdata_with_filtering_manager(self):
spy1 = Spy.objects.create(name='Paul')
spy2 = Spy.objects.create(name='Alex', cover_blown=True)
self.assertQuerysetEqual(Spy.objects.all(),
['<Spy: Paul>'])
# Use the default manager
self._dumpdata_assert(['fixtures.Spy'], '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy1.pk)
# Dump using Django's base manager. Should return all objects,
# even those normally filtered by the manager
self._dumpdata_assert(['fixtures.Spy'], '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": true}}, {"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % (spy2.pk, spy1.pk), use_base_manager=True)
def test_dumpdata_with_pks(self):
management.call_command('loaddata', 'fixture1.json', verbosity=0)
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}]',
primary_keys='2'
)
with six.assertRaisesRegex(self, management.CommandError,
"You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with six.assertRaisesRegex(self, management.CommandError,
"You can only use --pks option with one model"):
self._dumpdata_assert(
'',
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with six.assertRaisesRegex(self, management.CommandError,
"You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures.Article', 'fixtures.category'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
def test_compress_format_loading(self):
# Load fixture 4 (compressed), using format specification
management.call_command('loaddata', 'fixture4.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
])
def test_compressed_specified_loading(self):
# Load fixture 5 (compressed), using format *and* compression specification
management.call_command('loaddata', 'fixture5.json.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_compressed_loading(self):
# Load fixture 5 (compressed), only compression specification
management.call_command('loaddata', 'fixture5.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_ambiguous_compressed_fixture(self):
# The name "fixture5" is ambigous, so loading it will raise an error
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture5', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture5'", cm.exception.args[0])
def test_db_loading(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier implicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0)
management.call_command('loaddata', 'db_fixture_2', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_loaddata_error_message(self):
"""
Verifies that loading a fixture which contains an invalid object
outputs an error message which contains the pk of the object
that triggered the error.
"""
# MySQL needs a little prodding to reject invalid data.
# This won't affect other tests because the database connection
# is closed at the end of each test.
if connection.vendor == 'mysql':
connection.cursor().execute("SET sql_mode = 'TRADITIONAL'")
with self.assertRaises(IntegrityError) as cm:
management.call_command('loaddata', 'invalid.json', verbosity=0)
self.assertIn("Could not load fixtures.Article(pk=1):", cm.exception.args[0])
def test_loading_using(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier explicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0, using='default')
management.call_command('loaddata', 'db_fixture_2', verbosity=0, using='default')
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_unmatched_identifier_loading(self):
# Try to load db fixture 3. This won't load because the database identifier doesn't match
with warnings.catch_warnings(record=True):
management.call_command('loaddata', 'db_fixture_3', verbosity=0)
with warnings.catch_warnings(record=True):
management.call_command('loaddata', 'db_fixture_3', verbosity=0, using='default')
self.assertQuerysetEqual(Article.objects.all(), [])
def test_output_formats(self):
# Load back in fixture 1, we need the articles from it
management.call_command('loaddata', 'fixture1', verbosity=0)
# Try to load fixture 6 using format discovery
management.call_command('loaddata', 'fixture6', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Time to reform copyright> tagged "copyright">',
'<Tag: <Article: Time to reform copyright> tagged "law">'
], ordered=False)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 1, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": 3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "law", "tagged_id": 3}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", "fields": {"name": "Prince"}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]', natural_keys=True)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(['fixtures'], """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0"><object pk="1" model="fixtures.category"><field type="CharField" name="title">News Stories</field><field type="TextField" name="description">Latest news stories</field></object><object pk="2" model="fixtures.article"><field type="CharField" name="headline">Poker has no place on ESPN</field><field type="DateTimeField" name="pub_date">2006-06-16T12:00:00</field></object><object pk="3" model="fixtures.article"><field type="CharField" name="headline">Time to reform copyright</field><field type="DateTimeField" name="pub_date">2006-06-16T13:00:00</field></object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">law</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">Prince</field></object><object pk="10" model="fixtures.book"><field type="CharField" name="name">Achieving self-awareness of Python programs</field><field to="fixtures.person" name="authors" rel="ManyToManyRel"></field></object></django-objects>""", format='xml', natural_keys=True)
class FixtureTransactionTests(DumpDataAssertMixin, TransactionTestCase):
available_apps = [
'fixtures',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
]
@skipUnlessDBFeature('supports_forward_references')
def test_format_discovery(self):
# Load fixture 1 again, using format discovery
management.call_command('loaddata', 'fixture1', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Try to load fixture 2 using format discovery; this will fail
# because there are two fixture2's in the fixtures directory
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture2', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture2'", cm.exception.args[0])
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]')
# Load fixture 4 (compressed), using format discovery
management.call_command('loaddata', 'fixture4', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
|
apache-2.0
|
cjh1/tomviz
|
tomviz/python/tomviz/operators.py
|
3
|
3147
|
# -*- coding: utf-8 -*-
###############################################################################
#
# This source file is part of the tomviz project.
#
# Copyright Kitware, Inc.
#
# This source code is released under the New BSD License, (the "License").
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
class Progress(object):
    """
    Class used to update operator progress.
    """
    def __init__(self, operator):
        self._operator = operator

    @property
    def maximum(self):
        """
        Property defining the maximum progress value
        """
        return self._operator._operator_wrapper.progress_maximum

    @maximum.setter
    def maximum(self, value):
        self._operator._operator_wrapper.progress_maximum = value

    @property
    def value(self):
        """
        Property defining the current progress value
        """
        return self._operator._operator_wrapper.progress_value

    @value.setter
    def value(self, value):
        """
        Updates the progress of the operator.

        :param value: The current progress value.
        :type value: int
        """
        self._operator._operator_wrapper.progress_value = value

    @property
    def message(self):
        """
        Property defining the current progress message
        """
        return self._operator._operator_wrapper.progress_message

    @message.setter
    def message(self, msg):
        """
        Updates the progress message of the operator.

        :param msg: The current progress message.
        :type msg: str
        """
        self._operator._operator_wrapper.progress_message = msg


class Operator(object):
    """
    The base operator class from which all operators should be derived.
    """
    def __new__(cls, *args, **kwargs):
        obj = super(Operator, cls).__new__(cls)
        obj.progress = Progress(obj)
        return obj

    def transform_scalars(self, data):
        """
        This method should be overridden by subclasses to implement the
        operations the operator should perform.
        """
        raise NotImplementedError('Must be implemented by subclass')


class CancelableOperator(Operator):
    """
    A cancelable operator allows the user to interrupt the execution of the
    operator. The canceled property can be used in the transform_scalars(...)
    method to break out when the operator is canceled. The basic structure of
    the transform_scalars(...) method might look something like this:

        def transform_scalars(self, data):
            while(not self.canceled):
                # Do work

    """
    @property
    def canceled(self):
        """
        :returns: True if the operator has been canceled, False otherwise.
        """
        return self._operator_wrapper.canceled
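# --- Illustrative sketch (not part of the original module) ------------------
# A hypothetical cancelable operator showing how the progress and canceled
# attributes documented above are typically used; the chunked processing of
# `data` is made up for the example.
class ExampleChunkedOperator(CancelableOperator):

    def transform_scalars(self, data):
        chunks = 10
        self.progress.maximum = chunks
        self.progress.message = 'Processing scalars'
        for i in range(chunks):
            if self.canceled:
                return
            # Do one chunk of work on `data` here.
            self.progress.value = i + 1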
|
bsd-3-clause
|
Sabayon/entropy
|
lib/tests/spm.py
|
1
|
13331
|
# -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, '.')
sys.path.insert(0, '../')
import unittest
import shutil
import entropy.tools as et
from entropy.const import const_mkdtemp
from entropy.client.interfaces import Client
from entropy.spm.plugins.interfaces.portage_plugin import \
PortageEntropyDepTranslator
import tests._misc as _misc
class SpmTest(unittest.TestCase):
def setUp(self):
self.Client = Client(installed_repo = -1, indexing = False,
xcache = False, repo_validation = False)
self.test_pkg = _misc.get_test_entropy_package()
self.test_pkg2 = _misc.get_test_entropy_package2()
self.test_pkg3 = _misc.get_test_entropy_package3()
self.test_pkgs = [self.test_pkg, self.test_pkg2, self.test_pkg3]
def tearDown(self):
"""
tearDown is run after each test
"""
# calling destroy() and shutdown()
# need to call destroy() directly to remove all the SystemSettings
# plugins because shutdown() doesn't, since it's meant to be called
# right before terminating the process
self.Client.destroy()
self.Client.shutdown()
def test_portage_translator(self):
deps = {
"""|| ( app-emulation/virtualbox
>=app-emulation/virtualbox-bin-2.2.0
)""": \
"( app-emulation/virtualbox | >=app-emulation/virtualbox-bin-2.2.0 )",
"""|| ( ( gnome-extra/zenity ) ( kde-base/kdialog ) )
""": \
"( ( gnome-extra/zenity ) | ( kde-base/kdialog ) )",
"""|| ( <media-libs/xine-lib-1.2
( >=media-libs/xine-lib-1.2 virtual/ffmpeg ) )
""": \
"( <media-libs/xine-lib-1.2 | ( >=media-libs/xine-lib-1.2 & virtual/ffmpeg ) )",
}
for dep, expected in deps.items():
tr = PortageEntropyDepTranslator(dep)
self.assertEqual(expected, tr.translate())
def test_init(self):
spm = self.Client.Spm()
spm2 = self.Client.Spm()
self.assertTrue(spm is spm2)
spm_class = self.Client.Spm_class()
spm_class2 = self.Client.Spm_class()
self.assertTrue(spm_class is spm_class2)
def test_basic_methods(self):
spm = self.Client.Spm()
spm_class = self.Client.Spm_class()
path = spm.get_user_installed_packages_file()
self.assertTrue(path)
groups = spm_class.get_package_groups()
self.assertTrue(isinstance(groups, dict))
keys = spm.package_metadata_keys()
self.assertTrue(isinstance(keys, list))
cache_dir = spm.get_cache_directory()
self.assertTrue(cache_dir)
sys_pkgs = spm.get_system_packages()
self.assertTrue(sys_pkgs)
self.assertTrue(isinstance(sys_pkgs, list))
path1 = spm.get_merge_protected_paths_mask()
path2 = spm.get_merge_protected_paths()
self.assertTrue(isinstance(path1, list))
self.assertTrue(isinstance(path2, list))
pkg = spm.convert_from_entropy_package_name("app-foo/foo")
self.assertTrue(pkg)
def test_portage_xpak(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
sums = {}
paths = []
from entropy.spm.plugins.interfaces.portage_plugin import xpak
from entropy.spm.plugins.interfaces.portage_plugin import xpaktools
temp_unpack = const_mkdtemp(prefix="test_portage_xpak")
temp_unpack2 = const_mkdtemp(prefix="test_portage_xpak2")
test_pkg = os.path.join(temp_unpack2, "test.pkg")
dbdir = _misc.get_entrofoo_test_spm_portage_dir()
for path in os.listdir(dbdir):
xpath = os.path.join(dbdir, path)
paths.append(xpath)
sums[path] = et.md5sum(xpath)
et.compress_files(test_pkg, paths)
comp_file = xpak.tbz2(test_pkg)
result = comp_file.recompose(dbdir)
shutil.rmtree(temp_unpack)
os.mkdir(temp_unpack)
# now extract xpak
new_sums = {}
xpaktools.extract_xpak(test_pkg, tmpdir = temp_unpack)
for path in os.listdir(temp_unpack):
xpath = os.path.join(temp_unpack, path)
new_sums[path] = et.md5sum(xpath)
self.assertEqual(sums, new_sums)
shutil.rmtree(temp_unpack)
shutil.rmtree(temp_unpack2)
def test_extract_xpak(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
from entropy.spm.plugins.interfaces.portage_plugin import xpaktools
tmp_path = const_mkdtemp(prefix="test_extract_xpak")
for test_pkg in self.test_pkgs:
out_path = xpaktools.extract_xpak(test_pkg, tmp_path)
self.assertNotEqual(out_path, None)
self.assertTrue(os.listdir(out_path))
shutil.rmtree(tmp_path, True)
def test_extract_xpak_only(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
from entropy.spm.plugins.interfaces.portage_plugin import xpaktools
pkg_path = _misc.get_test_xpak_empty_package()
tmp_path = const_mkdtemp(prefix="test_extract_xpak_only")
out_path = xpaktools.extract_xpak(pkg_path, tmp_path)
self.assertNotEqual(out_path, None)
self.assertTrue(os.listdir(out_path))
shutil.rmtree(tmp_path, True)
def test_sets_load(self):
spm = self.Client.Spm()
sets = spm.get_package_sets(False)
self.assertNotEqual(sets, None)
def test_static_sets_load(self):
spm = self.Client.Spm()
sets = spm.get_package_sets(False)
self.assertNotEqual(sets, None)
def test_dependencies_calculation(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
spm = self.Client.Spm()
iuse = "system-sqlite"
use = "amd64 dbus elibc_glibc kernel_linux multilib " + \
"startup-notification userland_GNU"
license = "MPL-1.1 GPL-2"
depend = """>=mail-client/thunderbird-3.1.1-r1[system-sqlite=]
x11-libs/libXrender x11-libs/libXt x11-libs/libXmu
>=sys-libs/zlib-1.1.4 dev-util/pkgconfig x11-libs/libXrender
x11-libs/libXt x11-libs/libXmu virtual/jpeg dev-libs/expat
app-arch/zip app-arch/unzip >=x11-libs/gtk+-2.8.6
>=dev-libs/glib-2.8.2 >=x11-libs/pango-1.10.1 >=dev-libs/libIDL-0.8.0
>=dev-libs/dbus-glib-0.72 >=x11-libs/startup-notification-0.8
!<x11-base/xorg-x11-6.7.0-r2 >=x11-libs/cairo-1.6.0 app-arch/unzip
=sys-devel/automake-1.11* =sys-devel/autoconf-2.1*
>=sys-devel/libtool-2.2.6b""".replace("\n", " ")
rdepend = """>=mail-client/thunderbird-3.1.1-r1[system-sqlite=] ||
( ( >=app-crypt/gnupg-2.0 || ( app-crypt/pinentry
app-crypt/pinentry-base ) ) =app-crypt/gnupg-1.4* ) x11-libs/libXrender
x11-libs/libXt x11-libs/libXmu >=sys-libs/zlib-1.1.4 x11-libs/libXrender
x11-libs/libXt x11-libs/libXmu virtual/jpeg dev-libs/expat app-arch/zip
app-arch/unzip >=x11-libs/gtk+-2.8.6 >=dev-libs/glib-2.8.2
>=x11-libs/pango-1.10.1 >=dev-libs/libIDL-0.8.0
>=dev-libs/dbus-glib-0.72 >=x11-libs/startup-notification-0.8
!<x11-base/xorg-x11-6.7.0-r2 >=x11-libs/cairo-1.6.0""".replace("\n", " ")
pdepend = ""
bdepend = ""
provide = ""
sources = ""
eapi = "2"
os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE'] = "1"
try:
portage_metadata = spm._calculate_dependencies(
iuse, use, license,
depend, rdepend, pdepend, bdepend, provide, sources, eapi)
finally:
del os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE']
expected_deps = [
'>=mail-client/thunderbird-3.1.1-r1[-system-sqlite]',
'( ( >=app-crypt/gnupg-2.0 & ( app-crypt/pinentry | app-crypt/pinentry-base ) ) | ( app-crypt/pinentry & app-crypt/pinentry-base ) | =app-crypt/gnupg-1.4* )',
'x11-libs/libXrender',
'x11-libs/libXt',
'x11-libs/libXmu',
'>=sys-libs/zlib-1.1.4',
'x11-libs/libXrender',
'x11-libs/libXt',
'x11-libs/libXmu',
'virtual/jpeg',
'dev-libs/expat',
'app-arch/zip',
'app-arch/unzip',
'>=x11-libs/gtk+-2.8.6',
'>=dev-libs/glib-2.8.2',
'>=x11-libs/pango-1.10.1',
'>=dev-libs/libIDL-0.8.0',
'>=dev-libs/dbus-glib-0.72',
'>=x11-libs/startup-notification-0.8',
'!<x11-base/xorg-x11-6.7.0-r2',
'>=x11-libs/cairo-1.6.0']
expected_deps.sort()
resolved_deps = portage_metadata['RDEPEND']
resolved_deps.sort()
self.assertEqual(resolved_deps, expected_deps)
def test_eapi5_portage_slotdeps(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
spm = self.Client.Spm()
iuse = "system-sqlite"
use = "amd64 dbus elibc_glibc kernel_linux multilib " + \
"startup-notification userland_GNU"
license = "MPL-1.1 GPL-2"
depend = """
>=mail-client/thunderbird-3.1.1-r1:2=[system-sqlite=]
>=mail-client/thunderbird-3.1.1-r1:2*[system-sqlite=]
>=mail-client/thunderbird-3.1.1-r1:2*
>=mail-client/thunderbird-3.1.1-r1:2=
>=mail-client/thunderbird-3.1.1-r1:=
>=mail-client/thunderbird-3.1.1-r1:*
>=mail-client/thunderbird-3.1.1-r1:0/1
>=mail-client/thunderbird-3.1.1-r1:0/1=
""".replace("\n", " ")
rdepend = depend[:]
pdepend = depend[:]
bdepend = []
provide = ""
sources = ""
eapi = "2"
os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE'] = "1"
try:
portage_metadata = spm._calculate_dependencies(
iuse, use, license,
depend, rdepend, pdepend, bdepend, provide, sources, eapi)
finally:
del os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE']
expected_deps = [
'>=mail-client/thunderbird-3.1.1-r1:2[-system-sqlite]',
'>=mail-client/thunderbird-3.1.1-r1:2[-system-sqlite]',
'>=mail-client/thunderbird-3.1.1-r1:2',
'>=mail-client/thunderbird-3.1.1-r1:2',
'>=mail-client/thunderbird-3.1.1-r1',
'>=mail-client/thunderbird-3.1.1-r1',
'>=mail-client/thunderbird-3.1.1-r1:0',
'>=mail-client/thunderbird-3.1.1-r1:0',
]
expected_deps.sort()
for k in ("RDEPEND", "PDEPEND", "DEPEND"):
resolved_deps = portage_metadata[k]
resolved_deps.sort()
self.assertEqual(resolved_deps, expected_deps)
def test_eapi7_portage_bdepend(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
spm = self.Client.Spm()
iuse = "system-sqlite"
use = "amd64 dbus elibc_glibc kernel_linux multilib " + \
"startup-notification userland_GNU"
license = "MPL-1.1 GPL-2"
depend = """
=mail-client/thunderbird-3.1.1-r1:2
x11-misc/dwm
""".replace("\n", " ")
rdepend = ">=mail-client/thunderbird-3"
pdepend = "www-client/firefox:0"
bdepend = """
dev-lang/python[xml]
virtual/pkgconfig:0/1
""".replace("\n", " ")
provide = ""
sources = ""
eapi = "2"
os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE'] = "1"
try:
portage_metadata = spm._calculate_dependencies(
iuse, use, license,
depend, rdepend, pdepend, bdepend, provide, sources, eapi)
finally:
del os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE']
expected = {
'DEPEND': [
"=mail-client/thunderbird-3.1.1-r1:2",
"x11-misc/dwm"
],
'RDEPEND': [">=mail-client/thunderbird-3"],
'PDEPEND': ["www-client/firefox:0"],
'BDEPEND': [
"dev-lang/python[xml]",
"virtual/pkgconfig:0"
]
}
for k in ("RDEPEND", "PDEPEND", "DEPEND", "BDEPEND"):
resolved_deps = portage_metadata[k]
resolved_deps.sort()
expected_deps = expected[k]
expected_deps.sort()
self.assertEqual(resolved_deps, expected_deps)
def test_portage_or_selector(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
spm = self.Client.Spm()
os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE'] = "1"
try:
or_deps = ['x11-foo/foo', 'x11-bar/bar']
self.assertEqual(spm._dep_or_select(
or_deps, top_level = True),
["( x11-foo/foo | x11-bar/bar )"])
finally:
del os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE']
if __name__ == '__main__':
unittest.main()
raise SystemExit(0)
|
gpl-2.0
|
jaggu303619/asylum
|
openerp/tools/sql.py
|
455
|
1173
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
def drop_view_if_exists(cr, viewname):
cr.execute("DROP view IF EXISTS %s CASCADE" % (viewname,))
cr.commit()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
pablooliveira/cere
|
src/cere/lec.py
|
3
|
15222
|
#!/usr/bin/env python
# This file is part of CERE.
#
# Copyright (c) 2013-2016, Universite de Versailles St-Quentin-en-Yvelines
#
# CERE is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# CERE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERE. If not, see <http://www.gnu.org/licenses/>.
import os
import csv
from cere.vars import *
OMP_FLAGS=""
if "CERE_OMP" in os.environ:
OMP_FLAGS="-omp"
BACKEND_FLAGS=""
if "CERE_BACKEND_FLAGS" in os.environ:
BACKEND_FLAGS=os.environ["CERE_BACKEND_FLAGS"]
MIDEND_FLAGS=""
if "CERE_MIDEND_FLAGS" in os.environ:
MIDEND_FLAGS=os.environ["CERE_MIDEND_FLAGS"]
BACKEND_USE=""
if "CERE_LLC" in os.environ:
BACKEND_USE="llc"
REGION_EXTRACTED = False
def fail_lec(error_message):
exit("Error {prog} : {cmd}".format(prog='lec', cmd=error_message))
def safe_system(command, EXIT=True):
'''
Try-catch system call
Verify system call and exit with appropriate error message
'''
if(os.system(command)):
if (EXIT):
fail_lec("safe_system -> " + command)
else:
print("Warning Error {prog} : safe_system -> {cmd}".format(
prog='lec', cmd=command))
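# A minimal usage sketch of safe_system (not part of the original lec.py; the
# commands shown are made up). A failure either aborts lec via fail_lec or only
# prints a warning, depending on the EXIT flag:
#   safe_system("clang -c foo.ll -o foo.o")               # aborts on failure
#   safe_system("opt -O3 foo.ll -o foo.ll", EXIT=False)   # warns only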
#Keep regions that are in sources
def read_file(regions_file, regions_infos, sources):
regions = {}
all_regions = {}
if not (os.path.isfile(regions_infos)):
fail_lec("No such file: {0}".format(regions_infos))
with open(regions_infos) as regions_list:
regions_reader = csv.DictReader(regions_list)
for regions_row in regions_reader:
all_regions[regions_row["Region Name"]] = {"file":os.path.basename(regions_row["Original Location"]), "function": regions_row["Function Name"]}
with open(regions_file) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if (row["Region"] in all_regions) and (all_regions[row["Region"]]["file"] in sources) and (int(row["Id"]) >= 0):
regions[row["Region"]] = {"midend": row["Mid_end"], "backend": row["Back_end"], "file": all_regions[row["Region"]]["file"], "function": all_regions[row["Region"]]["function"]}
return regions
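# Hypothetical sketch of the two CSV inputs consumed by read_file (column names
# taken from the DictReader lookups above; the rows are invented for
# illustration):
#   regions_infos:  Region Name,Original Location,Function Name
#                   __cere__foo_main_42,src/foo.c,main
#   regions_file:   Region,Id,Mid_end,Back_end
#                   __cere__foo_main_42,0,-O2,-O3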
def dump_fun(mode_opt, BASE, regions):
'''
Dump mode
Call LoopExtractor and LoopManager in dump mode on wanted loops
by passing options like --invocation
In link mode: Call the linker and copy the original binary
'''
temp = ""
print "Compiling dump mode"
if(mode_opt.region):
temp = " --region="+mode_opt.region+" "
if(mode_opt.regions_file):
fail_lec("--regions-file and --region are incompatible")
else:
if(mode_opt.regions_file):
temp = temp+"--regions-file="+mode_opt.regions_file+" "
if(mode_opt.invocation):
temp = temp+"--invocation="+mode_opt.invocation+" "
safe_system(("{llvm_bindir}/opt -S -load {LoopExt} {Omp}-region-outliner " +
"{base}.ll -o {base}.ll").format(llvm_bindir=LLVM_BINDIR, Root=PROJECT_ROOT,
LoopExt=LOOP_EXT, base=BASE,Omp=OMP_FLAGS), EXIT=False)
safe_system(("{llvm_bindir}/opt -S -load {LoopMan} {Omp}-region-dump {opts} " +
"{base}.ll -o {base}.ll").format(llvm_bindir=LLVM_BINDIR, Root=PROJECT_ROOT,
LoopMan=LOOP_DUMP, opts=temp, base=BASE,Omp=OMP_FLAGS), EXIT=False)
globalize_variables(BASE, mode_opt)
#in replay mode
def replay_fun(mode_opt, BASE, regions):
'''
Replay mode
Call LoopExtractor and LoopManager in replay mode on wanted loop
by passing options like --invocation
'''
temp = ""
if (not (mode_opt.region)):
fail_lec("Need --region with replay mode")
print "Compiling replay mode"
if (mode_opt.invocation):
temp = temp + "--invocation=" + mode_opt.invocation + " "
safe_system(("{llvm_bindir}/opt -S -load {LoopExt} {Omp}-region-outliner " +
"-isolate-region={loop} {base}.ll -o {base}.ll").format(
llvm_bindir=LLVM_BINDIR, Root=PROJECT_ROOT, LoopExt=LOOP_EXT,
loop=mode_opt.region, base=BASE,Omp=OMP_FLAGS), EXIT=False)
safe_system(("{llvm_bindir}/opt -S -load {LoopMan} {opts} {Omp}-region-replay -region={loop} " +
"{base}.ll -o {base}.ll").format(llvm_bindir=LLVM_BINDIR,
Root=PROJECT_ROOT, LoopMan=LOOP_REPLAY, opts=temp, loop=mode_opt.region,
base=BASE,Omp=OMP_FLAGS), EXIT=False)
if (mode_opt.instrument):
print "Instrumentation mode"
temp_instr = "--instrument-region=" + mode_opt.region + " "
safe_system(("{llvm_bindir}/opt -S -loop-simplify {base}.ll -o {base}.ll").format(
llvm_bindir=LLVM_BINDIR,
base=BASE), EXIT=False)
safe_system(("{llvm_bindir}/opt -S -load {LoopInstr} " +
"{Omp}-region-instrumentation --replay {opts} {base}.ll " +
"-o {base}.ll").format(
llvm_bindir=LLVM_BINDIR, Root=PROJECT_ROOT,
LoopInstr=LOOP_INSTR, opts=temp_instr, base=BASE,Omp=OMP_FLAGS),
EXIT=False)
globalize_variables(BASE, mode_opt)
def globalize_variables(BASE, mode_opt):
    # When inside the compilation unit that contains the replayable
    # code, internal symbols should be globalized.
    # Globalizing symbols ensures that a symbol is not bound until link time.
    # At link time, symbols will be weakened and replaced by the ones captured
    # during dump that are exported in the dump .sym file.
    # Globalization is also done during dump to ensure that the variable is not
    # optimized out during capture (see /issues/51).
    # XXX: An LLVM pass would be preferable to the ugly sed hack below.
if BASE in mode_opt.region:
os.system("sed -i 's/internal global/global/' {base}.ll".format(base=BASE))
#Extract region into a separate file
def extract_function(mode_opt, regions, BASE):
for region, data in regions.items():
baseName = os.path.splitext(data["file"])[0]
if baseName == BASE:
to_extract=data["function"]
#Extract at loop lvl
if mode_opt.extraction_lvl == "loop":
to_extract=region
#Outline the loop into a function
safe_system(("{llvm_bindir}/opt -S -load {LoopExt} {Omp}-region-outliner " +
"-isolate-region={loop} {base}.ll -o {base}.ll").format(llvm_bindir=LLVM_BINDIR, Root=PROJECT_ROOT,
LoopExt=LOOP_EXT, loop=to_extract, base=BASE, Omp=OMP_FLAGS), EXIT=False)
#Rename global variables in this module
safe_system(("{llvm_bindir}/opt -S -load {globRename} -global-rename " +
"{base}.ll -o {base}.ll").format(llvm_bindir=LLVM_BINDIR, Root=PROJECT_ROOT,
globRename=GLOB_RENAME, base=BASE), EXIT=False)
#Then extract this function into a new file
safe_system("llvm-extract -S -func={0} {1}.ll -o {0}.ll".format(to_extract, baseName), EXIT=False)
safe_system("llvm-extract -S -func={0} {1}.ll -o {1}.ll -delete".format(to_extract, baseName), EXIT=False)
global REGION_EXTRACTED
REGION_EXTRACTED = True
#in original mode
def original_fun(mode_opt, BASE, regions):
'''
Original mode
Call LoopInstrumentation on wanted loops
by passing options like --invocation
'''
extract_function(mode_opt, regions, BASE)
instru_opts = ""
extract_opts = ""
if(mode_opt.region):
instru_opts = instru_opts + "--instrument-region=" + mode_opt.region + " "
if(mode_opt.regions_file):
fail_lec("--regions-file and --region are incompatible")
if(mode_opt.invocation):
instru_opts = instru_opts + "--invocation=" + mode_opt.invocation + " "
if(mode_opt.instrument_app):
fail_lec("--instrument-app and --region are incompatible")
else:
if(mode_opt.invocation):
fail_lec("Can't measure specific invocation with --regions-file")
if(mode_opt.regions_file):
instru_opts = instru_opts + "--regions-file=" + mode_opt.regions_file + " "
if(mode_opt.instrument_app):
fail_lec("--regions-file and --instrument-app are incompatible")
if(mode_opt.invocation):
fail_lec("--regions-file and --invocation are incompatible")
else:
if(mode_opt.instrument_app):
extract_opts = extract_opts + "--instrument-app "
else:
instru_opts = instru_opts + "--instrument-app "
if(mode_opt.instrument):
if(mode_opt.instrument_app):
if(mode_opt.regions_infos):
extract_opts = extract_opts + "-regions-infos=" + mode_opt.regions_infos + " "
safe_system(("{llvm_bindir}/opt -S -load {LoopExt} {Omp}-region-outliner {opts} " +
"{base}.ll -o {base}.ll").format(llvm_bindir=LLVM_BINDIR, Root=PROJECT_ROOT,
LoopExt=LOOP_EXT, opts=extract_opts, base=BASE,Omp=OMP_FLAGS), EXIT=False)
else:
safe_system(("{llvm_bindir}/opt -S -load {LoopInstr} " +
"{Omp}-region-instrumentation {opts} {base}.ll " +
"-o {base}.ll").format(
llvm_bindir=LLVM_BINDIR, Root=PROJECT_ROOT,
                LoopInstr=LOOP_INSTR, opts=instru_opts, base=BASE, Omp=OMP_FLAGS), EXIT=False)
def first_compil(INCLUDES, SOURCE, BASE, ext, COMPIL_OPT):
'''
First Compilation
Detect source language (fortran or C/C++ for the moment)
and compile SOURCE code
'''
if ext in FORTRAN_EXTENSIONS:
if DRAGONEGG_PATH:
opt = [s for s in COMPIL_OPT if s.startswith('-J')]
if opt:
INCLUDES.append(opt[0])
safe_system(("{gcc} -O0 -g {includes} -cpp {source} -S " +
"-fplugin={dragonegg} -fplugin-arg-dragonegg-emit-ir -o {base}.ll").format(
gcc=GCC, opts=" ".join(COMPIL_OPT), includes=" ".join(INCLUDES), source=SOURCE,
Root=PROJECT_ROOT, dragonegg=DRAGONEGG_PATH, base=BASE))
else:
fail_lec("fortran support disabled. Please reconfigure using --with-dragonegg.")
else:
safe_system(("{llvm_bindir}/clang {opts} -O0 -g {includes} {source} -S -emit-llvm -o " +
"{base}.ll").format(llvm_bindir=LLVM_BINDIR, opts=" ".join(COMPIL_OPT), includes=" ".join(INCLUDES),
source=SOURCE, base=BASE))
def last_compil(INCLUDES, SOURCE, BASE, OBJECT, COMPIL_OPT):
'''
Last Compilation
Compile BASE.ll
    If compilation fails, fall back to compiling SOURCE with INCLUDES
'''
    # Optional midend optimizations to explore
if (MIDEND_FLAGS):
os.system("{llvm_bindir}/opt -S {midend_flags} {base}.ll -o {base}.ll".format(
llvm_bindir=LLVM_BINDIR, midend_flags=MIDEND_FLAGS, base=BASE))
if REGION_EXTRACTED:
        #Regions have been extracted from this file, so we must change global
        #definitions.
safe_system("opt -S -O3 {base}.ll -o {base}.ll".format(opts=" ".join(COMPIL_OPT),
base=BASE))
os.system("sed -i 's/internal constant/hidden constant/' {base}.ll".format(base=BASE))
        #This prevents further optimizations of the globals
os.system("llc -O3 {base}.ll -o {base}.s".format(opts=" ".join(COMPIL_OPT), base=BASE))
if "-g" in COMPIL_OPT:
COMPIL_OPT = [x for x in COMPIL_OPT if x != "-g"]
safe_system("clang -c {opts} {base}.s -o {object}".format(opts=" ".join(COMPIL_OPT),
base=BASE, object=OBJECT))
else:
# Can choose llc as llvm backend
failure = False
if (BACKEND_USE):
failure = os.system("{llvm_bindir}/{backend} -filetype=obj {backend_flags} {base}.ll -o {object}".format(
llvm_bindir=LLVM_BINDIR, backend=BACKEND_USE,
backend_flags=BACKEND_FLAGS, base=BASE, object=OBJECT))
else:
failure = os.system("{llvm_bindir}/clang -c {opts} {backend_flags} {base}.ll -o {object}".format(
llvm_bindir=LLVM_BINDIR, opts=" ".join(COMPIL_OPT),
backend_flags=BACKEND_FLAGS, base=BASE, object=OBJECT))
        # In case of failure, fall back to a simple clang compilation without any CERE passes
if (failure):
safe_system("{llvm_bindir}/clang -c {opts} {backend_flags} {includes} {source} -o {object}".format(
llvm_bindir=LLVM_BINDIR, opts=" ".join(COMPIL_OPT), includes=" ".join(INCLUDES),
backend_flags=BACKEND_FLAGS, source=SOURCE, base=BASE, object=OBJECT))
def compile(args, args2):
function={}
function["replay_fun"] = replay_fun
function["dump_fun"] = dump_fun
function["original_fun"] = original_fun
SOURCES = []
if (len(args2[1]) == 0):
exit("Error:Need source file")
INCLUDES = args2[0].Inc
COMPIL_OPT = []
for source in args2[1]:
if os.path.splitext(source)[1] in SOURCE_EXTENSIONS:
SOURCES.append(source)
else: COMPIL_OPT.append(source)
regions={}
if args[0].hybrid:
if not (args[0].regions_infos):
fail_lec("--regions-infos needed with --hybrid.")
if not (args[0].cere_objects):
fail_lec("--cere-objects needed with --hybrid.")
args[0].cere_objects = os.path.realpath(args[0].cere_objects)
regions = read_file(args[0].hybrid_regions, args[0].regions_infos, SOURCES)
if args[0].static:
COMPIL_OPT.append("-static")
for SOURCE in SOURCES:
BASE, ext = os.path.splitext(SOURCE)
OBJECT = args[0].o if args[0].o else BASE + '.o'
# call mode_function
first_compil(INCLUDES, SOURCE, BASE, ext, COMPIL_OPT)
function[args[0].func](args[0], BASE, regions)
last_compil(INCLUDES, SOURCE, BASE, OBJECT, COMPIL_OPT)
global REGION_EXTRACTED, BACKEND_USE
REGION_EXTRACTED = False
BACKEND_USE="llc"
    #Compile extracted regions with the chosen flags
objs = ""
for region, data in regions.items():
global BACKEND_FLAGS, MIDEND_FLAGS
BACKEND_FLAGS = data['backend']
MIDEND_FLAGS = data['midend']
if args[0].extraction_lvl == "loop":
last_compil(INCLUDES, "", region, region+'.o', COMPIL_OPT)
objs = objs + ' ' + os.path.realpath(region+'.o')
else:
last_compil(INCLUDES, "", data["function"], data["function"]+'.o', COMPIL_OPT)
objs = objs + ' ' + os.path.realpath(data["function"]+'.o')
#Save new *.o files for link
if objs:
with open(args[0].cere_objects, "a") as text_file:
text_file.write(objs)
|
lgpl-3.0
|
yu239/Paddle
|
v1_api_demo/quick_start/trainer_config.lstm.py
|
13
|
1975
|
# edit-mode: -*- python -*-
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
dict_file = "./data/dict.txt"
word_dict = dict()
with open(dict_file, 'r') as f:
for i, line in enumerate(f):
w = line.strip().split()[0]
word_dict[w] = i
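# Assumed layout of ./data/dict.txt (only the first whitespace-separated token of
# each line is used as the word; its zero-based line index becomes the id):
#   the
#   movie
#   is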
is_predict = get_config_arg('is_predict', bool, False)
trn = 'data/train.list' if not is_predict else None
tst = 'data/test.list' if not is_predict else 'data/pred.list'
process = 'process' if not is_predict else 'process_predict'
define_py_data_sources2(
train_list=trn,
test_list=tst,
module="dataprovider_emb",
obj=process,
args={"dictionary": word_dict})
batch_size = 128 if not is_predict else 1
settings(
batch_size=batch_size,
learning_rate=2e-3,
learning_method=AdamOptimizer(),
regularization=L2Regularization(8e-4),
gradient_clipping_threshold=25)
data = data_layer(name="word", size=len(word_dict))
emb = embedding_layer(input=data, size=128)
lstm = simple_lstm(
input=emb, size=128, lstm_cell_attr=ExtraAttr(drop_rate=0.25))
lstm_max = pooling_layer(input=lstm, pooling_type=MaxPooling())
output = fc_layer(input=lstm_max, size=2, act=SoftmaxActivation())
if is_predict:
maxid = maxid_layer(output)
outputs([maxid, output])
else:
label = data_layer(name="label", size=2)
cls = classification_cost(input=output, label=label)
outputs(cls)
|
apache-2.0
|
davidhrbac/spacewalk
|
client/rhel/yum-rhn-plugin/test/profilesynctests.py
|
19
|
4583
|
"""
Unit Tests for syncing the package profile with rhn.
"""
import unittest
from yum import transactioninfo
from yum.packages import YumAvailablePackage
import settestpath
import rhnplugin
class DummyRepo:
def __init__(self, channel):
self.id = channel
class SimplePkgDict:
"""
A Simple package dictionary object.
This is essentially another way to get up2date info into yum.
"""
def __init__(self, nevra):
self.nevra = nevra
class DummyPackageObject(YumAvailablePackage):
""" A yum package object for objects stored in RHN. """
def __init__(self, pkg, storageDir, repo):
name = pkg[0]
epoch = pkg[3]
version = pkg[1]
release = pkg[2]
arch = pkg[4]
channel = pkg[6]
size = pkg[5]
# YUM prefers the epoch to be '0', not ''.
if epoch == '':
epoch = '0'
nevra = (name, epoch, version, release, arch)
pkgdict = SimplePkgDict(nevra)
YumAvailablePackage.__init__(self, repo, pkgdict)
self.pkg = pkg
self.simple['repoid'] = channel
self.simple['id'] = name
self.simple['packagesize'] = size
# Not including epoch here because up2date doesn't.
hdrname = "%s-%s-%s.%s.hdr" % (name, version, release, arch)
rpmname = "%s-%s-%s.%s.rpm" % (name, version, release, arch)
self.simple['relativepath'] = rpmname
self.hdrpath = "%s/%s" % (storageDir, hdrname)
self.localpath = "%s/%s" % (storageDir, rpmname)
self.hdr = None
def returnSimple(self, name):
"""
Return one of the package's simple attributes. If we don't know about it,
return None instead.
"""
try:
return YumAvailablePackage.returnSimple(self, name)
except KeyError:
return None
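# For reference (not in the original test): the pkg tuple consumed by
# DummyPackageObject is laid out as
#   (name, version, release, epoch, arch, size, channel)
# e.g. ("zsh", "0.1", "EL3", "0", "noarch", "23456533", "rhel-4"), matching the
# indices unpacked in __init__ above.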
class ProfileSyncTests(unittest.TestCase):
"""
    Tests for the RHN package profile sync.
"""
def setUp(self):
repo = DummyRepo("rhel-4")
self.pkg_tup = ("zsh", "0.1", "EL3", "0", "noarch", "23456533",
"rhel-4")
self.package = DummyPackageObject(self.pkg_tup,
"/Fake/Location", repo)
self.old_pkg_tup = ("figgle", "0.1", "EL3", "0", "noarch", "23456533",
"rhel-4")
self.old_package = DummyPackageObject(self.old_pkg_tup,
"/Fake/Location", repo)
self.ts_info = transactioninfo.TransactionData()
def testEmptyTsData(self):
delta = rhnplugin.make_package_delta(self.ts_info)
# We need the two lists
self.assertTrue(delta.has_key("added"))
self.assertTrue(delta.has_key("removed"))
self.assertEquals(0, len(delta["added"]))
self.assertEquals(0, len(delta["removed"]))
def testAddedProfileSync(self):
self.ts_info.addInstall(self.package)
delta = rhnplugin.make_package_delta(self.ts_info)
self.assertEquals(0, len(delta["removed"]))
self.assertEquals(1, len(delta["added"]))
self.assertEquals(self.pkg_tup[:5], delta["added"][0])
def testRemovedProfileSync(self):
self.ts_info.addErase(self.package)
delta = rhnplugin.make_package_delta(self.ts_info)
self.assertEquals(1, len(delta["removed"]))
self.assertEquals(0, len(delta["added"]))
self.assertEquals(self.pkg_tup[:5], delta["removed"][0])
def testUpdatedProfileSync(self):
self.ts_info.addUpdate(self.package, self.old_package)
delta = rhnplugin.make_package_delta(self.ts_info)
self.assertEquals(0, len(delta["removed"]))
self.assertEquals(1, len(delta["added"]))
self.assertEquals(self.pkg_tup[:5], delta["added"][0])
def testObsoletingProfileSync(self):
self.ts_info.addObsoleting(self.package, self.old_package)
delta = rhnplugin.make_package_delta(self.ts_info)
self.assertEquals(0, len(delta["removed"]))
self.assertEquals(1, len(delta["added"]))
self.assertEquals(self.pkg_tup[:5], delta["added"][0])
def testObsoletedProfileSync(self):
self.ts_info.addObsoleted(self.old_package, self.package)
delta = rhnplugin.make_package_delta(self.ts_info)
self.assertEquals(0, len(delta["removed"]))
self.assertEquals(0, len(delta["added"]))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ProfileSyncTests))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite")
|
gpl-2.0
|
blois/AndroidSDKCloneMin
|
ndk/prebuilt/linux-x86_64/lib/python2.7/markupbase.py
|
173
|
14643
|
"""Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the HTMLParser and sgmllib
modules (indirectly, for htmllib as well). It has no documented
public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase:
"""Parser base class which provides some common support methods used
by the SGML/HTML and XHTML parsers."""
def __init__(self):
if self.__class__ is ParserBase:
raise RuntimeError(
"markupbase.ParserBase must be subclassed")
def error(self, message):
raise NotImplementedError(
"subclasses of ParserBase must override error()")
def reset(self):
self.lineno = 1
self.offset = 0
def getpos(self):
"""Return current line number and offset."""
return self.lineno, self.offset
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = rawdata.count("\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = rawdata.rindex("\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
_decl_otherchars = ''
# Internal -- parse declaration (for use by subclasses).
def parse_declaration(self, i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
# ISO 8879:1986, however, has more complex
# declaration syntax for elements in <!...>, including:
# --comment--
# [marked section]
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
# ATTLIST, NOTATION, SHORTREF, USEMAP,
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
rawdata = self.rawdata
j = i + 2
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
if rawdata[j:j+1] == ">":
# the empty comment <!>
return j + 1
if rawdata[j:j+1] in ("-", ""):
# Start of comment followed by buffer boundary,
# or just a buffer boundary.
return -1
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
n = len(rawdata)
if rawdata[j:j+2] == '--': #comment
# Locate --.*-- as the body of the comment
return self.parse_comment(i)
elif rawdata[j] == '[': #marked section
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
# Note that this is extended by Microsoft Office "Save as Web" function
# to include [if...] and [endif].
return self.parse_marked_section(i)
else: #all other declaration elements
decltype, j = self._scan_name(j, i)
if j < 0:
return j
if decltype == "doctype":
self._decl_otherchars = ''
while j < n:
c = rawdata[j]
if c == ">":
# end of declaration syntax
data = rawdata[i+2:j]
if decltype == "doctype":
self.handle_decl(data)
else:
# According to the HTML5 specs sections "8.2.4.44 Bogus
# comment state" and "8.2.4.45 Markup declaration open
# state", a comment token should be emitted.
# Calling unknown_decl provides more flexibility though.
self.unknown_decl(data)
return j + 1
if c in "\"'":
m = _declstringlit_match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
name, j = self._scan_name(j, i)
elif c in self._decl_otherchars:
j = j + 1
elif c == "[":
# this could be handled in a separate doctype parser
if decltype == "doctype":
j = self._parse_doctype_subset(j + 1, i)
elif decltype in ("attlist", "linktype", "link", "element"):
# must tolerate []'d groups in a content model in an element declaration
# also in data attribute specifications of attlist declaration
# also link type declaration subsets in linktype declarations
# also link attribute specification lists in link declarations
self.error("unsupported '[' char in %s declaration" % decltype)
else:
self.error("unexpected '[' char in declaration")
else:
self.error(
"unexpected %r char in declaration" % rawdata[j])
if j < 0:
return j
return -1 # incomplete
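    # Worked example (added for illustration, not part of the original module):
    # with rawdata = '<!DOCTYPE html>', parse_declaration(0) scans the "doctype"
    # name, calls self.handle_decl('DOCTYPE html') and returns 15, the index
    # just past the closing '>'.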
# Internal -- parse a marked section
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
def parse_marked_section(self, i, report=1):
rawdata= self.rawdata
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
sectName, j = self._scan_name( i+3, i )
if j < 0:
return j
if sectName in ("temp", "cdata", "ignore", "include", "rcdata"):
# look for standard ]]> ending
match= _markedsectionclose.search(rawdata, i+3)
elif sectName in ("if", "else", "endif"):
# look for MS Office ]> ending
match= _msmarkedsectionclose.search(rawdata, i+3)
else:
self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
if not match:
return -1
if report:
j = match.start(0)
self.unknown_decl(rawdata[i+3: j])
return match.end(0)
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
self.error('unexpected call to parse_comment()')
match = _commentclose.search(rawdata, i+4)
if not match:
return -1
if report:
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
return match.end(0)
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
# returning the index just past any whitespace following the trailing ']'.
def _parse_doctype_subset(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
j = i
while j < n:
c = rawdata[j]
if c == "<":
s = rawdata[j:j+2]
if s == "<":
# end of buffer; incomplete
return -1
if s != "<!":
self.updatepos(declstartpos, j + 1)
self.error("unexpected char in internal subset (in %r)" % s)
if (j + 2) == n:
# end of buffer; incomplete
return -1
if (j + 4) > n:
# end of buffer; incomplete
return -1
if rawdata[j:j+4] == "<!--":
j = self.parse_comment(j, report=0)
if j < 0:
return j
continue
name, j = self._scan_name(j + 2, declstartpos)
if j == -1:
return -1
if name not in ("attlist", "element", "entity", "notation"):
self.updatepos(declstartpos, j + 2)
self.error(
"unknown declaration %r in internal subset" % name)
# handle the individual names
meth = getattr(self, "_parse_doctype_" + name)
j = meth(j, declstartpos)
if j < 0:
return j
elif c == "%":
# parameter entity reference
if (j + 1) == n:
# end of buffer; incomplete
return -1
s, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
if rawdata[j] == ";":
j = j + 1
elif c == "]":
j = j + 1
while j < n and rawdata[j].isspace():
j = j + 1
if j < n:
if rawdata[j] == ">":
return j
self.updatepos(declstartpos, j)
self.error("unexpected char after internal subset")
else:
return -1
elif c.isspace():
j = j + 1
else:
self.updatepos(declstartpos, j)
self.error("unexpected char %r in internal subset" % c)
# end of buffer reached
return -1
# Internal -- scan past <!ELEMENT declarations
def _parse_doctype_element(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j == -1:
return -1
# style content model; just skip until '>'
rawdata = self.rawdata
if '>' in rawdata[j:]:
return rawdata.find(">", j) + 1
return -1
# Internal -- scan past <!ATTLIST declarations
def _parse_doctype_attlist(self, i, declstartpos):
rawdata = self.rawdata
name, j = self._scan_name(i, declstartpos)
c = rawdata[j:j+1]
if c == "":
return -1
if c == ">":
return j + 1
while 1:
# scan a series of attribute descriptions; simplified:
# name type [value] [#constraint]
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if c == "":
return -1
if c == "(":
# an enumerated type; look for ')'
if ")" in rawdata[j:]:
j = rawdata.find(")", j) + 1
else:
return -1
while rawdata[j:j+1].isspace():
j = j + 1
if not rawdata[j:]:
# end of buffer, incomplete
return -1
else:
name, j = self._scan_name(j, declstartpos)
c = rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1
c = rawdata[j:j+1]
if not c:
return -1
if c == "#":
if rawdata[j:] == "#":
# end of buffer
return -1
name, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if not c:
return -1
if c == '>':
# all done
return j + 1
# Internal -- scan past <!NOTATION declarations
def _parse_doctype_notation(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j < 0:
return j
rawdata = self.rawdata
while 1:
c = rawdata[j:j+1]
if not c:
# end of buffer; incomplete
return -1
if c == '>':
return j + 1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if not m:
return -1
j = m.end()
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan past <!ENTITY declarations
def _parse_doctype_entity(self, i, declstartpos):
rawdata = self.rawdata
if rawdata[i:i+1] == "%":
j = i + 1
while 1:
c = rawdata[j:j+1]
if not c:
return -1
if c.isspace():
j = j + 1
else:
break
else:
j = i
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
while 1:
c = self.rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1 # incomplete
elif c == ">":
return j + 1
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
    # Internal -- scan a name token; return the token (lowercased) and the new
    # position, or (None, -1) if we've reached the end of the buffer.
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = _declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.updatepos(declstartpos, i)
self.error("expected name token at %r"
% rawdata[declstartpos:declstartpos+20])
# To be overridden -- handlers for unknown objects
def unknown_decl(self, data):
pass
|
apache-2.0
|
klahnakoski/SpotManager
|
vendor/mo_logs/log_usingThreadedStream.py
|
4
|
3885
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from mo_future import is_text, is_binary
import sys
from time import time
from mo_dots import Data
from mo_future import PY3, text
from mo_logs import Log
from mo_logs.log_usingNothing import StructuredLogger
from mo_logs.strings import CR, expand_template
from mo_threads import THREAD_STOP, Thread, Till
DEBUG_LOGGING = False
class StructuredLogger_usingThreadedStream(StructuredLogger):
    # stream CAN BE AN OBJECT WITH write() METHOD, OR A STRING
# WHICH WILL eval() TO ONE
def __init__(self, stream):
assert stream
if is_text(stream):
name = stream
stream = self.stream = eval(stream)
if name.startswith("sys.") and PY3:
self.stream = Data(write=lambda d: stream.write(d.decode('utf8')))
else:
name = "stream"
self.stream = stream
# WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
from mo_threads import Queue
def utf8_appender(value):
if is_text(value):
value = value.encode('utf8')
self.stream.write(value)
appender = utf8_appender
self.queue = Queue("queue for " + self.__class__.__name__ + "(" + name + ")", max=10000, silent=True)
self.thread = Thread("log to " + self.__class__.__name__ + "(" + name + ")", time_delta_pusher, appender=appender, queue=self.queue, interval=0.3)
self.thread.parent.remove_child(self.thread) # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
self.thread.start()
def write(self, template, params):
try:
self.queue.add({"template": template, "params": params})
return self
except Exception as e:
raise e # OH NO!
def stop(self):
try:
self.queue.add(THREAD_STOP) # BE PATIENT, LET REST OF MESSAGE BE SENT
self.thread.join()
except Exception as e:
if DEBUG_LOGGING:
raise e
try:
self.queue.close()
except Exception as f:
if DEBUG_LOGGING:
raise f
def time_delta_pusher(please_stop, appender, queue, interval):
"""
appender - THE FUNCTION THAT ACCEPTS A STRING
queue - FILLED WITH LOG ENTRIES {"template":template, "params":params} TO WRITE
interval - timedelta
USE IN A THREAD TO BATCH LOGS BY TIME INTERVAL
"""
next_run = time() + interval
while not please_stop:
profiler = Thread.current().cprofiler
profiler.disable()
(Till(till=next_run) | please_stop).wait()
profiler.enable()
next_run = time() + interval
logs = queue.pop_all()
if not logs:
continue
lines = []
for log in logs:
try:
if log is THREAD_STOP:
please_stop.go()
next_run = time()
else:
expanded = expand_template(log.get("template"), log.get("params"))
lines.append(expanded)
except Exception as e:
location = log.get('params', {}).get('location', {})
Log.warning("Trouble formatting log from {{location}}", location=location, cause=e)
# SWALLOW ERROR, GOT TO KEEP RUNNING
try:
appender(CR.join(lines) + CR)
except Exception as e:
sys.stderr.write(str("Trouble with appender: ") + str(e.__class__.__name__) + str(CR))
# SWALLOW ERROR, MUST KEEP RUNNING
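# A minimal usage sketch (not part of the original module; the stream name is an
# example):
#   logger = StructuredLogger_usingThreadedStream("sys.stdout")
#   logger.write("hello {{name}}", {"name": "world"})
#   logger.stop()   # flush the queue and join the writer thread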
|
mpl-2.0
|
cajone/pychess
|
lib/pychess/System/TaskQueue.py
|
1
|
2187
|
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/475160
# Was accepted into Python 2.5, but earlier versions still have
# to do stuff manually
import threading
from pychess.compat import Queue
def TaskQueue():
if hasattr(Queue, "task_done"):
return Queue()
return _TaskQueue()
class _TaskQueue(Queue):
def __init__(self):
Queue.__init__(self)
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
def _put(self, item):
Queue._put(self, item)
self.unfinished_tasks += 1
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
self.all_tasks_done.acquire()
try:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notifyAll()
self.unfinished_tasks = unfinished
finally:
self.all_tasks_done.release()
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
self.all_tasks_done.acquire()
try:
while self.unfinished_tasks:
self.all_tasks_done.wait()
finally:
self.all_tasks_done.release()
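# A minimal usage sketch (not part of the original module): a producer join()s
# until each consumed item has been acknowledged with task_done().
#   queue = TaskQueue()
#   queue.put(item)        # producer thread
#   work = queue.get()     # consumer thread
#   ...                    # process work
#   queue.task_done()
#   queue.join()           # producer blocks until all items are processed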
|
gpl-3.0
|
wenyu1001/scrapy
|
scrapy/linkextractors/regex.py
|
23
|
1360
|
import re
from six.moves.urllib.parse import urljoin
from w3lib.html import remove_tags, replace_entities, replace_escape_chars, get_base_url
from scrapy.link import Link
from .sgml import SgmlLinkExtractor
linkre = re.compile(
"<a\s.*?href=(\"[.#]+?\"|\'[.#]+?\'|[^\s]+?)(>|\s.*?>)(.*?)<[/ ]?a>",
re.DOTALL | re.IGNORECASE)
def clean_link(link_text):
"""Remove leading and trailing whitespace and punctuation"""
return link_text.strip("\t\r\n '\"")
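# Illustration (not in the original module): clean_link strips the surrounding
# quotes and whitespace that the href capture group may include, e.g.
#   clean_link('"http://example.com/page" ')  ->  'http://example.com/page'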
class RegexLinkExtractor(SgmlLinkExtractor):
"""High performant link extractor"""
def _extract_links(self, response_text, response_url, response_encoding, base_url=None):
def clean_text(text):
return replace_escape_chars(remove_tags(text.decode(response_encoding))).strip()
def clean_url(url):
clean_url = ''
try:
clean_url = urljoin(base_url, replace_entities(clean_link(url.decode(response_encoding))))
except ValueError:
pass
return clean_url
if base_url is None:
base_url = get_base_url(response_text, response_url, response_encoding)
links_text = linkre.findall(response_text)
return [Link(clean_url(url).encode(response_encoding),
clean_text(text))
for url, _, text in links_text]
|
bsd-3-clause
|
KyleJamesWalker/ansible
|
test/runner/lib/pytar.py
|
70
|
1512
|
"""Python native TGZ creation."""
from __future__ import absolute_import, print_function
import tarfile
import os
# improve performance by disabling uid/gid lookups
tarfile.pwd = None
tarfile.grp = None
# To reduce archive time and size, ignore non-versioned files which are large or numerous.
# Also ignore miscellaneous git related files since the .git directory is ignored.
IGNORE_DIRS = (
'.tox',
'.git',
'.idea',
'__pycache__',
'ansible.egg-info',
)
IGNORE_FILES = (
'.gitignore',
'.gitdir',
)
IGNORE_EXTENSIONS = (
'.pyc',
'.retry',
)
def ignore(item):
"""
:type item: tarfile.TarInfo
:rtype: tarfile.TarInfo | None
"""
filename = os.path.basename(item.path)
name, ext = os.path.splitext(filename)
dirs = os.path.split(item.path)
if not item.isdir():
if item.path.startswith('./test/results/'):
return None
if item.path.startswith('./docsite/') and filename.endswith('_module.rst'):
return None
if name in IGNORE_FILES:
return None
if ext in IGNORE_EXTENSIONS:
return None
if any(d in IGNORE_DIRS for d in dirs):
return None
return item
def create_tarfile(dst_path, src_path, tar_filter):
"""
:type dst_path: str
:type src_path: str
:type tar_filter: (tarfile.TarInfo) -> tarfile.TarInfo | None
"""
with tarfile.TarFile.gzopen(dst_path, mode='w', compresslevel=4) as tar:
tar.add(src_path, filter=tar_filter)
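# A minimal usage sketch (not part of the original module; paths are examples):
#   create_tarfile('/tmp/ansible.tgz', '.', ignore)
# archives the current tree as a gzipped tar while skipping the directories,
# files and extensions listed above.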
|
gpl-3.0
|
gngrwzrd/gity
|
python/pushto.py
|
2
|
1558
|
# Copyright Aaron Smith 2009
#
# This file is part of Gity.
#
# Gity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gity. If not, see <http://www.gnu.org/licenses/>.
from _util import *
try:
import re,os,subprocess,simplejson as json
except Exception,e:
sys.stderr.write(str(e))
exit(84)
command=""
try:
from _argv import *
if not options.misc: raise Exception("Gitty Error: The push to command requires a remote and a branch.")
remote=sanitize_str(options.misc[0])
branch=sanitize_str(options.misc[1])
command="%s %s %s refs/heads/%s:refs/heads/%s" % (options.git,"push",remote,branch,branch)
rcode,stout,sterr=run_command(command)
if cant_push_to_newer_remote(sterr): exit(91)
if cant_push_to(sterr): exit(90)
if server_hung_up(sterr): exit(85)
if server_unreachable(sterr): exit(86)
rcode_for_git_exit(rcode,sterr)
exit(0)
except Exception, e:
sys.stderr.write("The push to command threw this error: " + str(e))
sys.stderr.write("\ncommand: %s\n" % command)
log_gity_version(options.gityversion)
log_gitv(options.git)
exit(84)
|
gpl-3.0
|
mattjmorrison/ReportLab
|
ez_setup.py
|
6
|
9415
|
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to "use_setuptools()".
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c8"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
}
import sys, os
def _validate_md5(egg_name, data):
if egg_name in md5_data:
from md5 import md5
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
"version" should be a valid setuptools version number that is available
as an egg for download under the "download_base" URL (which should end with
a '/'). "to_dir" is the directory where setuptools will be downloaded, if
it is not already available. If "download_delay" is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to "sys.stderr" and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
except pkg_resources.DistributionNotFound:
return do_download()
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
"version" should be a valid setuptools version number that is available
as an egg for download under the "download_base" URL (which should end
with a '/'). "to_dir" is the directory where the egg will be downloaded.
"delay" is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
from md5 import md5
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
|
bsd-3-clause
|
TeamExodus/external_chromium_org
|
tools/telemetry/telemetry/results/page_run_unittest.py
|
33
|
2090
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.page import page_set
from telemetry.results import page_run
from telemetry.value import failure
from telemetry.value import scalar
from telemetry.value import skip
class PageRunTest(unittest.TestCase):
def setUp(self):
self.page_set = page_set.PageSet(file_path=os.path.dirname(__file__))
self.page_set.AddPageWithDefaultRunNavigate("http://www.bar.com/")
@property
def pages(self):
return self.page_set.pages
def testPageRunFailed(self):
run = page_run.PageRun(self.pages[0])
run.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'test'))
self.assertFalse(run.ok)
self.assertTrue(run.failed)
self.assertFalse(run.skipped)
run = page_run.PageRun(self.pages[0])
run.AddValue(scalar.ScalarValue(self.pages[0], 'a', 's', 1))
run.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'test'))
self.assertFalse(run.ok)
self.assertTrue(run.failed)
self.assertFalse(run.skipped)
def testPageRunSkipped(self):
run = page_run.PageRun(self.pages[0])
run.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'test'))
run.AddValue(skip.SkipValue(self.pages[0], 'test'))
self.assertFalse(run.ok)
self.assertFalse(run.failed)
self.assertTrue(run.skipped)
run = page_run.PageRun(self.pages[0])
run.AddValue(scalar.ScalarValue(self.pages[0], 'a', 's', 1))
run.AddValue(skip.SkipValue(self.pages[0], 'test'))
self.assertFalse(run.ok)
self.assertFalse(run.failed)
self.assertTrue(run.skipped)
def testPageRunSucceeded(self):
run = page_run.PageRun(self.pages[0])
self.assertTrue(run.ok)
self.assertFalse(run.failed)
self.assertFalse(run.skipped)
run = page_run.PageRun(self.pages[0])
run.AddValue(scalar.ScalarValue(self.pages[0], 'a', 's', 1))
self.assertTrue(run.ok)
self.assertFalse(run.failed)
self.assertFalse(run.skipped)
|
bsd-3-clause
|
daspots/dasapp
|
lib/oauthlib/oauth1/rfc5849/request_validator.py
|
9
|
30459
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849
~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for signing and checking OAuth 1.0 RFC 5849 requests.
"""
from __future__ import absolute_import, unicode_literals
import sys
from . import SIGNATURE_METHODS, utils
class RequestValidator(object):
"""A validator/datastore interaction base class for OAuth 1 providers.
OAuth providers should inherit from RequestValidator and implement the
methods and properties outlined below. Further details are provided in the
documentation for each method and property.
Methods used to check the format of input parameters. Common tests include
length, character set, membership, range or pattern. These tests are
referred to as `whitelisting or blacklisting`_. Whitelisting is better
    but blacklisting can be useful to spot malicious activity.
    The following methods have a default implementation:
- check_client_key
- check_request_token
- check_access_token
- check_nonce
- check_verifier
- check_realms
    The methods above default to whitelisting input parameters, checking that they
are alphanumerical and between a minimum and maximum length. Rather than
overloading the methods a few properties can be used to configure these
methods.
* @safe_characters -> (character set)
* @client_key_length -> (min, max)
* @request_token_length -> (min, max)
* @access_token_length -> (min, max)
* @nonce_length -> (min, max)
* @verifier_length -> (min, max)
* @realms -> [list, of, realms]
Methods used to validate/invalidate input parameters. These checks usually
hit either persistent or temporary storage such as databases or the
filesystem. See each methods documentation for detailed usage.
The following methods must be implemented:
- validate_client_key
- validate_request_token
- validate_access_token
- validate_timestamp_and_nonce
- validate_redirect_uri
- validate_requested_realms
- validate_realms
- validate_verifier
- invalidate_request_token
Methods used to retrieve sensitive information from storage.
The following methods must be implemented:
- get_client_secret
- get_request_token_secret
- get_access_token_secret
- get_rsa_key
- get_realms
- get_default_realms
- get_redirect_uri
Methods used to save credentials.
The following methods must be implemented:
- save_request_token
- save_verifier
- save_access_token
    Methods used to verify input parameters. These methods are used while the
    user authorizes the request token (AuthorizationEndpoint), to check that the
    parameters are valid. During token authorization the request is not signed,
    thus the 'validation' methods cannot be used. The following methods must be
    implemented:
- verify_realms
- verify_request_token
To prevent timing attacks it is necessary to not exit early even if the
client key or resource owner key is invalid. Instead dummy values should
be used during the remaining verification process. It is very important
that the dummy client and token are valid input parameters to the methods
get_client_secret, get_rsa_key and get_(access/request)_token_secret and
    that the running time of those methods when given a dummy value remains
equivalent to the running time when given a valid client/resource owner.
The following properties must be implemented:
* @dummy_client
* @dummy_request_token
* @dummy_access_token
Example implementations have been provided, note that the database used is
a simple dictionary and serves only an illustrative purpose. Use whichever
database suits your project and how to access it is entirely up to you.
The methods are introduced in an order which should make understanding
their use more straightforward and as such it could be worth reading what
follows in chronological order.
.. _`whitelisting or blacklisting`: http://www.schneier.com/blog/archives/2011/01/whitelisting_vs.html
"""
def __init__(self):
pass
@property
def allowed_signature_methods(self):
return SIGNATURE_METHODS
@property
def safe_characters(self):
return set(utils.UNICODE_ASCII_CHARACTER_SET)
@property
def client_key_length(self):
return 20, 30
@property
def request_token_length(self):
return 20, 30
@property
def access_token_length(self):
return 20, 30
@property
def timestamp_lifetime(self):
return 600
@property
def nonce_length(self):
return 20, 30
@property
def verifier_length(self):
return 20, 30
@property
def realms(self):
return []
@property
def enforce_ssl(self):
return True
def check_client_key(self, client_key):
"""Check that the client key only contains safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.client_key_length
return (set(client_key) <= self.safe_characters and
lower <= len(client_key) <= upper)
def check_request_token(self, request_token):
"""Checks that the request token contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.request_token_length
return (set(request_token) <= self.safe_characters and
lower <= len(request_token) <= upper)
def check_access_token(self, request_token):
"""Checks that the token contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.access_token_length
return (set(request_token) <= self.safe_characters and
lower <= len(request_token) <= upper)
def check_nonce(self, nonce):
"""Checks that the nonce only contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.nonce_length
return (set(nonce) <= self.safe_characters and
lower <= len(nonce) <= upper)
def check_verifier(self, verifier):
"""Checks that the verifier contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.verifier_length
return (set(verifier) <= self.safe_characters and
lower <= len(verifier) <= upper)
def check_realms(self, realms):
"""Check that the realm is one of a set allowed realms."""
return all((r in self.realms for r in realms))
def _subclass_must_implement(self, fn):
"""
Returns a NotImplementedError for a function that should be implemented.
:param fn: name of the function
"""
m = "Missing function implementation in {}: {}".format(type(self), fn)
return NotImplementedError(m)
@property
def dummy_client(self):
"""Dummy client used when an invalid client key is supplied.
:returns: The dummy client key string.
The dummy client should be associated with either a client secret,
an RSA key, or both, depending on which signature methods are supported.
Providers should make sure that
get_client_secret(dummy_client)
get_rsa_key(dummy_client)
return a valid secret or key for the dummy client.
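A minimal sketch (the literal value is illustrative only; any alphanumeric
string accepted by check_client_key and known to the lookups above works)::
    @property
    def dummy_client(self):
        return u'dummyclientkey0000000000'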
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
* SignatureOnlyEndpoint
"""
raise self._subclass_must_implement("dummy_client")
@property
def dummy_request_token(self):
"""Dummy request token used when an invalid token was supplied.
:returns: The dummy request token string.
The dummy request token should be associated with a request token
secret such that get_request_token_secret(.., dummy_request_token)
returns a valid secret.
This method is used by
* AccessTokenEndpoint
"""
raise self._subclass_must_implement("dummy_request_token")
@property
def dummy_access_token(self):
"""Dummy access token used when an invalid token was supplied.
:returns: The dummy access token string.
The dummy access token should be associated with an access token
secret such that get_access_token_secret(.., dummy_access_token)
returns a valid secret.
This method is used by
* ResourceEndpoint
"""
raise self._subclass_must_implement("dummy_access_token")
def get_client_secret(self, client_key, request):
"""Retrieves the client secret associated with the client key.
:param client_key: The client/consumer key.
:param request: An oauthlib.common.Request object.
:returns: The client secret as a string.
This method must allow the use of a dummy client_key value.
Fetching the secret using the dummy key must take the same amount of
time as fetching a secret for a valid client::
# Unlikely to be near constant time as it uses two database
# lookups for a valid client, and only one for an invalid.
from your_datastore import ClientSecret
if ClientSecret.has(client_key):
return ClientSecret.get(client_key)
else:
return 'dummy'
# Aim to mimic number of latency inducing operations no matter
# whether the client is valid or not.
from your_datastore import ClientSecret
return ClientSecret.get(client_key, 'dummy')
Note that the returned key must be in plaintext.
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
* SignatureOnlyEndpoint
"""
raise self._subclass_must_implement('get_client_secret')
def get_request_token_secret(self, client_key, token, request):
"""Retrieves the shared secret associated with the request token.
:param client_key: The client/consumer key.
:param token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: The token secret as a string.
This method must allow the use of dummy values and its running time
must be roughly equivalent to the running time for valid values::
# Unlikely to be near constant time as it uses two database
# lookups for a valid client, and only one for an invalid.
from your_datastore import RequestTokenSecret
if RequestTokenSecret.has(client_key):
return RequestTokenSecret.get((client_key, request_token))
else:
return 'dummy'
# Aim to mimic number of latency inducing operations no matter
# whether the client is valid or not.
from your_datastore import RequestTokenSecret
return RequestTokenSecret.get((client_key, request_token), 'dummy')
Note that the returned key must be in plaintext.
This method is used by
* AccessTokenEndpoint
"""
raise self._subclass_must_implement('get_request_token_secret')
def get_access_token_secret(self, client_key, token, request):
"""Retrieves the shared secret associated with the access token.
:param client_key: The client/consumer key.
:param token: The access token string.
:param request: An oauthlib.common.Request object.
:returns: The token secret as a string.
This method must allow the use of dummy values and its running time
must be roughly equivalent to the running time for valid values::
# Unlikely to be near constant time as it uses two database
# lookups for a valid client, and only one for an invalid.
from your_datastore import AccessTokenSecret
if AccessTokenSecret.has(client_key):
return AccessTokenSecret.get((client_key, request_token))
else:
return 'dummy'
# Aim to mimic number of latency inducing operations no matter
# whether the client is valid or not.
from your_datastore import AccessTokenSecret
return AccessTokenSecret.get((client_key, request_token), 'dummy')
Note that the returned key must be in plaintext.
This method is used by
* ResourceEndpoint
"""
raise self._subclass_must_implement("get_access_token_secret")
def get_default_realms(self, client_key, request):
"""Get the default realms for a client.
:param client_key: The client/consumer key.
:param request: An oauthlib.common.Request object.
:returns: The list of default realms associated with the client.
The list of default realms will be set during client registration and
is outside the scope of OAuthLib.
This method is used by
* RequestTokenEndpoint
"""
raise self._subclass_must_implement("get_default_realms")
def get_realms(self, token, request):
"""Get realms associated with a request token.
:param token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: The list of realms associated with the request token.
This method is used by
* AuthorizationEndpoint
* AccessTokenEndpoint
"""
raise self._subclass_must_implement("get_realms")
def get_redirect_uri(self, token, request):
"""Get the redirect URI associated with a request token.
:param token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: The redirect URI associated with the request token.
It may be desirable to return a custom URI if the redirect is set to "oob".
In this case, the user will be redirected to the returned URI and at that
endpoint the verifier can be displayed.
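A minimal sketch, using a hypothetical datastore in the same spirit as
the other examples in this class::
    from your_datastore import RequestToken
    redirect_uri = RequestToken.get(token).redirect_uri
    if redirect_uri == u'oob':
        return u'https://your.provider/display_verifier'
    return redirect_uri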
This method is used by
* AuthorizationEndpoint
"""
raise self._subclass_must_implement("get_redirect_uri")
def get_rsa_key(self, client_key, request):
"""Retrieves a previously stored client provided RSA key.
:param client_key: The client/consumer key.
:param request: An oauthlib.common.Request object.
:returns: The rsa public key as a string.
This method must allow the use of a dummy client_key value. Fetching
the rsa key using the dummy key must take the same amount of time
as fetching a key for a valid client. The dummy key must also be of
the same bit length as client keys.
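A minimal sketch with a hypothetical datastore, mirroring the
constant-time advice given for get_client_secret::
    from your_datastore import ClientRSAKey
    # One lookup whether or not the client is valid; the store is
    # assumed to hold a key for the dummy client as well.
    return ClientRSAKey.get(client_key)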
Note that the key must be returned in plaintext.
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
* SignatureOnlyEndpoint
"""
raise self._subclass_must_implement("get_rsa_key")
def invalidate_request_token(self, client_key, request_token, request):
"""Invalidates a used request token.
:param client_key: The client/consumer key.
:param request_token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: None
Per `Section 2.3`_ of the spec:
"The server MUST (...) ensure that the temporary
credentials have not expired or been used before."
.. _`Section 2.3`: http://tools.ietf.org/html/rfc5849#section-2.3
This method should ensure that the provided token can no longer be used
to validate requests. That can be as simple as deleting the request token
from storage, or setting a flag that marks it as invalid (in which case
the flag must also be checked during request token validation).
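A minimal sketch with a hypothetical datastore::
    from your_datastore import RequestToken
    RequestToken.delete(client_key, request_token)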
This method is used by
* AccessTokenEndpoint
"""
raise self._subclass_must_implement("invalidate_request_token")
def validate_client_key(self, client_key, request):
"""Validates that supplied client key is a registered and valid client.
:param client_key: The client/consumer key.
:param request: An oauthlib.common.Request object.
:returns: True or False
Note that if the dummy client is supplied it should validate in the same
or nearly the same amount of time as a valid one.
Ensure latency-inducing tasks are mimicked even for dummy clients.
For example, use::
from your_datastore import Client
try:
return Client.exists(client_key, access_token)
except DoesNotExist:
return False
Rather than::
from your_datastore import Client
if access_token == self.dummy_access_token:
return False
else:
return Client.exists(client_key, access_token)
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
* SignatureOnlyEndpoint
"""
raise self._subclass_must_implement("validate_client_key")
def validate_request_token(self, client_key, token, request):
"""Validates that supplied request token is registered and valid.
:param client_key: The client/consumer key.
:param token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: True or False
Note that if the dummy request_token is supplied it should validate in
the same or nearly the same amount of time as a valid one.
Ensure latency-inducing tasks are mimicked even for dummy tokens.
For example, use::
from your_datastore import RequestToken
try:
return RequestToken.exists(client_key, access_token)
except DoesNotExist:
return False
Rather than::
from your_datastore import RequestToken
if access_token == self.dummy_access_token:
return False
else:
return RequestToken.exists(client_key, access_token)
This method is used by
* AccessTokenEndpoint
"""
raise self._subclass_must_implement("validate_request_token")
def validate_access_token(self, client_key, token, request):
"""Validates that supplied access token is registered and valid.
:param client_key: The client/consumer key.
:param token: The access token string.
:param request: An oauthlib.common.Request object.
:returns: True or False
Note that if the dummy access token is supplied it should validate in
the same or nearly the same amount of time as a valid one.
Ensure latency-inducing tasks are mimicked even for dummy tokens.
For example, use::
from your_datastore import AccessToken
try:
return AccessToken.exists(client_key, access_token)
except DoesNotExist:
return False
Rather than::
from your_datastore import AccessToken
if access_token == self.dummy_access_token:
return False
else:
return AccessToken.exists(client_key, access_token)
This method is used by
* ResourceEndpoint
"""
raise self._subclass_must_implement("validate_access_token")
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request, request_token=None, access_token=None):
"""Validates that the nonce has not been used before.
:param client_key: The client/consumer key.
:param timestamp: The ``oauth_timestamp`` parameter.
:param nonce: The ``oauth_nonce`` parameter.
:param request_token: Request token string, if any.
:param access_token: Access token string, if any.
:param request: An oauthlib.common.Request object.
:returns: True or False
Per `Section 3.3`_ of the spec:
"A nonce is a random string, uniquely generated by the client to allow
the server to verify that a request has never been made before and
helps prevent replay attacks when requests are made over a non-secure
channel. The nonce value MUST be unique across all requests with the
same timestamp, client credentials, and token combinations."
.. _`Section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
One of the first validation checks that will be made is for the validity
of the nonce and timestamp, which are associated with a client key and
possibly a token. If invalid, fail the request immediately by returning
False. If the nonce/timestamp pair has been used before, you may just
have detected a replay attack, so it is an essential part of OAuth
security that you do not allow nonce/timestamp reuse.
Note that this validation check is done before checking the validity of
the client and token::
nonces_and_timestamps_database = [
(u'foo', 1234567890, u'rannoMstrInghere', u'bar')
]
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request, request_token=None, access_token=None):
return ((client_key, timestamp, nonce, request_token or access_token)
not in self.nonces_and_timestamps_database)
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
* SignatureOnlyEndpoint
"""
raise self._subclass_must_implement("validate_timestamp_and_nonce")
def validate_redirect_uri(self, client_key, redirect_uri, request):
"""Validates the client supplied redirection URI.
:param client_key: The client/consumer key.
:param redirect_uri: The URI the client wishes to redirect back to after
authorization is successful.
:param request: An oauthlib.common.Request object.
:returns: True or False
It is highly recommended that OAuth providers require their clients
to register all redirection URIs prior to using them in requests and
register them as absolute URIs. See `CWE-601`_ for more information
about open redirection attacks.
By requiring registration of all redirection URIs it should be
straightforward for the provider to verify whether the supplied
redirect_uri is valid or not.
Alternatively per `Section 2.1`_ of the spec:
"If the client is unable to receive callbacks or a callback URI has
been established via other means, the parameter value MUST be set to
"oob" (case sensitive), to indicate an out-of-band configuration."
.. _`CWE-601`: http://cwe.mitre.org/top25/index.html#CWE-601
.. _`Section 2.1`: https://tools.ietf.org/html/rfc5849#section-2.1
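A minimal sketch with a hypothetical datastore of registered callbacks::
    from your_datastore import Client
    if redirect_uri == u'oob':
        return True
    return redirect_uri in Client.get(client_key).redirect_uris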
This method is used by
* RequestTokenEndpoint
"""
raise self._subclass_must_implement("validate_redirect_uri")
def validate_requested_realms(self, client_key, realms, request):
"""Validates that the client may request access to the realm.
:param client_key: The client/consumer key.
:param realms: The list of realms that client is requesting access to.
:param request: An oauthlib.common.Request object.
:returns: True or False
This method is invoked when obtaining a request token and should tie a
realm to that token; after user authorization, the realm restriction
should carry over to the access token.
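A minimal sketch with a hypothetical datastore::
    from your_datastore import Client
    allowed = Client.get(client_key).allowed_realms
    return all(r in allowed for r in realms)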
This method is used by
* RequestTokenEndpoint
"""
raise self._subclass_must_implement("validate_requested_realms")
def validate_realms(self, client_key, token, request, uri=None,
realms=None):
"""Validates access to the request realm.
:param client_key: The client/consumer key.
:param token: A request token string.
:param request: An oauthlib.common.Request object.
:param uri: The URI the realms is protecting.
:param realms: A list of realms that must have been granted to
the access token.
:returns: True or False
How providers choose to use the realm parameter is outside the OAuth
specification but it is commonly used to restrict access to a subset
of protected resources such as "photos".
realms is a convenience parameter which can be used to provide
a per-view-method, pre-defined list of allowed realms.
Can be as simple as::
from your_datastore import RequestToken
request_token = RequestToken.get(token, None)
if not request_token:
return False
return set(request_token.realms).issuperset(set(realms))
This method is used by
* ResourceEndpoint
"""
raise self._subclass_must_implement("validate_realms")
def validate_verifier(self, client_key, token, verifier, request):
"""Validates a verification code.
:param client_key: The client/consumer key.
:param token: A request token string.
:param verifier: The authorization verifier string.
:param request: An oauthlib.common.Request object.
:returns: True or False
OAuth providers issue a verification code to clients after the
resource owner authorizes access. This code is used by the client to
obtain token credentials and the provider must verify that the
verifier is valid and associated with the client as well as the
resource owner.
Verifier validation should be done in near constant time
(to avoid verifier enumeration). To achieve this we need a
constant time string comparison which is provided by OAuthLib
in ``oauthlib.common.safe_string_equals``::
from your_datastore import Verifier
correct_verifier = Verifier.get(client_key, request_token)
from oauthlib.common import safe_string_equals
return safe_string_equals(verifier, correct_verifier)
This method is used by
* AccessTokenEndpoint
"""
raise self._subclass_must_implement("validate_verifier")
def verify_request_token(self, token, request):
"""Verify that the given OAuth1 request token is valid.
:param token: A request token string.
:param request: An oauthlib.common.Request object.
:returns: True or False
This method is used only by the AuthorizationEndpoint to check whether the
oauth_token given in the authorization URL is valid or not. That request
is not signed, so the similar ``validate_request_token`` method cannot
be used.
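A minimal sketch with a hypothetical datastore::
    from your_datastore import RequestToken
    return RequestToken.exists(token)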
This method is used by
* AuthorizationEndpoint
"""
raise self._subclass_must_implement("verify_request_token")
def verify_realms(self, token, realms, request):
"""Verify authorized realms to see if they match those given to token.
:param token: An access token string.
:param realms: A list of realms the client attempts to access.
:param request: An oauthlib.common.Request object.
:returns: True or False
This prevents the list of authorized realms sent by the client during
the authorization step from being altered to include realms outside what
was bound to the request token.
Can be as simple as::
valid_realms = self.get_realms(token)
return all((r in valid_realms for r in realms))
This method is used by
* AuthorizationEndpoint
"""
raise self._subclass_must_implement("verify_realms")
def save_access_token(self, token, request):
"""Save an OAuth1 access token.
:param token: A dict with token credentials.
:param request: An oauthlib.common.Request object.
The token dictionary will at minimum include
* ``oauth_token`` the access token string.
* ``oauth_token_secret`` the token specific secret used in signing.
* ``oauth_authorized_realms`` a space separated list of realms.
Client key can be obtained from ``request.client_key``.
The list of realms (not a joined string) can be obtained from
``request.realm``.
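A minimal sketch with a hypothetical datastore::
    from your_datastore import AccessToken
    AccessToken.create(client_key=request.client_key,
                       token=token['oauth_token'],
                       secret=token['oauth_token_secret'],
                       realms=request.realm)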
This method is used by
* AccessTokenEndpoint
"""
raise self._subclass_must_implement("save_access_token")
def save_request_token(self, token, request):
"""Save an OAuth1 request token.
:param token: A dict with token credentials.
:param request: An oauthlib.common.Request object.
The token dictionary will at minimum include
* ``oauth_token`` the request token string.
* ``oauth_token_secret`` the token specific secret used in signing.
* ``oauth_callback_confirmed`` the string ``true``.
Client key can be obtained from ``request.client_key``.
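A minimal sketch with a hypothetical datastore::
    from your_datastore import RequestToken
    RequestToken.create(client_key=request.client_key,
                        token=token['oauth_token'],
                        secret=token['oauth_token_secret'])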
This method is used by
* RequestTokenEndpoint
"""
raise self._subclass_must_implement("save_request_token")
def save_verifier(self, token, verifier, request):
"""Associate an authorization verifier with a request token.
:param token: A request token string.
:param verifier: A dictionary containing the ``oauth_verifier`` and
``oauth_token`` parameters.
:param request: An oauthlib.common.Request object.
We need to associate verifiers with tokens for validation during the
access token request.
Note that unlike in the save_*_token methods, the token here is the
``oauth_token`` string from the previously saved request token.
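A minimal sketch with a hypothetical datastore (the Verifier model is the
same illustrative one used in validate_verifier)::
    from your_datastore import Verifier
    Verifier.associate(token, verifier['oauth_verifier'])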
This method is used by
* AuthorizationEndpoint
"""
raise self._subclass_must_implement("save_verifier")
|
mit
|
cetic/ansible
|
lib/ansible/modules/system/capabilities.py
|
9
|
6584
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Nate Coraor <nate@bx.psu.edu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: capabilities
short_description: Manage Linux capabilities
description:
- This module manipulates file privileges using the Linux capabilities(7) system.
version_added: "1.6"
options:
path:
description:
- Specifies the path to the file to be managed.
required: true
default: null
capability:
description:
- Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
required: true
default: null
aliases: [ 'cap' ]
state:
description:
- Whether the entry should be present or absent in the file's capabilities.
choices: [ "present", "absent" ]
default: present
notes:
- The capabilities system will automatically transform operators and flags
into the effective set (for example, cap_foo=ep will probably become
cap_foo+ep). This module does not attempt to determine the final operator
and flags to compare, so you will want to ensure that your capabilities
argument matches the final capabilities.
requirements: []
author: "Nate Coraor (@natefoo)"
'''
EXAMPLES = '''
# Set cap_sys_chroot+ep on /foo
- capabilities:
path: /foo
capability: cap_sys_chroot+ep
state: present
# Remove cap_net_bind_service from /bar
- capabilities:
path: /bar
capability: cap_net_bind_service
state: absent
'''
from ansible.module_utils.basic import AnsibleModule
OPS = ( '=', '-', '+' )
class CapabilitiesModule(object):
platform = 'Linux'
distribution = None
def __init__(self, module):
self.module = module
self.path = module.params['path'].strip()
self.capability = module.params['capability'].strip().lower()
self.state = module.params['state']
self.getcap_cmd = module.get_bin_path('getcap', required=True)
self.setcap_cmd = module.get_bin_path('setcap', required=True)
self.capability_tup = self._parse_cap(self.capability, op_required=self.state=='present')
self.run()
def run(self):
current = self.getcap(self.path)
caps = [ cap[0] for cap in current ]
if self.state == 'present' and self.capability_tup not in current:
# need to add capability
if self.module.check_mode:
self.module.exit_json(changed=True, msg='capabilities changed')
else:
# remove from current cap list if it's already set (but op/flags differ)
current = [cap for cap in current if cap[0] != self.capability_tup[0]]
# add new cap with correct op/flags
current.append( self.capability_tup )
self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
elif self.state == 'absent' and self.capability_tup[0] in caps:
# need to remove capability
if self.module.check_mode:
self.module.exit_json(changed=True, msg='capabilities changed')
else:
# remove from current cap list and then set current list
current = [cap for cap in current if cap[0] != self.capability_tup[0]]
self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
self.module.exit_json(changed=False, state=self.state)
def getcap(self, path):
rval = []
cmd = "%s -v %s" % (self.getcap_cmd, path)
rc, stdout, stderr = self.module.run_command(cmd)
# If file xattrs are set but no caps are set the output will be:
# '/foo ='
# If file xattrs are unset the output will be:
# '/foo'
# If the file does not exist the output will be (with rc == 0...):
# '/foo (No such file or directory)'
if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):
self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
if stdout.strip() != path:
caps = stdout.split(' =')[1].strip().split()
for cap in caps:
cap = cap.lower()
# getcap condenses capabilities with the same op/flags into a
# comma-separated list, so we have to parse that
if ',' in cap:
cap_group = cap.split(',')
cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
for subcap in cap_group:
rval.append( ( subcap, op, flags ) )
else:
rval.append(self._parse_cap(cap))
return rval
def setcap(self, path, caps):
caps = ' '.join([ ''.join(cap) for cap in caps ])
cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
else:
return stdout
def _parse_cap(self, cap, op_required=True):
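# Note: this splits a capability string into (name, operator, flags); for
# example 'cap_net_raw+ep' -> ('cap_net_raw', '+', 'ep'). Operators are
# searched for in the order they appear in OPS.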
opind = -1
try:
i = 0
while opind == -1:
opind = cap.find(OPS[i])
i += 1
except:
if op_required:
self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
else:
return (cap, None, None)
op = cap[opind]
cap, flags = cap.split(op)
return (cap, op, flags)
# ==============================================================
# main
def main():
# defining module
module = AnsibleModule(
argument_spec = dict(
path = dict(aliases=['key'], required=True),
capability = dict(aliases=['cap'], required=True),
state = dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True
)
CapabilitiesModule(module)
if __name__ == '__main__':
main()
|
gpl-3.0
|
mxOBS/deb-pkg_trusty_chromium-browser
|
testing/gmock/scripts/upload.py
|
2511
|
51024
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
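Example invocation (illustrative values only):
  upload.py --rev=HEAD^ -m "Fix frobnication" -r reviewer@example.com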
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={},
save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response and directs us to
authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401:
self._Authenticate()
## elif e.code >= 500 and e.code < 600:
## # Server Error - try again.
## continue
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of upload.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default="codereview.appspot.com",
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
metavar="DESCRIPTION", default=None,
help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
dest="description_file", metavar="DESCRIPTION_FILE",
default=None,
help="Optional path of a file that contains "
"the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
metavar="MESSAGE", default=None,
help="A message to identify the patch. "
"Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
def GetRpcServer(options):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
def GetUserCredentials():
"""Prompts the user for a username and password."""
email = options.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % options.server)
password = getpass.getpass("Password for %s: " % email)
return (email, password)
# If this is the dev_appserver, use fake authentication.
host = (options.host or options.server).lower()
if host == "localhost" or host.startswith("localhost:"):
email = options.email
if email is None:
email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
options.server,
lambda: (email, "password"),
host_override=options.host,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=options.save_cookies)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
return rpc_server_class(options.server, GetUserCredentials,
host_override=options.host,
save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5.new(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
# Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# SVN base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns the SVN base URL.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
info = RunShell(["svn", "info"])
for line in info.splitlines():
words = line.split()
if len(words) == 2 and words[0] == "URL:":
url = words[1]
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
username, netloc = urllib.splituser(netloc)
if username:
logging.info("Removed username from base URL")
if netloc.endswith("svn.python.org"):
if netloc == "svn.python.org":
if path.startswith("/projects/"):
path = path[9:]
elif netloc != "pythondev@svn.python.org":
ErrorExit("Unrecognized Python URL: %s" % url)
base = "http://svn.python.org/view/*checkout*%s/" % path
logging.info("Guessed Python base = %s", base)
elif netloc.endswith("svn.collab.net"):
if path.startswith("/repos/"):
path = path[6:]
base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
logging.info("Guessed CollabNet base = %s", base)
elif netloc.endswith(".googlecode.com"):
path = path + "/"
base = urlparse.urlunparse(("http", netloc, path, params,
query, fragment))
logging.info("Guessed Google Code base = %s", base)
else:
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed base = %s", base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals", filename])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to get status for %s." % filename)
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
silent_ok=True)
base_content = ""
is_binary = mimetype and not mimetype.startswith("text/")
if is_binary and self.IsImage(filename):
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
get_base = False
is_binary = mimetype and not mimetype.startswith("text/")
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
if self.IsImage(filename):
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
base_content = ""
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content = RunShell(["svn", "cat", filename],
universal_newlines=universal_newlines,
silent_ok=True)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> hash of base file.
self.base_hashes = {}
def GenerateDiff(self, extra_args):
# This is more complicated than svn's GenerateDiff because we must convert
# the diff output to include an svn-style "Index:" line as well as record
# the hashes of the base files, so we can upload them along with our diff.
if self.options.revision:
extra_args = [self.options.revision] + extra_args
gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/.*$", line)
if match:
filecount += 1
filename = match.group(1)
svndiff.append("Index: %s\n" % filename)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.", line)
if match:
self.base_hashes[filename] = match.group(1)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
return "".join(svndiff)
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetBaseFile(self, filename):
hash = self.base_hashes[filename]
base_content = None
new_content = None
is_binary = False
if hash == "0" * 40: # All-zero hash indicates no base file.
status = "A"
base_content = ""
else:
status = "M"
base_content, returncode = RunShellWithReturnCode(["git", "show", hash])
if returncode:
ErrorExit("Got error status from 'git show %s'" % hash)
return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
assert filename.startswith(self.subdir), filename
return filename[len(self.subdir):].lstrip(r"\/")
def GenerateDiff(self, extra_args):
# If no file specified, restrict to the current subdir
extra_args = extra_args or ["."]
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
# Modify the line so it looks as if it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir
# rather than to the repo root, but "hg diff" has given us the full path
# to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
if len(out) > 1:
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
else:
status, _ = out[0].split(' ', 1)
if status != "A":
base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
silent_ok=True)
is_binary = "\0" in base_content # Mercurial's heuristic
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or "\0" in new_content
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary or not self.IsImage(relpath):
new_content = None
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
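# Editor's illustrative sketch (hypothetical diff text, not part of the
# original upload.py): how SplitPatch() groups svn-style diff output into
# one (filename, text) tuple per file.
def _SplitPatchExample():
  sample = ("Index: foo.py\n"
            "===================================================================\n"
            "--- foo.py\n"
            "+++ foo.py\n"
            "+print 'hi'\n"
            "Index: bar.py\n"
            "===================================================================\n"
            "--- bar.py\n"
            "+++ bar.py\n"
            "-x = 1\n")
  patches = SplitPatch(sample)
  # patches == [("foo.py", <its diff text>), ("bar.py", <its diff text>)]
  return [filename for filename, _ in patches]  # ["foo.py", "bar.py"]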
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
def GuessVCS(options):
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
we're using, and returns an instance of the appropriate class. Exit with an
error if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
try:
out, returncode = RunShellWithReturnCode(["hg", "root"])
if returncode == 0:
return MercurialVCS(options, out.strip())
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have hg installed.
raise
# Subversion has a .svn in all working directories.
if os.path.isdir('.svn'):
logging.info("Guessed VCS = Subversion")
return SubversionVCS(options)
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
try:
out, returncode = RunShellWithReturnCode(["git", "rev-parse",
"--is-inside-work-tree"])
if returncode == 0:
return GitVCS(options)
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have git installed.
raise
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
options, args = parser.parse_args(argv[1:])
global verbosity
verbosity = options.verbose
if verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
vcs = GuessVCS(options)
if isinstance(vcs, SubversionVCS):
# base field is only allowed for Subversion.
# Note: Fetching base files may become deprecated in future releases.
base = vcs.GuessBase(options.download_base)
else:
base = None
if not base and options.download_base:
options.download_base = True
logging.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.issue:
prompt = "Message describing this patch set: "
else:
prompt = "New issue subject: "
message = options.message or raw_input(prompt).strip()
if not message:
ErrorExit("A non-empty message is required")
rpc_server = GetRpcServer(options)
form_fields = [("subject", message)]
if base:
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
ErrorExit("Invalid email address: %s" % reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
if "@" in cc and not cc.split("@")[1].count(".") == 1:
ErrorExit("Invalid email address: %s" % cc)
form_fields.append(("cc", options.cc))
description = options.description
if options.description_file:
if options.description:
ErrorExit("Can't specify description and description_file")
file = open(options.description_file, 'r')
description = file.read()
file.close()
if description:
form_fields.append(("description", description))
# Send a hash of all the base files so the server can determine if a copy
# already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
if not info[0] is None:
checksum = md5.new(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
# If we're uploading base files, don't send the email before the uploads, so
# that it contains the file status.
if options.send_mail and options.download_base:
form_fields.append(("send_mail", "1"))
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
if options.send_mail:
rpc_server.Send("/" + issue + "/mail", payload="")
return issue, patchset
def main():
try:
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
|
bsd-3-clause
|
stamhe/ppcoin
|
contrib/pyminer/pyminer.py
|
1257
|
6438
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
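# Editor's illustrative sketch (not part of the original pyminer.py): what the
# two helpers above do to a concrete 8-byte buffer.  bufreverse() byte-swaps
# every 32-bit word in place; wordreverse() reverses the order of the words.
def _byteorder_example():
    buf = '\x01\x02\x03\x04\xaa\xbb\xcc\xdd'
    assert bufreverse(buf) == '\x04\x03\x02\x01\xdd\xcc\xbb\xaa'
    assert wordreverse(buf) == '\xaa\xbb\xcc\xdd\x01\x02\x03\x04'
    return True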
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
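# Editor's illustrative sketch (hypothetical arguments, not part of the
# original pyminer.py): the proof-of-work test performed inside Miner.work(),
# written as a standalone check.  blk_hdr76 is the 76-byte static header
# prefix (already passed through bufreverse(), as in work()), nonce a 32-bit
# integer and targetstr the hex target string returned by getwork.
def _pow_check_example(blk_hdr76, nonce, targetstr):
    nonce_bin = struct.pack("<I", nonce)
    hash1 = hashlib.sha256(blk_hdr76 + nonce_bin).digest()
    hash2 = hashlib.sha256(hash1).digest()
    hash_str = wordreverse(bufreverse(hash2)).encode('hex')
    target = long(targetstr.decode('hex')[::-1].encode('hex'), 16)
    return long(hash_str, 16) < target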
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
mit
|
marcosmodesto/django-testapp
|
django/views/decorators/cache.py
|
83
|
3975
|
from functools import wraps
from django.utils.decorators import decorator_from_middleware_with_args, available_attrs
from django.utils.cache import patch_cache_control, add_never_cache_headers
from django.middleware.cache import CacheMiddleware
def cache_page(*args, **kwargs):
"""
Decorator for views that tries getting the page from the cache and
populates the cache if the page isn't in the cache yet.
The cache is keyed by the URL and some data from the headers.
Additionally there is the key prefix that is used to distinguish different
cache areas in a multi-site setup. You could use the
sites.get_current().domain, for example, as that is unique across a Django
project.
Additionally, all headers from the response's Vary header will be taken
into account on caching -- just like the middleware does.
"""
# We need backwards compatibility with code which spells it this way:
# def my_view(): pass
# my_view = cache_page(my_view, 123)
# and this way:
# my_view = cache_page(123)(my_view)
# and this:
# my_view = cache_page(my_view, 123, key_prefix="foo")
# and this:
# my_view = cache_page(123, key_prefix="foo")(my_view)
# and possibly this way (?):
# my_view = cache_page(123, my_view)
# and also this way:
# my_view = cache_page(my_view)
# and also this way:
# my_view = cache_page()(my_view)
# We also add some asserts to give better error messages in case people are
# using other ways to call cache_page that no longer work.
cache_alias = kwargs.pop('cache', None)
key_prefix = kwargs.pop('key_prefix', None)
assert not kwargs, "The only keyword arguments are cache and key_prefix"
def warn():
import warnings
warnings.warn('The cache_page decorator must be called like: '
'cache_page(timeout, [cache=cache name], [key_prefix=key prefix]). '
'All other ways are deprecated.',
PendingDeprecationWarning,
stacklevel=3)
if len(args) > 1:
assert len(args) == 2, "cache_page accepts at most 2 arguments"
warn()
if callable(args[0]):
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[1], cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
elif callable(args[1]):
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)(args[1])
else:
assert False, "cache_page must be passed a view function if called with two arguments"
elif len(args) == 1:
if callable(args[0]):
warn()
return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
else:
# The One True Way
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)
else:
warn()
return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)
def cache_control(**kwargs):
def _cache_controller(viewfunc):
@wraps(viewfunc, assigned=available_attrs(viewfunc))
def _cache_controlled(request, *args, **kw):
response = viewfunc(request, *args, **kw)
patch_cache_control(response, **kwargs)
return response
return _cache_controlled
return _cache_controller
def never_cache(view_func):
"""
Decorator that adds headers to a response so that it will
never be cached.
"""
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view_func(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
add_never_cache_headers(response)
return response
return _wrapped_view_func
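# Editor's illustrative usage sketch (not part of this module): typical ways
# the three decorators above are applied to views.  The view bodies, timeout
# and key_prefix values are hypothetical; a configured Django settings module
# is assumed before the example function is called.
def _cache_decorators_example():
    from django.http import HttpResponse

    @cache_page(60 * 15, key_prefix="site1")       # cache the page for 15 minutes
    @cache_control(max_age=3600, public=True)      # patch the Cache-Control header
    def my_view(request):
        return HttpResponse("cached")

    @never_cache
    def my_dashboard(request):                     # response marked as never cacheable
        return HttpResponse("fresh")

    return my_view, my_dashboard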
|
bsd-3-clause
|
anilmuthineni/tensorflow
|
tensorflow/contrib/seq2seq/python/ops/attention_decoder_fn.py
|
19
|
18947
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Attention-based decoder functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = [
"prepare_attention", "attention_decoder_fn_train",
"attention_decoder_fn_inference"
]
def attention_decoder_fn_train(encoder_state,
attention_keys,
attention_values,
attention_score_fn,
attention_construct_fn,
name=None):
"""Attentional decoder function for `dynamic_rnn_decoder` during training.
The `attention_decoder_fn_train` is a training function for an
attention-based sequence-to-sequence model. It should be used when
`dynamic_rnn_decoder` is in the training mode.
The `attention_decoder_fn_train` is called with a set of the user arguments
and returns the `decoder_fn`, which can be passed to the
`dynamic_rnn_decoder`, such that
```
dynamic_fn_train = attention_decoder_fn_train(encoder_state)
outputs_train, state_train = dynamic_rnn_decoder(
decoder_fn=dynamic_fn_train, ...)
```
Further usage can be found in the `kernel_tests/seq2seq_test.py`.
Args:
encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
attention_keys: to be compared with target states.
attention_values: to be used to construct context vectors.
attention_score_fn: to compute similarity between key and target states.
attention_construct_fn: to build attention states.
name: (default: `None`) NameScope for the decoder function;
defaults to "attention_decoder_fn_train"
Returns:
A decoder function with the required interface of `dynamic_rnn_decoder`
intended for training.
"""
with ops.name_scope(name, "attention_decoder_fn_train", [
encoder_state, attention_keys, attention_values, attention_score_fn,
attention_construct_fn
]):
pass
def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
"""Decoder function used in the `dynamic_rnn_decoder` for training.
Args:
time: positive integer constant reflecting the current timestep.
cell_state: state of RNNCell.
cell_input: input provided by `dynamic_rnn_decoder`.
cell_output: output of RNNCell.
context_state: context state provided by `dynamic_rnn_decoder`.
Returns:
A tuple (done, next state, next input, emit output, next context state)
where:
done: `None`, which is used by the `dynamic_rnn_decoder` to indicate
that `sequence_lengths` in `dynamic_rnn_decoder` should be used.
next state: `cell_state`, this decoder function does not modify the
given state.
next input: `cell_input`, this decoder function does not modify the
given input. The input could be modified when applying e.g. attention.
emit output: `cell_output`, this decoder function does not modify the
given output.
next context state: `context_state`, this decoder function does not
modify the given context state. The context state could be modified when
applying e.g. beam search.
"""
with ops.name_scope(
name, "attention_decoder_fn_train",
[time, cell_state, cell_input, cell_output, context_state]):
if cell_state is None: # first call, return encoder_state
cell_state = encoder_state
# init attention
attention = _init_attention(encoder_state)
else:
# construct attention
attention = attention_construct_fn(cell_output, attention_keys,
attention_values)
cell_output = attention
# combine cell_input and attention
next_input = array_ops.concat([cell_input, attention], 1)
return (None, cell_state, next_input, cell_output, context_state)
return decoder_fn
def attention_decoder_fn_inference(output_fn,
encoder_state,
attention_keys,
attention_values,
attention_score_fn,
attention_construct_fn,
embeddings,
start_of_sequence_id,
end_of_sequence_id,
maximum_length,
num_decoder_symbols,
dtype=dtypes.int32,
name=None):
"""Attentional decoder function for `dynamic_rnn_decoder` during inference.
The `attention_decoder_fn_inference` is a simple inference function for a
sequence-to-sequence model. It should be used when `dynamic_rnn_decoder` is
in the inference mode.
The `attention_decoder_fn_inference` is called with user arguments
and returns the `decoder_fn`, which can be passed to the
`dynamic_rnn_decoder`, such that
```
dynamic_fn_inference = attention_decoder_fn_inference(...)
outputs_inference, state_inference = dynamic_rnn_decoder(
decoder_fn=dynamic_fn_inference, ...)
```
Further usage can be found in the `kernel_tests/seq2seq_test.py`.
Args:
output_fn: An output function to project your `cell_output` onto class
logits.
An example of an output function;
```
with tf.variable_scope("decoder") as varscope:
output_fn = lambda x: layers.linear(x, num_decoder_symbols,
scope=varscope)
outputs_train, state_train = seq2seq.dynamic_rnn_decoder(...)
logits_train = output_fn(outputs_train)
varscope.reuse_variables()
logits_inference, state_inference = seq2seq.dynamic_rnn_decoder(
output_fn=output_fn, ...)
```
If `None` is supplied it will act as an identity function, which
might be wanted when using the RNNCell `OutputProjectionWrapper`.
encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
attention_keys: to be compared with target states.
attention_values: to be used to construct context vectors.
attention_score_fn: to compute similarity between key and target states.
attention_construct_fn: to build attention states.
embeddings: The embeddings matrix used for the decoder sized
`[num_decoder_symbols, embedding_size]`.
start_of_sequence_id: The start of sequence ID in the decoder embeddings.
end_of_sequence_id: The end of sequence ID in the decoder embeddings.
maximum_length: The maximum allowed number of time steps to decode.
num_decoder_symbols: The number of classes to decode at each time step.
dtype: (default: `dtypes.int32`) The default data type to use when
handling integer objects.
name: (default: `None`) NameScope for the decoder function;
defaults to "attention_decoder_fn_inference"
Returns:
A decoder function with the required interface of `dynamic_rnn_decoder`
intended for inference.
"""
with ops.name_scope(name, "attention_decoder_fn_inference", [
output_fn, encoder_state, attention_keys, attention_values,
attention_score_fn, attention_construct_fn, embeddings,
start_of_sequence_id, end_of_sequence_id, maximum_length,
num_decoder_symbols, dtype
]):
start_of_sequence_id = ops.convert_to_tensor(start_of_sequence_id, dtype)
end_of_sequence_id = ops.convert_to_tensor(end_of_sequence_id, dtype)
maximum_length = ops.convert_to_tensor(maximum_length, dtype)
num_decoder_symbols = ops.convert_to_tensor(num_decoder_symbols, dtype)
encoder_info = nest.flatten(encoder_state)[0]
batch_size = encoder_info.get_shape()[0].value
if output_fn is None:
output_fn = lambda x: x
if batch_size is None:
batch_size = array_ops.shape(encoder_info)[0]
def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
"""Decoder function used in the `dynamic_rnn_decoder` for inference.
The main difference between this decoder function and the `decoder_fn` in
`attention_decoder_fn_train` is how `next_cell_input` is calculated. In this
decoder function we calculate the next input by applying an argmax across
the feature dimension of the output from the decoder. This is a
greedy-search approach. (Bahdanau et al., 2014) & (Sutskever et al., 2014)
use beam-search instead.
Args:
time: positive integer constant reflecting the current timestep.
cell_state: state of RNNCell.
cell_input: input provided by `dynamic_rnn_decoder`.
cell_output: output of RNNCell.
context_state: context state provided by `dynamic_rnn_decoder`.
Returns:
A tuple (done, next state, next input, emit output, next context state)
where:
done: A boolean vector to indicate which sentences have reached an
`end_of_sequence_id`. This is used for early stopping by the
`dynamic_rnn_decoder`. When `time>=maximum_length` a boolean vector with
all elements as `true` is returned.
next state: `cell_state`, this decoder function does not modify the
given state.
next input: The embedding from argmax of the `cell_output` is used as
`next_input`.
emit output: If `output_fn is None` the supplied `cell_output` is
returned, else the `output_fn` is used to update the `cell_output`
before calculating `next_input` and returning `cell_output`.
next context state: `context_state`, this decoder function does not
modify the given context state. The context state could be modified when
applying e.g. beam search.
Raises:
ValueError: if cell_input is not None.
"""
with ops.name_scope(
name, "attention_decoder_fn_inference",
[time, cell_state, cell_input, cell_output, context_state]):
if cell_input is not None:
raise ValueError("Expected cell_input to be None, but saw: %s" %
cell_input)
if cell_output is None:
# invariant that this is time == 0
next_input_id = array_ops.ones(
[batch_size,], dtype=dtype) * (start_of_sequence_id)
done = array_ops.zeros([batch_size,], dtype=dtypes.bool)
cell_state = encoder_state
cell_output = array_ops.zeros(
[num_decoder_symbols], dtype=dtypes.float32)
cell_input = array_ops.gather(embeddings, next_input_id)
# init attention
attention = _init_attention(encoder_state)
else:
# construct attention
attention = attention_construct_fn(cell_output, attention_keys,
attention_values)
cell_output = attention
# argmax decoder
cell_output = output_fn(cell_output) # logits
next_input_id = math_ops.cast(
math_ops.argmax(cell_output, 1), dtype=dtype)
done = math_ops.equal(next_input_id, end_of_sequence_id)
cell_input = array_ops.gather(embeddings, next_input_id)
# combine cell_input and attention
next_input = array_ops.concat([cell_input, attention], 1)
# if time > maxlen, return all true vector
done = control_flow_ops.cond(
math_ops.greater(time, maximum_length),
lambda: array_ops.ones([batch_size,], dtype=dtypes.bool),
lambda: done)
return (done, cell_state, next_input, cell_output, context_state)
return decoder_fn
## Helper functions ##
def prepare_attention(attention_states,
attention_option,
num_units,
reuse=False):
"""Prepare keys/values/functions for attention.
Args:
attention_states: hidden states to attend over.
attention_option: how to compute attention, either "luong" or "bahdanau".
num_units: hidden state dimension.
reuse: whether to reuse variable scope.
Returns:
attention_keys: to be compared with target states.
attention_values: to be used to construct context vectors.
attention_score_fn: to compute similarity between key and target states.
attention_construct_fn: to build attention states.
"""
# Prepare attention keys / values from attention_states
with variable_scope.variable_scope("attention_keys", reuse=reuse) as scope:
attention_keys = layers.linear(
attention_states, num_units, biases_initializer=None, scope=scope)
attention_values = attention_states
# Attention score function
attention_score_fn = _create_attention_score_fn("attention_score", num_units,
attention_option, reuse)
# Attention construction function
attention_construct_fn = _create_attention_construct_fn("attention_construct",
num_units,
attention_score_fn,
reuse)
return (attention_keys, attention_values, attention_score_fn,
attention_construct_fn)
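# Editor's illustrative wiring sketch (not part of the original module): how
# the four values returned by prepare_attention() are typically passed on to
# attention_decoder_fn_train() above.  encoder_outputs, encoder_state and
# num_units are hypothetical tensors/values produced by an encoder RNN.
def _attention_wiring_sketch(encoder_outputs, encoder_state, num_units):
    (attention_keys, attention_values, attention_score_fn,
     attention_construct_fn) = prepare_attention(
         attention_states=encoder_outputs,
         attention_option="bahdanau",
         num_units=num_units)
    decoder_fn_train = attention_decoder_fn_train(
        encoder_state, attention_keys, attention_values,
        attention_score_fn, attention_construct_fn)
    return decoder_fn_train  # pass as decoder_fn=... to dynamic_rnn_decoder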
def _init_attention(encoder_state):
"""Initialize attention. Handling both LSTM and GRU.
Args:
encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
Returns:
attn: initial zero attention vector.
"""
# Multi- vs single-layer
# TODO(thangluong): is this the best way to check?
if isinstance(encoder_state, tuple):
top_state = encoder_state[-1]
else:
top_state = encoder_state
# LSTM vs GRU
if isinstance(top_state, core_rnn_cell_impl.LSTMStateTuple):
attn = array_ops.zeros_like(top_state.h)
else:
attn = array_ops.zeros_like(top_state)
return attn
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
"""Function to compute attention vectors.
Args:
name: to label variables.
num_units: hidden state dimension.
attention_score_fn: to compute similarity between key and target states.
reuse: whether to reuse variable scope.
Returns:
attention_construct_fn: to build attention states.
"""
with variable_scope.variable_scope(name, reuse=reuse) as scope:
def construct_fn(attention_query, attention_keys, attention_values):
context = attention_score_fn(attention_query, attention_keys,
attention_values)
concat_input = array_ops.concat([attention_query, context], 1)
attention = layers.linear(
concat_input, num_units, biases_initializer=None, scope=scope)
return attention
return construct_fn
# keys: [batch_size, attention_length, attn_size]
# query: [batch_size, 1, attn_size]
# return weights [batch_size, attention_length]
@function.Defun(func_name="attn_add_fun", noinline=True)
def _attn_add_fun(v, keys, query):
return math_ops.reduce_sum(v * math_ops.tanh(keys + query), [2])
@function.Defun(func_name="attn_mul_fun", noinline=True)
def _attn_mul_fun(keys, query):
return math_ops.reduce_sum(keys * query, [2])
def _create_attention_score_fn(name,
num_units,
attention_option,
reuse,
dtype=dtypes.float32):
"""Different ways to compute attention scores.
Args:
name: to label variables.
num_units: hidden state dimension.
attention_option: how to compute attention, either "luong" or "bahdanau".
"bahdanau": additive (Bahdanau et al., ICLR'2015)
"luong": multiplicative (Luong et al., EMNLP'2015)
reuse: whether to reuse variable scope.
dtype: (default: `dtypes.float32`) data type to use.
Returns:
attention_score_fn: to compute similarity between key and target states.
"""
with variable_scope.variable_scope(name, reuse=reuse):
if attention_option == "bahdanau":
query_w = variable_scope.get_variable(
"attnW", [num_units, num_units], dtype=dtype)
score_v = variable_scope.get_variable("attnV", [num_units], dtype=dtype)
def attention_score_fn(query, keys, values):
"""Put attention masks on attention_values using attention_keys and query.
Args:
query: A Tensor of shape [batch_size, num_units].
keys: A Tensor of shape [batch_size, attention_length, num_units].
values: A Tensor of shape [batch_size, attention_length, num_units].
Returns:
context_vector: A Tensor of shape [batch_size, num_units].
Raises:
ValueError: if attention_option is neither "luong" nor "bahdanau".
"""
if attention_option == "bahdanau":
# transform query
query = math_ops.matmul(query, query_w)
# reshape query: [batch_size, 1, num_units]
query = array_ops.reshape(query, [-1, 1, num_units])
# attn_fun
scores = _attn_add_fun(score_v, keys, query)
elif attention_option == "luong":
# reshape query: [batch_size, 1, num_units]
query = array_ops.reshape(query, [-1, 1, num_units])
# attn_fun
scores = _attn_mul_fun(keys, query)
else:
raise ValueError("Unknown attention option %s!" % attention_option)
# Compute alignment weights
# scores: [batch_size, length]
# alignments: [batch_size, length]
# TODO(thangluong): not normalize over padding positions.
alignments = nn_ops.softmax(scores)
# Now calculate the attention-weighted vector.
alignments = array_ops.expand_dims(alignments, 2)
context_vector = math_ops.reduce_sum(alignments * values, [1])
context_vector.set_shape([None, num_units])
return context_vector
return attention_score_fn
|
apache-2.0
|
noslenfa/tdjangorest
|
uw/lib/python2.7/site-packages/IPython/html/services/notebooks/filenbmanager.py
|
2
|
14448
|
"""A notebook manager that uses the local file system for storage.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import datetime
import io
import os
import glob
import shutil
from unicodedata import normalize
from tornado import web
from .nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Dict, Bool, TraitError
from IPython.utils import tz
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class FileNotebookManager(NotebookManager):
save_script = Bool(False, config=True,
help="""Automatically create a Python script when saving the notebook.
For easier use of import, %run and %load across notebooks, a
<notebook-name>.py script will be created next to any
<notebook-name>.ipynb on each save. This can also be set with the
short `--script` flag.
"""
)
checkpoint_dir = Unicode(config=True,
help="""The location in which to keep notebook checkpoints
By default, it is notebook-dir/.ipynb_checkpoints
"""
)
def _checkpoint_dir_default(self):
return os.path.join(self.notebook_dir, '.ipynb_checkpoints')
def _checkpoint_dir_changed(self, name, old, new):
"""do a bit of validation of the checkpoint dir"""
if not os.path.isabs(new):
# If we receive a non-absolute path, make it absolute.
abs_new = os.path.abspath(new)
self.checkpoint_dir = abs_new
return
if os.path.exists(new) and not os.path.isdir(new):
raise TraitError("checkpoint dir %r is not a directory" % new)
if not os.path.exists(new):
self.log.info("Creating checkpoint dir %s", new)
try:
os.mkdir(new)
except:
raise TraitError("Couldn't create checkpoint dir %r" % new)
filename_ext = Unicode(u'.ipynb')
# Map notebook names to notebook_ids
rev_mapping = Dict()
def get_notebook_names(self):
"""List all notebook names in the notebook dir."""
names = glob.glob(os.path.join(self.notebook_dir,
'*' + self.filename_ext))
names = [normalize('NFC', os.path.splitext(os.path.basename(name))[0])
for name in names]
return names
def list_notebooks(self):
"""List all notebooks in the notebook dir."""
names = self.get_notebook_names()
data = []
for name in names:
if name not in self.rev_mapping:
notebook_id = self.new_notebook_id(name)
else:
notebook_id = self.rev_mapping[name]
data.append(dict(notebook_id=notebook_id,name=name))
data = sorted(data, key=lambda item: item['name'])
return data
def new_notebook_id(self, name):
"""Generate a new notebook_id for a name and store its mappings."""
notebook_id = super(FileNotebookManager, self).new_notebook_id(name)
self.rev_mapping[name] = notebook_id
return notebook_id
def delete_notebook_id(self, notebook_id):
"""Delete a notebook's id in the mapping."""
name = self.mapping[notebook_id]
super(FileNotebookManager, self).delete_notebook_id(notebook_id)
del self.rev_mapping[name]
def notebook_exists(self, notebook_id):
"""Does a notebook exist?"""
exists = super(FileNotebookManager, self).notebook_exists(notebook_id)
if not exists:
return False
path = self.get_path_by_name(self.mapping[notebook_id])
return os.path.isfile(path)
def get_name(self, notebook_id):
"""get a notebook name, raising 404 if not found"""
try:
name = self.mapping[notebook_id]
except KeyError:
raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
return name
def get_path(self, notebook_id):
"""Return a full path to a notebook given its notebook_id."""
name = self.get_name(notebook_id)
return self.get_path_by_name(name)
def get_path_by_name(self, name):
"""Return a full path to a notebook given its name."""
filename = name + self.filename_ext
path = os.path.join(self.notebook_dir, filename)
return path
def read_notebook_object_from_path(self, path):
"""read a notebook object from a path"""
info = os.stat(path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
with open(path,'r') as f:
s = f.read()
try:
# v1 and v2 notebooks are stored as json in the .ipynb files.
nb = current.reads(s, u'json')
except ValueError as e:
msg = u"Unreadable Notebook: %s" % e
raise web.HTTPError(400, msg, reason=msg)
return last_modified, nb
def read_notebook_object(self, notebook_id):
"""Get the Notebook representation of a notebook by notebook_id."""
path = self.get_path(notebook_id)
if not os.path.isfile(path):
raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
last_modified, nb = self.read_notebook_object_from_path(path)
# Always use the filename as the notebook name.
# Eventually we will get rid of the notebook name in the metadata
# but for now, that name is just an empty string. Until the notebooks
# web service knows about names in URLs we still pass the name
# back to the web app using the metadata though.
nb.metadata.name = os.path.splitext(os.path.basename(path))[0]
return last_modified, nb
def write_notebook_object(self, nb, notebook_id=None):
"""Save an existing notebook object by notebook_id."""
try:
new_name = normalize('NFC', nb.metadata.name)
except AttributeError:
raise web.HTTPError(400, u'Missing notebook name')
if notebook_id is None:
notebook_id = self.new_notebook_id(new_name)
if notebook_id not in self.mapping:
raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
old_name = self.mapping[notebook_id]
old_checkpoints = self.list_checkpoints(notebook_id)
path = self.get_path_by_name(new_name)
# Right before we save the notebook, we write an empty string as the
# notebook name in the metadata. This is to prepare for removing
# this attribute entirely post 1.0. The web app still uses the metadata
# name for now.
nb.metadata.name = u''
try:
self.log.debug("Autosaving notebook %s", path)
with open(path,'w') as f:
current.write(nb, f, u'json')
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while autosaving notebook: %s' % e)
# save .py script as well
if self.save_script:
pypath = os.path.splitext(path)[0] + '.py'
self.log.debug("Writing script %s", pypath)
try:
with io.open(pypath,'w', encoding='utf-8') as f:
current.write(nb, f, u'py')
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while saving notebook as script: %s' % e)
# remove old files if the name changed
if old_name != new_name:
# update mapping
self.mapping[notebook_id] = new_name
self.rev_mapping[new_name] = notebook_id
del self.rev_mapping[old_name]
# remove renamed original, if it exists
old_path = self.get_path_by_name(old_name)
if os.path.isfile(old_path):
self.log.debug("unlinking notebook %s", old_path)
os.unlink(old_path)
# cleanup old script, if it exists
if self.save_script:
old_pypath = os.path.splitext(old_path)[0] + '.py'
if os.path.isfile(old_pypath):
self.log.debug("unlinking script %s", old_pypath)
os.unlink(old_pypath)
# rename checkpoints to follow file
for cp in old_checkpoints:
checkpoint_id = cp['checkpoint_id']
old_cp_path = self.get_checkpoint_path_by_name(old_name, checkpoint_id)
new_cp_path = self.get_checkpoint_path_by_name(new_name, checkpoint_id)
if os.path.isfile(old_cp_path):
self.log.debug("renaming checkpoint %s -> %s", old_cp_path, new_cp_path)
os.rename(old_cp_path, new_cp_path)
return notebook_id
def delete_notebook(self, notebook_id):
"""Delete notebook by notebook_id."""
nb_path = self.get_path(notebook_id)
if not os.path.isfile(nb_path):
raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
# clear checkpoints
for checkpoint in self.list_checkpoints(notebook_id):
checkpoint_id = checkpoint['checkpoint_id']
path = self.get_checkpoint_path(notebook_id, checkpoint_id)
self.log.debug(path)
if os.path.isfile(path):
self.log.debug("unlinking checkpoint %s", path)
os.unlink(path)
self.log.debug("unlinking notebook %s", nb_path)
os.unlink(nb_path)
self.delete_notebook_id(notebook_id)
def increment_filename(self, basename):
"""Return a non-used filename of the form basename<int>.
This searches through the filenames (basename0, basename1, ...)
until it finds one that is not already being used. It is used to
create Untitled and Copy names that are unique.
"""
i = 0
while True:
name = u'%s%i' % (basename,i)
path = self.get_path_by_name(name)
if not os.path.isfile(path):
break
else:
i = i+1
return name
# Checkpoint-related utilities
def get_checkpoint_path_by_name(self, name, checkpoint_id):
"""Return a full path to a notebook checkpoint, given its name and checkpoint id."""
filename = u"{name}-{checkpoint_id}{ext}".format(
name=name,
checkpoint_id=checkpoint_id,
ext=self.filename_ext,
)
path = os.path.join(self.checkpoint_dir, filename)
return path
def get_checkpoint_path(self, notebook_id, checkpoint_id):
"""find the path to a checkpoint"""
name = self.get_name(notebook_id)
return self.get_checkpoint_path_by_name(name, checkpoint_id)
def get_checkpoint_info(self, notebook_id, checkpoint_id):
"""construct the info dict for a given checkpoint"""
path = self.get_checkpoint_path(notebook_id, checkpoint_id)
stats = os.stat(path)
last_modified = tz.utcfromtimestamp(stats.st_mtime)
info = dict(
checkpoint_id = checkpoint_id,
last_modified = last_modified,
)
return info
# public checkpoint API
def create_checkpoint(self, notebook_id):
"""Create a checkpoint from the current state of a notebook"""
nb_path = self.get_path(notebook_id)
# only the one checkpoint ID:
checkpoint_id = u"checkpoint"
cp_path = self.get_checkpoint_path(notebook_id, checkpoint_id)
self.log.debug("creating checkpoint for notebook %s", notebook_id)
if not os.path.exists(self.checkpoint_dir):
os.mkdir(self.checkpoint_dir)
shutil.copy2(nb_path, cp_path)
# return the checkpoint info
return self.get_checkpoint_info(notebook_id, checkpoint_id)
def list_checkpoints(self, notebook_id):
"""list the checkpoints for a given notebook
This notebook manager currently only supports one checkpoint per notebook.
"""
checkpoint_id = u"checkpoint"
path = self.get_checkpoint_path(notebook_id, checkpoint_id)
if not os.path.exists(path):
return []
else:
return [self.get_checkpoint_info(notebook_id, checkpoint_id)]
def restore_checkpoint(self, notebook_id, checkpoint_id):
"""restore a notebook to a checkpointed state"""
self.log.info("restoring Notebook %s from checkpoint %s", notebook_id, checkpoint_id)
nb_path = self.get_path(notebook_id)
cp_path = self.get_checkpoint_path(notebook_id, checkpoint_id)
if not os.path.isfile(cp_path):
self.log.debug("checkpoint file does not exist: %s", cp_path)
raise web.HTTPError(404,
u'Notebook checkpoint does not exist: %s-%s' % (notebook_id, checkpoint_id)
)
# ensure notebook is readable (never restore from an unreadable notebook)
last_modified, nb = self.read_notebook_object_from_path(cp_path)
shutil.copy2(cp_path, nb_path)
self.log.debug("copying %s -> %s", cp_path, nb_path)
def delete_checkpoint(self, notebook_id, checkpoint_id):
"""delete a notebook's checkpoint"""
path = self.get_checkpoint_path(notebook_id, checkpoint_id)
if not os.path.isfile(path):
raise web.HTTPError(404,
u'Notebook checkpoint does not exist: %s-%s' % (notebook_id, checkpoint_id)
)
self.log.debug("unlinking %s", path)
os.unlink(path)
def info_string(self):
return "Serving notebooks from local directory: %s" % self.notebook_dir
|
apache-2.0
|
garyfeng/pybrain
|
pybrain/rl/environments/ode/tasks/ccrl.py
|
25
|
15208
|
from __future__ import print_function
__author__ = 'Frank Sehnke, sehnke@in.tum.de'
from pybrain.rl.environments import EpisodicTask
from pybrain.rl.environments.ode.sensors import SpecificBodyPositionSensor
from scipy import tanh, zeros, array, random, sqrt, asarray
#Basic class for all ccrl tasks
class CCRLTask(EpisodicTask):
def __init__(self, env):
EpisodicTask.__init__(self, env)
#Overall maximal torque - is multiplied with the relative max torque for each individual joint.
self.maxPower = 100.0
self.reward_history = []
self.count = 0 #timestep counter
self.epiLen = 1500 #suggested episodic length for normal Johnnie tasks
self.incLearn = 0 #counts the task resets for incremental learning
self.env.FricMu = 20.0 #We need higher friction for CCRL
self.env.dt = 0.002 #We also need a finer time resolution
# normalize standard sensors to (-1, 1)
self.sensor_limits = []
#Angle sensors
for i in range(self.env.actLen):
self.sensor_limits.append((self.env.cLowList[i], self.env.cHighList[i]))
# Joint velocity sensors
for i in range(self.env.actLen):
self.sensor_limits.append((-20, 20))
#Norm all actor dimensions to (-1, 1)
self.actor_limits = [(-1, 1)] * env.actLen
self.oldAction = zeros(env.actLen, float)
self.dist = zeros(9, float)
self.dif = array([0.0, 0.0, 0.0])
self.target = array([-6.5, 1.75, -10.5])
self.grepRew = 0.0
self.tableFlag = 0.0
self.env.addSensor(SpecificBodyPositionSensor(['objectP00'], "glasPos"))
self.env.addSensor(SpecificBodyPositionSensor(['palmLeft'], "palmPos"))
self.env.addSensor(SpecificBodyPositionSensor(['fingerLeft1'], "finger1Pos"))
self.env.addSensor(SpecificBodyPositionSensor(['fingerLeft2'], "finger2Pos"))
#we changed sensors so we need to update environments sensorLength variable
self.env.obsLen = len(self.env.getSensors())
#normalization for the task-specific sensors
for i in range(self.env.obsLen - 2 * self.env.actLen):
self.sensor_limits.append((-4, 4))
self.actor_limits = None
def getObservation(self):
""" a filtered mapping to getSample of the underlying environment. """
sensors = self.env.getSensors()
#Sensor hand to target object
for i in range(3):
self.dist[i] = ((sensors[self.env.obsLen - 9 + i] + sensors[self.env.obsLen - 6 + i] + sensors[self.env.obsLen - 3 + i]) / 3.0 - (sensors[self.env.obsLen - 12 + i] + self.dif[i])) * 4.0 #sensors[self.env.obsLen-12+i]
#Sensor hand angle to horizontal plane X-Axis
for i in range(3):
self.dist[i + 3] = (sensors[self.env.obsLen - 3 + i] - sensors[self.env.obsLen - 6 + i]) * 5.0
#Sensor hand angle to horizontal plane Y-Axis
for i in range(3):
self.dist[i + 6] = ((sensors[self.env.obsLen - 3 + i] + sensors[self.env.obsLen - 6 + i]) / 2.0 - sensors[self.env.obsLen - 9 + i]) * 10.0
if self.sensor_limits:
sensors = self.normalize(sensors)
sens = []
for i in range(self.env.obsLen - 12):
sens.append(sensors[i])
for i in range(9):
sens.append(self.dist[i])
for i in self.oldAction:
sens.append(i)
return sens
def performAction(self, action):
#Filtered mapping towards performAction of the underlying environment
#The standard CCRL task uses a PID controller to control angles directly instead of forces
#This makes most tasks much simpler to learn
self.oldAction = action
#Grasping as reflex depending on the distance to target - comment in for easier grasping
if abs(abs(self.dist[:3]).sum())<2.0: action[15]=1.0 #self.grepRew=action[15]*.01
else: action[15]=-1.0 #self.grepRew=action[15]*-.03
isJoints=array(self.env.getSensorByName('JointSensor')) #The joint angles
isSpeeds=array(self.env.getSensorByName('JointVelocitySensor')) #The joint angular velocities
act=(action+1.0)/2.0*(self.env.cHighList-self.env.cLowList)+self.env.cLowList #norm output to action interval
action=tanh((act-isJoints-0.9*isSpeeds*self.env.tourqueList)*16.0)*self.maxPower*self.env.tourqueList #simple PID
EpisodicTask.performAction(self, action)
#self.env.performAction(action)
def isFinished(self):
#returns true if episode timesteps has reached episode length and resets the task
if self.count > self.epiLen:
self.res()
return True
else:
self.count += 1
return False
def res(self):
#sets counter and history back, increases incremental counter
self.count = 0
self.incLearn += 1
self.reward_history.append(self.getTotalReward())
self.tableFlag = 0.0
def getReward(self):
#rewarded for approaching the object
dis = sqrt((self.dist[0:3] ** 2).sum())
return (25.0 - dis) / float(self.epiLen) - float(self.env.tableSum) * 0.1
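#Editor's illustrative sketch (hypothetical arguments, not part of the original
#module): the angle-to-torque mapping used inside CCRLTask.performAction(),
#written as a standalone function.  action is in [-1, 1] per joint; c_low and
#c_high are the joint limits and tourque_list the per-joint torque scaling,
#all arrays of equal length, as provided by the environment.
def _pid_torque_sketch(action, joint_angles, joint_speeds,
                       c_low, c_high, tourque_list, max_power=100.0):
    act = (action + 1.0) / 2.0 * (c_high - c_low) + c_low #desired joint angles
    #simple PID-like controller, as in performAction()
    return tanh((act - joint_angles - 0.9 * joint_speeds * tourque_list) * 16.0) * max_power * tourque_list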
#Learn to grasp a glas at a fixed location
class CCRLGlasTask(CCRLTask):
def __init__(self, env):
CCRLTask.__init__(self, env)
self.dif = array([0.0, 0.0, 0.0])
self.epiLen = 1000 #suggested episodic length for normal Johnnie tasks
def isFinished(self):
#returns true if episode timesteps has reached episode length and resets the task
if self.count > self.epiLen:
self.res()
return True
else:
self.count += 1
return False
def getReward(self):
if self.env.glasSum >= 2: grip = 1000.0
else: grip = 0.0
if self.env.tableSum > 0: self.tableFlag = -1.0
else: self.tableFlag = 0.0
self.dist[3] = 0.0
self.dist[8] = 0.0
dis = 100.0/((self.dist[:3] ** 2).sum()+0.1)
nig = 10.0/((self.dist[3:] ** 2).sum()+0.1)
if self.env.stepCounter == self.epiLen: print(("Grip:", grip, "Dis:", dis, "Nig:", nig, "Table:", self.tableFlag))
return (10 + grip + nig + dis + self.tableFlag) / float(self.epiLen) #-dis
#else:
# return (25.0 - dis) / float(self.epiLen) + (grip / nig - float(self.env.tableSum)) * 0.1 #+self.grepRew (10.0-dis)/float(self.epiLen)+
#Learn to grasp a plate at a fixed location
class CCRLPlateTask(CCRLTask):
def __init__(self, env):
CCRLTask.__init__(self, env)
self.dif = array([0.0, 0.2, 0.8])
self.epiLen = 1000 #suggested episodic length for normal Johnnie tasks
def isFinished(self):
#returns true if episode timesteps has reached episode length and resets the task
if self.count > self.epiLen:
self.res()
return True
else:
if self.count == 1: self.pertGlasPos(0)
self.count += 1
return False
def pertGlasPos(self, num):
if num == 0: self.env.pert = asarray([0.0, 0.0, 0.5])
def getReward(self):
if self.env.glasSum >= 2: grip = 1.0
else: grip = 0.0
if self.env.tableSum > 0: self.tableFlag = 10.0
#self.dist[4]=0.0
#self.dist[8]=0.0
dis = sqrt((self.dist[0:3] ** 2).sum())
if self.count == self.epiLen:
return 25.0 + grip - dis - self.tableFlag #/nig
else:
return (25.0 - dis) / float(self.epiLen) + (grip - float(self.env.tableSum)) * 0.1 #/nig -(1.0+self.oldAction[15])
#Learn to grasp a glas at 5 different locations
class CCRLGlasVarTask(CCRLGlasTask):
def __init__(self, env):
CCRLGlasTask.__init__(self, env)
        self.epiLen = 5000 #suggested episodic length for normal Johnnie tasks
def isFinished(self):
        #returns True once the episode length is reached, then resets the task
if self.count > self.epiLen:
self.res()
return True
else:
if self.count == 1:
self.pertGlasPos(0)
if self.count == self.epiLen / 5 + 1:
self.env.reset()
self.pertGlasPos(1)
if self.count == 2 * self.epiLen / 5 + 1:
self.env.reset()
self.pertGlasPos(2)
if self.count == 3 * self.epiLen / 5 + 1:
self.env.reset()
self.pertGlasPos(3)
if self.count == 4 * self.epiLen / 5 + 1:
self.env.reset()
self.pertGlasPos(4)
self.count += 1
return False
def pertGlasPos(self, num):
if num == 0: self.env.pert = asarray([1.0, 0.0, 0.5])
if num == 1: self.env.pert = asarray([-1.0, 0.0, 0.5])
if num == 2: self.env.pert = asarray([1.0, 0.0, 0.0])
if num == 3: self.env.pert = asarray([-1.0, 0.0, 0.0])
if num == 4: self.env.pert = asarray([0.0, 0.0, 0.25])
def getReward(self):
if self.env.glasSum >= 2: grip = 1.0
else: grip = 0.0
if self.env.tableSum > 0: self.tableFlag = 10.0
self.dist[3] = 0.0
self.dist[8] = 0.0
dis = sqrt((self.dist ** 2).sum())
nig = (abs(self.dist[4]) + 1.0)
if self.count == self.epiLen or self.count == self.epiLen / 5 or self.count == 2 * self.epiLen / 5 or self.count == 3 * self.epiLen / 5 or self.count == 4 * self.epiLen / 5:
return 25.0 + grip / nig - dis - self.tableFlag #/nig
else:
return (25.0 - dis) / float(self.epiLen) + (grip / nig - float(self.env.tableSum)) * 0.1 #/nig
#Learn to grasp a glass at random locations
class CCRLGlasVarRandTask(CCRLGlasVarTask):
def pertGlasPos(self, num):
self.env.pert = asarray([random.random()*2.0 - 1.0, 0.0, random.random()*0.5 + 0.5])
#Some experimental stuff
class CCRLPointTask(CCRLGlasVarTask):
def __init__(self, env):
CCRLGlasVarTask.__init__(self, env)
        self.epiLen = 1000 #suggested episodic length for normal Johnnie tasks
def isFinished(self):
        #returns True once the episode length is reached, then resets the task
if self.count > self.epiLen:
self.res()
return True
else:
if self.count == 1:
self.pertGlasPos(0)
self.count += 1
return False
def getObservation(self):
""" a filtered mapping to getSample of the underlying environment. """
sensors = self.env.getSensors()
sensSort = []
#Angle and angleVelocity
for i in range(32):
sensSort.append(sensors[i])
#Angles wanted (old action)
for i in self.oldAction:
sensSort.append(i)
#Hand position
for i in range(3):
sensSort.append((sensors[38 + i] + sensors[41 + i]) / 2)
        #Hand orientation (hack - make correct!)
sensSort.append((sensors[38] - sensors[41]) / 2 - sensors[35]) #pitch
sensSort.append((sensors[38 + 1] - sensors[41 + 1]) / 2 - sensors[35 + 1]) #yaw
sensSort.append((sensors[38 + 1] - sensors[41 + 1])) #roll
#Target position
for i in range(3):
sensSort.append(self.target[i])
#Target orientation
for i in range(3):
sensSort.append(0.0)
#Object type (start with random)
        sensSort.append(float(random.randint(-1, 1))) #random object type
#normalisation
if self.sensor_limits:
sensors = self.normalize(sensors)
sens = []
for i in range(32):
sens.append(sensors[i])
for i in range(29):
sens.append(sensSort[i + 32])
#calc dist to target
self.dist = array([(sens[54] - sens[48]), (sens[55] - sens[49]), (sens[56] - sens[50]), sens[51], sens[52], sens[53], sens[15]])
return sens
def pertGlasPos(self, num):
if num == 0: self.target = asarray([0.0, 0.0, 1.0])
self.env.pert = self.target.copy()
self.target = self.target.copy() + array([-6.5, 1.75, -10.5])
def getReward(self):
dis = sqrt((self.dist ** 2).sum())
return (25.0 - dis) / float(self.epiLen) - float(self.env.tableSum) * 0.1
class CCRLPointVarTask(CCRLPointTask):
def __init__(self, env):
CCRLPointTask.__init__(self, env)
        self.epiLen = 2000 #suggested episodic length for normal Johnnie tasks
def isFinished(self):
        #returns True once the episode length is reached, then resets the task
if self.count > self.epiLen:
self.res()
return True
else:
if self.count == 1:
self.pertGlasPos(0)
if self.count == self.epiLen / 2 + 1:
self.env.reset()
self.pertGlasPos(1)
self.count += 1
return False
def getObservation(self):
""" a filtered mapping to getSample of the underlying environment. """
sensors = self.env.getSensors()
sensSort = []
#Angle and angleVelocity
for i in range(32):
sensSort.append(sensors[i])
#Angles wanted (old action)
for i in self.oldAction:
sensSort.append(i)
#Hand position
for i in range(3):
sensSort.append((sensors[38 + i] + sensors[41 + i]) / 2)
        #Hand orientation (hack - make correct!)
sensSort.append((sensors[38] - sensors[41]) / 2 - sensors[35]) #pitch
sensSort.append((sensors[38 + 1] - sensors[41 + 1]) / 2 - sensors[35 + 1]) #yaw
sensSort.append((sensors[38 + 1] - sensors[41 + 1])) #roll
#Target position
for i in range(3):
sensSort.append(self.target[i])
#Target orientation
for i in range(3):
sensSort.append(0.0)
#Object type (start with random)
        sensSort.append(float(random.randint(-1, 1))) #random object type
#normalisation
if self.sensor_limits:
sensors = self.normalize(sensors)
sens = []
for i in range(32):
sens.append(sensors[i])
for i in range(29):
sens.append(sensSort[i + 32])
#calc dist to target
self.dist = array([(sens[54] - sens[48]) * 10.0, (sens[55] - sens[49]) * 10.0, (sens[56] - sens[50]) * 10.0, sens[51], sens[52], sens[53], 1.0 + sens[15]])
return sens
def pertGlasPos(self, num):
if num == 0: self.target = asarray([1.0, 0.0, 1.0])
if num == 1: self.target = asarray([-1.0, 0.0, 1.0])
if num == 2: self.target = asarray([1.0, 0.0, 0.0])
if num == 3: self.target = asarray([-1.0, 0.0, 0.0])
if num == 4: self.target = asarray([0.0, 0.0, 0.5])
self.env.pert = self.target.copy()
self.target = self.target.copy() + array([-6.5, 1.75, -10.5])
def getReward(self):
dis = sqrt((self.dist ** 2).sum())
subEpi = self.epiLen / 2
if self.count == self.epiLen or self.count == subEpi:
return (25.0 - dis) / 2.0
else:
return (25.0 - dis) / float(self.epiLen) - float(self.env.tableSum) * 0.1
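# A minimal usage sketch, assuming `env` is a compatible CCRL environment instance
# and that a random policy is acceptable for illustration; the 16-dimensional action
# vector matches what performAction() above expects.
def _run_ccrl_episode_sketch(env):
    from numpy.random import uniform  # local import keeps the sketch self-contained
    task = CCRLGlasTask(env)
    total_reward = 0.0
    while not task.isFinished():
        task.getObservation()              # updates task.dist used by performAction/getReward
        action = uniform(-1.0, 1.0, 16)    # 16 normalised joint-angle targets in [-1, 1]
        task.performAction(action)         # mapped to torques by the PID-like controller above
        total_reward += task.getReward()
    return total_reward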
|
bsd-3-clause
|
savoirfairelinux/odoo
|
addons/mrp_operations/mrp_operations.py
|
33
|
27326
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import time
from datetime import datetime
from openerp.tools.translate import _
#----------------------------------------------------------
# Work Centers
#----------------------------------------------------------
# capacity_hour : capacity per hour. default: 1.0.
# Eg: If 5 concurrent operations at one time: capacity = 5 (because 5 employees)
# unit_per_cycle : how many units are produced for one cycle
class stock_move(osv.osv):
_inherit = 'stock.move'
_columns = {
'move_dest_id_lines': fields.one2many('stock.move','move_dest_id', 'Children Moves')
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default.update({
'move_dest_id_lines': [],
})
return super(stock_move, self).copy(cr, uid, id, default, context)
class mrp_production_workcenter_line(osv.osv):
def _get_date_end(self, cr, uid, ids, field_name, arg, context=None):
""" Finds ending date.
@return: Dictionary of values.
"""
ops = self.browse(cr, uid, ids, context=context)
date_and_hours_by_cal = [(op.date_planned, op.hour, op.workcenter_id.calendar_id.id) for op in ops if op.date_planned]
intervals = self.pool.get('resource.calendar').interval_get_multi(cr, uid, date_and_hours_by_cal)
res = {}
for op in ops:
res[op.id] = False
if op.date_planned:
i = intervals.get((op.date_planned, op.hour, op.workcenter_id.calendar_id.id))
if i:
res[op.id] = i[-1][1].strftime('%Y-%m-%d %H:%M:%S')
else:
res[op.id] = op.date_planned
return res
def onchange_production_id(self, cr, uid, ids, production_id, context=None):
if not production_id:
return {}
production = self.pool.get('mrp.production').browse(cr, uid, production_id, context=None)
result = {
'product': production.product_id.id,
'qty': production.product_qty,
'uom': production.product_uom.id,
}
return {'value': result}
_inherit = 'mrp.production.workcenter.line'
_order = "sequence, date_planned"
_columns = {
'state': fields.selection([('draft','Draft'),('cancel','Cancelled'),('pause','Pending'),('startworking', 'In Progress'),('done','Finished')],'Status', readonly=True,
help="* When a work order is created it is set in 'Draft' status.\n" \
"* When user sets work order in start mode that time it will be set in 'In Progress' status.\n" \
"* When work order is in running mode, during that time if user wants to stop or to make changes in order then can set in 'Pending' status.\n" \
"* When the user cancels the work order it will be set in 'Canceled' status.\n" \
"* When order is completely processed that time it is set in 'Finished' status."),
'date_planned': fields.datetime('Scheduled Date', select=True),
'date_planned_end': fields.function(_get_date_end, string='End Date', type='datetime'),
'date_start': fields.datetime('Start Date'),
'date_finished': fields.datetime('End Date'),
'delay': fields.float('Working Hours',help="The elapsed time between operation start and stop in this Work Center",readonly=True),
'production_state':fields.related('production_id','state',
type='selection',
selection=[('draft','Draft'),('confirmed','Waiting Goods'),('ready','Ready to Produce'),('in_production','In Production'),('cancel','Canceled'),('done','Done')],
string='Production Status', readonly=True),
'product':fields.related('production_id','product_id',type='many2one',relation='product.product',string='Product',
readonly=True),
'qty':fields.related('production_id','product_qty',type='float',string='Qty',readonly=True, store=True),
'uom':fields.related('production_id','product_uom',type='many2one',relation='product.uom',string='Unit of Measure',readonly=True),
}
_defaults = {
'state': 'draft',
'delay': 0.0,
'production_state': 'draft'
}
def modify_production_order_state(self, cr, uid, ids, action):
""" Modifies production order state if work order state is changed.
@param action: Action to perform.
@return: Nothing
"""
prod_obj_pool = self.pool.get('mrp.production')
oper_obj = self.browse(cr, uid, ids)[0]
prod_obj = oper_obj.production_id
if action == 'start':
if prod_obj.state =='confirmed':
prod_obj_pool.force_production(cr, uid, [prod_obj.id])
prod_obj_pool.signal_button_produce(cr, uid, [prod_obj.id])
elif prod_obj.state =='ready':
prod_obj_pool.signal_button_produce(cr, uid, [prod_obj.id])
elif prod_obj.state =='in_production':
return
else:
raise osv.except_osv(_('Error!'),_('Manufacturing order cannot be started in state "%s"!') % (prod_obj.state,))
else:
oper_ids = self.search(cr,uid,[('production_id','=',prod_obj.id)])
obj = self.browse(cr,uid,oper_ids)
flag = True
for line in obj:
if line.state != 'done':
flag = False
if flag:
for production in prod_obj_pool.browse(cr, uid, [prod_obj.id], context= None):
if production.move_lines or production.move_created_ids:
prod_obj_pool.action_produce(cr,uid, production.id, production.product_qty, 'consume_produce', context = None)
prod_obj_pool.signal_button_produce_done(cr, uid, [oper_obj.production_id.id])
return
def write(self, cr, uid, ids, vals, context=None, update=True):
result = super(mrp_production_workcenter_line, self).write(cr, uid, ids, vals, context=context)
prod_obj = self.pool.get('mrp.production')
if vals.get('date_planned', False) and update:
for prod in self.browse(cr, uid, ids, context=context):
if prod.production_id.workcenter_lines:
dstart = min(vals['date_planned'], prod.production_id.workcenter_lines[0]['date_planned'])
prod_obj.write(cr, uid, [prod.production_id.id], {'date_start':dstart}, context=context, mini=False)
return result
def action_draft(self, cr, uid, ids, context=None):
""" Sets state to draft.
@return: True
"""
return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def action_start_working(self, cr, uid, ids, context=None):
""" Sets state to start working and writes starting date.
@return: True
"""
self.modify_production_order_state(cr, uid, ids, 'start')
self.write(cr, uid, ids, {'state':'startworking', 'date_start': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
return True
def action_done(self, cr, uid, ids, context=None):
""" Sets state to done, writes finish date and calculates delay.
@return: True
"""
delay = 0.0
date_now = time.strftime('%Y-%m-%d %H:%M:%S')
obj_line = self.browse(cr, uid, ids[0])
date_start = datetime.strptime(obj_line.date_start,'%Y-%m-%d %H:%M:%S')
date_finished = datetime.strptime(date_now,'%Y-%m-%d %H:%M:%S')
delay += (date_finished-date_start).days * 24
delay += (date_finished-date_start).seconds / float(60*60)
self.write(cr, uid, ids, {'state':'done', 'date_finished': date_now,'delay':delay}, context=context)
self.modify_production_order_state(cr,uid,ids,'done')
return True
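    # Example of the delay arithmetic above: with date_start = '2014-01-01 08:00:00'
    # and a finish time of '2014-01-02 09:30:00', the timedelta has days=1 and
    # seconds=5400, so delay = 1 * 24 + 5400 / 3600.0 = 25.5 working hours.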
def action_cancel(self, cr, uid, ids, context=None):
""" Sets state to cancel.
@return: True
"""
return self.write(cr, uid, ids, {'state':'cancel'}, context=context)
def action_pause(self, cr, uid, ids, context=None):
""" Sets state to pause.
@return: True
"""
return self.write(cr, uid, ids, {'state':'pause'}, context=context)
def action_resume(self, cr, uid, ids, context=None):
""" Sets state to startworking.
@return: True
"""
return self.write(cr, uid, ids, {'state':'startworking'}, context=context)
class mrp_production(osv.osv):
_inherit = 'mrp.production'
_columns = {
'allow_reorder': fields.boolean('Free Serialisation', help="Check this to be able to move independently all production orders, without moving dependent ones."),
}
def _production_date_end(self, cr, uid, ids, prop, unknow_none, context=None):
""" Calculates planned end date of production order.
@return: Dictionary of values
"""
result = {}
for prod in self.browse(cr, uid, ids, context=context):
result[prod.id] = prod.date_planned
for line in prod.workcenter_lines:
result[prod.id] = max(line.date_planned_end, result[prod.id])
return result
def action_production_end(self, cr, uid, ids):
""" Finishes work order if production order is done.
@return: Super method
"""
obj = self.browse(cr, uid, ids)[0]
workcenter_pool = self.pool.get('mrp.production.workcenter.line')
for workcenter_line in obj.workcenter_lines:
if workcenter_line.state == 'draft':
workcenter_pool.signal_button_start_working(cr, uid, [workcenter_line.id])
workcenter_pool.signal_button_done(cr, uid, [workcenter_line.id])
return super(mrp_production,self).action_production_end(cr, uid, ids)
def action_in_production(self, cr, uid, ids):
""" Changes state to In Production and writes starting date.
@return: True
"""
workcenter_pool = self.pool.get('mrp.production.workcenter.line')
for prod in self.browse(cr, uid, ids):
if prod.workcenter_lines:
workcenter_pool.signal_button_start_working(cr, uid, [prod.workcenter_lines[0].id])
return super(mrp_production,self).action_in_production(cr, uid, ids)
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels work order if production order is canceled.
@return: Super method
"""
workcenter_pool = self.pool.get('mrp.production.workcenter.line')
obj = self.browse(cr, uid, ids,context=context)[0]
for workcenter_line in obj.workcenter_lines:
workcenter_pool.signal_button_cancel(cr, uid, [workcenter_line.id])
return super(mrp_production,self).action_cancel(cr,uid,ids,context=context)
def _compute_planned_workcenter(self, cr, uid, ids, context=None, mini=False):
""" Computes planned and finished dates for work order.
@return: Calculated date
"""
dt_end = datetime.now()
if context is None:
context = {}
for po in self.browse(cr, uid, ids, context=context):
dt_end = datetime.strptime(po.date_planned, '%Y-%m-%d %H:%M:%S')
if not po.date_start:
self.write(cr, uid, [po.id], {
'date_start': po.date_planned
}, context=context, update=False)
old = None
for wci in range(len(po.workcenter_lines)):
wc = po.workcenter_lines[wci]
if (old is None) or (wc.sequence>old):
dt = dt_end
if context.get('__last_update'):
del context['__last_update']
if (wc.date_planned < dt.strftime('%Y-%m-%d %H:%M:%S')) or mini:
self.pool.get('mrp.production.workcenter.line').write(cr, uid, [wc.id], {
'date_planned': dt.strftime('%Y-%m-%d %H:%M:%S')
}, context=context, update=False)
i = self.pool.get('resource.calendar').interval_get(
cr,
uid,
#passing False makes resource_resource._schedule_hours run 1000 iterations doing nothing
wc.workcenter_id.calendar_id and wc.workcenter_id.calendar_id.id or None,
dt,
wc.hour or 0.0
)
if i:
dt_end = max(dt_end, i[-1][1])
else:
dt_end = datetime.strptime(wc.date_planned_end, '%Y-%m-%d %H:%M:%S')
old = wc.sequence or 0
super(mrp_production, self).write(cr, uid, [po.id], {
'date_finished': dt_end
})
return dt_end
def _move_pass(self, cr, uid, ids, context=None):
""" Calculates start date for stock moves finding interval from resource calendar.
@return: True
"""
for po in self.browse(cr, uid, ids, context=context):
if po.allow_reorder:
continue
todo = po.move_lines
dt = datetime.strptime(po.date_start,'%Y-%m-%d %H:%M:%S')
while todo:
l = todo.pop(0)
if l.state in ('done','cancel','draft'):
continue
todo += l.move_dest_id_lines
if l.production_id and (l.production_id.date_finished > dt):
if l.production_id.state not in ('done','cancel'):
for wc in l.production_id.workcenter_lines:
i = self.pool.get('resource.calendar').interval_min_get(
cr,
uid,
wc.workcenter_id.calendar_id.id or False,
dt, wc.hour or 0.0
)
dt = i[0][0]
if l.production_id.date_start > dt.strftime('%Y-%m-%d %H:%M:%S'):
self.write(cr, uid, [l.production_id.id], {'date_start':dt.strftime('%Y-%m-%d %H:%M:%S')}, mini=True)
return True
def _move_futur(self, cr, uid, ids, context=None):
""" Calculates start date for stock moves.
@return: True
"""
for po in self.browse(cr, uid, ids, context=context):
if po.allow_reorder:
continue
for line in po.move_created_ids:
l = line
while l.move_dest_id:
l = l.move_dest_id
if l.state in ('done','cancel','draft'):
break
if l.production_id.state in ('done','cancel'):
break
if l.production_id and (l.production_id.date_start < po.date_finished):
self.write(cr, uid, [l.production_id.id], {'date_start': po.date_finished})
break
return True
def write(self, cr, uid, ids, vals, context=None, update=True, mini=True):
direction = {}
if vals.get('date_start', False):
for po in self.browse(cr, uid, ids, context=context):
direction[po.id] = cmp(po.date_start, vals.get('date_start', False))
result = super(mrp_production, self).write(cr, uid, ids, vals, context=context)
if (vals.get('workcenter_lines', False) or vals.get('date_start', False)) and update:
self._compute_planned_workcenter(cr, uid, ids, context=context, mini=mini)
for d in direction:
if direction[d] == 1:
                # the production order has been moved to the past
self._move_pass(cr, uid, [d], context=context)
pass
elif direction[d] == -1:
self._move_futur(cr, uid, [d], context=context)
# the production order has been moved to the future
pass
return result
def action_compute(self, cr, uid, ids, properties=None, context=None):
""" Computes bills of material of a product and planned date of work order.
@param properties: List containing dictionaries of properties.
@return: No. of products.
"""
result = super(mrp_production, self).action_compute(cr, uid, ids, properties=properties, context=context)
self._compute_planned_workcenter(cr, uid, ids, context=context)
return result
class mrp_operations_operation_code(osv.osv):
_name="mrp_operations.operation.code"
_columns={
'name': fields.char('Operation Name',size=64, required=True),
'code': fields.char('Code', size=16, required=True),
'start_stop': fields.selection([('start','Start'),('pause','Pause'),('resume','Resume'),('cancel','Cancelled'),('done','Done')], 'Status', required=True),
}
class mrp_operations_operation(osv.osv):
_name="mrp_operations.operation"
def _order_date_search_production(self, cr, uid, ids, context=None):
""" Finds operations for a production order.
@return: List of ids
"""
operation_ids = self.pool.get('mrp_operations.operation').search(cr, uid, [('production_id','=',ids[0])], context=context)
return operation_ids
def _get_order_date(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates planned date for an operation.
@return: Dictionary of values
"""
res={}
operation_obj = self.browse(cr, uid, ids, context=context)
for operation in operation_obj:
res[operation.id] = operation.production_id.date_planned
return res
def calc_delay(self, cr, uid, vals):
""" Calculates delay of work order.
@return: Delay
"""
code_lst = []
time_lst = []
code_ids = self.pool.get('mrp_operations.operation.code').search(cr, uid, [('id','=',vals['code_id'])])
code = self.pool.get('mrp_operations.operation.code').browse(cr, uid, code_ids)[0]
oper_ids = self.search(cr,uid,[('production_id','=',vals['production_id']),('workcenter_id','=',vals['workcenter_id'])])
oper_objs = self.browse(cr,uid,oper_ids)
for oper in oper_objs:
code_lst.append(oper.code_id.start_stop)
time_lst.append(oper.date_start)
code_lst.append(code.start_stop)
time_lst.append(vals['date_start'])
diff = 0
for i in range(0,len(code_lst)):
if code_lst[i] == 'pause' or code_lst[i] == 'done' or code_lst[i] == 'cancel':
if not i: continue
if code_lst[i-1] not in ('resume','start'):
continue
a = datetime.strptime(time_lst[i-1],'%Y-%m-%d %H:%M:%S')
b = datetime.strptime(time_lst[i],'%Y-%m-%d %H:%M:%S')
diff += (b-a).days * 24
diff += (b-a).seconds / float(60*60)
return diff
def check_operation(self, cr, uid, vals):
""" Finds which operation is called ie. start, pause, done, cancel.
@param vals: Dictionary of values.
@return: True or False
"""
code_ids=self.pool.get('mrp_operations.operation.code').search(cr,uid,[('id','=',vals['code_id'])])
code=self.pool.get('mrp_operations.operation.code').browse(cr,uid,code_ids)[0]
code_lst = []
oper_ids=self.search(cr,uid,[('production_id','=',vals['production_id']),('workcenter_id','=',vals['workcenter_id'])])
oper_objs=self.browse(cr,uid,oper_ids)
if not oper_objs:
if code.start_stop!='start':
raise osv.except_osv(_('Sorry!'),_('Operation is not started yet!'))
return False
else:
for oper in oper_objs:
code_lst.append(oper.code_id.start_stop)
if code.start_stop=='start':
if 'start' in code_lst:
raise osv.except_osv(_('Sorry!'),_('Operation has already started! You can either Pause/Finish/Cancel the operation.'))
return False
if code.start_stop=='pause':
if code_lst[len(code_lst)-1]!='resume' and code_lst[len(code_lst)-1]!='start':
raise osv.except_osv(_('Error!'),_('In order to Pause the operation, it must be in the Start or Resume state!'))
return False
if code.start_stop=='resume':
if code_lst[len(code_lst)-1]!='pause':
raise osv.except_osv(_('Error!'),_('In order to Resume the operation, it must be in the Pause state!'))
return False
if code.start_stop=='done':
if code_lst[len(code_lst)-1]!='start' and code_lst[len(code_lst)-1]!='resume':
raise osv.except_osv(_('Sorry!'),_('In order to Finish the operation, it must be in the Start or Resume state!'))
return False
if 'cancel' in code_lst:
raise osv.except_osv(_('Sorry!'),_('Operation is Already Cancelled!'))
return False
if code.start_stop=='cancel':
if not 'start' in code_lst :
raise osv.except_osv(_('Error!'),_('No operation to cancel.'))
return False
if 'done' in code_lst:
raise osv.except_osv(_('Error!'),_('Operation is already finished!'))
return False
return True
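    # Example of the state machine enforced above: with an existing operation history
    # of ['start', 'pause'], a new 'resume' code is accepted, while a new 'done' code
    # is rejected ("In order to Finish the operation, it must be in the Start or
    # Resume state!") because the last recorded code is 'pause'.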
def write(self, cr, uid, ids, vals, context=None):
oper_objs = self.browse(cr, uid, ids, context=context)[0]
vals['production_id']=oper_objs.production_id.id
vals['workcenter_id']=oper_objs.workcenter_id.id
if 'code_id' in vals:
self.check_operation(cr, uid, vals)
if 'date_start' in vals:
vals['date_start']=vals['date_start']
vals['code_id']=oper_objs.code_id.id
delay=self.calc_delay(cr, uid, vals)
wc_op_id=self.pool.get('mrp.production.workcenter.line').search(cr,uid,[('workcenter_id','=',vals['workcenter_id']),('production_id','=',vals['production_id'])])
self.pool.get('mrp.production.workcenter.line').write(cr,uid,wc_op_id,{'delay':delay})
return super(mrp_operations_operation, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
workcenter_pool = self.pool.get('mrp.production.workcenter.line')
code_ids=self.pool.get('mrp_operations.operation.code').search(cr,uid,[('id','=',vals['code_id'])])
code=self.pool.get('mrp_operations.operation.code').browse(cr, uid, code_ids, context=context)[0]
wc_op_id=workcenter_pool.search(cr,uid,[('workcenter_id','=',vals['workcenter_id']),('production_id','=',vals['production_id'])])
if code.start_stop in ('start','done','pause','cancel','resume'):
if not wc_op_id:
production_obj=self.pool.get('mrp.production').browse(cr, uid, vals['production_id'], context=context)
wc_op_id.append(workcenter_pool.create(cr,uid,{'production_id':vals['production_id'],'name':production_obj.product_id.name,'workcenter_id':vals['workcenter_id']}))
if code.start_stop=='start':
workcenter_pool.action_start_working(cr,uid,wc_op_id)
workcenter_pool.signal_button_start_working(cr, uid, [wc_op_id[0]])
if code.start_stop=='done':
workcenter_pool.action_done(cr,uid,wc_op_id)
workcenter_pool.signal_button_done(cr, uid, [wc_op_id[0]])
self.pool.get('mrp.production').write(cr,uid,vals['production_id'],{'date_finished':datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
if code.start_stop=='pause':
workcenter_pool.action_pause(cr,uid,wc_op_id)
workcenter_pool.signal_button_pause(cr, uid, [wc_op_id[0]])
if code.start_stop=='resume':
workcenter_pool.action_resume(cr,uid,wc_op_id)
workcenter_pool.signal_button_resume(cr, uid, [wc_op_id[0]])
if code.start_stop=='cancel':
workcenter_pool.action_cancel(cr,uid,wc_op_id)
workcenter_pool.signal_button_cancel(cr, uid, [wc_op_id[0]])
if not self.check_operation(cr, uid, vals):
return
delay=self.calc_delay(cr, uid, vals)
line_vals = {}
line_vals['delay'] = delay
if vals.get('date_start',False):
if code.start_stop == 'done':
line_vals['date_finished'] = vals['date_start']
elif code.start_stop == 'start':
line_vals['date_start'] = vals['date_start']
self.pool.get('mrp.production.workcenter.line').write(cr, uid, wc_op_id, line_vals, context=context)
return super(mrp_operations_operation, self).create(cr, uid, vals, context=context)
def initialize_workflow_instance(self, cr, uid, context=None):
mrp_production_workcenter_line = self.pool.get('mrp.production.workcenter.line')
line_ids = mrp_production_workcenter_line.search(cr, uid, [], context=context)
mrp_production_workcenter_line.create_workflow(cr, uid, line_ids)
return True
_columns={
'production_id':fields.many2one('mrp.production','Production',required=True),
'workcenter_id':fields.many2one('mrp.workcenter','Work Center',required=True),
'code_id':fields.many2one('mrp_operations.operation.code','Code',required=True),
'date_start': fields.datetime('Start Date'),
'date_finished': fields.datetime('End Date'),
'order_date': fields.function(_get_order_date,string='Order Date',type='date',store={'mrp.production':(_order_date_search_production,['date_planned'], 10)}),
}
_defaults={
'date_start': lambda *a:datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
mkutlak/abrt
|
src/cli/tests/test_utils.py
|
3
|
3930
|
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
import logging
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
import problem
from abrtcli.config import ONELINE_FMT
from abrtcli.utils import (format_problems,
get_problem_field,
get_human_identifier,
remember_cwd,
sort_problems,
upcase_first_letter)
import clitests
class UtilsTestCase(clitests.TestCase):
'''
Tests for utility functions
'''
def test_format_problems(self):
'''
Test default problem formatting
'''
pl = problem.list()
res = format_problems(pl)
for prob in pl:
self.assertIn(prob.short_id, res)
field, value = get_human_identifier(prob)
self.assertIn(value, res)
self.assertIn(str(prob.count), res)
self.assertIn('Bugzilla', res)
self.assertIn('https://bugzilla.redhat.com/show_bug.cgi?id=1223349',
res)
self.assertIn('ABRT Server', res)
furl = 'https://retrace.fedoraproject.org/faf/reports/bthash/' \
'3505a6db8a6bd51a3d690f1553b'
self.assertIn(furl, res)
self.assertIn('Not reportable', res)
self.assertIn('Not reportable reason', res)
def test_format_problems_oneline(self):
'''
Test oneline problem formatting
'''
pl = problem.list()
res = format_problems(pl, fmt=ONELINE_FMT)
self.assertIn('bc60a5c 15x pavucontrol', res)
self.assertIn('ffe635c 1x /home/user/bin/user_app', res)
def test_format_problems_custom(self):
'''
Test custom problem formatting
'''
pl = problem.list()
fmt = '''#table|id,{short_id}|user id,{uid_username}| '''
res = format_problems(pl, fmt=fmt)
self.assertIn('User id', res)
self.assertIn('1234', res)
self.assertTrue(len(res.splitlines()) > len(pl))
def test_format_problems_custom_oneline(self):
'''
Test custom problem formatting
'''
pl = problem.list()
fmt = '''{short_id} {uid_username}'''
res = format_problems(pl, fmt=fmt)
self.assertIn('1234', res)
self.assertTrue(len(res.splitlines()) == len(pl))
def test_format_problems_empty_input(self):
'''
Test that format_problems handles None as problem list
'''
self.assertEqual(format_problems(None), '')
def test_get_problem_field(self):
p = problem.list()[0]
self.assertTrue(get_problem_field(p, 'count'), p.count)
self.assertEqual(get_problem_field(p, 'notavail'), None)
def test_get_human_identifier(self):
p0 = problem.list()[0]
p3 = problem.list()[3]
p4 = problem.list()[4]
p0_t, p0_v = get_human_identifier(p0)
p3_t, p3_v = get_human_identifier(p3)
p4_t, p4_v = get_human_identifier(p4)
self.assertEqual(p0_t, 'component')
self.assertEqual(p0_v, p0.component)
self.assertEqual(p3_t, 'executable')
self.assertEqual(p3_v, p3.executable)
self.assertEqual(p4_t, 'type')
self.assertEqual(p4_v, p4.type)
def test_sort_problems(self):
'''
        Test if problems are sorted by time
'''
pl = problem.list()
spl = sort_problems(pl)
self.assertTrue(spl[-1] == pl[2])
self.assertTrue(spl[0] == pl[3])
def test_upcase_first_letter(self):
self.assertEqual('LaLa', upcase_first_letter('laLa'))
def test_remember_cwd(self):
cwd = os.getcwd()
with remember_cwd():
os.chdir('/tmp')
self.assertEqual(os.getcwd(), cwd)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
unittest.main()
|
gpl-2.0
|
codrut3/tensorflow
|
tensorflow/contrib/keras/api/keras/preprocessing/__init__.py
|
132
|
1094
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.preprocessing import image
from tensorflow.contrib.keras.api.keras.preprocessing import sequence
from tensorflow.contrib.keras.api.keras.preprocessing import text
del absolute_import
del division
del print_function
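# A minimal usage sketch, assuming a TensorFlow build where this contrib path exists;
# `sequence.pad_sequences` left-pads ragged integer sequences to a fixed length.
#
#   from tensorflow.contrib.keras.api.keras.preprocessing import sequence
#   padded = sequence.pad_sequences([[1, 2], [3, 4, 5]], maxlen=4)
#   # -> [[0, 0, 1, 2],
#   #     [0, 3, 4, 5]]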
|
apache-2.0
|
supertom/ansible-modules-core
|
cloud/openstack/os_network.py
|
36
|
8352
|
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
DOCUMENTATION = '''
---
module: os_network
short_description: Creates/removes networks from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or remove network from OpenStack.
options:
name:
description:
- Name to be assigned to the network.
required: true
shared:
description:
- Whether this network is shared or not.
required: false
default: false
admin_state_up:
description:
- Whether the state should be marked as up or down.
required: false
default: true
external:
description:
- Whether this network is externally accessible.
required: false
default: false
state:
description:
- Indicate desired state of the resource.
choices: ['present', 'absent']
required: false
default: present
provider_physical_network:
description:
- The physical network where this network object is implemented.
required: false
default: None
version_added: "2.1"
provider_network_type:
description:
- The type of physical network that maps to this network resource.
choices: ['flat', 'vlan', 'vxlan', 'gre']
required: false
default: None
version_added: "2.1"
provider_segmentation_id:
description:
- An isolated segment on the physical network. The I(network_type)
attribute defines the segmentation model. For example, if the
I(network_type) value is vlan, this ID is a vlan identifier. If
the I(network_type) value is gre, this ID is a gre key.
required: false
default: None
version_added: "2.1"
project:
description:
- Project name or ID containing the network (name admin-only)
required: false
default: None
version_added: "2.1"
requirements: ["shade"]
'''
EXAMPLES = '''
# Create an externally accessible network named 'ext_network'.
- os_network:
cloud: mycloud
state: present
name: ext_network
external: true
'''
RETURN = '''
network:
description: Dictionary describing the network.
returned: On success when I(state) is 'present'.
type: dictionary
contains:
id:
description: Network ID.
type: string
sample: "4bb4f9a5-3bd2-4562-bf6a-d17a6341bb56"
name:
description: Network name.
type: string
sample: "ext_network"
shared:
description: Indicates whether this network is shared across all tenants.
type: bool
sample: false
status:
description: Network status.
type: string
sample: "ACTIVE"
mtu:
description: The MTU of a network resource.
type: integer
sample: 0
admin_state_up:
description: The administrative state of the network.
type: bool
sample: true
port_security_enabled:
description: The port security status
type: bool
sample: true
router:external:
description: Indicates whether this network is externally accessible.
type: bool
sample: true
tenant_id:
description: The tenant ID.
type: string
sample: "06820f94b9f54b119636be2728d216fc"
subnets:
description: The associated subnets.
type: list
sample: []
"provider:physical_network":
description: The physical network where this network object is implemented.
type: string
sample: my_vlan_net
"provider:network_type":
description: The type of physical network that maps to this network resource.
type: string
sample: vlan
"provider:segmentation_id":
description: An isolated segment on the physical network.
type: string
sample: 101
'''
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
shared=dict(default=False, type='bool'),
admin_state_up=dict(default=True, type='bool'),
external=dict(default=False, type='bool'),
provider_physical_network=dict(required=False),
provider_network_type=dict(required=False, default=None,
choices=['flat', 'vlan', 'vxlan', 'gre']),
provider_segmentation_id=dict(required=False),
state=dict(default='present', choices=['absent', 'present']),
project=dict(default=None)
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if (module.params['project'] and
StrictVersion(shade.__version__) < StrictVersion('1.6.0')):
module.fail_json(msg="To utilize project, the installed version of"
"the shade library MUST be >=1.6.0")
state = module.params['state']
name = module.params['name']
shared = module.params['shared']
admin_state_up = module.params['admin_state_up']
external = module.params['external']
provider_physical_network = module.params['provider_physical_network']
provider_network_type = module.params['provider_network_type']
provider_segmentation_id = module.params['provider_segmentation_id']
project = module.params.pop('project')
try:
cloud = shade.openstack_cloud(**module.params)
if project is not None:
proj = cloud.get_project(project)
if proj is None:
module.fail_json(msg='Project %s could not be found' % project)
project_id = proj['id']
filters = {'tenant_id': project_id}
else:
project_id = None
filters = None
net = cloud.get_network(name, filters=filters)
if state == 'present':
if not net:
provider = {}
if provider_physical_network:
provider['physical_network'] = provider_physical_network
if provider_network_type:
provider['network_type'] = provider_network_type
if provider_segmentation_id:
provider['segmentation_id'] = provider_segmentation_id
if provider and StrictVersion(shade.__version__) < StrictVersion('1.5.0'):
module.fail_json(msg="Shade >= 1.5.0 required to use provider options")
net = cloud.create_network(name, shared, admin_state_up,
external, provider, project_id)
changed = True
else:
changed = False
module.exit_json(changed=changed, network=net, id=net['id'])
elif state == 'absent':
if not net:
module.exit_json(changed=False)
else:
cloud.delete_network(name)
module.exit_json(changed=True)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == "__main__":
main()
|
gpl-3.0
|
luke-segars/regis
|
face/management/commands/parseall.py
|
2
|
3665
|
import face.models.models as regis
import face.offline.QuestionParser as qp
import face.offline.ParserTools.ParserTools as ParserTools
import json, datetime
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = 'none'
    help = 'Parses all of the questions that haven\'t been parsed for all users.'
# The number of unassigned users that should be ready at any point.
SETS_AVAILABLE = 10
def handle(self, *args, **options):
parser = qp.QuestionParser()
# Get all live templates. Templates can be made live in the database.
all_templates = regis.QuestionTemplate.objects.filter(live=True)
all_tids = [t.id for t in all_templates]
users = regis.RegisUser.objects.all()
avail_qsets = regis.QuestionSet.objects.filter(reserved_by=None)
print '%d users are registered in the system.' % len(users)
print '%d question sets are available of the requested %d' % (len(avail_qsets), self.SETS_AVAILABLE)
print ' Processing %d question templates for each set.' % len(all_templates)
records_added = 0
# Add new question sets
for i in xrange(self.SETS_AVAILABLE - len(avail_qsets)):
qset = regis.QuestionSet(reserved_by=None)
qset.save()
all_qsets = regis.QuestionSet.objects.all()
# Process all question sets and add missing questions.
for qset in all_qsets:
questions = qset.questions.exclude(status='retired')
target_tids = list(set(all_tids) - set([q.template.id for q in questions]))
parser.qset = qset
# The default position of this question will be LAST until the
# shuffler does its work.
if len(questions) > 0:
next_order = max([rq.order for rq in questions]) + 1
else:
next_order = 0
for t in target_tids:
template = regis.QuestionTemplate.objects.get(id=t)
parser.template = template
try:
text, values = parser.parse(template.text)
except Exception as e:
print '[ERROR] Error parsing template #%d' % template.id
print e
continue
# Save the information as a processed question. The solver processor
# will pick it up once it's been inserted.
try:
q = regis.Question(template=template, user=qset.reserved_by, text=text, variables=json.dumps(values), status='pending', order=next_order)
q.save()
                # If we get this exception then we've got some cruft that needs to be cleared away.
# TODO: Catch exception for when qset.reserved_by doesn't exist. Delete the question set and all corresponding questions in this case.
except regis.RegisUser.DoesNotExist:
pass
# Delete the questions owned by the user.
# Delete the answers.
# Delete the question set.
# Add this question to the question set.
qset.questions.add(q)
records_added += 1
# Increment the value of 'order' that will be stored on the next record.
next_order += 1
# Save the question set.
qset.save()
print '%d records added.' % records_added
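# Typical invocation (assuming this command ships inside an installed and configured
# Django project): run `python manage.py parseall` from the project root, or call it
# programmatically with django.core.management.call_command('parseall').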
|
gpl-2.0
|
CubicERP/odoo
|
addons/hr_gamification/models/gamification.py
|
388
|
4836
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
class hr_gamification_badge_user(osv.Model):
"""User having received a badge"""
_name = 'gamification.badge.user'
_inherit = ['gamification.badge.user']
_columns = {
'employee_id': fields.many2one("hr.employee", string='Employee'),
}
def _check_employee_related_user(self, cr, uid, ids, context=None):
for badge_user in self.browse(cr, uid, ids, context=context):
if badge_user.user_id and badge_user.employee_id:
if badge_user.employee_id not in badge_user.user_id.employee_ids:
return False
return True
_constraints = [
(_check_employee_related_user, "The selected employee does not correspond to the selected user.", ['employee_id']),
]
class gamification_badge(osv.Model):
_name = 'gamification.badge'
_inherit = ['gamification.badge']
def get_granted_employees(self, cr, uid, badge_ids, context=None):
if context is None:
context = {}
employee_ids = []
badge_user_ids = self.pool.get('gamification.badge.user').search(cr, uid, [('badge_id', 'in', badge_ids), ('employee_id', '!=', False)], context=context)
for badge_user in self.pool.get('gamification.badge.user').browse(cr, uid, badge_user_ids, context):
employee_ids.append(badge_user.employee_id.id)
# remove duplicates
employee_ids = list(set(employee_ids))
return {
'type': 'ir.actions.act_window',
'name': 'Granted Employees',
'view_mode': 'kanban,tree,form',
'view_type': 'form',
'res_model': 'hr.employee',
'domain': [('id', 'in', employee_ids)]
}
class hr_employee(osv.osv):
_name = "hr.employee"
_inherit = "hr.employee"
def _get_employee_goals(self, cr, uid, ids, field_name, arg, context=None):
"""Return the list of goals assigned to the employee"""
res = {}
for employee in self.browse(cr, uid, ids, context=context):
res[employee.id] = self.pool.get('gamification.goal').search(cr,uid,[('user_id', '=', employee.user_id.id), ('challenge_id.category', '=', 'hr')], context=context)
return res
def _get_employee_badges(self, cr, uid, ids, field_name, arg, context=None):
"""Return the list of badge_users assigned to the employee"""
res = {}
for employee in self.browse(cr, uid, ids, context=context):
res[employee.id] = self.pool.get('gamification.badge.user').search(cr, uid, [
'|',
('employee_id', '=', employee.id),
'&',
('employee_id', '=', False),
('user_id', '=', employee.user_id.id)
], context=context)
return res
def _has_badges(self, cr, uid, ids, field_name, arg, context=None):
"""Return the list of badge_users assigned to the employee"""
res = {}
for employee in self.browse(cr, uid, ids, context=context):
employee_badge_ids = self.pool.get('gamification.badge.user').search(cr, uid, [
'|',
('employee_id', '=', employee.id),
'&',
('employee_id', '=', False),
('user_id', '=', employee.user_id.id)
], context=context)
res[employee.id] = len(employee_badge_ids) > 0
return res
_columns = {
'goal_ids': fields.function(_get_employee_goals, type="one2many", obj='gamification.goal', string="Employee HR Goals"),
'badge_ids': fields.function(_get_employee_badges, type="one2many", obj='gamification.badge.user', string="Employee Badges"),
'has_badges': fields.function(_has_badges, type="boolean", string="Has Badges"),
}
|
agpl-3.0
|
iXioN/django-all-access
|
allaccess/tests/test_views.py
|
2
|
10353
|
"Redirect and callback view tests."
from __future__ import unicode_literals
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import override_settings, RequestFactory
from .base import AllAccessTestCase, AccountAccess, get_user_model, skipIfCustomUser
from ..compat import urlparse, parse_qs, patch, Mock
from ..views import OAuthRedirect, OAuthCallback
@override_settings(ROOT_URLCONF='allaccess.tests.urls', LOGIN_URL='/login/', LOGIN_REDIRECT_URL='/')
class BaseViewTestCase(AllAccessTestCase):
"Common view test functionality."
url_name = None
def setUp(self):
self.consumer_key = self.get_random_string()
self.consumer_secret = self.get_random_string()
self.provider = self.create_provider(
consumer_key=self.consumer_key, consumer_secret=self.consumer_secret)
self.url = reverse(self.url_name, kwargs={'provider': self.provider.name})
class OAuthRedirectTestCase(BaseViewTestCase):
"Initial redirect for user to sign log in with OAuth 1.0 provider."
url_name = 'allaccess-login'
def test_oauth_1_redirect_url(self):
"Redirect url for OAuth 1.0 provider."
self.provider.request_token_url = self.get_random_url()
self.provider.save()
with patch('allaccess.clients.OAuthClient.get_request_token') as request_token:
request_token.return_value = 'oauth_token=token&oauth_token_secret=secret'
response = self.client.get(self.url)
url = response['Location']
scheme, netloc, path, params, query, fragment = urlparse(url)
self.assertEqual('%s://%s%s' % (scheme, netloc, path), self.provider.authorization_url)
def test_oauth_1_redirect_parameters(self):
"Redirect parameters for OAuth 1.0 provider."
self.provider.request_token_url = self.get_random_url()
self.provider.save()
with patch('allaccess.clients.OAuthClient.get_request_token') as request_token:
request_token.return_value = 'oauth_token=token&oauth_token_secret=secret'
response = self.client.get(self.url)
url = response['Location']
scheme, netloc, path, params, query, fragment = urlparse(url)
query = parse_qs(query)
self.assertEqual(query['oauth_token'][0], 'token')
callback = reverse('allaccess-callback', kwargs={'provider': self.provider.name})
self.assertEqual(query['oauth_callback'][0], 'http://testserver' + callback)
def test_oauth_2_redirect_url(self):
"Redirect url for OAuth 2.0 provider."
self.provider.request_token_url = ''
self.provider.save()
response = self.client.get(self.url)
url = response['Location']
scheme, netloc, path, params, query, fragment = urlparse(url)
self.assertEqual('%s://%s%s' % (scheme, netloc, path), self.provider.authorization_url)
def test_oauth_2_redirect_parameters(self):
"Redirect parameters for OAuth 2.0 provider."
self.provider.request_token_url = ''
self.provider.save()
response = self.client.get(self.url)
url = response['Location']
scheme, netloc, path, params, query, fragment = urlparse(url)
query = parse_qs(query)
callback = reverse('allaccess-callback', kwargs={'provider': self.provider.name})
self.assertEqual(query['redirect_uri'][0], 'http://testserver' + callback)
self.assertEqual(query['response_type'][0], 'code')
self.assertEqual(query['client_id'][0], self.provider.consumer_key)
# State should be stored in the session and passed to the provider
key = 'allaccess-{0}-request-state'.format(self.provider.name)
state = self.client.session[key]
self.assertEqual(query['state'][0], state)
def test_unknown_provider(self):
"Return a 404 if unknown provider name is given."
self.provider.delete()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_disabled_provider(self):
"Return a 404 if provider does not have key/secret set."
self.provider.consumer_key = None
self.provider.consumer_secret = None
self.provider.save()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_redirect_params(self):
"Set additional redirect parameters in as_view."
view = OAuthRedirect.as_view(params={'scope': 'email'})
self.provider.request_token_url = ''
self.provider.save()
request = RequestFactory().get(self.url)
request.session = {}
response = view(request, provider=self.provider.name)
url = response['Location']
scheme, netloc, path, params, query, fragment = urlparse(url)
self.assertEqual('%s://%s%s' % (scheme, netloc, path), self.provider.authorization_url)
query = parse_qs(query)
self.assertEqual(query['scope'][0], 'email')
class OAuthCallbackTestCase(BaseViewTestCase):
"Callback after user has authenticated with OAuth provider."
url_name = 'allaccess-callback'
def setUp(self):
super(OAuthCallbackTestCase, self).setUp()
# Patch OAuth client
self.patched_get_client = patch('allaccess.views.get_client')
self.get_client = self.patched_get_client.start()
self.mock_client = Mock()
self.get_client.return_value = self.mock_client
def tearDown(self):
super(OAuthCallbackTestCase, self).tearDown()
self.patched_get_client.stop()
def test_unknown_provider(self):
"Return a 404 if unknown provider name is given."
self.provider.delete()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_disabled_provider(self):
"Return a 404 if provider does not have key/secret set."
self.provider.consumer_key = None
self.provider.consumer_secret = None
self.provider.save()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_failed_access_token(self):
"Handle bad response when fetching access token."
self.mock_client.get_access_token.return_value = None
response = self.client.get(self.url)
# Errors redirect to LOGIN_URL by default
self.assertRedirects(response, settings.LOGIN_URL)
def test_failed_user_profile(self):
"Handle bad response when fetching user info."
self.mock_client.get_access_token.return_value = 'token'
self.mock_client.get_profile_info.return_value = None
response = self.client.get(self.url)
# Errors redirect to LOGIN_URL by default
self.assertRedirects(response, settings.LOGIN_URL)
def test_failed_user_id(self):
"Handle bad response when parsing user id from info."
self.mock_client.get_access_token.return_value = 'token'
self.mock_client.get_profile_info.return_value = {}
response = self.client.get(self.url)
# Errors redirect to LOGIN_URL by default
self.assertRedirects(response, settings.LOGIN_URL)
def _test_create_new_user(self):
"Base test case for both swapped and non-swapped user."
User = get_user_model()
User.objects.all().delete()
self.mock_client.get_access_token.return_value = 'token'
self.mock_client.get_profile_info.return_value = {'id': 100}
self.client.get(self.url)
access = AccountAccess.objects.get(
provider=self.provider, identifier=100
)
self.assertEqual(access.access_token, 'token')
self.assertTrue(access.user, "User should be created.")
self.assertFalse(access.user.has_usable_password(), "User created without password.")
@skipIfCustomUser
def test_create_new_user(self):
"Create a new user and associate them with the provider."
self._test_create_new_user()
def _test_existing_user(self):
"Base test case for both swapped and non-swapped user."
User = get_user_model()
user = self.create_user()
access = self.create_access(user=user, provider=self.provider)
user_count = User.objects.all().count()
access_count = AccountAccess.objects.all().count()
self.mock_client.get_access_token.return_value = 'token'
self.mock_client.get_profile_info.return_value = {'id': access.identifier}
self.client.get(self.url)
self.assertEqual(User.objects.all().count(), user_count, "No users created.")
self.assertEqual(AccountAccess.objects.all().count(), access_count, "No access records created.")
# Refresh from DB
access = AccountAccess.objects.get(pk=access.pk)
self.assertEqual(access.access_token, 'token')
@skipIfCustomUser
def test_existing_user(self):
"Authenticate existing user and update their access token."
self._test_existing_user()
def _test_authentication_redirect(self):
"Base test case for both swapped and non-swapped user."
self.mock_client.get_access_token.return_value = 'token'
self.mock_client.get_profile_info.return_value = {'id': 100}
response = self.client.get(self.url)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
@skipIfCustomUser
def test_authentication_redirect(self):
"Post-authentication redirect to LOGIN_REDIRECT_URL."
self._test_authentication_redirect()
def test_customized_provider_id(self):
"Change how to find the provider id in as_view."
view = OAuthCallback(provider_id='account_id')
result = view.get_user_id(self.provider, {'account_id': '123'})
self.assertEqual(result, '123')
result = view.get_user_id(self.provider, {'id': '123'})
self.assertIsNone(result)
def test_nested_provider_id(self):
"Allow easy access to nested provider ids."
view = OAuthCallback(provider_id='user.account_id')
result = view.get_user_id(self.provider, {'user': {'account_id': '123'}})
self.assertEqual(result, '123')
result = view.get_user_id(self.provider, {'id': '123'})
self.assertIsNone(result)
|
bsd-2-clause
|
apollo13/ansible
|
lib/ansible/utils/color.py
|
34
|
4647
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
from ansible import constants as C
ANSIBLE_COLOR = True
if C.ANSIBLE_NOCOLOR:
ANSIBLE_COLOR = False
elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
ANSIBLE_COLOR = False
else:
try:
import curses
curses.setupterm()
if curses.tigetnum('colors') < 0:
ANSIBLE_COLOR = False
except ImportError:
# curses library was not found
pass
except curses.error:
# curses returns an error (e.g. could not find terminal)
ANSIBLE_COLOR = False
if C.ANSIBLE_FORCE_COLOR:
ANSIBLE_COLOR = True
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
# wrapper that makes colored terminal text easier to use (e.g. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
#
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
codeCodes = {
'black': u'0;30', 'bright gray': u'0;37',
'blue': u'0;34', 'white': u'1;37',
'green': u'0;32', 'bright blue': u'1;34',
'cyan': u'0;36', 'bright green': u'1;32',
'red': u'0;31', 'bright cyan': u'1;36',
'purple': u'0;35', 'bright red': u'1;31',
'yellow': u'0;33', 'bright purple': u'1;35',
'dark gray': u'1;30', 'bright yellow': u'1;33',
'magenta': u'0;35', 'bright magenta': u'1;35',
'normal': u'0',
}
def parsecolor(color):
"""SGR parameter string for the specified color name."""
matches = re.match(r"color(?P<color>[0-9]+)"
r"|(?P<rgb>rgb(?P<red>[0-5])(?P<green>[0-5])(?P<blue>[0-5]))"
r"|gray(?P<gray>[0-9]+)", color)
if not matches:
return codeCodes[color]
if matches.group('color'):
return u'38;5;%d' % int(matches.group('color'))
if matches.group('rgb'):
return u'38;5;%d' % (16 + 36 * int(matches.group('red')) +
6 * int(matches.group('green')) +
int(matches.group('blue')))
if matches.group('gray'):
return u'38;5;%d' % (232 + int(matches.group('gray')))
def stringc(text, color, wrap_nonvisible_chars=False):
"""String in color."""
if ANSIBLE_COLOR:
color_code = parsecolor(color)
fmt = u"\033[%sm%s\033[0m"
if wrap_nonvisible_chars:
# This option is provided for use in cases when the
# formatting of a command line prompt is needed, such as
# `ansible-console`. As said in `readline` sources:
# readline/display.c:321
# /* Current implementation:
# \001 (^A) start non-visible characters
# \002 (^B) end non-visible characters
# all characters except \001 and \002 (following a \001) are copied to
# the returned string; all characters except those between \001 and
# \002 are assumed to be `visible'. */
fmt = u"\001\033[%sm\002%s\001\033[0m\002"
return u"\n".join([fmt % (color_code, t) for t in text.split(u'\n')])
else:
return text
def colorize(lead, num, color):
""" Print 'lead' = 'num' in 'color' """
s = u"%s=%-4s" % (lead, str(num))
if num != 0 and ANSIBLE_COLOR and color is not None:
s = stringc(s, color)
return s
def hostcolor(host, stats, color=True):
if ANSIBLE_COLOR and color:
if stats['failures'] != 0 or stats['unreachable'] != 0:
return u"%-37s" % stringc(host, C.COLOR_ERROR)
elif stats['changed'] != 0:
return u"%-37s" % stringc(host, C.COLOR_CHANGED)
else:
return u"%-37s" % stringc(host, C.COLOR_OK)
return u"%-26s" % host
|
gpl-3.0
|
ARTFL-Project/PhiloLogic4
|
python/philologic/TagCensus.py
|
3
|
4469
|
from philologic import shlaxtree as st
import sys
class TagCensus:
def __init__(self, debug=False, text_target=None):
self.tags = {}
self.debug = debug
self.text_target = text_target
def parse(self, text):
self.tags = {}
parser = st.ShlaxIngestor(target=self)
parser.feed(text)
self.close()
def __getitem__(self, key):
return self.tags[key]
def __setitem__(self, key, value):
self.tags[key] = value
def feed(self, *event):
(kind, content, offset, name, attributes) = event
if kind == "start":
if content[-2:] == "/>":
kind = "empty"
elif content[-1] == ">":
pass # a normal start tag
else:
kind = "malformed"
elif kind == "end":
if content:
if content[-1] != ">":
kind = "malformed"
else:
pass # a normal end tag
else:
return # a hypothetical end tag to balance an empty tag
elif kind == "text" and self.text_target:
self.text_target.feed(content)
return
else:
return
if self.debug:
print(kind, name, content, offset, file=sys.stderr)
if name not in self.tags:
self.tags[name] = {"start": 0, "end": 0, "empty": 0, "malformed": 0}
self.tags[name][kind] += 1
def __iadd__(self, other):
for tag in list(other.tags.keys()):
if tag not in self.tags:
self[tag] = {"start": 0, "end": 0, "empty": 0, "malformed": 0}
self[tag]["start"] += other.tags[tag]["start"]
self[tag]["end"] += other.tags[tag]["end"]
self[tag]["empty"] += other.tags[tag]["empty"]
self[tag]["malformed"] += other.tags[tag]["malformed"]
return self
def __sub__(self, other):
res = {}
for tag in list(self.tags.keys()):
if tag not in res:
res[tag] = {"start": 0, "end": 0, "empty": 0, "malformed": 0}
res[tag]["start"] += self.tags[tag]["start"]
res[tag]["end"] += self.tags[tag]["end"]
res[tag]["empty"] += self.tags[tag]["empty"]
res[tag]["malformed"] += self.tags[tag]["malformed"]
for tag in list(other.tags.keys()):
if tag not in res:
res[tag] = {"start": 0, "end": 0, "empty": 0, "malformed": 0}
res[tag]["start"] -= other.tags[tag]["start"]
res[tag]["end"] -= other.tags[tag]["end"]
res[tag]["empty"] -= other.tags[tag]["empty"]
res[tag]["malformed"] -= other.tags[tag]["malformed"]
for tag in list(res.keys()):
if res[tag] == {"start": 0, "end": 0, "empty": 0, "malformed": 0}:
del res[tag]
return res
def __str__(self):
longest = max(len(k) for k in list(self.tags.keys())) + 4 # 3 possible flags + space
res = " tag%s\tstart\tend\tempty\tmalformed\n" % (" " * (longest - len("tag")))
for tag in sorted(self.tags.keys()):
status = ""
if self[tag]["start"] != self[tag]["end"]:
status += "*"
else:
status += " "
if (self[tag]["end"] != 0) and (self[tag]["empty"] != 0):
status += "E"
else:
status += " "
if self[tag]["malformed"]:
status += "X"
else:
status += " "
padding = " " * (longest - (len(tag) + len(status)))
res += "%s %s%s\t%d\t%d\t%d\t%d\n" % (
status,
tag,
padding,
self[tag]["start"],
self[tag]["end"],
self[tag]["empty"],
self[tag]["malformed"],
)
return res
def close(self):
if self.text_target:
self.text_target.close()
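# Illustrative note (added comment, not part of the original source): str(census)
# renders one row per tag with its start/end/empty/malformed counts; the three
# leading status flags are '*' (start and end counts differ), 'E' (the tag occurs
# both as an end tag and as an empty tag) and 'X' (malformed occurrences were seen).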
if __name__ == "__main__":
file_count = 0
total = None
for fn in sys.argv[1:]:
file_count += 1
census = TagCensus()
census.parse(open(fn).read())
print(fn)
print(census)
if total:
total += census
else:
total = census
if file_count > 1:
print("TOTAL: %d FILES" % (file_count))
print(total)
|
gpl-3.0
|
kemalakyol48/python-for-android
|
python-modules/twisted/twisted/python/text.py
|
49
|
6637
|
# -*- test-case-name: twisted.test.test_text -*-
#
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Miscellany of text-munging functions.
"""
import string, types
from twisted.python import deprecate, versions
def stringyString(object, indentation=''):
"""
Expansive string formatting for sequence types.
list.__str__ and dict.__str__ use repr() to display their
elements. This function also turns these sequence types
into strings, but uses str() on their elements instead.
    Sequence elements are also displayed on separate lines,
and nested sequences have nested indentation.
"""
braces = ''
sl = []
if type(object) is types.DictType:
braces = '{}'
for key, value in object.items():
value = stringyString(value, indentation + ' ')
if isMultiline(value):
if endsInNewline(value):
value = value[:-len('\n')]
sl.append("%s %s:\n%s" % (indentation, key, value))
else:
# Oops. Will have to move that indentation.
sl.append("%s %s: %s" % (indentation, key,
value[len(indentation) + 3:]))
elif type(object) in (types.TupleType, types.ListType):
if type(object) is types.TupleType:
braces = '()'
else:
braces = '[]'
for element in object:
element = stringyString(element, indentation + ' ')
sl.append(string.rstrip(element) + ',')
else:
sl[:] = map(lambda s, i=indentation: i+s,
string.split(str(object),'\n'))
if not sl:
sl.append(indentation)
if braces:
sl[0] = indentation + braces[0] + sl[0][len(indentation) + 1:]
sl[-1] = sl[-1] + braces[-1]
s = string.join(sl, "\n")
if isMultiline(s) and not endsInNewline(s):
s = s + '\n'
return s
def isMultiline(s):
"""Returns True if this string has a newline in it."""
return (string.find(s, '\n') != -1)
def endsInNewline(s):
"""Returns True if this string ends in a newline."""
return (s[-len('\n'):] == '\n')
deprecate.deprecatedModuleAttribute(
versions.Version("Twisted", 10, 2, 0),
"Please use inspect.getdoc instead.",
__name__, "docstringLStrip")
def docstringLStrip(docstring):
"""
Gets rid of unsightly lefthand docstring whitespace residue.
You'd think someone would have done this already, but apparently
not in 1.5.2.
BUT since we're all using Python 2.1 now, use L{inspect.getdoc}
instead. I{This function should go away soon.}
"""
if not docstring:
return docstring
docstring = string.replace(docstring, '\t', ' ' * 8)
lines = string.split(docstring,'\n')
leading = 0
for l in xrange(1,len(lines)):
line = lines[l]
if string.strip(line):
while 1:
if line[leading] == ' ':
leading = leading + 1
else:
break
if leading:
break
outlines = lines[0:1]
for l in xrange(1,len(lines)):
outlines.append(lines[l][leading:])
return string.join(outlines, '\n')
def greedyWrap(inString, width=80):
"""Given a string and a column width, return a list of lines.
    Caveat: I use a stupid greedy word-wrapping
    algorithm. I won't put two spaces at the end
    of a sentence. I don't do full justification.
    And no, I've never even *heard* of hyphenation.
"""
outLines = []
#eww, evil hacks to allow paragraphs delimited by two \ns :(
if inString.find('\n\n') >= 0:
paragraphs = string.split(inString, '\n\n')
for para in paragraphs:
outLines.extend(greedyWrap(para) + [''])
return outLines
inWords = string.split(inString)
column = 0
ptr_line = 0
while inWords:
column = column + len(inWords[ptr_line])
ptr_line = ptr_line + 1
if (column > width):
if ptr_line == 1:
# This single word is too long, it will be the whole line.
pass
else:
# We've gone too far, stop the line one word back.
ptr_line = ptr_line - 1
(l, inWords) = (inWords[0:ptr_line], inWords[ptr_line:])
outLines.append(string.join(l,' '))
ptr_line = 0
column = 0
elif not (len(inWords) > ptr_line):
# Clean up the last bit.
outLines.append(string.join(inWords, ' '))
del inWords[:]
else:
# Space
column = column + 1
# next word
return outLines
wordWrap = greedyWrap
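# Illustrative sketch (added example, not part of the original source): greedyWrap
# fills each line greedily without exceeding `width`, unless a single word is by
# itself longer than `width`. For example:
#
#     >>> greedyWrap("the quick brown fox jumps over the lazy dog", width=16)
#     ['the quick brown', 'fox jumps over', 'the lazy dog']
#
# Paragraphs separated by a blank line are wrapped independently, with an empty
# string appended after each paragraph in the returned list.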
def removeLeadingBlanks(lines):
ret = []
for line in lines:
if ret or line.strip():
ret.append(line)
return ret
def removeLeadingTrailingBlanks(s):
lines = removeLeadingBlanks(s.split('\n'))
lines.reverse()
lines = removeLeadingBlanks(lines)
lines.reverse()
return '\n'.join(lines)+'\n'
def splitQuoted(s):
"""Like string.split, but don't break substrings inside quotes.
>>> splitQuoted('the \"hairy monkey\" likes pie')
['the', 'hairy monkey', 'likes', 'pie']
Another one of those \"someone must have a better solution for
this\" things. This implementation is a VERY DUMB hack done too
quickly.
"""
out = []
quot = None
phrase = None
for word in s.split():
if phrase is None:
if word and (word[0] in ("\"", "'")):
quot = word[0]
word = word[1:]
phrase = []
if phrase is None:
out.append(word)
else:
if word and (word[-1] == quot):
word = word[:-1]
phrase.append(word)
out.append(" ".join(phrase))
phrase = None
else:
phrase.append(word)
return out
def strFile(p, f, caseSensitive=True):
"""Find whether string p occurs in a read()able object f
@rtype: C{bool}
"""
buf = ""
buf_len = max(len(p), 2**2**2**2)
if not caseSensitive:
p = p.lower()
while 1:
r = f.read(buf_len-len(p))
if not caseSensitive:
r = r.lower()
bytes_read = len(r)
if bytes_read == 0:
return False
l = len(buf)+bytes_read-buf_len
if l <= 0:
buf = buf + r
else:
buf = buf[l:] + r
if buf.find(p) != -1:
return True
|
apache-2.0
|
akesandgren/easybuild-framework
|
test/framework/config.py
|
1
|
32361
|
# #
# Copyright 2013-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Unit tests for EasyBuild configuration.
@author: Kenneth Hoste (Ghent University)
@author: Stijn De Weirdt (Ghent University)
"""
import os
import re
import shutil
import sys
import tempfile
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered, init_config
from unittest import TextTestRunner
import easybuild.tools.options as eboptions
from easybuild.tools import run
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option, build_path, get_build_log_path, get_log_filename, get_repositorypath
from easybuild.tools.config import install_path, log_file_format, log_path, source_paths
from easybuild.tools.config import BuildOptions, ConfigurationVariables
from easybuild.tools.config import DEFAULT_PATH_SUBDIRS, init_build_options
from easybuild.tools.filetools import copy_dir, mkdir, write_file
from easybuild.tools.options import CONFIG_ENV_VAR_PREFIX
from easybuild.tools.py2vs3 import reload
class EasyBuildConfigTest(EnhancedTestCase):
"""Test cases for EasyBuild configuration."""
tmpdir = None
def setUp(self):
"""Prepare for running a config test."""
reload(eboptions)
super(EasyBuildConfigTest, self).setUp()
self.tmpdir = tempfile.mkdtemp()
def purge_environment(self):
"""Remove any leftover easybuild variables"""
for var in os.environ.keys():
# retain $EASYBUILD_IGNORECONFIGFILES, to make sure the test is isolated from system-wide config files!
if var.startswith('EASYBUILD_') and var != 'EASYBUILD_IGNORECONFIGFILES':
del os.environ[var]
def tearDown(self):
"""Clean up after a config test."""
super(EasyBuildConfigTest, self).tearDown()
try:
shutil.rmtree(self.tmpdir)
except OSError:
pass
tempfile.tempdir = None
def configure(self, args=None):
"""(re)Configure and return configfile"""
options = init_config(args=args)
return options.config
def test_default_config(self):
"""Test default configuration."""
self.purge_environment()
eb_go = eboptions.parse_options(args=[])
config_options = eb_go.get_options_by_section('config')
# check default subdirs
self.assertEqual(DEFAULT_PATH_SUBDIRS['buildpath'], 'build')
self.assertEqual(DEFAULT_PATH_SUBDIRS['installpath'], '')
self.assertEqual(DEFAULT_PATH_SUBDIRS['subdir_modules'], 'modules')
self.assertEqual(DEFAULT_PATH_SUBDIRS['repositorypath'], 'ebfiles_repo')
self.assertEqual(DEFAULT_PATH_SUBDIRS['sourcepath'], 'sources')
self.assertEqual(DEFAULT_PATH_SUBDIRS['subdir_software'], 'software')
# check whether defaults are honored, use hardcoded paths/subdirs
eb_homedir = os.path.join(os.path.expanduser('~'), '.local', 'easybuild')
self.assertEqual(config_options['buildpath'], os.path.join(eb_homedir, 'build'))
self.assertEqual(config_options['sourcepath'], os.path.join(eb_homedir, 'sources'))
self.assertEqual(config_options['installpath'], eb_homedir)
self.assertEqual(config_options['subdir_software'], 'software')
self.assertEqual(config_options['subdir_modules'], 'modules')
self.assertEqual(config_options['repository'], 'FileRepository')
self.assertEqual(config_options['repositorypath'], [os.path.join(eb_homedir, 'ebfiles_repo')])
self.assertEqual(config_options['logfile_format'][0], 'easybuild')
self.assertEqual(config_options['logfile_format'][1], "easybuild-%(name)s-%(version)s-%(date)s.%(time)s.log")
self.assertEqual(config_options['tmpdir'], None)
self.assertEqual(config_options['tmp_logdir'], None)
def test_generaloption_config(self):
"""Test new-style configuration (based on generaloption)."""
self.purge_environment()
# check whether configuration via environment variables works as expected
prefix = os.path.join(self.tmpdir, 'testprefix')
buildpath_env_var = os.path.join(self.tmpdir, 'envvar', 'build', 'path')
os.environ['EASYBUILD_PREFIX'] = prefix
os.environ['EASYBUILD_BUILDPATH'] = buildpath_env_var
options = init_config(args=[])
self.assertEqual(build_path(), buildpath_env_var)
self.assertEqual(install_path(), os.path.join(prefix, 'software'))
self.assertEqual(get_repositorypath(), [os.path.join(prefix, 'ebfiles_repo')])
del os.environ['EASYBUILD_PREFIX']
del os.environ['EASYBUILD_BUILDPATH']
# check whether configuration via command line arguments works
prefix = os.path.join(self.tmpdir, 'test1')
install = os.path.join(self.tmpdir, 'test2', 'install')
repopath = os.path.join(self.tmpdir, 'test2', 'repo')
config_file = os.path.join(self.tmpdir, 'nooldconfig.py')
write_file(config_file, '')
args = [
'--configfiles', config_file, # force empty config file
'--prefix', prefix,
'--installpath', install,
'--repositorypath', repopath,
'--subdir-software', 'APPS',
]
options = init_config(args=args)
self.assertEqual(build_path(), os.path.join(prefix, 'build'))
self.assertEqual(install_path(), os.path.join(install, 'APPS'))
self.assertEqual(install_path(typ='mod'), os.path.join(install, 'modules'))
self.assertEqual(options.installpath, install)
self.assertTrue(config_file in options.configfiles)
# check mixed command line/env var configuration
prefix = os.path.join(self.tmpdir, 'test3')
install = os.path.join(self.tmpdir, 'test4', 'install')
subdir_software = 'eb-soft'
args = [
'--configfiles', config_file, # force empty config file
'--installpath', install,
]
os.environ['EASYBUILD_PREFIX'] = prefix
os.environ['EASYBUILD_SUBDIR_SOFTWARE'] = subdir_software
installpath_modules = tempfile.mkdtemp(prefix='installpath-modules')
os.environ['EASYBUILD_INSTALLPATH_MODULES'] = installpath_modules
options = init_config(args=args)
self.assertEqual(build_path(), os.path.join(prefix, 'build'))
self.assertEqual(install_path(), os.path.join(install, subdir_software))
self.assertEqual(install_path('mod'), installpath_modules)
# subdir options *must* be relative (to --installpath)
installpath_software = tempfile.mkdtemp(prefix='installpath-software')
os.environ['EASYBUILD_SUBDIR_SOFTWARE'] = installpath_software
error_regex = r"Found problems validating the options.*'subdir_software' must specify a \*relative\* path"
self.assertErrorRegex(EasyBuildError, error_regex, init_config)
del os.environ['EASYBUILD_PREFIX']
del os.environ['EASYBUILD_SUBDIR_SOFTWARE']
def test_error_env_var_typo(self):
"""Test error reporting on use of known $EASYBUILD-prefixed env vars."""
# all is well
init_config()
os.environ['EASYBUILD_FOO'] = 'foo'
os.environ['EASYBUILD_THERESNOSUCHCONFIGURATIONOPTION'] = 'whatever'
error = r"Found 2 environment variable\(s\) that are prefixed with %s " % CONFIG_ENV_VAR_PREFIX
error += r"but do not match valid option\(s\): "
error += r','.join(['EASYBUILD_FOO', 'EASYBUILD_THERESNOSUCHCONFIGURATIONOPTION'])
self.assertErrorRegex(EasyBuildError, error, init_config)
del os.environ['EASYBUILD_THERESNOSUCHCONFIGURATIONOPTION']
del os.environ['EASYBUILD_FOO']
def test_install_path(self):
"""Test install_path function."""
# defaults
self.assertEqual(install_path(), os.path.join(self.test_installpath, 'software'))
self.assertEqual(install_path('software'), os.path.join(self.test_installpath, 'software'))
self.assertEqual(install_path(typ='mod'), os.path.join(self.test_installpath, 'modules'))
self.assertEqual(install_path('modules'), os.path.join(self.test_installpath, 'modules'))
self.assertErrorRegex(EasyBuildError, "Unknown type specified", install_path, typ='foo')
args = [
'--subdir-software', 'SOFT',
'--installpath', '/foo',
]
os.environ['EASYBUILD_SUBDIR_MODULES'] = 'MOD'
init_config(args=args)
self.assertEqual(install_path(), os.path.join('/foo', 'SOFT'))
self.assertEqual(install_path(typ='mod'), os.path.join('/foo', 'MOD'))
del os.environ['EASYBUILD_SUBDIR_MODULES']
args = [
'--installpath', '/prefix',
'--installpath-modules', '/foo',
]
os.environ['EASYBUILD_INSTALLPATH_SOFTWARE'] = '/bar/baz'
init_config(args=args)
self.assertEqual(install_path(), os.path.join('/bar', 'baz'))
self.assertEqual(install_path(typ='mod'), '/foo')
del os.environ['EASYBUILD_INSTALLPATH_SOFTWARE']
init_config(args=args)
self.assertEqual(install_path(), os.path.join('/prefix', 'software'))
self.assertEqual(install_path(typ='mod'), '/foo')
def test_generaloption_config_file(self):
"""Test use of new-style configuration file."""
self.purge_environment()
config_file = os.path.join(self.tmpdir, 'testconfig.cfg')
testpath1 = os.path.join(self.tmpdir, 'test1')
testpath2 = os.path.join(self.tmpdir, 'testtwo')
# test with config file passed via command line
cfgtxt = '\n'.join([
'[config]',
'installpath = %s' % testpath2,
])
write_file(config_file, cfgtxt)
installpath_software = tempfile.mkdtemp(prefix='installpath-software')
args = [
'--configfiles', config_file,
'--debug',
'--buildpath', testpath1,
'--installpath-software', installpath_software,
]
options = init_config(args=args)
self.assertEqual(build_path(), testpath1) # via command line
self.assertEqual(source_paths(), [os.path.join(os.getenv('HOME'), '.local', 'easybuild', 'sources')]) # default
self.assertEqual(install_path(), installpath_software) # via cmdline arg
self.assertEqual(install_path('mod'), os.path.join(testpath2, 'modules')) # via config file
# copy test easyconfigs to easybuild/easyconfigs subdirectory of temp directory
# to check whether easyconfigs install path is auto-included in robot path
tmpdir = tempfile.mkdtemp(prefix='easybuild-easyconfigs-pkg-install-path')
mkdir(os.path.join(tmpdir, 'easybuild'), parents=True)
test_ecs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
copy_dir(test_ecs_dir, os.path.join(tmpdir, 'easybuild', 'easyconfigs'))
orig_sys_path = sys.path[:]
sys.path.insert(0, tmpdir) # prepend to give it preference over possible other installed easyconfigs pkgs
# test with config file passed via environment variable
# also test for existence of HOME and USER by adding paths to robot-paths
installpath_modules = tempfile.mkdtemp(prefix='installpath-modules')
cfgtxt = '\n'.join([
'[config]',
'buildpath = %s' % testpath1,
'sourcepath = %(DEFAULT_REPOSITORYPATH)s',
'repositorypath = %(DEFAULT_REPOSITORYPATH)s,somesubdir',
'robot-paths=/tmp/foo:%(sourcepath)s:%(HOME)s:/tmp/%(USER)s:%(DEFAULT_ROBOT_PATHS)s',
'installpath-modules=%s' % installpath_modules,
])
write_file(config_file, cfgtxt)
os.environ['EASYBUILD_CONFIGFILES'] = config_file
args = [
'--debug',
'--sourcepath', testpath2,
]
options = init_config(args=args)
topdir = os.path.join(os.getenv('HOME'), '.local', 'easybuild')
self.assertEqual(install_path(), os.path.join(topdir, 'software')) # default
self.assertEqual(install_path('mod'), installpath_modules), # via config file
self.assertEqual(source_paths(), [testpath2]) # via command line
self.assertEqual(build_path(), testpath1) # via config file
self.assertEqual(get_repositorypath(), [os.path.join(topdir, 'ebfiles_repo'), 'somesubdir']) # via config file
# hardcoded first entry
self.assertEqual(options.robot_paths[0], '/tmp/foo')
# resolved value for %(sourcepath)s template
self.assertEqual(options.robot_paths[1], os.path.join(os.getenv('HOME'), '.local', 'easybuild', 'ebfiles_repo'))
# resolved value for HOME constant
self.assertEqual(options.robot_paths[2], os.getenv('HOME'))
# resolved value that uses USER constant
self.assertEqual(options.robot_paths[3], os.path.join('/tmp', os.getenv('USER')))
# first path in DEFAULT_ROBOT_PATHS
self.assertEqual(options.robot_paths[4], os.path.join(tmpdir, 'easybuild', 'easyconfigs'))
testpath3 = os.path.join(self.tmpdir, 'testTHREE')
os.environ['EASYBUILD_SOURCEPATH'] = testpath2
args = [
'--debug',
'--installpath', testpath3,
]
options = init_config(args=args)
        self.assertEqual(source_paths(), [testpath2])  # via environment variable $EASYBUILD_SOURCEPATH
self.assertEqual(install_path(), os.path.join(testpath3, 'software')) # via command line
self.assertEqual(install_path('mod'), installpath_modules), # via config file
self.assertEqual(build_path(), testpath1) # via config file
del os.environ['EASYBUILD_CONFIGFILES']
sys.path[:] = orig_sys_path
def test_configuration_variables(self):
"""Test usage of ConfigurationVariables."""
# delete instance of ConfigurationVariables
ConfigurationVariables.__class__._instances.clear()
# make sure ConfigurationVariables is a singleton class (only one available instance)
cv1 = ConfigurationVariables()
cv2 = ConfigurationVariables()
cv3 = ConfigurationVariables({'foo': 'bar'}) # note: argument is ignored, an instance is already available
self.assertTrue(cv1 is cv2)
self.assertTrue(cv1 is cv3)
def test_build_options(self):
"""Test usage of BuildOptions."""
# delete instance of BuildOptions
BuildOptions.__class__._instances.clear()
# make sure BuildOptions is a singleton class
bo1 = BuildOptions()
bo2 = BuildOptions()
bo3 = BuildOptions({'foo': 'bar'}) # note: argument is ignored, an instance is already available
self.assertTrue(bo1 is bo2)
self.assertTrue(bo1 is bo3)
# test basic functionality
BuildOptions.__class__._instances.clear()
bo = BuildOptions({
'debug': False,
'force': True
})
self.assertTrue(not bo['debug'])
self.assertTrue(bo['force'])
# updating is impossible (methods are not even available)
self.assertErrorRegex(Exception, '.*(item assignment|no attribute).*', lambda x: bo.update(x), {'debug': True})
self.assertErrorRegex(AttributeError, '.*no attribute.*', lambda x: bo.__setitem__(*x), ('debug', True))
# only valid keys can be set
BuildOptions.__class__._instances.clear()
msg = r"Encountered unknown keys .* \(known keys: .*"
self.assertErrorRegex(KeyError, msg, BuildOptions, {'thisisclearlynotavalidbuildoption': 'FAIL'})
# test init_build_options and build_option functions
self.assertErrorRegex(KeyError, msg, init_build_options, {'thisisclearlynotavalidbuildoption': 'FAIL'})
bo = init_build_options({
'robot_path': '/some/robot/path',
'stop': 'configure',
})
# specific build options should be set
self.assertEqual(bo['robot_path'], '/some/robot/path')
self.assertEqual(bo['stop'], 'configure')
# all possible build options should be set (defaults are used where needed)
self.assertEqual(sorted(bo.keys()), sorted(BuildOptions.KNOWN_KEYS))
# there should be only one BuildOptions instance
bo2 = BuildOptions()
self.assertTrue(bo is bo2)
def test_XDG_CONFIG_env_vars(self):
"""Test effect of XDG_CONFIG* environment variables on default configuration."""
self.purge_environment()
xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
xdg_config_dirs = os.environ.get('XDG_CONFIG_DIRS')
cfg_template = '\n'.join([
'[config]',
'prefix=%s',
])
homedir = os.path.join(self.test_prefix, 'homedir', '.config')
mkdir(os.path.join(homedir, 'easybuild'), parents=True)
write_file(os.path.join(homedir, 'easybuild', 'config.cfg'), cfg_template % '/home')
dir1 = os.path.join(self.test_prefix, 'dir1')
mkdir(os.path.join(dir1, 'easybuild.d'), parents=True)
write_file(os.path.join(dir1, 'easybuild.d', 'foo.cfg'), cfg_template % '/foo')
write_file(os.path.join(dir1, 'easybuild.d', 'bar.cfg'), cfg_template % '/bar')
dir2 = os.path.join(self.test_prefix, 'dir2') # empty on purpose
mkdir(os.path.join(dir2, 'easybuild.d'), parents=True)
dir3 = os.path.join(self.test_prefix, 'dir3')
mkdir(os.path.join(dir3, 'easybuild.d'), parents=True)
write_file(os.path.join(dir3, 'easybuild.d', 'foobarbaz.cfg'), cfg_template % '/foobarbaz')
# set $XDG_CONFIG_DIRS to non-existing dir to isolate ourselves from possible system-wide config files
os.environ['XDG_CONFIG_DIRS'] = '/there/should/be/no/such/directory/we/hope'
# only $XDG_CONFIG_HOME set (to existing path)
os.environ['XDG_CONFIG_HOME'] = homedir
cfg_files = [os.path.join(homedir, 'easybuild', 'config.cfg')]
reload(eboptions)
eb_go = eboptions.parse_options(args=[])
self.assertEqual(eb_go.options.configfiles, cfg_files)
self.assertEqual(eb_go.options.prefix, '/home')
# $XDG_CONFIG_HOME set, one directory listed in $XDG_CONFIG_DIRS
os.environ['XDG_CONFIG_DIRS'] = dir1
cfg_files = [
os.path.join(dir1, 'easybuild.d', 'bar.cfg'),
os.path.join(dir1, 'easybuild.d', 'foo.cfg'),
os.path.join(homedir, 'easybuild', 'config.cfg'), # $XDG_CONFIG_HOME goes last
]
reload(eboptions)
eb_go = eboptions.parse_options(args=[])
self.assertEqual(eb_go.options.configfiles, cfg_files)
self.assertEqual(eb_go.options.prefix, '/home') # last cfgfile wins
# $XDG_CONFIG_HOME not set, multiple directories listed in $XDG_CONFIG_DIRS
del os.environ['XDG_CONFIG_HOME'] # unset, so should become default
os.environ['XDG_CONFIG_DIRS'] = os.pathsep.join([dir1, dir2, dir3])
cfg_files = [
os.path.join(dir1, 'easybuild.d', 'bar.cfg'),
os.path.join(dir1, 'easybuild.d', 'foo.cfg'),
os.path.join(dir3, 'easybuild.d', 'foobarbaz.cfg'),
]
reload(eboptions)
eb_go = eboptions.parse_options(args=[])
# note: there may be a config file in $HOME too, so don't use a strict comparison
self.assertEqual(cfg_files, eb_go.options.configfiles[:3])
# $XDG_CONFIG_HOME set to non-existing directory, multiple directories listed in $XDG_CONFIG_DIRS
os.environ['XDG_CONFIG_HOME'] = os.path.join(self.test_prefix, 'nosuchdir')
cfg_files = [
os.path.join(dir1, 'easybuild.d', 'bar.cfg'),
os.path.join(dir1, 'easybuild.d', 'foo.cfg'),
os.path.join(dir3, 'easybuild.d', 'foobarbaz.cfg'),
]
reload(eboptions)
eb_go = eboptions.parse_options(args=[])
self.assertEqual(eb_go.options.configfiles, cfg_files)
self.assertEqual(eb_go.options.prefix, '/foobarbaz') # last cfgfile wins
# restore $XDG_CONFIG env vars to original state
if xdg_config_home is None:
del os.environ['XDG_CONFIG_HOME']
else:
os.environ['XDG_CONFIG_HOME'] = xdg_config_home
if xdg_config_dirs is None:
del os.environ['XDG_CONFIG_DIRS']
else:
os.environ['XDG_CONFIG_DIRS'] = xdg_config_dirs
reload(eboptions)
def test_flex_robot_paths(self):
"""Test prepend/appending to default robot search path via --robot-paths."""
# unset $EASYBUILD_ROBOT_PATHS that was defined in setUp
del os.environ['EASYBUILD_ROBOT_PATHS']
# copy test easyconfigs to easybuild/easyconfigs subdirectory of temp directory
# to check whether easyconfigs install path is auto-included in robot path
tmpdir = tempfile.mkdtemp(prefix='easybuild-easyconfigs-pkg-install-path')
mkdir(os.path.join(tmpdir, 'easybuild'), parents=True)
test_ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
tmp_ecs_dir = os.path.join(tmpdir, 'easybuild', 'easyconfigs')
copy_dir(test_ecs_path, tmp_ecs_dir)
# prepend path to test easyconfigs into Python search path, so it gets picked up as --robot-paths default
orig_sys_path = sys.path[:]
sys.path = [tmpdir] + [p for p in sys.path if not os.path.exists(os.path.join(p, 'easybuild', 'easyconfigs'))]
# default: only pick up installed easyconfigs via sys.path
eb_go = eboptions.parse_options(args=[])
self.assertEqual(eb_go.options.robot_paths, [tmp_ecs_dir])
# prepend to default robot path
eb_go = eboptions.parse_options(args=['--robot-paths=/foo:'])
self.assertEqual(eb_go.options.robot_paths, ['/foo', tmp_ecs_dir])
eb_go = eboptions.parse_options(args=['--robot-paths=/foo:/bar/baz/:'])
self.assertEqual(eb_go.options.robot_paths, ['/foo', '/bar/baz/', tmp_ecs_dir])
# append to default robot path
eb_go = eboptions.parse_options(args=['--robot-paths=:/bar/baz'])
self.assertEqual(eb_go.options.robot_paths, [tmp_ecs_dir, '/bar/baz'])
# append to default robot path
eb_go = eboptions.parse_options(args=['--robot-paths=:/bar/baz:/foo'])
self.assertEqual(eb_go.options.robot_paths, [tmp_ecs_dir, '/bar/baz', '/foo'])
# prepend and append to default robot path
eb_go = eboptions.parse_options(args=['--robot-paths=/foo/bar::/baz'])
self.assertEqual(eb_go.options.robot_paths, ['/foo/bar', tmp_ecs_dir, '/baz'])
eb_go = eboptions.parse_options(args=['--robot-paths=/foo/bar::/baz:/trala'])
self.assertEqual(eb_go.options.robot_paths, ['/foo/bar', tmp_ecs_dir, '/baz', '/trala'])
eb_go = eboptions.parse_options(args=['--robot-paths=/foo/bar:/trala::/baz'])
self.assertEqual(eb_go.options.robot_paths, ['/foo/bar', '/trala', tmp_ecs_dir, '/baz'])
# also via $EASYBUILD_ROBOT_PATHS
os.environ['EASYBUILD_ROBOT_PATHS'] = '/foo::/bar/baz'
eb_go = eboptions.parse_options(args=[])
self.assertEqual(eb_go.options.robot_paths, ['/foo', tmp_ecs_dir, '/bar/baz'])
# --robot-paths overrides $EASYBUILD_ROBOT_PATHS
os.environ['EASYBUILD_ROBOT_PATHS'] = '/foobar::/barbar/baz/baz'
eb_go = eboptions.parse_options(args=['--robot-paths=/one::/last'])
self.assertEqual(eb_go.options.robot_paths, ['/one', tmp_ecs_dir, '/last'])
del os.environ['EASYBUILD_ROBOT_PATHS']
# also works with a cfgfile in the mix
config_file = os.path.join(self.tmpdir, 'testconfig.cfg')
cfgtxt = '\n'.join([
'[config]',
'robot-paths=/cfgfirst::/cfglast',
])
write_file(config_file, cfgtxt)
eb_go = eboptions.parse_options(args=['--configfiles=%s' % config_file])
self.assertEqual(eb_go.options.robot_paths, ['/cfgfirst', tmp_ecs_dir, '/cfglast'])
# cfgfile entry is lost when env var and/or cmdline options are used
os.environ['EASYBUILD_ROBOT_PATHS'] = '/envfirst::/envend'
eb_go = eboptions.parse_options(args=['--configfiles=%s' % config_file])
self.assertEqual(eb_go.options.robot_paths, ['/envfirst', tmp_ecs_dir, '/envend'])
del os.environ['EASYBUILD_ROBOT_PATHS']
eb_go = eboptions.parse_options(args=['--robot-paths=/veryfirst:', '--configfiles=%s' % config_file])
self.assertEqual(eb_go.options.robot_paths, ['/veryfirst', tmp_ecs_dir])
os.environ['EASYBUILD_ROBOT_PATHS'] = ':/envend'
eb_go = eboptions.parse_options(args=['--robot-paths=/veryfirst:', '--configfiles=%s' % config_file])
self.assertEqual(eb_go.options.robot_paths, ['/veryfirst', tmp_ecs_dir])
del os.environ['EASYBUILD_ROBOT_PATHS']
# override default robot path
eb_go = eboptions.parse_options(args=['--robot-paths=/foo:/bar/baz'])
self.assertEqual(eb_go.options.robot_paths, ['/foo', '/bar/baz'])
# paths specified via --robot still get preference
first = os.path.join(self.test_prefix, 'first')
mkdir(first)
eb_go = eboptions.parse_options(args=['--robot-paths=/foo/bar::/baz', '--robot=%s' % first])
self.assertEqual(eb_go.options.robot_paths, [first, '/foo/bar', tmp_ecs_dir, '/baz'])
sys.path[:] = orig_sys_path
def test_strict(self):
"""Test use of --strict."""
# check default
self.assertEqual(build_option('strict'), run.WARN)
for strict_str, strict_val in [('error', run.ERROR), ('ignore', run.IGNORE), ('warn', run.WARN)]:
options = init_config(args=['--strict=%s' % strict_str])
init_config(build_options={'strict': options.strict})
self.assertEqual(build_option('strict'), strict_val)
def test_get_log_filename(self):
"""Test for get_log_filename()."""
tmpdir = tempfile.gettempdir()
res = get_log_filename('foo', '1.2.3')
regex = re.compile(os.path.join(tmpdir, r'easybuild-foo-1\.2\.3-[0-9]{8}\.[0-9]{6}\.log$'))
self.assertTrue(regex.match(res), "Pattern '%s' matches '%s'" % (regex.pattern, res))
res = get_log_filename('foo', '1.2.3', date='19700101')
regex = re.compile(os.path.join(tmpdir, r'easybuild-foo-1\.2\.3-19700101\.[0-9]{6}\.log$'))
self.assertTrue(regex.match(res), "Pattern '%s' matches '%s'" % (regex.pattern, res))
res = get_log_filename('foo', '1.2.3', timestamp='094651')
regex = re.compile(os.path.join(tmpdir, r'easybuild-foo-1\.2\.3-[0-9]{8}\.094651\.log$'))
self.assertTrue(regex.match(res), "Pattern '%s' matches '%s'" % (regex.pattern, res))
res = get_log_filename('foo', '1.2.3', date='19700101', timestamp='094651')
regex = re.compile(os.path.join(tmpdir, r'easybuild-foo-1\.2\.3-19700101\.094651\.log$'))
self.assertTrue(regex.match(res), "Pattern '%s' matches '%s'" % (regex.pattern, res))
# if log file already exists, numbers are added to the filename to obtain a new file path
write_file(res, '')
res = get_log_filename('foo', '1.2.3', date='19700101', timestamp='094651')
regex = re.compile(os.path.join(tmpdir, r'easybuild-foo-1\.2\.3-19700101\.094651\.log\.1$'))
self.assertTrue(regex.match(res), "Pattern '%s' matches '%s'" % (regex.pattern, res))
# adding salt ensures a unique filename (pretty much)
prev_log_filenames = []
for i in range(10):
res = get_log_filename('foo', '1.2.3', date='19700101', timestamp='094651', add_salt=True)
regex = re.compile(os.path.join(tmpdir, r'easybuild-foo-1\.2\.3-19700101\.094651\.[a-zA-Z]{5}\.log$'))
self.assertTrue(regex.match(res), "Pattern '%s' matches '%s'" % (regex.pattern, res))
self.assertTrue(res not in prev_log_filenames)
prev_log_filenames.append(res)
def test_log_file_format(self):
"""Test for log_file_format()."""
# first test defaults -> no templating when no values are provided
self.assertEqual(log_file_format(), 'easybuild-%(name)s-%(version)s-%(date)s.%(time)s.log')
self.assertEqual(log_file_format(return_directory=True), 'easybuild')
# test whether provided values are used to complete template
ec = {'name': 'foo', 'version': '1.2.3'}
res = log_file_format(ec=ec, date='20190322', timestamp='094356')
self.assertEqual(res, 'easybuild-foo-1.2.3-20190322.094356.log')
res = log_file_format(return_directory=True, ec=ec, date='20190322', timestamp='094356')
self.assertEqual(res, 'easybuild')
# partial templating is done when only some values are provided...
self.assertEqual(log_file_format(ec=ec), 'easybuild-foo-1.2.3-%(date)s.%(time)s.log')
res = log_file_format(date='20190322', timestamp='094356')
self.assertEqual(res, 'easybuild-%(name)s-%(version)s-20190322.094356.log')
# also try with a custom setting
init_config(args=['--logfile-format=eb-%(name)s-%(date)s,log-%(version)s-%(date)s-%(time)s.out'])
self.assertEqual(log_file_format(), 'log-%(version)s-%(date)s-%(time)s.out')
self.assertEqual(log_file_format(return_directory=True), 'eb-%(name)s-%(date)s')
res = log_file_format(ec=ec, date='20190322', timestamp='094356')
self.assertEqual(res, 'log-1.2.3-20190322-094356.out')
res = log_file_format(return_directory=True, ec=ec, date='20190322', timestamp='094356')
self.assertEqual(res, 'eb-foo-20190322')
# test handling of incorrect setting for --logfile-format
init_config(args=['--logfile-format=easybuild,log.txt,thisiswrong'])
error_pattern = "Incorrect log file format specification, should be 2-tuple"
self.assertErrorRegex(EasyBuildError, error_pattern, log_file_format)
def test_log_path(self):
"""Test for log_path()."""
# default
self.assertEqual(log_path(), 'easybuild')
# providing template values doesn't affect the default
ec = {'name': 'foo', 'version': '1.2.3'}
res = log_path(ec=ec)
self.assertEqual(res, 'easybuild')
# reconfigure with value for log directory that includes templates
init_config(args=['--logfile-format=easybuild-%(name)s-%(version)s-%(date)s-%(time)s,log.txt'])
regex = re.compile(r'^easybuild-foo-1\.2\.3-[0-9-]{8}-[0-9]{6}$')
res = log_path(ec=ec)
self.assertTrue(regex.match(res), "Pattern '%s' matches '%s'" % (regex.pattern, res))
self.assertEqual(log_file_format(), 'log.txt')
def test_get_build_log_path(self):
"""Test for build_log_path()"""
init_config()
self.assertEqual(get_build_log_path(), tempfile.gettempdir())
build_log_path = os.path.join(self.test_prefix, 'chicken')
init_config(args=['--tmp-logdir=%s' % build_log_path])
self.assertEqual(get_build_log_path(), build_log_path)
def suite():
return TestLoaderFiltered().loadTestsFromTestCase(EasyBuildConfigTest, sys.argv[1:])
if __name__ == '__main__':
res = TextTestRunner(verbosity=1).run(suite())
sys.exit(len(res.failures))
|
gpl-2.0
|
deter-project/magi
|
magi/messaging/transportTCP.py
|
1
|
2821
|
import socket
import logging
import time
from asyncore import dispatcher
from transport import Transport
import transportStream
from magimessage import DefaultCodec
log = logging.getLogger(__name__)
class TCPServer(Transport):
""" Simple TCP Server that returns new TCP clients as 'messages' """
def __init__(self, address = None, port = None):
Transport.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((address, port))
self.listen(5)
def handle_accept(self):
pair = self.accept()
if pair is None:
return
sock, addr = pair
log.info('Incoming connection from %s', repr(addr))
newTrans = TCPTransport(sock)
newTrans.saveHost = addr[0]
newTrans.savePort = addr[1]
self.inmessages.append(newTrans)
def serverOnly(self):
return True
def __repr__(self):
return "TCPServer %s:%d" % (self.addr[0], self.addr[1])
__str__ = __repr__
class TCPTransport(transportStream.StreamTransport):
"""
This class implements a TCP connection that streams MAGI messages back and forth. It
uses the StreamTransport for most work, extending it just for the connecting and reconnecting
portion.
"""
def __init__(self, sock = None, codec=DefaultCodec, address = None, port = None):
"""
Create a new TCP Transport. If sock is provided, it is used, otherwise starts with
an unconnected socket.
"""
transportStream.StreamTransport.__init__(self, sock=sock, codec=codec)
self.closed = False
self.saveHost = ""
self.savePort = -1
if address is not None and port is not None:
self.connect(address, port)
def connect(self, host, port):
"""
Attempt to connect this socket.
"""
self.saveHost = host
self.savePort = port
self.closed = False
log.info("connect %s:%d", self.saveHost, self.savePort)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
log.info("If connection fails, it will retry shortly.")
dispatcher.connect(self, (self.saveHost, self.savePort))
def reconnect(self):
"""
Attempt a reconnect of a socket that was closed or never fully connected
"""
self.connect(self.saveHost, self.savePort)
def handle_write(self):
"""
Override stream version so we can add hosttime to outgoing packets
"""
if self.txMessage.isDone():
try:
msg = self.outmessages.pop(0)
msg.hosttime = int(time.time())
self.txMessage = transportStream.TXTracker(codec=self.codec, msg=msg)
except IndexError:
return
        # keep sending until the whole message is written or the socket stops accepting data
while not self.txMessage.isDone():
bytesWritten = self.send(self.txMessage.getData())
self.txMessage.sent(bytesWritten)
#if no more can be written, break out
if bytesWritten == 0:
break
def __repr__(self):
return "TCPTransport %s:%d" % (self.saveHost, self.savePort)
__str__ = __repr__
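# --- Illustrative usage sketch (assumed wiring, not part of the original module) ---
# The classes above are asyncore dispatchers, so an event loop elsewhere in the
# messaging layer is assumed to drive them; the address/port values are made up.
#
#     server = TCPServer(address="0.0.0.0", port=18808)
#     client = TCPTransport(address="127.0.0.1", port=18808)
#     # asyncore.loop() (or the framework's own poller) then services both sockets;
#     # each accepted connection appears in server.inmessages as a new TCPTransport.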
|
gpl-2.0
|
taichatha/youtube-dl
|
youtube_dl/downloader/fragment.py
|
77
|
3754
|
from __future__ import division, unicode_literals
import os
import time
from .common import FileDownloader
from .http import HttpFD
from ..utils import (
encodeFilename,
sanitize_open,
)
class HttpQuietDownloader(HttpFD):
def to_screen(self, *args, **kargs):
pass
class FragmentFD(FileDownloader):
"""
A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests).
"""
def _prepare_and_start_frag_download(self, ctx):
self._prepare_frag_download(ctx)
self._start_frag_download(ctx)
def _prepare_frag_download(self, ctx):
self.to_screen('[%s] Total fragments: %d' % (self.FD_NAME, ctx['total_frags']))
self.report_destination(ctx['filename'])
dl = HttpQuietDownloader(
self.ydl,
{
'continuedl': True,
'quiet': True,
'noprogress': True,
'ratelimit': self.params.get('ratelimit', None),
'retries': self.params.get('retries', 0),
'test': self.params.get('test', False),
}
)
tmpfilename = self.temp_name(ctx['filename'])
dest_stream, tmpfilename = sanitize_open(tmpfilename, 'wb')
ctx.update({
'dl': dl,
'dest_stream': dest_stream,
'tmpfilename': tmpfilename,
})
def _start_frag_download(self, ctx):
total_frags = ctx['total_frags']
        # This dict stores the download progress; it is updated by the progress
        # hook.
state = {
'status': 'downloading',
'downloaded_bytes': 0,
'frag_index': 0,
'frag_count': total_frags,
'filename': ctx['filename'],
'tmpfilename': ctx['tmpfilename'],
}
start = time.time()
ctx['started'] = start
def frag_progress_hook(s):
if s['status'] not in ('downloading', 'finished'):
return
frag_total_bytes = s.get('total_bytes', 0)
if s['status'] == 'finished':
state['downloaded_bytes'] += frag_total_bytes
state['frag_index'] += 1
estimated_size = (
(state['downloaded_bytes'] + frag_total_bytes) /
(state['frag_index'] + 1) * total_frags)
time_now = time.time()
state['total_bytes_estimate'] = estimated_size
state['elapsed'] = time_now - start
if s['status'] == 'finished':
progress = self.calc_percent(state['frag_index'], total_frags)
else:
frag_downloaded_bytes = s['downloaded_bytes']
frag_progress = self.calc_percent(frag_downloaded_bytes,
frag_total_bytes)
progress = self.calc_percent(state['frag_index'], total_frags)
progress += frag_progress / float(total_frags)
state['eta'] = self.calc_eta(
start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
state['speed'] = s.get('speed')
self._hook_progress(state)
ctx['dl'].add_progress_hook(frag_progress_hook)
return start
def _finish_frag_download(self, ctx):
ctx['dest_stream'].close()
elapsed = time.time() - ctx['started']
self.try_rename(ctx['tmpfilename'], ctx['filename'])
fsize = os.path.getsize(encodeFilename(ctx['filename']))
self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': ctx['filename'],
'status': 'finished',
'elapsed': elapsed,
})
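# --- Illustrative sketch (assumed subclass usage, not part of this file) ---
# Concrete fragment downloaders typically drive this base class along these lines;
# `fragments` and the per-fragment download/append steps are hypothetical here.
#
#     ctx = {'filename': filename, 'total_frags': len(fragments)}
#     self._prepare_and_start_frag_download(ctx)   # opens ctx['dest_stream'], installs hook
#     for fragment in fragments:
#         # fetch the fragment with the quiet ctx['dl'] downloader, then append its
#         # bytes to ctx['dest_stream'] so the progress hook reports per fragment
#         ...
#     self._finish_frag_download(ctx)              # closes stream, renames temp file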
|
unlicense
|
hastexo/edx-platform
|
pavelib/paver_tests/test_assets.py
|
1
|
7878
|
"""Unit tests for the Paver asset tasks."""
import os
from unittest import TestCase
import ddt
from mock import patch
from paver.easy import call_task, path
from watchdog.observers import Observer
from pavelib.assets import COLLECTSTATIC_LOG_DIR_ARG, collect_assets
from ..utils.envs import Env
from .utils import PaverTestCase
ROOT_PATH = path(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
TEST_THEME_DIR = ROOT_PATH / "common/test/test-theme" # pylint: disable=invalid-name
class TestPaverWatchAssetTasks(TestCase):
"""
Test the Paver watch asset tasks.
"""
def setUp(self):
self.expected_sass_directories = [
path('common/static/sass'),
path('common/static'),
path('node_modules/@edx'),
path('node_modules'),
path('node_modules/edx-pattern-library/node_modules'),
path('lms/static/sass/partials'),
path('lms/static/sass'),
path('lms/static/certificates/sass'),
path('cms/static/sass'),
path('cms/static/sass/partials'),
]
super(TestPaverWatchAssetTasks, self).setUp()
def tearDown(self):
self.expected_sass_directories = []
super(TestPaverWatchAssetTasks, self).tearDown()
def test_watch_assets(self):
"""
Test the "compile_sass" task.
"""
with patch('pavelib.assets.SassWatcher.register') as mock_register:
with patch('pavelib.assets.Observer.start'):
with patch('pavelib.assets.execute_webpack_watch') as mock_webpack:
call_task(
'pavelib.assets.watch_assets',
options={"background": True},
)
self.assertEqual(mock_register.call_count, 2)
self.assertEqual(mock_webpack.call_count, 1)
sass_watcher_args = mock_register.call_args_list[0][0]
self.assertIsInstance(sass_watcher_args[0], Observer)
self.assertIsInstance(sass_watcher_args[1], list)
self.assertItemsEqual(sass_watcher_args[1], self.expected_sass_directories)
def test_watch_theme_assets(self):
"""
Test the Paver watch asset tasks with theming enabled.
"""
self.expected_sass_directories.extend([
path(TEST_THEME_DIR) / 'lms/static/sass',
path(TEST_THEME_DIR) / 'lms/static/sass/partials',
path(TEST_THEME_DIR) / 'cms/static/sass',
path(TEST_THEME_DIR) / 'cms/static/sass/partials',
])
with patch('pavelib.assets.SassWatcher.register') as mock_register:
with patch('pavelib.assets.Observer.start'):
with patch('pavelib.assets.execute_webpack_watch') as mock_webpack:
call_task(
'pavelib.assets.watch_assets',
options={
"background": True,
"theme_dirs": [TEST_THEME_DIR.dirname()],
"themes": [TEST_THEME_DIR.basename()]
},
)
self.assertEqual(mock_register.call_count, 2)
self.assertEqual(mock_webpack.call_count, 1)
sass_watcher_args = mock_register.call_args_list[0][0]
self.assertIsInstance(sass_watcher_args[0], Observer)
self.assertIsInstance(sass_watcher_args[1], list)
self.assertItemsEqual(sass_watcher_args[1], self.expected_sass_directories)
@ddt.ddt
class TestCollectAssets(PaverTestCase):
"""
Test the collectstatic process call.
ddt data is organized thusly:
* debug: whether or not collect_assets is called with the debug flag
* specified_log_location: used when collect_assets is called with a specific
log location for collectstatic output
* expected_log_location: the expected string to be used for piping collectstatic logs
"""
@ddt.data(
[{
"collect_log_args": {}, # Test for default behavior
"expected_log_location": "> /dev/null"
}],
[{
"collect_log_args": {COLLECTSTATIC_LOG_DIR_ARG: "/foo/bar"},
"expected_log_location": "> /foo/bar/lms-collectstatic.log"
}], # can use specified log location
[{
"systems": ["lms", "cms"],
"collect_log_args": {},
"expected_log_location": "> /dev/null"
}], # multiple systems can be called
)
@ddt.unpack
def test_collect_assets(self, options):
"""
Ensure commands sent to the environment for collect_assets are as expected
"""
specified_log_loc = options.get("collect_log_args", {})
specified_log_dict = specified_log_loc
log_loc = options.get("expected_log_location", "> /dev/null")
systems = options.get("systems", ["lms"])
if specified_log_loc is None:
collect_assets(
systems,
Env.DEVSTACK_SETTINGS
)
else:
collect_assets(
systems,
Env.DEVSTACK_SETTINGS,
**specified_log_dict
)
self._assert_correct_messages(log_location=log_loc, systems=systems)
def test_collect_assets_debug(self):
"""
When the method is called specifically with None for the collectstatic log dir, then
it should run in debug mode and pipe to console.
"""
expected_log_loc = ""
systems = ["lms"]
kwargs = {COLLECTSTATIC_LOG_DIR_ARG: None}
collect_assets(systems, Env.DEVSTACK_SETTINGS, **kwargs)
self._assert_correct_messages(log_location=expected_log_loc, systems=systems)
def _assert_correct_messages(self, log_location, systems):
"""
Asserts that the expected commands were run.
We just extract the pieces we care about here instead of specifying an
exact command, so that small arg changes don't break this test.
"""
for i, sys in enumerate(systems):
msg = self.task_messages[i]
self.assertTrue(msg.startswith('python manage.py {}'.format(sys)))
            self.assertIn(' collectstatic ', msg)
self.assertIn('--settings={}'.format(Env.DEVSTACK_SETTINGS), msg)
self.assertTrue(msg.endswith(' {}'.format(log_location)))
@ddt.ddt
class TestUpdateAssetsTask(PaverTestCase):
"""
    These are nearly end-to-end tests: they observe the command lines that would be
    issued, but do not actually execute them in a terminal/process.
"""
@ddt.data(
[{"expected_substring": "> /dev/null"}], # go to /dev/null by default
[{"cmd_args": ["--debug"], "expected_substring": "collectstatic"}] # TODO: make this regex
)
@ddt.unpack
def test_update_assets_task_collectstatic_log_arg(self, options):
"""
        Scoped test that only looks at what is passed to the collectstatic options
"""
cmd_args = options.get("cmd_args", [""])
expected_substring = options.get("expected_substring", None)
call_task('pavelib.assets.update_assets', args=cmd_args)
self.assertTrue(
self._is_substring_in_list(self.task_messages, expected_substring),
msg="{substring} not found in messages".format(substring=expected_substring)
)
def _is_substring_in_list(self, messages_list, expected_substring):
"""
        Return True if a given string is somewhere in a list of strings
"""
for message in messages_list:
if expected_substring in message:
return True
return False
|
agpl-3.0
|
sugartom/tensorflow-alien
|
tensorflow/contrib/training/python/training/evaluation.py
|
9
|
16040
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for evaluation and summarization of metrics.
The evaluation.py module contains helper functions for evaluating TensorFlow
modules using a variety of metrics and summarizing the results.
****************************************
* Evaluating a Checkpointed Model Once *
****************************************
Once we've trained a model, we'll want to evaluate it. The simplest way to do
this is to evaluate the performance of a saved model a single time. In order
to do this, we can specify a number of metrics we'll want to evaluate as well
as specify the summaries we want to save to disk. Furthermore, we can print
out the metrics values to stdout:
# Specify where the checkpoint is stored:
checkpoint_path = ...
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map({
"accuracy": tf.contrib.metrics.streaming_accuracy(predictions, labels),
"mse": tf.contrib.metrics.streaming_mean_squared_error(
predictions, labels),
})
# Define the summaries to write:
  for metric_name, metric_value in names_to_values.iteritems():
tf.summary.scalar(metric_name, metric_value)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# We'll evaluate 1000 batches:
num_evals = 1000
names_to_values = evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=names_to_updates.values(),
final_ops=names_to_values,
hooks=[
tf.contrib.training.StopAfterNEvalsHook(num_evals),
        tf.contrib.training.SummaryAtEndHook(log_dir),
],
config=None)
for name in names_to_values:
print('Metric %s has value %f.' % (name, names_to_values[name]))
************************************************
* Evaluating a Checkpointed Model with Metrics *
************************************************
Often, one wants to evaluate a model checkpoint saved on disk. This can be
performed once or repeatedly on a set schedule.
To evaluate a particular model, users define zero or more metrics and zero or
more summaries and call the evaluate_repeatedly method:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map({
"accuracy": tf.contrib.metrics.streaming_accuracy(predictions, labels),
"mse": tf.contrib.metrics.streaming_mean_squared_error(
predictions, labels),
})
# Define the summaries to write:
  for metric_name, metric_value in names_to_values.iteritems():
tf.summary.scalar(metric_name, metric_value)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# We'll evaluate 1000 batches:
num_evals = 1000
# Evaluate every 10 minutes:
tf.contrib.training.evaluate_repeatedly(
checkpoint_dir,
eval_ops=names_to_updates.values(),
hooks=[
tf.contrib.training.StopAfterNEvalsHook(num_evals),
        tf.contrib.training.SummaryAtEndHook(log_dir),
],
eval_interval_secs=600)
*******************************************************
* Evaluating a Checkpointed Model with Summaries Only *
*******************************************************
At times, an evaluation can be performed without metrics at all but rather
with only summaries. The user need only leave out the 'eval_ops' argument:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the summaries to write:
tf.summary.scalar(...)
tf.summary.histogram(...)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# Evaluate once every 10 minutes.
tf.contrib.training.evaluate_repeatedly(
checkpoint_dir,
hooks=[
        tf.contrib.training.SummaryAtEndHook(log_dir),
],
eval_interval_secs=600)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
__all__ = [
'StopAfterNEvalsHook',
'SummaryAtEndHook',
'checkpoints_iterator',
'evaluate_once',
'evaluate_repeatedly',
'get_or_create_eval_step',
'wait_for_new_checkpoint',
]
# pylint: disable=protected-access
# pylint: disable=invalid-name
StopAfterNEvalsHook = evaluation._StopAfterNEvalsHook
evaluate_once = evaluation._evaluate_once
get_or_create_eval_step = evaluation._get_or_create_eval_step
# pylint: enable=invalid-name
# pylint: enable=protected-access
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum amount of time to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info('Waiting for new checkpoint at %s', checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info('Found new checkpoint at %s', checkpoint_path)
return checkpoint_path
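# A minimal usage sketch (the directory, timeout and `previous_path` below are
# illustrative, not part of this module): block for at most five minutes for a
# checkpoint newer than the last one evaluated.
#   latest = wait_for_new_checkpoint('/tmp/my_model_dir/',
#                                    last_checkpoint=previous_path,
#                                    seconds_to_sleep=5,
#                                    timeout=300)
#   if latest is None:
#     logging.info('No new checkpoint appeared within the timeout.')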
def checkpoints_iterator(checkpoint_dir, min_interval_secs=0, timeout=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum amount of time to wait between checkpoints. If left as
`None`, then the process will wait indefinitely.
Yields:
String paths to latest checkpoint files as they arrive. Stops yielding only
if/when waiting for a checkpoint times out.
"""
checkpoint_path = None
while True:
checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if checkpoint_path is None:
# timed out
return
start = time.time()
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
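# A hedged usage sketch: evaluate each checkpoint a trainer writes, at most
# once per minute, giving up after an hour without a new one (the directory
# and `run_my_eval` are placeholders, not part of this module).
#   for path in checkpoints_iterator('/tmp/my_model_dir/',
#                                    min_interval_secs=60, timeout=3600):
#     run_my_eval(path)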
class SummaryAtEndHook(session_run_hook.SessionRunHook):
"""A run hook that saves a summary with the results of evaluation."""
def __init__(self, log_dir=None, summary_writer=None,
summary_op=None, feed_dict=None):
"""Constructs the Summary Hook.
Args:
log_dir: The directory where the summary events are saved to. Used only
when `summary_writer` is not specified.
summary_writer: A `tf.summary.FileWriter` to write summary events with.
summary_op: The summary op to run. If left as `None`, then all summaries
in the tf.GraphKeys.SUMMARIES collection are used.
feed_dict: An optional feed dictionary to use when evaluating the
summaries.
Raises:
ValueError: If both `log_dir` and `summary_writer` are `None`.
"""
self._summary_op = summary_op
self._feed_dict = feed_dict
    self._summary_writer = summary_writer
    self._log_dir = log_dir
if self._log_dir is None and self._summary_writer is None:
raise ValueError('One of log_dir or summary_writer should be used.')
self._global_step = variables.get_or_create_global_step()
def begin(self):
if self._summary_writer is None and self._log_dir:
self._summary_writer = summary.FileWriterCache.get(self._log_dir)
if self._summary_op is None:
self._summary_op = summary.merge_all()
def end(self, session):
global_step = training_util.global_step(session, self._global_step)
summary_str = session.run(self._summary_op, self._feed_dict)
if self._summary_writer:
self._summary_writer.add_summary(summary_str, global_step)
self._summary_writer.flush()
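# A minimal construction sketch (the directory is illustrative): write every
# summary in the tf.GraphKeys.SUMMARIES collection once, when evaluation ends.
#   hook = SummaryAtEndHook(log_dir='/tmp/my_model_eval/')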
def _scaffold_with_init(scaffold, saver, checkpoint_path):
"""Creates a scaffold that loads the given checkpoint using an init_fn.
Args:
scaffold: The scaffold to copy.
saver: The saver to use when restoring the checkpoint.
checkpoint_path: An absolute path to a checkpoint.
Returns:
A scaffold with an init_fn that loads the given checkpoint. If the scaffold
provided already has an init_fn, the scaffold is returned unchanged.
"""
def restore_checkpoint(_, session):
saver.restore(session, checkpoint_path)
if not scaffold.init_fn:
scaffold = monitored_session.Scaffold(
init_op=scaffold.init_op,
init_feed_dict=scaffold.init_feed_dict,
init_fn=restore_checkpoint,
ready_op=scaffold.ready_op,
local_init_op=scaffold.local_init_op,
summary_op=scaffold.summary_op,
saver=scaffold.saver)
return scaffold
def evaluate_repeatedly(checkpoint_dir,
master='',
scaffold=None,
eval_ops=None,
feed_dict=None,
final_ops=None,
final_ops_feed_dict=None,
eval_interval_secs=60,
hooks=None,
config=None,
max_number_of_evaluations=None,
timeout=None):
"""Repeatedly searches for a checkpoint in `checkpoint_dir` and evaluates it.
During a single evaluation, the `eval_ops` is run until the session is
interrupted or requested to finish. This is typically requested via a
`tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running
the requested number of times.
Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of
`Tensors` or a dictionary from names to `Tensors`. The `final_ops` is
evaluated a single time after `eval_ops` has finished running and the fetched
values of `final_ops` are returned. If `final_ops` is left as `None`, then
`None` is returned.
One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record
summaries after the `eval_ops` have run. If `eval_ops` is `None`, the
  summaries run immediately after the model checkpoint has been restored.
  Note that `evaluate_repeatedly` creates a local variable used to track the number of
evaluations run via `tf.contrib.training.get_or_create_eval_step`.
Consequently, if a custom local init op is provided via a `scaffold`, the
caller should ensure that the local init op also initializes the eval step.
Args:
checkpoint_dir: The directory where checkpoints are stored.
master: The BNS address of the TensorFlow master.
    scaffold: A `tf.train.Scaffold` instance for initializing variables and
restoring variables. Note that `scaffold.init_fn` is used by the function
to restore the checkpoint. If you supply a custom init_fn, then it must
also take care of restoring the model from its checkpoint.
eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`, which is run until the session is requested to stop,
commonly done by a `tf.contrib.training.StopAfterNEvalsHook`.
feed_dict: The feed dictionary to use when executing the `eval_ops`.
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.
eval_interval_secs: The minimum number of seconds between evaluations.
hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the
evaluation loop.
config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
max_number_of_evaluations: The maximum times to run the evaluation. If left
as `None`, then evaluation runs indefinitely.
timeout: The maximum amount of time to wait between checkpoints. If left as
`None`, then the process will wait indefinitely.
Returns:
The fetched values of `final_ops` or `None` if `final_ops` is `None`.
"""
eval_step = get_or_create_eval_step()
# Prepare the run hooks.
hooks = hooks or []
if eval_ops is not None:
update_eval_step = state_ops.assign_add(eval_step, 1)
for h in hooks:
if isinstance(h, StopAfterNEvalsHook):
h._set_evals_completed_tensor(update_eval_step) # pylint: disable=protected-access
if isinstance(eval_ops, dict):
eval_ops['update_eval_step'] = update_eval_step
elif isinstance(eval_ops, (tuple, list)):
eval_ops = list(eval_ops) + [update_eval_step]
else:
eval_ops = [eval_ops, update_eval_step]
final_ops_hook = basic_session_run_hooks.FinalOpsHook(
final_ops, final_ops_feed_dict)
hooks.append(final_ops_hook)
num_evaluations = 0
for checkpoint_path in checkpoints_iterator(checkpoint_dir,
eval_interval_secs, timeout):
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master=master,
config=config)
with monitored_session.MonitoredSession(
session_creator=session_creator, hooks=hooks) as session:
logging.info('Starting evaluation at ' + time.strftime(
'%Y-%m-%d-%H:%M:%S', time.gmtime()))
if eval_ops is not None:
while not session.should_stop():
session.run(eval_ops, feed_dict)
logging.info('Finished evaluation at ' + time.strftime(
'%Y-%m-%d-%H:%M:%S', time.gmtime()))
num_evaluations += 1
if max_number_of_evaluations is not None and num_evaluations >= max_number_of_evaluations:
return final_ops_hook.final_ops_values
logging.info('Timed-out waiting for a checkpoint.')
return final_ops_hook.final_ops_values
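# A hedged sketch of the `final_ops` behaviour documented above, reusing the
# `names_to_values`/`names_to_updates` metric maps from the module docstring
# (directory and batch count are illustrative): the metric values are fetched
# once after the eval loop and returned by the call.
#   metric_values = evaluate_repeatedly(
#       '/tmp/my_model_dir/',
#       eval_ops=names_to_updates.values(),
#       final_ops=names_to_values,
#       hooks=[StopAfterNEvalsHook(1000)],
#       max_number_of_evaluations=1)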
|
apache-2.0
|
turtleloveshoes/kitsune
|
kitsune/wiki/badges.py
|
23
|
1658
|
from django.conf import settings
from django.db.models.signals import post_save
from kitsune.wiki.models import Revision
# Yo ******! These are year-agnostic badge templates which code uses
# to get-or-create the actual Badge instances. These strings should
# not be l10n-ized here--the badge title and description strings get
# l10n-ized elsewhere. Peace!
WIKI_BADGES = {
'kb-badge': {
'slug': '{year}-kb-badge',
'title': '{year} KB Badge',
'description': 'This badge is awarded to contributors with 10 '
'approved English edits during {year}.',
},
'l10n-badge': {
'slug': '{year}-l10n-badge',
'title': '{year} L10n Badge',
'description': 'This badge is awarded to contributors with 10 '
                       'approved translation edits during {year}.',
},
}
def on_revision_save(sender, instance, **kwargs):
"""Handle the revision save signal.
* We award the KB badge on 10 approved en-US edits.
* We award the L10n badge on 10 approved translation edits.
"""
rev = instance
year = rev.created.year
creator = rev.creator
# We only care about approved revisions.
if not rev.is_approved:
return
# The badge to be awarded depends on the locale.
if rev.document.locale == settings.WIKI_DEFAULT_LANGUAGE:
badge_template = WIKI_BADGES['kb-badge']
else:
badge_template = WIKI_BADGES['l10n-badge']
from kitsune.wiki.tasks import maybe_award_badge
maybe_award_badge.delay(badge_template, year, creator)
def register_signals():
post_save.connect(on_revision_save, sender=Revision)
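# Illustrative only: a year-agnostic template above becomes a concrete badge
# by formatting in the year, e.g.
#   WIKI_BADGES['kb-badge']['slug'].format(year=2014) == '2014-kb-badge'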
|
bsd-3-clause
|
endlessm/chromium-browser
|
tools/grit/minimize_css_unittest.py
|
10
|
1879
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import minimize_css
class CSSMinimizerTest(unittest.TestCase):
def test_simple(self):
source = """
div {
color: blue;
}
"""
minimized = minimize_css.CSSMinimizer.minimize_css(source)
self.assertEquals(minimized, "div{color: blue}")
def test_attribute_selectors(self):
source = """
input[type="search" i]::-webkit-textfield-decoration-container {
direction: ltr;
}
"""
minimized = minimize_css.CSSMinimizer.minimize_css(source)
self.assertEquals(
minimized,
# pylint: disable=line-too-long
"""input[type="search" i]::-webkit-textfield-decoration-container{direction: ltr}""")
def test_strip_comment(self):
source = """
/* header */
html {
/* inside block */
display: block;
}
/* footer */
"""
minimized = minimize_css.CSSMinimizer.minimize_css(source)
self.assertEquals(minimized, "html{ display: block}")
def test_no_strip_inside_quotes(self):
source = """div[foo=' bar ']"""
minimized = minimize_css.CSSMinimizer.minimize_css(source)
self.assertEquals(minimized, source)
source = """div[foo=" bar "]"""
minimized = minimize_css.CSSMinimizer.minimize_css(source)
self.assertEquals(minimized, source)
def test_escape_string(self):
source = """content: " <a onclick=\\\"javascript: alert ( 'foobar' ); \\\">";"""
minimized = minimize_css.CSSMinimizer.minimize_css(source)
self.assertEquals(minimized, source)
|
bsd-3-clause
|
fangxingli/hue
|
desktop/core/ext-py/pysaml2-2.4.0/example/idp2_repoze/idp.py
|
29
|
35282
|
#!/usr/bin/env python
import argparse
import base64
import re
import logging
import time
from hashlib import sha1
from urlparse import parse_qs
from Cookie import SimpleCookie
import os
from saml2 import server
from saml2 import BINDING_HTTP_ARTIFACT
from saml2 import BINDING_URI
from saml2 import BINDING_PAOS
from saml2 import BINDING_SOAP
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import time_util
from saml2.authn import is_equal
from saml2.authn_context import AuthnBroker
from saml2.authn_context import PASSWORD
from saml2.authn_context import UNSPECIFIED
from saml2.authn_context import authn_context_class_ref
from saml2.httputil import Response
from saml2.httputil import NotFound
from saml2.httputil import geturl
from saml2.httputil import get_post
from saml2.httputil import Redirect
from saml2.httputil import Unauthorized
from saml2.httputil import BadRequest
from saml2.httputil import ServiceError
from saml2.ident import Unknown
from saml2.metadata import create_metadata_string
from saml2.s_utils import rndstr, exception_trace
from saml2.s_utils import UnknownPrincipal
from saml2.s_utils import UnsupportedBinding
from saml2.s_utils import PolicyError
from saml2.sigver import verify_redirect_signature
logger = logging.getLogger("saml2.idp")
class Cache(object):
def __init__(self):
self.user2uid = {}
self.uid2user = {}
def _expiration(timeout, tformat="%a, %d-%b-%Y %H:%M:%S GMT"):
"""
:param timeout:
:param tformat:
:return:
"""
if timeout == "now":
return time_util.instant(tformat)
elif timeout == "dawn":
return time.strftime(tformat, time.gmtime(0))
else:
# validity time should match lifetime of assertions
return time_util.in_a_while(minutes=timeout, format=tformat)
def get_eptid(idp, req_info, session):
return idp.eptid.get(idp.config.entityid,
req_info.sender(), session["permanent_id"],
session["authn_auth"])
# -----------------------------------------------------------------------------
def dict2list_of_tuples(d):
return [(k, v) for k, v in d.items()]
# -----------------------------------------------------------------------------
class Service(object):
def __init__(self, environ, start_response, user=None):
self.environ = environ
logger.debug("ENVIRON: %s" % environ)
self.start_response = start_response
self.user = user
def unpack_redirect(self):
if "QUERY_STRING" in self.environ:
_qs = self.environ["QUERY_STRING"]
return dict([(k, v[0]) for k, v in parse_qs(_qs).items()])
else:
return None
def unpack_post(self):
_dict = parse_qs(get_post(self.environ))
logger.debug("unpack_post:: %s" % _dict)
try:
return dict([(k, v[0]) for k, v in _dict.items()])
except Exception:
return None
def unpack_soap(self):
try:
query = get_post(self.environ)
return {"SAMLRequest": query, "RelayState": ""}
except Exception:
return None
def unpack_either(self):
if self.environ["REQUEST_METHOD"] == "GET":
_dict = self.unpack_redirect()
elif self.environ["REQUEST_METHOD"] == "POST":
_dict = self.unpack_post()
else:
_dict = None
logger.debug("_dict: %s" % _dict)
return _dict
def operation(self, _dict, binding):
logger.debug("_operation: %s" % _dict)
        if not _dict or 'SAMLRequest' not in _dict:
resp = BadRequest('Error parsing request or no request')
return resp(self.environ, self.start_response)
else:
try:
return self.do(_dict["SAMLRequest"], binding,
_dict["RelayState"])
except KeyError:
# Can live with no relay state
return self.do(_dict["SAMLRequest"], binding)
def artifact_operation(self, _dict):
if not _dict:
resp = BadRequest("Missing query")
return resp(self.environ, self.start_response)
else:
# exchange artifact for request
request = IDP.artifact2message(_dict["SAMLart"], "spsso")
try:
return self.do(request, BINDING_HTTP_ARTIFACT,
_dict["RelayState"])
except KeyError:
return self.do(request, BINDING_HTTP_ARTIFACT)
def response(self, binding, http_args):
if binding == BINDING_HTTP_ARTIFACT:
resp = Redirect()
else:
resp = Response(http_args["data"], headers=http_args["headers"])
return resp(self.environ, self.start_response)
def do(self, query, binding, relay_state=""):
pass
def redirect(self):
""" Expects a HTTP-redirect request """
_dict = self.unpack_redirect()
return self.operation(_dict, BINDING_HTTP_REDIRECT)
def post(self):
""" Expects a HTTP-POST request """
_dict = self.unpack_post()
return self.operation(_dict, BINDING_HTTP_POST)
def artifact(self):
# Can be either by HTTP_Redirect or HTTP_POST
_dict = self.unpack_either()
return self.artifact_operation(_dict)
def soap(self):
"""
Single log out using HTTP_SOAP binding
"""
logger.debug("- SOAP -")
_dict = self.unpack_soap()
logger.debug("_dict: %s" % _dict)
return self.operation(_dict, BINDING_SOAP)
def uri(self):
_dict = self.unpack_either()
return self.operation(_dict, BINDING_SOAP)
# def not_authn(self, key):
# """
#
#
# :return:
# """
# loc = "http://%s/login" % (self.environ["HTTP_HOST"])
# loc += "?%s" % urllib.urlencode({"came_from": self.environ[
# "PATH_INFO"], "key": key})
# headers = [('Content-Type', 'text/plain')]
#
# logger.debug("location: %s" % loc)
# logger.debug("headers: %s" % headers)
#
# resp = Redirect(loc, headers=headers)
#
# return resp(self.environ, self.start_response)
def not_authn(self, key, requested_authn_context):
ruri = geturl(self.environ, query=False)
return do_authentication(self.environ, self.start_response,
authn_context=requested_authn_context,
key=key, redirect_uri=ruri)
# -----------------------------------------------------------------------------
REPOZE_ID_EQUIVALENT = "uid"
FORM_SPEC = """<form name="myform" method="post" action="%s">
<input type="hidden" name="SAMLResponse" value="%s" />
<input type="hidden" name="RelayState" value="%s" />
</form>"""
# -----------------------------------------------------------------------------
# === Single log in ====
# -----------------------------------------------------------------------------
class AuthenticationNeeded(Exception):
def __init__(self, authn_context=None, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)
self.authn_context = authn_context
class SSO(Service):
def __init__(self, environ, start_response, user=None):
Service.__init__(self, environ, start_response, user)
self.binding = ""
self.response_bindings = None
self.resp_args = {}
self.binding_out = None
self.destination = None
self.req_info = None
def verify_request(self, query, binding):
"""
:param query: The SAML query, transport encoded
:param binding: Which binding the query came in over
"""
resp_args = {}
if not query:
logger.info("Missing QUERY")
resp = Unauthorized('Unknown user')
return resp_args, resp(self.environ, self.start_response)
if not self.req_info:
self.req_info = IDP.parse_authn_request(query, binding)
logger.info("parsed OK")
_authn_req = self.req_info.message
logger.debug("%s" % _authn_req)
self.binding_out, self.destination = IDP.pick_binding(
"assertion_consumer_service",
bindings=self.response_bindings,
entity_id=_authn_req.issuer.text)
logger.debug("Binding: %s, destination: %s" % (self.binding_out,
self.destination))
resp_args = {}
try:
resp_args = IDP.response_args(_authn_req)
_resp = None
except UnknownPrincipal, excp:
_resp = IDP.create_error_response(_authn_req.id,
self.destination, excp)
except UnsupportedBinding, excp:
_resp = IDP.create_error_response(_authn_req.id,
self.destination, excp)
return resp_args, _resp
def do(self, query, binding_in, relay_state=""):
try:
resp_args, _resp = self.verify_request(query, binding_in)
except UnknownPrincipal, excp:
logger.error("UnknownPrincipal: %s" % (excp,))
resp = ServiceError("UnknownPrincipal: %s" % (excp,))
return resp(self.environ, self.start_response)
except UnsupportedBinding, excp:
logger.error("UnsupportedBinding: %s" % (excp,))
resp = ServiceError("UnsupportedBinding: %s" % (excp,))
return resp(self.environ, self.start_response)
if not _resp:
identity = USERS[self.user].copy()
#identity["eduPersonTargetedID"] = get_eptid(IDP, query, session)
logger.info("Identity: %s" % (identity,))
if REPOZE_ID_EQUIVALENT:
identity[REPOZE_ID_EQUIVALENT] = self.user
try:
sign_assertion = IDP.config.getattr("sign_assertion", "idp")
if sign_assertion is None:
sign_assertion = False
_resp = IDP.create_authn_response(
identity, userid=self.user,
authn=AUTHN_BROKER[self.environ["idp.authn_ref"]], sign_assertion=sign_assertion,
sign_response=False, **resp_args)
except Exception, excp:
logging.error(exception_trace(excp))
resp = ServiceError("Exception: %s" % (excp,))
return resp(self.environ, self.start_response)
logger.info("AuthNResponse: %s" % _resp)
http_args = IDP.apply_binding(self.binding_out,
"%s" % _resp, self.destination,
relay_state, response=True)
logger.debug("HTTPargs: %s" % http_args)
return self.response(self.binding_out, http_args)
def _store_request(self, _dict):
logger.debug("_store_request: %s" % _dict)
key = sha1(_dict["SAMLRequest"]).hexdigest()
# store the AuthnRequest
IDP.ticket[key] = _dict
return key
def redirect(self):
""" This is the HTTP-redirect endpoint """
logger.info("--- In SSO Redirect ---")
_info = self.unpack_redirect()
try:
_key = _info["key"]
_info = IDP.ticket[_key]
self.req_info = _info["req_info"]
del IDP.ticket[_key]
except KeyError:
try:
self.req_info = IDP.parse_authn_request(_info["SAMLRequest"],
BINDING_HTTP_REDIRECT)
except KeyError:
resp = BadRequest("Message signature verification failure")
return resp(self.environ, self.start_response)
_req = self.req_info.message
if "SigAlg" in _info and "Signature" in _info: # Signed request
issuer = _req.issuer.text
_certs = IDP.metadata.certs(issuer, "any", "signing")
verified_ok = False
for cert in _certs:
if verify_redirect_signature(_info, cert):
verified_ok = True
break
if not verified_ok:
resp = BadRequest("Message signature verification failure")
return resp(self.environ, self.start_response)
if self.user:
if _req.force_authn:
_info["req_info"] = self.req_info
key = self._store_request(_info)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(_info, BINDING_HTTP_REDIRECT)
else:
_info["req_info"] = self.req_info
key = self._store_request(_info)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(_info, BINDING_HTTP_REDIRECT)
def post(self):
"""
The HTTP-Post endpoint
"""
logger.info("--- In SSO POST ---")
_info = self.unpack_either()
self.req_info = IDP.parse_authn_request(
_info["SAMLRequest"], BINDING_HTTP_POST)
_req = self.req_info.message
if self.user:
if _req.force_authn:
_info["req_info"] = self.req_info
key = self._store_request(_info)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(_info, BINDING_HTTP_POST)
else:
_info["req_info"] = self.req_info
key = self._store_request(_info)
return self.not_authn(key, _req.requested_authn_context)
# def artifact(self):
# # Can be either by HTTP_Redirect or HTTP_POST
# _req = self._store_request(self.unpack_either())
# if isinstance(_req, basestring):
# return self.not_authn(_req)
# return self.artifact_operation(_req)
def ecp(self):
# The ECP interface
logger.info("--- ECP SSO ---")
resp = None
try:
authz_info = self.environ["HTTP_AUTHORIZATION"]
if authz_info.startswith("Basic "):
try:
_info = base64.b64decode(authz_info[6:])
except TypeError:
resp = Unauthorized()
else:
logger.debug("Authz_info: %s" % _info)
try:
(user, passwd) = _info.split(":")
                        # reject only when the supplied password does not match
                        if not is_equal(PASSWD[user], passwd):
                            resp = Unauthorized()
self.user = user
except (ValueError, TypeError):
resp = Unauthorized()
else:
resp = Unauthorized()
except KeyError:
resp = Unauthorized()
if resp:
return resp(self.environ, self.start_response)
_dict = self.unpack_soap()
self.response_bindings = [BINDING_PAOS]
# Basic auth ?!
return self.operation(_dict, BINDING_SOAP)
# -----------------------------------------------------------------------------
# === Authentication ====
# -----------------------------------------------------------------------------
def do_authentication(environ, start_response, authn_context, key,
redirect_uri):
"""
Display the login form
"""
logger.debug("Do authentication")
auth_info = AUTHN_BROKER.pick(authn_context)
if len(auth_info):
method, reference = auth_info[0]
logger.debug("Authn chosen: %s (ref=%s)" % (method, reference))
return method(environ, start_response, reference, key, redirect_uri)
else:
resp = Unauthorized("No usable authentication method")
return resp(environ, start_response)
# -----------------------------------------------------------------------------
PASSWD = {"haho0032": "qwerty",
"roland": "dianakra",
"babs": "howes",
"upper": "crust"}
def username_password_authn(environ, start_response, reference, key,
redirect_uri):
"""
Display the login form
"""
logger.info("The login page")
headers = []
resp = Response(mako_template="login.mako", template_lookup=LOOKUP,
headers=headers)
argv = {
"action": "/verify",
"login": "",
"password": "",
"key": key,
"authn_reference": reference,
"redirect_uri": redirect_uri
}
logger.info("do_authentication argv: %s" % argv)
return resp(environ, start_response, **argv)
def verify_username_and_password(dic):
global PASSWD
# verify username and password
if PASSWD[dic["login"][0]] == dic["password"][0]:
return True, dic["login"][0]
else:
return False, ""
def do_verify(environ, start_response, _):
query = parse_qs(get_post(environ))
logger.debug("do_verify: %s" % query)
try:
_ok, user = verify_username_and_password(query)
except KeyError:
_ok = False
user = None
if not _ok:
resp = Unauthorized("Unknown user or wrong password")
else:
uid = rndstr(24)
IDP.cache.uid2user[uid] = user
IDP.cache.user2uid[user] = uid
logger.debug("Register %s under '%s'" % (user, uid))
kaka = set_cookie("idpauthn", "/", uid, query["authn_reference"][0])
lox = "%s?id=%s&key=%s" % (query["redirect_uri"][0], uid,
query["key"][0])
logger.debug("Redirect => %s" % lox)
resp = Redirect(lox, headers=[kaka], content="text/html")
return resp(environ, start_response)
def not_found(environ, start_response):
"""Called if no URL matches."""
resp = NotFound()
return resp(environ, start_response)
# -----------------------------------------------------------------------------
# === Single log out ===
# -----------------------------------------------------------------------------
#def _subject_sp_info(req_info):
# # look for the subject
# subject = req_info.subject_id()
# subject = subject.text.strip()
# sp_entity_id = req_info.message.issuer.text.strip()
# return subject, sp_entity_id
class SLO(Service):
def do(self, request, binding, relay_state=""):
logger.info("--- Single Log Out Service ---")
try:
_, body = request.split("\n")
logger.debug("req: '%s'" % body)
req_info = IDP.parse_logout_request(body, binding)
except Exception, exc:
logger.error("Bad request: %s" % exc)
resp = BadRequest("%s" % exc)
return resp(self.environ, self.start_response)
msg = req_info.message
if msg.name_id:
lid = IDP.ident.find_local_id(msg.name_id)
logger.info("local identifier: %s" % lid)
if lid in IDP.cache.user2uid:
uid = IDP.cache.user2uid[lid]
if uid in IDP.cache.uid2user:
del IDP.cache.uid2user[uid]
del IDP.cache.user2uid[lid]
# remove the authentication
try:
IDP.session_db.remove_authn_statements(msg.name_id)
except KeyError, exc:
logger.error("ServiceError: %s" % exc)
resp = ServiceError("%s" % exc)
return resp(self.environ, self.start_response)
resp = IDP.create_logout_response(msg, [binding])
try:
hinfo = IDP.apply_binding(binding, "%s" % resp, "", relay_state)
except Exception, exc:
logger.error("ServiceError: %s" % exc)
resp = ServiceError("%s" % exc)
return resp(self.environ, self.start_response)
#_tlh = dict2list_of_tuples(hinfo["headers"])
delco = delete_cookie(self.environ, "idpauthn")
if delco:
hinfo["headers"].append(delco)
logger.info("Header: %s" % (hinfo["headers"],))
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# Manage Name ID service
# ----------------------------------------------------------------------------
class NMI(Service):
def do(self, query, binding, relay_state=""):
logger.info("--- Manage Name ID Service ---")
req = IDP.parse_manage_name_id_request(query, binding)
request = req.message
# Do the necessary stuff
name_id = IDP.ident.handle_manage_name_id_request(
request.name_id, request.new_id, request.new_encrypted_id,
request.terminate)
logger.debug("New NameID: %s" % name_id)
_resp = IDP.create_manage_name_id_response(request)
# It's using SOAP binding
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % _resp, "",
relay_state, response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Assertion ID request ===
# ----------------------------------------------------------------------------
# Only URI binding
class AIDR(Service):
def do(self, aid, binding, relay_state=""):
logger.info("--- Assertion ID Service ---")
try:
assertion = IDP.create_assertion_id_request_response(aid)
except Unknown:
resp = NotFound(aid)
return resp(self.environ, self.start_response)
hinfo = IDP.apply_binding(BINDING_URI, "%s" % assertion, response=True)
logger.debug("HINFO: %s" % hinfo)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
def operation(self, _dict, binding, **kwargs):
logger.debug("_operation: %s" % _dict)
if not _dict or "ID" not in _dict:
resp = BadRequest('Error parsing request or no request')
return resp(self.environ, self.start_response)
return self.do(_dict["ID"], binding, **kwargs)
# ----------------------------------------------------------------------------
# === Artifact resolve service ===
# ----------------------------------------------------------------------------
class ARS(Service):
def do(self, request, binding, relay_state=""):
_req = IDP.parse_artifact_resolve(request, binding)
msg = IDP.create_artifact_response(_req, _req.artifact.text)
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "",
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Authn query service ===
# ----------------------------------------------------------------------------
# Only SOAP binding
class AQS(Service):
def do(self, request, binding, relay_state=""):
logger.info("--- Authn Query Service ---")
_req = IDP.parse_authn_query(request, binding)
_query = _req.message
msg = IDP.create_authn_query_response(_query.subject,
_query.requested_authn_context,
_query.session_index)
logger.debug("response: %s" % msg)
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "",
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Attribute query service ===
# ----------------------------------------------------------------------------
# Only SOAP binding
class ATTR(Service):
def do(self, request, binding, relay_state=""):
logger.info("--- Attribute Query Service ---")
_req = IDP.parse_attribute_query(request, binding)
_query = _req.message
name_id = _query.subject.name_id
uid = name_id.text
logger.debug("Local uid: %s" % uid)
identity = EXTRA[uid]
# Comes in over SOAP so only need to construct the response
args = IDP.response_args(_query, [BINDING_SOAP])
msg = IDP.create_attribute_response(identity,
name_id=name_id, **args)
logger.debug("response: %s" % msg)
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "",
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# Name ID Mapping service
# When an entity that shares an identifier for a principal with an identity
# provider wishes to obtain a name identifier for the same principal in a
# particular format or federation namespace, it can send a request to
# the identity provider using this protocol.
# ----------------------------------------------------------------------------
class NIM(Service):
def do(self, query, binding, relay_state=""):
req = IDP.parse_name_id_mapping_request(query, binding)
request = req.message
# Do the necessary stuff
try:
name_id = IDP.ident.handle_name_id_mapping_request(
request.name_id, request.name_id_policy)
except Unknown:
resp = BadRequest("Unknown entity")
return resp(self.environ, self.start_response)
except PolicyError:
resp = BadRequest("Unknown entity")
return resp(self.environ, self.start_response)
info = IDP.response_args(request)
_resp = IDP.create_name_id_mapping_response(name_id, **info)
# Only SOAP
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % _resp, "", "",
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# Cookie handling
# ----------------------------------------------------------------------------
def info_from_cookie(kaka):
logger.debug("KAKA: %s" % kaka)
if kaka:
cookie_obj = SimpleCookie(kaka)
morsel = cookie_obj.get("idpauthn", None)
if morsel:
try:
key, ref = base64.b64decode(morsel.value).split(":")
return IDP.cache.uid2user[key], ref
except (KeyError, TypeError):
return None, None
else:
logger.debug("No idpauthn cookie")
return None, None
def delete_cookie(environ, name):
kaka = environ.get("HTTP_COOKIE", '')
logger.debug("delete KAKA: %s" % kaka)
if kaka:
cookie_obj = SimpleCookie(kaka)
morsel = cookie_obj.get(name, None)
cookie = SimpleCookie()
cookie[name] = ""
cookie[name]['path'] = "/"
logger.debug("Expire: %s" % morsel)
cookie[name]["expires"] = _expiration("dawn")
return tuple(cookie.output().split(": ", 1))
return None
def set_cookie(name, _, *args):
cookie = SimpleCookie()
cookie[name] = base64.b64encode(":".join(args))
cookie[name]['path'] = "/"
cookie[name]["expires"] = _expiration(5) # 5 minutes from now
logger.debug("Cookie expires: %s" % cookie[name]["expires"])
return tuple(cookie.output().split(": ", 1))
# ----------------------------------------------------------------------------
# map urls to functions
AUTHN_URLS = [
# sso
(r'sso/post$', (SSO, "post")),
(r'sso/post/(.*)$', (SSO, "post")),
(r'sso/redirect$', (SSO, "redirect")),
(r'sso/redirect/(.*)$', (SSO, "redirect")),
(r'sso/art$', (SSO, "artifact")),
(r'sso/art/(.*)$', (SSO, "artifact")),
# slo
(r'slo/redirect$', (SLO, "redirect")),
(r'slo/redirect/(.*)$', (SLO, "redirect")),
(r'slo/post$', (SLO, "post")),
(r'slo/post/(.*)$', (SLO, "post")),
(r'slo/soap$', (SLO, "soap")),
(r'slo/soap/(.*)$', (SLO, "soap")),
#
(r'airs$', (AIDR, "uri")),
(r'ars$', (ARS, "soap")),
# mni
(r'mni/post$', (NMI, "post")),
(r'mni/post/(.*)$', (NMI, "post")),
(r'mni/redirect$', (NMI, "redirect")),
(r'mni/redirect/(.*)$', (NMI, "redirect")),
(r'mni/art$', (NMI, "artifact")),
(r'mni/art/(.*)$', (NMI, "artifact")),
(r'mni/soap$', (NMI, "soap")),
(r'mni/soap/(.*)$', (NMI, "soap")),
# nim
(r'nim$', (NIM, "soap")),
(r'nim/(.*)$', (NIM, "soap")),
#
(r'aqs$', (AQS, "soap")),
(r'attr$', (ATTR, "soap"))
]
NON_AUTHN_URLS = [
#(r'login?(.*)$', do_authentication),
(r'verify?(.*)$', do_verify),
(r'sso/ecp$', (SSO, "ecp")),
]
# ----------------------------------------------------------------------------
def metadata(environ, start_response):
try:
path = args.path
if path is None or len(path) == 0:
        path = os.path.dirname(os.path.abspath(__file__))
if path[-1] != "/":
path += "/"
metadata = create_metadata_string(path+args.config, IDP.config,
args.valid, args.cert, args.keyfile,
args.id, args.name, args.sign)
start_response('200 OK', [('Content-Type', "text/xml")])
return metadata
except Exception as ex:
logger.error("An error occured while creating metadata:" + ex.message)
return not_found(environ, start_response)
def staticfile(environ, start_response):
try:
path = args.path
if path is None or len(path) == 0:
path = os.path.dirname(os.path.abspath(__file__))
if path[-1] != "/":
path += "/"
path += environ.get('PATH_INFO', '').lstrip('/')
start_response('200 OK', [('Content-Type', "text/xml")])
return open(path, 'r').read()
except Exception as ex:
logger.error("An error occured while creating metadata:" + ex.message)
return not_found(environ, start_response)
def application(environ, start_response):
"""
The main WSGI application. Dispatch the current request to
the functions from above and store the regular expression
captures in the WSGI environment as `myapp.url_args` so that
the functions from above can access the url placeholders.
If nothing matches, call the `not_found` function.
:param environ: The HTTP application environment
:param start_response: The application to run when the handling of the
request is done
:return: The response as a list of lines
"""
path = environ.get('PATH_INFO', '').lstrip('/')
if path == "metadata":
return metadata(environ, start_response)
kaka = environ.get("HTTP_COOKIE", None)
logger.info("<application> PATH: %s" % path)
if kaka:
logger.info("= KAKA =")
user, authn_ref = info_from_cookie(kaka)
environ["idp.authn_ref"] = authn_ref
else:
try:
query = parse_qs(environ["QUERY_STRING"])
logger.debug("QUERY: %s" % query)
user = IDP.cache.uid2user[query["id"][0]]
except KeyError:
user = None
url_patterns = AUTHN_URLS
if not user:
logger.info("-- No USER --")
# insert NON_AUTHN_URLS first in case there is no user
url_patterns = NON_AUTHN_URLS + url_patterns
for regex, callback in url_patterns:
match = re.search(regex, path)
if match is not None:
try:
environ['myapp.url_args'] = match.groups()[0]
except IndexError:
environ['myapp.url_args'] = path
logger.debug("Callback: %s" % (callback,))
if isinstance(callback, tuple):
cls = callback[0](environ, start_response, user)
func = getattr(cls, callback[1])
return func()
return callback(environ, start_response, user)
if re.search(r'static/.*', path) is not None:
return staticfile(environ, start_response)
return not_found(environ, start_response)
# ----------------------------------------------------------------------------
# allow uwsgi or gunicorn mount
# by moving some initialization out of __name__ == '__main__' section.
# uwsgi -s 0.0.0.0:8088 --protocol http --callable application --module idp
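# (a gunicorn equivalent would be something like:
#  gunicorn -b 0.0.0.0:8088 idp:application)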
args = type('Config', (object,), { })
args.config = 'idp_conf'
args.mako_root = './'
args.path = None
import socket
from idp_user import USERS
from idp_user import EXTRA
from mako.lookup import TemplateLookup
AUTHN_BROKER = AuthnBroker()
AUTHN_BROKER.add(authn_context_class_ref(PASSWORD),
username_password_authn, 10,
"http://%s" % socket.gethostname())
AUTHN_BROKER.add(authn_context_class_ref(UNSPECIFIED),
"", 0, "http://%s" % socket.gethostname())
IDP = server.Server(args.config, cache=Cache())
IDP.ticket = {}
# ----------------------------------------------------------------------------
if __name__ == '__main__':
from wsgiref.simple_server import make_server
parser = argparse.ArgumentParser()
parser.add_argument('-p', dest='path', help='Path to configuration file.')
parser.add_argument('-v', dest='valid',
help="How long, in days, the metadata is valid from the time of creation")
parser.add_argument('-c', dest='cert', help='certificate')
parser.add_argument('-i', dest='id',
help="The ID of the entities descriptor")
parser.add_argument('-k', dest='keyfile',
help="A file with a key to sign the metadata with")
parser.add_argument('-n', dest='name')
parser.add_argument('-s', dest='sign', action='store_true',
help="sign the metadata")
parser.add_argument('-m', dest='mako_root', default="./")
parser.add_argument(dest="config")
args = parser.parse_args()
_rot = args.mako_root
LOOKUP = TemplateLookup(directories=[_rot + 'templates', _rot + 'htdocs'],
module_directory=_rot + 'modules',
input_encoding='utf-8', output_encoding='utf-8')
HOST = '127.0.0.1'
PORT = 8088
SRV = make_server(HOST, PORT, application)
print "IdP listening on %s:%s" % (HOST, PORT)
SRV.serve_forever()
else:
_rot = args.mako_root
LOOKUP = TemplateLookup(directories=[_rot + 'templates', _rot + 'htdocs'],
module_directory=_rot + 'modules',
input_encoding='utf-8', output_encoding='utf-8')
|
apache-2.0
|
aisipos/django
|
tests/gis_tests/maps/tests.py
|
322
|
2099
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipUnless
from django.contrib.gis.geos import HAS_GEOS
from django.test import SimpleTestCase
from django.test.utils import modify_settings, override_settings
from django.utils.encoding import force_text
GOOGLE_MAPS_API_KEY = 'XXXX'
@skipUnless(HAS_GEOS, 'Geos is required.')
@modify_settings(
INSTALLED_APPS={'append': 'django.contrib.gis'},
)
class GoogleMapsTest(SimpleTestCase):
@override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY)
def test_google_map_scripts(self):
"""
Testing GoogleMap.scripts() output. See #20773.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap
google_map = GoogleMap()
scripts = google_map.scripts
self.assertIn(GOOGLE_MAPS_API_KEY, scripts)
self.assertIn("new GMap2", scripts)
@override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY)
def test_unicode_in_google_maps(self):
"""
Test that GoogleMap doesn't crash with non-ASCII content.
"""
from django.contrib.gis.geos import Point
from django.contrib.gis.maps.google.gmap import GoogleMap, GMarker
center = Point(6.146805, 46.227574)
marker = GMarker(center,
title='En français !')
google_map = GoogleMap(center=center, zoom=18, markers=[marker])
self.assertIn("En français", google_map.scripts)
def test_gevent_html_safe(self):
from django.contrib.gis.maps.google.overlays import GEvent
event = GEvent('click', 'function() {location.href = "http://www.google.com"}')
self.assertTrue(hasattr(GEvent, '__html__'))
self.assertEqual(force_text(event), event.__html__())
def test_goverlay_html_safe(self):
from django.contrib.gis.maps.google.overlays import GOverlayBase
overlay = GOverlayBase()
overlay.js_params = '"foo", "bar"'
self.assertTrue(hasattr(GOverlayBase, '__html__'))
self.assertEqual(force_text(overlay), overlay.__html__())
|
bsd-3-clause
|
horacio3/troposphere
|
tests/test_ec2.py
|
3
|
1571
|
import unittest
import troposphere.ec2 as ec2
class TestEC2(unittest.TestCase):
def test_securitygroupegress(self):
egress = ec2.SecurityGroupEgress(
'egress',
ToPort='80',
FromPort='80',
IpProtocol="tcp",
GroupId="id",
CidrIp="0.0.0.0/0",
)
egress.to_dict()
egress = ec2.SecurityGroupEgress(
'egress',
ToPort='80',
FromPort='80',
IpProtocol="tcp",
GroupId="id",
DestinationPrefixListId='id',
)
egress.to_dict()
egress = ec2.SecurityGroupEgress(
'egress',
ToPort='80',
FromPort='80',
IpProtocol="tcp",
GroupId="id",
DestinationSecurityGroupId='id',
)
egress.to_dict()
egress = ec2.SecurityGroupEgress(
'egress',
ToPort='80',
FromPort='80',
IpProtocol="tcp",
GroupId="id",
CidrIp="0.0.0.0/0",
DestinationPrefixListId='id',
)
with self.assertRaises(ValueError):
egress.to_dict()
egress = ec2.SecurityGroupEgress(
'egress',
ToPort='80',
FromPort='80',
IpProtocol="tcp",
GroupId="id",
CidrIp="0.0.0.0/0",
DestinationPrefixListId='id',
DestinationSecurityGroupId='id',
)
with self.assertRaises(ValueError):
egress.to_dict()
|
bsd-2-clause
|
theblacklion/pyglet
|
experimental/buffer/bars.py
|
28
|
2673
|
#!/usr/bin/python
# $Id:$
import random
import sys
from pyglet.gl import *
from pyglet import clock
from pyglet import font
from pyglet import graphics
from pyglet import window
BARS = 100
if len(sys.argv) > 1:
BARS = int(sys.argv[1])
MIN_BAR_LENGTH = 4
MAX_BAR_LENGTH = 100
BAR_SEGMENT_HEIGHT = 10
UPDATE_PERIOD = 0.01
win = window.Window(vsync=False)
batch = graphics.Batch()
bars = []
colors = [
[170, 0, 0],
[0, 255, 100],
[80, 100, 255],
[40, 180, 180],
[200, 255, 100],
[255, 70, 200],
]
def create_bars():
width = win.width / float(BARS)
for i in range(BARS):
position = [i * width, 0, # degenerate
i * width, 0,
(i + 1) * width, 0,
(i + 1) * width, 0 # degenerate
]
color = colors[i % len(colors)] * 4
bar = batch.add(4, GL_TRIANGLE_STRIP, None,
('v2f/dynamic', position),
('c3B/dynamic', color))
bars.append(bar)
def update_bars():
for bar in bars:
old_length = bar.count
length = random.randint(MIN_BAR_LENGTH, MAX_BAR_LENGTH)
bar.resize(length)
vertices = bar.vertices
# Update new vertices (overwrite old degenerate)
for i in range((old_length - 1) * 2, length * 2):
if i & 1: # y
vertices[i] = BAR_SEGMENT_HEIGHT * (i // 4)
else: # x
vertices[i] = vertices[i - 4]
# Update top degenerate (first degenerate is never modified)
vertices[-2:] = vertices[-4:-2]
# Update colors
if length > old_length:
bar.colors[old_length*3:length*3] = \
bar.colors[:3] * (length - old_length)
stats_text = font.Text(font.load('', 12, bold=True), '',
x=win.width, y=0,
halign='right')
def update_stats(dt):
np = len(bars)
usage = bars[0].domain.allocator.get_usage()
fragmentation = bars[0].domain.allocator.get_fragmentation()
blocks = len(bars[0].domain.allocator.starts)
stats_text.text = \
'Bars: %d Blocks: %d Usage: %d%% Fragmentation: %d%%' % \
(np, blocks, usage * 100, fragmentation * 100)
clock.schedule_interval(update_stats, 1)
fps_text = clock.ClockDisplay(color=(1, 1, 1, 1))
create_bars()
update_time = 0.
while not win.has_exit:
win.dispatch_events()
dt = clock.tick()
dt = min(dt, 0.05)
update_time += dt
if update_time > UPDATE_PERIOD:
update_bars()
update_time -= UPDATE_PERIOD
win.clear()
batch.draw()
stats_text.draw()
fps_text.draw()
win.flip()
|
bsd-3-clause
|
MTASZTAKI/ApertusVR
|
plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/examples/dumpLog.py
|
4
|
1702
|
import time
import math
from datetime import datetime
import struct
import sys
import os, fnmatch
import argparse
from time import sleep
def readLogFile(filename, verbose=True):
f = open(filename, 'rb')
  print('Opened')
print(filename)
keys = f.readline().decode('utf8').rstrip('\n').split(',')
fmt = f.readline().decode('utf8').rstrip('\n')
  # The size in bytes of one record
sz = struct.calcsize(fmt)
  # The number of fields in one record
ncols = len(fmt)
if verbose:
    print('Keys:')
    print(keys)
    print('Format:')
    print(fmt)
    print('Size:')
    print(sz)
    print('Columns:')
    print(ncols)
lenChunk = sz
log = list()
chunkIndex = 0
while (lenChunk):
check = f.read(2)
lenChunk = 0
if (check == b'\xaa\xbb'):
mychunk = f.read(sz)
lenChunk = len(mychunk)
chunks = [mychunk]
if verbose:
print("num chunks:")
print(len(chunks))
for chunk in chunks:
print("len(chunk)=", len(chunk), " sz = ", sz)
if len(chunk) == sz:
print("chunk #", chunkIndex)
chunkIndex = chunkIndex + 1
values = struct.unpack(fmt, chunk)
record = list()
for i in range(ncols):
record.append(values[i])
if verbose:
print(" ", keys[i], "=", values[i])
log.append(record)
else:
print("Error, expected aabb terminal")
return log
numArgs = len(sys.argv)
print('Number of arguments:', numArgs, 'arguments.')
print('Argument List:', str(sys.argv))
fileName = "log.bin"
if (numArgs > 1):
fileName = sys.argv[1]
print("filename=")
print(fileName)
verbose = True
readLogFile(fileName, verbose)
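# A hedged sketch of producing a file this reader understands (field names and
# values are made up): a comma-separated key line, a struct format line, then
# one b'\xaa\xbb'-prefixed, struct-packed record per entry.
#   with open('log.bin', 'wb') as out:
#     out.write(b'stepCount,timeStamp\n')
#     out.write(b'if\n')
#     out.write(b'\xaa\xbb' + struct.pack('if', 1, 0.016))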
|
mit
|
ryfeus/lambda-packs
|
Spacy/source2.7/spacy/lang/nl/tag_map.py
|
3
|
49792
|
# coding: utf8
from __future__ import unicode_literals
from ...symbols import POS, PUNCT, ADJ, NUM, DET, ADV, ADP, X, VERB
from ...symbols import NOUN, PROPN, SPACE, PRON, CONJ
TAG_MAP = {
"ADJ__Number=Sing": {POS: ADJ},
"ADJ___": {POS: ADJ},
"ADP__AdpType=Prep": {POS: ADP},
"ADP__AdpType=Preppron|Gender=Fem|Number=Sing": {POS: ADP},
"ADP__AdpType=Preppron|Gender=Masc|Number=Plur": {POS: ADP},
"ADP__AdpType=Preppron|Gender=Masc|Number=Sing": {POS: ADP},
"ADV__Number=Sing": {POS: ADV},
"ADV__PunctType=Comm": {POS: ADV},
"ADV___": {POS: ADV},
"Adj_Adj_N_N__Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_Adj_N__Degree=Pos|Number=Plur|Variant=Short": {POS: ADJ},
"Adj_Adj_N__Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_Adj__Case=Nom|Degree=Pos": {POS: ADJ},
"Adj_Adj__Degree=Pos": {POS: ADJ},
"Adj_Adj__Degree=Pos|Variant=Short": {POS: ADJ},
"Adj_Adv__Degree=Pos|Variant=Short": {POS: ADJ},
"Adj_Adv|adv|stell|onverv_deelv__Degree=Pos|Variant=Short": {POS: ADJ},
"Adj_Art__Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_Art__Degree=Pos|Number=Sing|Variant=Short": {POS: ADJ},
"Adj_Conj_V__Degree=Pos|Mood=Sub|VerbForm=Fin": {POS: ADJ},
"Adj_Int|attr|stell|vervneut__Case=Nom|Degree=Pos": {POS: ADJ},
"Adj_Misc_Misc__Degree=Pos": {POS: ADJ},
"Adj_N_Conj_N__Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_N_N_N_N__Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_N_N_N__Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_N_N__Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_N_Num__Definite=Def|Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_N_Prep_Art_Adj_N__Degree=Pos|Gender=Neut|Number=Sing": {POS: ADJ},
"Adj_N_Prep_N_Conj_N__Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_N_Prep_N_N__Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_N_Prep_N__Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_N_Punc__Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_N__Degree=Pos|Number=Plur": {POS: ADJ},
"Adj_N__Degree=Pos|Number=Sing": {POS: ADJ},
"Adj_N__Degree=Pos|Number=Sing|Variant=Short": {POS: ADJ},
"Adj_Num__Definite=Def|Degree=Pos": {POS: ADJ},
"Adj_Num__Definite=Def|Degree=Pos|Variant=Short": {POS: ADJ},
"Adj_Prep|adv|stell|vervneut_voor__Degree=Pos|Variant=Short": {POS: ADJ},
"Adj_Prep|adv|vergr|onverv_voor__Degree=Cmp|Variant=Short": {POS: ADJ},
"Adj_V_Conj_V__Degree=Pos|VerbForm=Inf": {POS: ADJ},
"Adj_V_N__Degree=Pos|Number=Sing|Tense=Past|VerbForm=Part": {POS: ADJ},
"Adj_V|adv|stell|onverv_intrans|inf__Degree=Pos|Variant=Short|VerbForm=Inf": {POS: ADJ},
"Adj_V|adv|stell|onverv_trans|imp__Degree=Pos|Mood=Imp|Variant=Short|VerbForm=Fin": {POS: ADJ},
"Adj|adv|stell|onverv__Degree=Pos|Variant=Short": {POS: ADJ},
"Adj|adv|stell|vervneut__Case=Nom|Degree=Pos|Variant=Short": {POS: ADJ},
"Adj|adv|vergr|onverv__Degree=Cmp|Variant=Short": {POS: ADJ},
"Adj|adv|vergr|vervneut__Case=Nom|Degree=Cmp|Variant=Short": {POS: ADJ},
"Adj|attr|overtr|onverv__Degree=Sup": {POS: ADJ},
"Adj|attr|overtr|vervneut__Case=Nom|Degree=Sup": {POS: ADJ},
"Adj|attr|stell|onverv__Degree=Pos": {POS: ADJ},
"Adj|attr|stell|vervgen__Case=Gen|Degree=Pos": {POS: ADJ},
"Adj|attr|stell|vervneut__Case=Nom|Degree=Pos": {POS: ADJ},
"Adj|attr|vergr|onverv__Degree=Cmp": {POS: ADJ},
"Adj|attr|vergr|vervgen__Case=Gen|Degree=Cmp": {POS: ADJ},
"Adj|attr|vergr|vervneut__Case=Nom|Degree=Cmp": {POS: ADJ},
"Adj|zelfst|overtr|vervneut__Case=Nom|Degree=Sup": {POS: ADJ},
"Adj|zelfst|stell|onverv__Degree=Pos": {POS: ADJ},
"Adj|zelfst|stell|vervmv__Degree=Pos|Number=Plur": {POS: ADJ},
"Adj|zelfst|stell|vervneut__Case=Nom|Degree=Pos": {POS: ADJ},
"Adj|zelfst|vergr|vervneut__Case=Nom|Degree=Cmp": {POS: ADJ},
"Adv_Adj_Conj__Degree=Pos": {POS: ADV},
"Adv_Adj__Degree=Cmp": {POS: ADV},
"Adv_Adj__Degree=Pos": {POS: ADV},
"Adv_Adv_Conj_Adv__PronType=Dem": {POS: ADV},
"Adv_Adv__AdpType=Prep": {POS: ADV},
"Adv_Adv__Degree=Pos": {POS: ADV},
"Adv_Adv__Degree=Pos|PronType=Dem": {POS: ADV},
"Adv_Adv|pron|vrag_deeladv___": {POS: ADV},
"Adv_Art__Degree=Pos|Number=Sing": {POS: ADV},
"Adv_Art__Number=Sing": {POS: ADV},
"Adv_Conj_Adv__AdpType=Preppron|Gender=Masc|Number=Sing": {POS: ADV},
"Adv_Conj_Adv__Degree=Pos": {POS: ADV},
"Adv_Conj_Adv|gew|aanw_neven_gew|aanw__PronType=Dem": {POS: ADV},
"Adv_Conj_Adv|gew|onbep_neven_gew|onbep__PronType=Ind": {POS: ADV},
"Adv_Conj_N__Degree=Pos|Number=Sing": {POS: ADV},
"Adv_Conj__Degree=Pos": {POS: ADV},
"Adv_N__Degree=Pos|Number=Sing": {POS: ADV},
"Adv_Num__Degree=Cmp|PronType=Ind": {POS: ADV},
"Adv_N|gew|aanw_soort|ev|neut__Number=Sing": {POS: ADV},
"Adv_Prep_N__Case=Dat|Degree=Pos|Number=Sing": {POS: ADV},
"Adv_Prep_Pron__AdpType=Preppron|Gender=Masc|Number=Sing": {POS: ADV},
"Adv_Prep__Degree=Pos": {POS: ADV},
"Adv_Prep|gew|aanw_voor__AdpType=Prep": {POS: ADV},
"Adv_Prep|gew|aanw_voor___": {POS: ADV},
"Adv_Pron__Degree=Pos": {POS: ADV},
"Adv|deeladv__PartType=Vbp": {POS: ADV},
"Adv|deelv__PartType=Vbp": {POS: ADV},
"Adv|gew|aanw__PronType=Dem": {POS: ADV},
"Adv|gew|betr__PronType=Rel": {POS: ADV},
"Adv|gew|er__AdvType=Ex": {POS: ADV},
"Adv|gew|geenfunc|overtr|onverv__Degree=Sup": {POS: ADV},
"Adv|gew|geenfunc|stell|onverv__Degree=Pos": {POS: ADV},
"Adv|gew|geenfunc|vergr|onverv__Degree=Cmp": {POS: ADV},
"Adv|gew|onbep__PronType=Ind": {POS: ADV},
"Adv|gew|vrag__PronType=Int": {POS: ADV},
"Adv|pron|aanw__PronType=Dem": {POS: ADV},
"Adv|pron|betr__PronType=Rel": {POS: ADV},
"Adv|pron|er__AdvType=Ex": {POS: ADV},
"Adv|pron|onbep__PronType=Ind": {POS: ADV},
"Adv|pron|vrag__PronType=Int": {POS: ADV},
"Art_Adj_N__AdpType=Prep": {POS: DET},
"Art_Adj_N__Definite=Def|Degree=Sup|Gender=Neut|Number=Sing": {POS: DET},
"Art_Adj__Case=Nom|Definite=Def|Degree=Cmp|Gender=Neut": {POS: DET},
"Art_Adj__Case=Nom|Definite=Def|Degree=Sup|Gender=Neut": {POS: DET},
"Art_Adj__Definite=Def|Degree=Cmp|Gender=Neut": {POS: DET},
"Art_Adj__Definite=Def|Degree=Sup|Gender=Neut": {POS: DET},
"Art_Adv__Definite=Def|Degree=Sup|Gender=Neut": {POS: DET},
"Art_Conj_Pron__Number=Sing|PronType=Ind": {POS: DET},
"Art_N_Conj_Art_N__Definite=Def|Gender=Neut|Number=Sing": {POS: DET},
"Art_N_Conj_Art_V__AdpType=Prep": {POS: DET},
"Art_N_Conj_Pron_N__Definite=Def|Gender=Neut|Number=Plur|Person=3": {POS: DET},
"Art_N_Conj__Number=Sing|PronType=Ind": {POS: DET},
"Art_N_N__AdpType=Prep": {POS: DET},
"Art_N_Prep_Adj__Degree=Pos|Number=Sing|PronType=Ind": {POS: DET},
"Art_N_Prep_Art_N__Number=Sing|PronType=Ind": {POS: DET},
"Art_N_Prep_N__AdpType=Prep": {POS: DET},
"Art_N_Prep_N__Definite=Def|Gender=Neut|Number=Sing": {POS: DET},
"Art_N_Prep_N__Number=Sing|PronType=Ind": {POS: DET},
"Art_N_Prep_Pron_N__AdpType=Prep": {POS: DET},
"Art_N__AdpType=Prep": {POS: DET},
"Art_N__Case=Gen|Definite=Def|Number=Sing": {POS: DET},
"Art_N__Number=Sing|PronType=Ind": {POS: DET},
"Art_Num_Art_Adj__AdpType=Prep": {POS: DET},
"Art_Num_N__AdpType=Prep": {POS: DET},
"Art_Num__Definite=Def|Degree=Sup|Gender=Neut|PronType=Ind": {POS: DET},
"Art_Num__Definite=Def|Gender=Neut": {POS: DET},
"Art_Num__Degree=Pos|Number=Sing|PronType=Ind": {POS: DET},
"Art_N|bep|onzijd|neut_eigen|ev|neut__Definite=Def|Gender=Neut|Number=Sing": {POS: DET},
"Art_N|bep|onzijd|neut_soort|ev|neut__Definite=Def|Gender=Neut|Number=Sing": {POS: DET},
"Art_Pron_N__Case=Gen|Number=Plur|PronType=Ind": {POS: DET},
"Art_Pron__Number=Sing|PronType=Ind": {POS: DET},
"Art_V_N__AdpType=Prep": {POS: DET},
"Art|bep|onzijd|neut__Definite=Def|Gender=Neut|PronType=Art": {POS: DET},
"Art|bep|zijdofmv|gen__Case=Gen|Definite=Def|PronType=Art": {POS: DET},
"Art|bep|zijdofmv|neut__Definite=Def|PronType=Art": {POS: DET},
"Art|bep|zijdofonzijd|gen__Case=Gen|Definite=Def|Number=Sing|PronType=Art": {POS: DET},
"Art|bep|zijd|dat__Case=Dat|Definite=Def|Gender=Com|PronType=Art": {POS: DET},
"Art|onbep|zijdofonzijd|neut__Definite=Ind|Number=Sing|PronType=Art": {POS: DET},
"CCONJ___": {POS: CONJ},
"Conj_Adj|neven_adv|vergr|onverv__Degree=Cmp": {POS: CONJ},
"Conj_Adj|neven_attr|stell|onverv__Degree=Pos": {POS: CONJ},
"Conj_Adv_Adv__Degree=Pos": {POS: CONJ},
"Conj_Adv__AdpType=Prep": {POS: CONJ},
"Conj_Adv__AdpType=Preppron|Gender=Masc|Number=Plur": {POS: CONJ},
"Conj_Adv__Degree=Pos": {POS: CONJ},
"Conj_Adv|neven_gew|aanw__PronType=Dem": {POS: CONJ},
"Conj_Art_N__AdpType=Preppron|Gender=Masc|Number=Plur": {POS: CONJ},
"Conj_Art_N__Gender=Neut|Number=Sing": {POS: CONJ},
"Conj_Conj|neven_onder|metfin___": {POS: CONJ},
"Conj_Int|neven___": {POS: CONJ},
"Conj_Int|onder|metfin___": {POS: CONJ},
"Conj_N_Adv__AdpType=Preppron|Gender=Masc|Number=Plur": {POS: CONJ},
"Conj_N_Prep__AdpType=Preppron|Gender=Masc|Number=Plur": {POS: CONJ},
"Conj_N|onder|metfin_soort|ev|neut__AdpType=Preppron|Gender=Masc|Number=Plur": {POS: CONJ},
"Conj_Pron_Adv__Degree=Pos|Number=Sing|Person=3": {POS: CONJ},
"Conj_Pron_V__AdpType=Preppron|Gender=Masc|Number=Plur": {POS: CONJ},
"Conj_Pron|neven_aanw|neut|zelfst__AdpType=Prep": {POS: CONJ},
"Conj_Punc_Conj|neven_schuinstreep_neven__AdpType=Prep": {POS: CONJ},
"Conj_V|onder|metfin_intrans|ott|3|ev__AdpType=Preppron|Gender=Masc|Number=Plur": {POS: CONJ},
"Conj|neven___": {POS: CONJ},
"Conj|onder|metfin___": {POS: CONJ},
"Conj|onder|metinf___": {POS: CONJ},
"DET__Degree=Cmp|NumType=Card|PronType=Ind": {POS: DET},
"DET__Gender=Fem|Number=Sing|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs": {POS: DET},
"DET__Gender=Fem|Number=Sing|PronType=Art": {POS: DET},
"DET__Gender=Masc|Number=Plur|PronType=Art": {POS: DET},
"DET__Gender=Masc|Number=Sing|PronType=Tot": {POS: DET},
"Int_Adv|gew|aanw___": {POS: X},
"Int_Int__NumType=Card": {POS: X},
"Int_Int___": {POS: X},
"Int_N_N_Misc_N___": {POS: X},
"Int_N_Punc_Int_N__Number=Sing": {POS: X},
"Int_Punc_Int|komma__PunctType=Comm": {POS: X},
"Int___": {POS: X},
"Misc_Misc_Misc_Misc_Misc_Misc_Misc_Misc_Misc___": {POS: X},
"Misc_Misc_Misc_Misc_Misc_Misc_Misc___": {POS: X},
"Misc_Misc_Misc_Misc_Misc_Misc_Punc_Misc_Misc_Misc___": {POS: X},
"Misc_Misc_Misc_Misc_Misc_Misc___": {POS: X},
"Misc_Misc_Misc_Misc_Misc_N_Misc_Misc_Misc_Misc_Misc_Misc___": {POS: X},
"Misc_Misc_Misc_Misc|vreemd_vreemd_vreemd_vreemd__AdpType=Preppron|Gender=Masc|Number=Sing": {POS: X},
"Misc_Misc_Misc_Misc|vreemd_vreemd_vreemd_vreemd___": {POS: X},
"Misc_Misc_Misc_N__Number=Sing": {POS: X},
"Misc_Misc_Misc|vreemd_vreemd_vreemd___": {POS: X},
"Misc_Misc_N_N__Number=Sing": {POS: X},
"Misc_Misc_N|vreemd_vreemd_soort|mv|neut__Number=Plur": {POS: X},
"Misc_Misc_Punc_N_N__Number=Sing": {POS: X},
"Misc_Misc|vreemd_vreemd__AdpType=Prep": {POS: X},
"Misc_Misc|vreemd_vreemd__NumType=Card": {POS: X},
"Misc_Misc|vreemd_vreemd___": {POS: X},
"Misc_N_Misc_Misc__Number=Sing": {POS: X},
"Misc_N_N__Number=Sing": {POS: X},
"Misc_N|vreemd_eigen|ev|neut__Number=Sing": {POS: X},
"Misc_N|vreemd_soort|ev|neut__Number=Sing": {POS: X},
"Misc|vreemd__Foreign=Yes": {POS: X},
"NUM__Case=Nom|Definite=Def|Degree=Pos|NumType=Card": {POS: NUM},
"NUM__Definite=Def|Degree=Pos|NumType=Card": {POS: NUM},
"NUM__Definite=Def|Degree=Pos|Number=Sing|NumType=Card": {POS: NUM},
"NUM__Definite=Def|NumType=Card": {POS: NUM},
"NUM__Definite=Def|Number=Plur|NumType=Card": {POS: NUM},
"NUM__Definite=Def|Number=Sing|NumType=Card": {POS: NUM},
"NUM__NumForm=Digit|NumType=Card": {POS: NUM},
"NUM__NumType=Card": {POS: NUM},
"N_Adj_N_Num__Definite=Def|Degree=Pos|Number=Sing": {POS: NOUN},
"N_Adj_N__Degree=Pos|Number=Plur": {POS: NOUN},
"N_Adj_N___": {POS: NOUN},
"N_Adj__AdpType=Prep": {POS: NOUN},
"N_Adj__Case=Nom|Degree=Pos|Number=Plur": {POS: NOUN},
"N_Adj__Case=Nom|Degree=Pos|Number=Sing": {POS: NOUN},
"N_Adj__Degree=Pos|Number=Plur": {POS: NOUN},
"N_Adj__Degree=Pos|Number=Sing": {POS: NOUN},
"N_Adj___": {POS: NOUN},
"N_Adv_Punc_V_Pron_V__Aspect=Imp|Degree=Pos|Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Inf": {POS: NOUN},
"N_Adv__Degree=Pos|Number=Sing": {POS: NOUN},
"N_Adv___": {POS: NOUN},
"N_Adv|soort|ev|neut_deelv__Number=Sing": {POS: NOUN},
"N_Art_Adj_Prep_N___": {POS: NOUN},
"N_Art_N__Case=Gen|Number=Sing": {POS: NOUN},
"N_Art_N__Number=Plur": {POS: NOUN},
"N_Art_N__Number=Sing": {POS: NOUN},
"N_Art_N___": {POS: NOUN},
"N_Conj_Adv__Degree=Pos|Number=Sing": {POS: NOUN},
"N_Conj_Art_N___": {POS: NOUN},
"N_Conj_N_N__Number=Sing": {POS: NOUN},
"N_Conj_N_N___": {POS: NOUN},
"N_Conj_N__Number=Plur": {POS: NOUN},
"N_Conj_N__Number=Sing": {POS: NOUN},
"N_Conj_N___": {POS: NOUN},
"N_Conj|soort|ev|neut_neven__Number=Sing": {POS: NOUN},
"N_Int_N|eigen|ev|neut_eigen|ev|neut___": {POS: NOUN},
"N_Misc_Misc_Misc_Misc___": {POS: NOUN},
"N_Misc_Misc_N___": {POS: NOUN},
"N_Misc_Misc|eigen|ev|neut_vreemd_vreemd___": {POS: NOUN},
"N_Misc_Misc|soort|mv|neut_vreemd_vreemd__Number=Plur": {POS: NOUN},
"N_Misc_N_N_N_N___": {POS: NOUN},
"N_Misc_N_N___": {POS: NOUN},
"N_Misc_N___": {POS: NOUN},
"N_Misc_Num___": {POS: NOUN},
"N_Misc|eigen|ev|neut_vreemd___": {POS: NOUN},
"N_Misc|soort|ev|neut_vreemd__Number=Sing": {POS: NOUN},
"N_N_Adj_Art_N_N__Gender=Masc|Number=Plur|PronType=Art": {POS: NOUN},
"N_N_Adj_N___": {POS: NOUN},
"N_N_Adj__Degree=Pos|Number=Sing": {POS: NOUN},
"N_N_Adj___": {POS: NOUN},
"N_N_Art_Adv___": {POS: NOUN},
"N_N_Art_N___": {POS: NOUN},
"N_N_Conj_N_N_N_N_N___": {POS: NOUN},
"N_N_Conj_N_N___": {POS: NOUN},
"N_N_Conj_N__Number=Sing": {POS: NOUN},
"N_N_Conj_N___": {POS: NOUN},
"N_N_Conj___": {POS: NOUN},
"N_N_Int_N_N___": {POS: NOUN},
"N_N_Misc___": {POS: NOUN},
"N_N_N_Adj_N___": {POS: NOUN},
"N_N_N_Adv___": {POS: NOUN},
"N_N_N_Int__AdpType=Prep": {POS: NOUN},
"N_N_N_Misc___": {POS: NOUN},
"N_N_N_N_Conj_N___": {POS: NOUN},
"N_N_N_N_Misc___": {POS: NOUN},
"N_N_N_N_N_N_Int__AdpType=Prep": {POS: NOUN},
"N_N_N_N_N_N_N__AdpType=Prep": {POS: NOUN},
"N_N_N_N_N_N_N__Gender=Fem|Number=Sing|PronType=Art": {POS: NOUN},
"N_N_N_N_N_N_N___": {POS: NOUN},
"N_N_N_N_N_N_Prep_N___": {POS: NOUN},
"N_N_N_N_N_N__AdpType=Prep": {POS: NOUN},
"N_N_N_N_N_N___": {POS: NOUN},
"N_N_N_N_N_Prep_N___": {POS: NOUN},
"N_N_N_N_N__AdpType=Prep": {POS: NOUN},
"N_N_N_N_N__Number=Sing": {POS: NOUN},
"N_N_N_N_N___": {POS: NOUN},
"N_N_N_N_Prep_N___": {POS: NOUN},
"N_N_N_N_Punc_N_Punc___": {POS: NOUN},
"N_N_N_N_V___": {POS: NOUN},
"N_N_N_N__Gender=Fem|Number=Plur|PronType=Art": {POS: NOUN},
"N_N_N_N__Gender=Fem|Number=Sing|PronType=Art": {POS: NOUN},
"N_N_N_N__NumType=Card": {POS: NOUN},
"N_N_N_N__Number=Plur": {POS: NOUN},
"N_N_N_N__Number=Sing": {POS: NOUN},
"N_N_N_N___": {POS: NOUN},
"N_N_N_Prep_Art_Adj_N___": {POS: NOUN},
"N_N_N_Prep_N_N___": {POS: NOUN},
"N_N_N_Prep_N___": {POS: NOUN},
"N_N_N_Punc_N___": {POS: NOUN},
"N_N_N_Punc___": {POS: NOUN},
"N_N_N__AdpType=Prep": {POS: NOUN},
"N_N_N__Gender=Fem|Number=Sing|PronType=Art": {POS: NOUN},
"N_N_N__Gender=Masc|Number=Plur|PronType=Art": {POS: NOUN},
"N_N_N__Number=Plur": {POS: NOUN},
"N_N_N__Number=Sing": {POS: NOUN},
"N_N_N___": {POS: NOUN},
"N_N_Num_N___": {POS: NOUN},
"N_N_Num__Definite=Def|Number=Sing": {POS: NOUN},
"N_N_Num___": {POS: NOUN},
"N_N_Prep_Art_Adj_N__Degree=Pos|Gender=Neut|Number=Sing": {POS: NOUN},
"N_N_Prep_Art_N_Prep_Art_N___": {POS: NOUN},
"N_N_Prep_Art_N___": {POS: NOUN},
"N_N_Prep_N_N__AdpType=Prep": {POS: NOUN},
"N_N_Prep_N_Prep_Adj_N___": {POS: NOUN},
"N_N_Prep_N__AdpType=Prep": {POS: NOUN},
"N_N_Prep_N__Number=Sing": {POS: NOUN},
"N_N_Prep_N___": {POS: NOUN},
"N_N_Punc_N_Punc___": {POS: NOUN},
"N_Num_N_N__Definite=Def|Number=Sing": {POS: NOUN},
"N_Num_N_Num___": {POS: NOUN},
"N_Num_N___": {POS: NOUN},
"N_Num_Num__Definite=Def|Number=Sing": {POS: NOUN},
"N_Num__Definite=Def|Number=Plur": {POS: NOUN},
"N_Num__Definite=Def|Number=Sing": {POS: NOUN},
"N_Num___": {POS: NOUN},
"N_N|eigen|ev|gen_eigen|ev|gen___": {POS: NOUN},
"N_N|eigen|ev|gen_eigen|ev|neut___": {POS: NOUN},
"N_N|eigen|ev|gen_soort|ev|neut___": {POS: NOUN},
"N_N|eigen|ev|gen_soort|mv|neut___": {POS: NOUN},
"N_N|eigen|ev|neut_eigen|ev|gen___": {POS: NOUN},
"N_N|eigen|ev|neut_eigen|ev|neut__AdpType=Prep": {POS: NOUN},
"N_N|eigen|ev|neut_eigen|ev|neut__AdpType=Preppron|Gender=Fem|Number=Plur": {POS: NOUN},
"N_N|eigen|ev|neut_eigen|ev|neut__AdpType=Preppron|Gender=Masc|Number=Sing": {POS: NOUN},
"N_N|eigen|ev|neut_eigen|ev|neut__Gender=Fem|Number=Plur|PronType=Art": {POS: NOUN},
"N_N|eigen|ev|neut_eigen|ev|neut__Gender=Fem|Number=Sing|PronType=Art": {POS: NOUN},
"N_N|eigen|ev|neut_eigen|ev|neut__Gender=Masc|Number=Plur|PronType=Art": {POS: NOUN},
"N_N|eigen|ev|neut_eigen|ev|neut__Gender=Masc|Number=Sing|PronType=Art": {POS: NOUN},
"N_N|eigen|ev|neut_eigen|ev|neut__NumType=Card": {POS: NOUN},
"N_N|eigen|ev|neut_eigen|ev|neut__Number=Sing": {POS: NOUN},
"N_N|eigen|ev|neut_eigen|ev|neut___": {POS: NOUN},
"N_N|eigen|ev|neut_eigen|mv|neut___": {POS: NOUN},
"N_N|eigen|ev|neut_soort|ev|neut__AdpType=Prep": {POS: NOUN},
"N_N|eigen|ev|neut_soort|ev|neut___": {POS: NOUN},
"N_N|eigen|ev|neut_soort|mv|neut___": {POS: NOUN},
"N_N|eigen|mv|neut_eigen|mv|neut___": {POS: NOUN},
"N_N|soort|ev|neut_eigen|ev|neut__Number=Sing": {POS: NOUN},
"N_N|soort|ev|neut_soort|ev|neut__Gender=Masc|Number=Plur|PronType=Art": {POS: NOUN},
"N_N|soort|ev|neut_soort|ev|neut__NumForm=Digit|NumType=Card": {POS: NOUN},
"N_N|soort|ev|neut_soort|ev|neut__Number=Sing": {POS: NOUN},
"N_N|soort|ev|neut_soort|mv|neut__Number=Plur": {POS: NOUN},
"N_N|soort|mv|neut_eigen|ev|neut__Number=Sing": {POS: NOUN},
"N_N|soort|mv|neut_soort|ev|neut__Number=Sing": {POS: NOUN},
"N_N|soort|mv|neut_soort|mv|neut__Number=Plur": {POS: NOUN},
"N_Prep_Adj_Adj_N__Degree=Pos|Number=Plur": {POS: NOUN},
"N_Prep_Adj_N___": {POS: NOUN},
"N_Prep_Art_N_Art_N__Number=Plur": {POS: NOUN},
"N_Prep_Art_N_N__Number=Sing": {POS: NOUN},
"N_Prep_Art_N_Prep_Art_N__Gender=Neut|Number=Sing": {POS: NOUN},
"N_Prep_Art_N__Number=Plur": {POS: NOUN},
"N_Prep_Art_N__Number=Sing": {POS: NOUN},
"N_Prep_Art_N___": {POS: NOUN},
"N_Prep_N_Art_Adj___": {POS: NOUN},
"N_Prep_N_N__Number=Sing": {POS: NOUN},
"N_Prep_N_N___": {POS: NOUN},
"N_Prep_N_Prep_Art_N___": {POS: NOUN},
"N_Prep_N_Prep_N_Conj_N_Prep_Art_N_N__Number=Sing": {POS: NOUN},
"N_Prep_N_Punc_N_Conj_N__Number=Sing": {POS: NOUN},
"N_Prep_N__Number=Plur": {POS: NOUN},
"N_Prep_N__Number=Sing": {POS: NOUN},
"N_Prep_N___": {POS: NOUN},
"N_Prep_Num__Definite=Def|Number=Sing": {POS: NOUN},
"N_Prep_Pron_N___": {POS: NOUN},
"N_Prep|soort|ev|neut_voor__Number=Sing": {POS: NOUN},
"N_Pron___": {POS: NOUN},
"N_Punc_Adj_N___": {POS: NOUN},
"N_Punc_Adj_Pron_Punc__Degree=Pos|Number=Sing|Person=2": {POS: NOUN},
"N_Punc_Adv_V_Pron_N__Aspect=Imp|Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {POS: NOUN},
"N_Punc_Misc_Punc_N___": {POS: NOUN},
"N_Punc_N_N_N_N__Number=Sing": {POS: NOUN},
"N_Punc_N_Punc_N__Number=Sing": {POS: NOUN},
"N_Punc_N_Punc__Number=Sing": {POS: NOUN},
"N_Punc_N__Number=Sing": {POS: NOUN},
"N_Punc_Punc_N_N_Punc_Punc_N___": {POS: NOUN},
"N_V_N_N___": {POS: NOUN},
"N_V_N___": {POS: NOUN},
"N_V__Aspect=Imp|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin": {POS: NOUN},
"N_V__Number=Sing|Tense=Past|VerbForm=Part": {POS: NOUN},
"N_V___": {POS: NOUN},
"N_V|eigen|ev|neut_trans|imp___": {POS: NOUN},
"N_V|soort|ev|neut_hulpofkopp|conj__Mood=Sub|Number=Sing|VerbForm=Fin": {POS: NOUN},
"N_V|soort|ev|neut_intrans|conj__Mood=Sub|Number=Sing|VerbForm=Fin": {POS: NOUN},
"Num_Adj_Adj_N___": {POS: NUM},
"Num_Adj_N___": {POS: NUM},
"Num_Adj__Definite=Def|Degree=Pos|NumType=Card": {POS: NUM},
"Num_Adj__NumForm=Digit|NumType=Card": {POS: NUM},
"Num_Adj___": {POS: NUM},
"Num_Conj_Adj__Case=Nom|Definite=Def|Degree=Pos|NumType=Card": {POS: NUM},
"Num_Conj_Art_Adj__Definite=Def|Degree=Pos|Number=Sing|NumType=Card": {POS: NUM},
"Num_Conj_Num_N__NumForm=Digit|NumType=Card": {POS: NUM},
"Num_Conj_Num__Degree=Cmp|NumType=Card|PronType=Ind": {POS: NUM},
"Num_N_N__Definite=Def|Number=Sing|NumType=Card": {POS: NUM},
"Num_N_Num_Num_N__NumForm=Digit|NumType=Card": {POS: NUM},
"Num_N_Num__Definite=Def|Number=Sing|NumType=Card": {POS: NUM},
"Num_N_Num__NumForm=Digit|NumType=Card": {POS: NUM},
"Num_N__Definite=Def|Number=Plur|NumType=Card": {POS: NUM},
"Num_N__Definite=Def|Number=Sing|NumType=Card": {POS: NUM},
"Num_N__NumForm=Digit|NumType=Card": {POS: NUM},
"Num_N___": {POS: NUM},
"Num_Num_N__NumForm=Digit|NumType=Card": {POS: NUM},
"Num_Num__Definite=Def|NumType=Card": {POS: NUM},
"Num_Num__NumForm=Digit|NumType=Card": {POS: NUM},
"Num_Prep_Num__Definite=Def|NumType=Card": {POS: NUM},
"Num_Punc_Num_N_N__NumForm=Digit|NumType=Card": {POS: NUM},
"Num_Punc_Num__NumForm=Digit|NumType=Card": {POS: NUM},
"Num_Punc__NumForm=Digit|NumType=Card": {POS: NUM},
"Num__Case=Nom|Degree=Cmp|NumType=Card|PronType=Ind": {POS: NUM},
"Num__Case=Nom|Degree=Pos|NumType=Card|PronType=Ind": {POS: NUM},
"Num__Case=Nom|Degree=Sup|NumType=Card|PronType=Ind": {POS: NUM},
"Num__Degree=Cmp|NumType=Card|PronType=Ind": {POS: NUM},
"Num__Degree=Pos|NumType=Card|PronType=Ind": {POS: NUM},
"Num__Degree=Pos|Number=Plur|NumType=Card|PronType=Ind": {POS: NUM},
"Num__Degree=Sup|NumType=Card|PronType=Ind": {POS: NUM},
"Num__Degree=Sup|Number=Plur|NumType=Card|PronType=Ind": {POS: NUM},
"Num|hoofd|bep|attr|onverv__Definite=Def|NumType=Card": {POS: NUM},
"Num|hoofd|bep|zelfst|onverv__Definite=Def|NumType=Card": {POS: NUM},
"Num|hoofd|bep|zelfst|vervmv__Definite=Def|Number=Plur|NumType=Card": {POS: NUM},
"Num|hoofd|onbep|attr|stell|onverv__Degree=Pos|NumType=Card|PronType=Ind": {POS: NUM},
"Num|hoofd|onbep|attr|vergr|onverv__Degree=Cmp|NumType=Card|PronType=Ind": {POS: NUM},
"Num|rang|bep|attr|onverv__Definite=Def|NumType=Ord": {POS: NUM},
"Num|rang|bep|zelfst|onverv__Definite=Def|NumType=Ord": {POS: NUM},
"N|eigen|ev|gen__Case=Gen|Number=Sing": {POS: NOUN},
"N|eigen|ev|neut__Number=Sing": {POS: NOUN},
"N|eigen|mv|neut__Number=Plur": {POS: NOUN},
"N|soort|ev|dat__Case=Dat|Number=Sing": {POS: NOUN},
"N|soort|ev|gen__Case=Gen|Number=Sing": {POS: NOUN},
"N|soort|ev|neut__Number=Sing": {POS: NOUN},
"N|soort|mv|neut__Number=Plur": {POS: NOUN},
"PROPN___": {POS: PROPN},
"PUNCT___": {POS: PUNCT},
"Prep_Adj_Conj_Prep_N__Degree=Pos|Number=Sing": {POS: ADP},
"Prep_Adj_N__Degree=Pos|Number=Plur": {POS: ADP},
"Prep_Adj|voor_adv|vergr|vervneut__Case=Nom|Degree=Cmp": {POS: ADP},
"Prep_Adj|voor_attr|stell|onverv__Degree=Pos": {POS: ADP},
"Prep_Adj|voor_attr|stell|vervneut__Case=Nom|Degree=Pos": {POS: ADP},
"Prep_Adv__AdpType=Prep": {POS: ADP},
"Prep_Adv__Case=Nom|Degree=Pos": {POS: ADP},
"Prep_Adv__Case=Nom|Degree=Sup": {POS: ADP},
"Prep_Adv__Degree=Pos": {POS: ADP},
"Prep_Adv|voor_gew|aanw__AdpType=Prep": {POS: ADP},
"Prep_Adv|voor_gew|aanw__Gender=Masc|Number=Sing|PronType=Tot": {POS: ADP},
"Prep_Adv|voor_gew|aanw__PronType=Dem": {POS: ADP},
"Prep_Adv|voor_pron|vrag__PronType=Int": {POS: ADP},
"Prep_Art_Adj_N__Degree=Pos|Number=Sing": {POS: ADP},
"Prep_Art_Adj__AdpType=Prep": {POS: ADP},
"Prep_Art_Adj__Case=Nom|Degree=Pos": {POS: ADP},
"Prep_Art_Adj__Degree=Cmp|Gender=Neut": {POS: ADP},
"Prep_Art_Misc_Misc___": {POS: ADP},
"Prep_Art_N_Adv__Number=Sing": {POS: ADP},
"Prep_Art_N_Adv__Number=Sing|PronType=Int": {POS: ADP},
"Prep_Art_N_Art_N__AdpType=Prep": {POS: ADP},
"Prep_Art_N_Prep_Art_N__AdpType=Prep": {POS: ADP},
"Prep_Art_N_Prep__AdpType=Prep": {POS: ADP},
"Prep_Art_N_Prep__Gender=Neut|Number=Sing": {POS: ADP},
"Prep_Art_N_Prep__Number=Sing": {POS: ADP},
"Prep_Art_N_V__Number=Plur|Tense=Past|VerbForm=Part": {POS: ADP},
"Prep_Art_N__AdpType=Prep": {POS: ADP},
"Prep_Art_N__Gender=Com|Number=Sing": {POS: ADP},
"Prep_Art_N__Gender=Neut|Number=Sing": {POS: ADP},
"Prep_Art_N__Number=Plur": {POS: ADP},
"Prep_Art_N__Number=Sing": {POS: ADP},
"Prep_Art_V__AdpType=Prep": {POS: ADP},
"Prep_Art_V__Gender=Neut|VerbForm=Inf": {POS: ADP},
"Prep_Art|voor_bep|onzijd|neut__Gender=Neut": {POS: ADP},
"Prep_Art|voor_onbep|zijdofonzijd|neut__Number=Sing": {POS: ADP},
"Prep_Conj_Prep|voor_neven_voor__Gender=Masc|Number=Sing|PronType=Tot": {POS: ADP},
"Prep_Misc|voor_vreemd___": {POS: ADP},
"Prep_N_Adv|voor_soort|ev|neut_deeladv__Number=Sing": {POS: ADP},
"Prep_N_Adv|voor_soort|ev|neut_pron|aanw__AdpType=Prep": {POS: ADP},
"Prep_N_Adv|voor_soort|ev|neut_pron|aanw__Number=Sing|PronType=Dem": {POS: ADP},
"Prep_N_Adv|voor_soort|ev|neut_pron|vrag__Number=Sing|PronType=Int": {POS: ADP},
"Prep_N_Adv|voor_soort|mv|neut_deelv__Gender=Masc|Number=Sing|PronType=Tot": {POS: ADP},
"Prep_N_Conj_N__Number=Sing": {POS: ADP},
"Prep_N_Conj__AdpType=Prep": {POS: ADP},
"Prep_N_Prep_N__Number=Sing": {POS: ADP},
"Prep_N_Prep|voor_soort|ev|dat_voor__Number=Sing": {POS: ADP},
"Prep_N_Prep|voor_soort|ev|neut_voor__AdpType=Prep": {POS: ADP},
"Prep_N_Prep|voor_soort|ev|neut_voor__Number=Sing": {POS: ADP},
"Prep_N_Prep|voor_soort|mv|neut_voor__Number=Plur": {POS: ADP},
"Prep_N_V__Case=Nom|Number=Sing|Tense=Past|VerbForm=Part": {POS: ADP},
"Prep_Num_N__Definite=Def|Number=Sing": {POS: ADP},
"Prep_Num__Case=Nom|Degree=Sup|PronType=Ind": {POS: ADP},
"Prep_Num__Degree=Cmp|PronType=Ind": {POS: ADP},
"Prep_N|voor_eigen|ev|neut__Number=Sing": {POS: ADP},
"Prep_N|voor_soort|ev|dat__AdpType=Prep": {POS: ADP},
"Prep_N|voor_soort|ev|dat__Case=Dat|Number=Sing": {POS: ADP},
"Prep_N|voor_soort|ev|neut__AdpType=Prep": {POS: ADP},
"Prep_N|voor_soort|ev|neut__Gender=Masc|Number=Sing|PronType=Tot": {POS: ADP},
"Prep_N|voor_soort|ev|neut__Number=Sing": {POS: ADP},
"Prep_N|voor_soort|mv|neut__AdpType=Prep": {POS: ADP},
"Prep_N|voor_soort|mv|neut__Number=Plur": {POS: ADP},
"Prep_Prep_Adj|voor_voor_adv|stell|onverv__Gender=Masc|Number=Sing|PronType=Tot": {POS: ADP},
"Prep_Prep_Adv__Degree=Pos": {POS: ADP},
"Prep_Pron_Adj__Degree=Cmp|Number=Sing|Person=3": {POS: ADP},
"Prep_Pron_N_Adv__Number=Plur": {POS: ADP},
"Prep_Pron_N__AdpType=Prep": {POS: ADP},
"Prep_Pron_N__Case=Dat|Number=Sing": {POS: ADP},
"Prep_Pron|voor_aanw|neut|zelfst___": {POS: ADP},
"Prep_Pron|voor_onbep|neut|attr___": {POS: ADP},
"Prep_Pron|voor_onbep|neut|zelfst___": {POS: ADP},
"Prep_Pron|voor_rec|neut__AdpType=Prep": {POS: ADP},
"Prep_Pron|voor_rec|neut___": {POS: ADP},
"Prep_Pron|voor_ref|3|evofmv__Number=Plur,Sing|Person=3": {POS: ADP},
"Prep_Punc_N_Conj_N__AdpType=Prep": {POS: ADP},
"Prep_V_N__Number=Sing|Tense=Pres|VerbForm=Part": {POS: ADP},
"Prep_V_Pron_Pron_Adv__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|PronType=Dem|Tense=Pres|VerbForm=Fin": {POS: ADP},
"Prep_V|voor_intrans|inf__VerbForm=Inf": {POS: ADP},
"Prep_V|voorinf_trans|inf__VerbForm=Inf": {POS: ADP},
"Prep|achter__AdpType=Post": {POS: ADP},
"Prep|comb__AdpType=Circ": {POS: ADP},
"Prep|voor__AdpType=Prep": {POS: ADP},
"Prep|voorinf__AdpType=Prep|PartType=Inf": {POS: ADP},
"Pron_Adj_N_Punc_Art_Adj_N_Prep_Art_Adj_N__NumType=Card": {POS: PRON},
"Pron_Adj__Case=Nom|Degree=Sup|Number=Sing|Person=2|Poss=Yes|PronType=Prs": {POS: PRON},
"Pron_Adj__Degree=Cmp|PronType=Ind": {POS: PRON},
"Pron_Adv|vrag|neut|attr_deelv__PronType=Int": {POS: PRON},
"Pron_Art_N_N__Number=Plur|PronType=Ind": {POS: PRON},
"Pron_Art__Number=Sing|PronType=Int": {POS: PRON},
"Pron_N_Adv__Number=Sing|PronType=Ind": {POS: PRON},
"Pron_N_V_Adv_Num_Punc__Aspect=Imp|Definite=Def|Mood=Ind|Number=Sing|Person=3|PronType=Ind|Tense=Pres|VerbForm=Fin": {POS: PRON},
"Pron_N_V_Conj_N__Aspect=Imp|Mood=Ind|Number=Sing|Person=3|PronType=Ind|Tense=Pres|VerbForm=Fin": {POS: PRON},
"Pron_N__Case=Gen|Number=Sing|PronType=Ind": {POS: PRON},
"Pron_N__Number=Sing|PronType=Ind": {POS: PRON},
"Pron_N|aanw|gen|attr_soort|mv|neut__Case=Gen|Number=Plur|PronType=Dem": {POS: PRON},
"Pron_N|onbep|neut|attr_soort|ev|neut__Number=Sing|PronType=Ind": {POS: PRON},
"Pron_Prep_Art__Number=Sing|PronType=Int": {POS: PRON},
"Pron_Prep_Art__Number=Sing|PronType=Rel": {POS: PRON},
"Pron_Prep_N__Number=Plur|PronType=Int": {POS: PRON},
"Pron_Prep|betr|neut|zelfst_voor__PronType=Rel": {POS: PRON},
"Pron_Prep|onbep|neut|zelfst_voor__PronType=Ind": {POS: PRON},
"Pron_Prep|vrag|neut|attr_voor__PronType=Int": {POS: PRON},
"Pron_Pron_V__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|PronType=Rel|Tense=Pres|VerbForm=Fin": {POS: PRON},
"Pron_Pron__Person=3|PronType=Prs|Reflex=Yes": {POS: PRON},
"Pron_V_V__Aspect=Imp|Mood=Ind|Person=3|PronType=Dem|Tense=Pres|VerbForm=Inf": {POS: PRON},
"Pron_V__Case=Gen|Number=Sing|Person=3|Poss=Yes|PronType=Prs|VerbForm=Inf": {POS: PRON},
"Pron_V__Number=Plur|Person=1|Poss=Yes|PronType=Prs|VerbForm=Inf": {POS: PRON},
"Pron|aanw|dat|attr__Case=Dat|PronType=Dem": {POS: PRON},
"Pron|aanw|gen|attr__Case=Gen|PronType=Dem": {POS: PRON},
"Pron|aanw|neut|attr__PronType=Dem": {POS: PRON},
"Pron|aanw|neut|attr|weigen__PronType=Dem": {POS: PRON},
"Pron|aanw|neut|attr|wzelf__PronType=Dem": {POS: PRON},
"Pron|aanw|neut|zelfst__PronType=Dem": {POS: PRON},
"Pron|betr|gen|zelfst__Case=Gen|PronType=Rel": {POS: PRON},
"Pron|betr|neut|attr__PronType=Rel": {POS: PRON},
"Pron|betr|neut|zelfst__PronType=Rel": {POS: PRON},
"Pron|bez|1|ev|neut|attr__Number=Sing|Person=1|Poss=Yes|PronType=Prs": {POS: PRON},
"Pron|bez|1|mv|neut|attr__Number=Plur|Person=1|Poss=Yes|PronType=Prs": {POS: PRON},
"Pron|bez|2|ev|neut|attr__Number=Sing|Person=2|Poss=Yes|PronType=Prs": {POS: PRON},
"Pron|bez|2|mv|neut|attr__Number=Plur|Person=2|Poss=Yes|PronType=Prs": {POS: PRON},
"Pron|bez|3|ev|gen|attr__Case=Gen|Number=Sing|Person=3|Poss=Yes|PronType=Prs": {POS: PRON},
"Pron|bez|3|ev|neut|attr__Number=Sing|Person=3|Poss=Yes|PronType=Prs": {POS: PRON},
"Pron|bez|3|ev|neut|zelfst__Number=Sing|Person=3|Poss=Yes|PronType=Prs": {POS: PRON},
"Pron|bez|3|mv|neut|attr__Number=Plur|Person=3|Poss=Yes|PronType=Prs": {POS: PRON},
"Pron|onbep|gen|attr__Case=Gen|PronType=Ind": {POS: PRON},
"Pron|onbep|gen|zelfst__Case=Gen|PronType=Ind": {POS: PRON},
"Pron|onbep|neut|attr__PronType=Ind": {POS: PRON},
"Pron|onbep|neut|zelfst__PronType=Ind": {POS: PRON},
"Pron|per|1|ev|datofacc__Case=Acc,Dat|Number=Sing|Person=1|PronType=Prs": {POS: PRON},
"Pron|per|1|ev|nom__Case=Nom|Number=Sing|Person=1|PronType=Prs": {POS: PRON},
"Pron|per|1|mv|datofacc__Case=Acc,Dat|Number=Plur|Person=1|PronType=Prs": {POS: PRON},
"Pron|per|1|mv|nom__Case=Nom|Number=Plur|Person=1|PronType=Prs": {POS: PRON},
"Pron|per|2|ev|datofacc__Case=Acc,Dat|Number=Sing|Person=2|PronType=Prs": {POS: PRON},
"Pron|per|2|ev|nom__Case=Nom|Number=Sing|Person=2|PronType=Prs": {POS: PRON},
"Pron|per|2|mv|datofacc__Case=Acc,Dat|Number=Plur|Person=2|PronType=Prs": {POS: PRON},
"Pron|per|2|mv|nom__Case=Nom|Number=Plur|Person=2|PronType=Prs": {POS: PRON},
"Pron|per|3|evofmv|datofacc__Case=Acc,Dat|Number=Plur,Sing|Person=3|PronType=Prs": {POS: PRON},
"Pron|per|3|evofmv|nom__Case=Nom|Number=Plur,Sing|Person=3|PronType=Prs": {POS: PRON},
"Pron|per|3|ev|datofacc__Case=Acc,Dat|Number=Sing|Person=3|PronType=Prs": {POS: PRON},
"Pron|per|3|ev|nom__Case=Nom|Number=Sing|Person=3|PronType=Prs": {POS: PRON},
"Pron|per|3|mv|datofacc__Case=Acc,Dat|Number=Plur|Person=3|PronType=Prs": {POS: PRON},
"Pron|rec|gen__Case=Gen|PronType=Rcp": {POS: PRON},
"Pron|rec|neut__PronType=Rcp": {POS: PRON},
"Pron|ref|1|ev__Number=Sing|Person=1|PronType=Prs|Reflex=Yes": {POS: PRON},
"Pron|ref|1|mv__Number=Plur|Person=1|PronType=Prs|Reflex=Yes": {POS: PRON},
"Pron|ref|2|ev__Number=Sing|Person=2|PronType=Prs|Reflex=Yes": {POS: PRON},
"Pron|ref|3|evofmv__Number=Plur,Sing|Person=3|PronType=Prs|Reflex=Yes": {POS: PRON},
"Pron|vrag|neut|attr__PronType=Int": {POS: PRON},
"Pron|vrag|neut|zelfst__PronType=Int": {POS: PRON},
"Punc_Int_Punc_N_N_N_Punc_Pron_V_Pron_Adj_V_Punc___": {POS: PUNCT},
"Punc_N_Punc_N___": {POS: PUNCT},
"Punc_Num_Num___": {POS: PUNCT},
"Punc_Num___": {POS: PUNCT},
"Punc|aanhaaldubb__PunctType=Quot": {POS: PUNCT},
"Punc|aanhaalenk__PunctType=Quot": {POS: PUNCT},
"Punc|dubbpunt__PunctType=Colo": {POS: PUNCT},
"Punc|haakopen__PunctSide=Ini|PunctType=Brck": {POS: PUNCT},
"Punc|haaksluit__PunctSide=Fin|PunctType=Brck": {POS: PUNCT},
"Punc|hellip__PunctType=Peri": {POS: PUNCT},
"Punc|isgelijk___": {POS: PUNCT},
"Punc|komma__PunctType=Comm": {POS: PUNCT},
"Punc|liggstreep___": {POS: PUNCT},
"Punc|maal___": {POS: PUNCT},
"Punc|punt__PunctType=Peri": {POS: PUNCT},
"Punc|puntkomma__PunctType=Semi": {POS: PUNCT},
"Punc|schuinstreep___": {POS: PUNCT},
"Punc|uitroep__PunctType=Excl": {POS: PUNCT},
"Punc|vraag__PunctType=Qest": {POS: PUNCT},
"V_Adv_Art_N_Prep_Pron_N__Degree=Pos|Number=Plur|Person=2|Subcat=Tran": {POS: VERB},
"V_Adv__Degree=Pos|Subcat=Tran": {POS: VERB},
"V_Art_N_Num_N__Aspect=Imp|Definite=Def|Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin|VerbType=Mod": {POS: VERB},
"V_Art_N__Number=Sing|Subcat=Tran": {POS: VERB},
"V_Conj_N_N__Number=Sing|Subcat=Tran|Tense=Past|VerbForm=Part": {POS: VERB},
"V_Conj_Pron__Subcat=Tran|Tense=Past|VerbForm=Part": {POS: VERB},
"V_N_Conj_Adj_N_Prep_Art_N__Degree=Pos|Number=Sing|Subcat=Tran|Tense=Past|VerbForm=Part": {POS: VERB},
"V_N_N__Number=Sing|Subcat=Intr|Tense=Pres|VerbForm=Part": {POS: VERB},
"V_N_N__Number=Sing|Subcat=Tran|Tense=Past|VerbForm=Part": {POS: VERB},
"V_N_V__Aspect=Imp|Mood=Ind|Number=Sing|Subcat=Intr|Tense=Pres|VerbForm=Inf": {POS: VERB},
"V_N__Number=Plur|Subcat=Tran|Tense=Past|VerbForm=Part": {POS: VERB},
"V_N|trans|imp_eigen|ev|neut__Number=Sing|Subcat=Tran": {POS: VERB},
"V_Prep|intrans|verldw|onverv_voor__Subcat=Intr|Tense=Past|VerbForm=Part": {POS: VERB},
"V_Pron_Adv_Adv_Pron_V__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|Subcat=Tran|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V_Pron_Adv__Aspect=Imp|Degree=Pos|Mood=Ind|Number=Sing|Person=2|Subcat=Tran|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V_Pron_V__Aspect=Imp|Mood=Ind|Number=Sing|Person=3|Subcat=Tran|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V_Pron__VerbType=Aux,Cop": {POS: VERB},
"V_V|hulp|imp_intrans|inf__VerbForm=Inf|VerbType=Mod": {POS: VERB},
"V|hulpofkopp|conj__Mood=Sub|VerbForm=Fin": {POS: VERB},
"V|hulpofkopp|conj__Mood=Sub|VerbForm=Fin|VerbType=Aux,Cop": {POS: VERB},
"V|hulpofkopp|imp__Mood=Imp|VerbForm=Fin": {POS: VERB},
"V|hulpofkopp|imp__Mood=Imp|VerbForm=Fin|VerbType=Aux,Cop": {POS: VERB},
"V|hulpofkopp|inf__VerbForm=Inf": {POS: VERB},
"V|hulpofkopp|inf__VerbForm=Inf|VerbType=Aux,Cop": {POS: VERB},
"V|hulpofkopp|inf|subst__VerbForm=Inf": {POS: VERB},
"V|hulpofkopp|ott|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|hulpofkopp|ott|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Tense=Pres|VerbForm=Fin|VerbType=Aux,Cop": {POS: VERB},
"V|hulpofkopp|ott|1|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|hulpofkopp|ott|1|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin|VerbType=Aux,Cop": {POS: VERB},
"V|hulpofkopp|ott|2|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|hulpofkopp|ott|2|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin|VerbType=Aux,Cop": {POS: VERB},
"V|hulpofkopp|ott|3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|hulpofkopp|ott|3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin|VerbType=Aux,Cop": {POS: VERB},
"V|hulpofkopp|ovt|1of2of3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin": {POS: VERB},
"V|hulpofkopp|ovt|1of2of3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|VerbType=Aux,Cop": {POS: VERB},
"V|hulpofkopp|ovt|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Tense=Past|VerbForm=Fin": {POS: VERB},
"V|hulpofkopp|ovt|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Tense=Past|VerbForm=Fin|VerbType=Aux,Cop": {POS: VERB},
"V|hulpofkopp|tegdw|vervneut__Case=Nom|Tense=Pres|VerbForm=Part": {POS: VERB},
"V|hulpofkopp|tegdw|vervneut__Case=Nom|Tense=Pres|VerbForm=Part|VerbType=Aux,Cop": {POS: VERB},
"V|hulpofkopp|verldw|onverv__Tense=Past|VerbForm=Part": {POS: VERB},
"V|hulpofkopp|verldw|onverv__Tense=Past|VerbForm=Part|VerbType=Aux,Cop": {POS: VERB},
"V|hulp|conj__Mood=Sub|VerbForm=Fin|VerbType=Mod": {POS: VERB},
"V|hulp|inf__VerbForm=Inf": {POS: VERB},
"V|hulp|inf__VerbForm=Inf|VerbType=Mod": {POS: VERB},
"V|hulp|ott|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|hulp|ott|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Tense=Pres|VerbForm=Fin|VerbType=Mod": {POS: VERB},
"V|hulp|ott|1|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|hulp|ott|1|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin|VerbType=Mod": {POS: VERB},
"V|hulp|ott|2|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|hulp|ott|2|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin|VerbType=Mod": {POS: VERB},
"V|hulp|ott|3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|hulp|ott|3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin|VerbType=Mod": {POS: VERB},
"V|hulp|ovt|1of2of3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin": {POS: VERB},
"V|hulp|ovt|1of2of3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|VerbType=Mod": {POS: VERB},
"V|hulp|ovt|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Tense=Past|VerbForm=Fin": {POS: VERB},
"V|hulp|ovt|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Tense=Past|VerbForm=Fin|VerbType=Mod": {POS: VERB},
"V|hulp|verldw|onverv__Tense=Past|VerbForm=Part": {POS: VERB},
"V|hulp|verldw|onverv__Tense=Past|VerbForm=Part|VerbType=Mod": {POS: VERB},
"V|intrans|conj__Mood=Sub|Subcat=Intr|VerbForm=Fin": {POS: VERB},
"V|intrans|imp__Mood=Imp|Subcat=Intr|VerbForm=Fin": {POS: VERB},
"V|intrans|inf__Subcat=Intr|VerbForm=Inf": {POS: VERB},
"V|intrans|inf|subst__Subcat=Intr|VerbForm=Inf": {POS: VERB},
"V|intrans|ott|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Subcat=Intr|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|intrans|ott|1|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=1|Subcat=Intr|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|intrans|ott|2|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|Subcat=Intr|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|intrans|ott|3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=3|Subcat=Intr|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|intrans|ovt|1of2of3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Subcat=Intr|Tense=Past|VerbForm=Fin": {POS: VERB},
"V|intrans|ovt|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Subcat=Intr|Tense=Past|VerbForm=Fin": {POS: VERB},
"V|intrans|tegdw|onverv__Subcat=Intr|Tense=Pres|VerbForm=Part": {POS: VERB},
"V|intrans|tegdw|vervmv__Number=Plur|Subcat=Intr|Tense=Pres|VerbForm=Part": {POS: VERB},
"V|intrans|tegdw|vervneut__Case=Nom|Subcat=Intr|Tense=Pres|VerbForm=Part": {POS: VERB},
"V|intrans|tegdw|vervvergr__Degree=Cmp|Subcat=Intr|Tense=Pres|VerbForm=Part": {POS: VERB},
"V|intrans|verldw|onverv__Subcat=Intr|Tense=Past|VerbForm=Part": {POS: VERB},
"V|intrans|verldw|vervmv__Number=Plur|Subcat=Intr|Tense=Past|VerbForm=Part": {POS: VERB},
"V|intrans|verldw|vervneut__Case=Nom|Subcat=Intr|Tense=Past|VerbForm=Part": {POS: VERB},
"V|refl|imp__Mood=Imp|Reflex=Yes|VerbForm=Fin": {POS: VERB},
"V|refl|inf__Reflex=Yes|VerbForm=Inf": {POS: VERB},
"V|refl|inf|subst__Reflex=Yes|VerbForm=Inf": {POS: VERB},
"V|refl|ott|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Reflex=Yes|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|refl|ott|1|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=1|Reflex=Yes|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|refl|ott|2|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|Reflex=Yes|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|refl|ott|3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=3|Reflex=Yes|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|refl|ovt|1of2of3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Reflex=Yes|Tense=Past|VerbForm=Fin": {POS: VERB},
"V|refl|ovt|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Reflex=Yes|Tense=Past|VerbForm=Fin": {POS: VERB},
"V|refl|tegdw|vervneut__Case=Nom|Reflex=Yes|Tense=Pres|VerbForm=Part": {POS: VERB},
"V|refl|verldw|onverv__Reflex=Yes|Tense=Past|VerbForm=Part": {POS: VERB},
"V|trans|conj__Mood=Sub|Subcat=Tran|VerbForm=Fin": {POS: VERB},
"V|trans|imp__Mood=Imp|Subcat=Tran|VerbForm=Fin": {POS: VERB},
"V|trans|inf__Subcat=Tran|VerbForm=Inf": {POS: VERB},
"V|trans|inf|subst__Subcat=Tran|VerbForm=Inf": {POS: VERB},
"V|trans|ott|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Subcat=Tran|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|trans|ott|1|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=1|Subcat=Tran|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|trans|ott|2|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|Subcat=Tran|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|trans|ott|3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Person=3|Subcat=Tran|Tense=Pres|VerbForm=Fin": {POS: VERB},
"V|trans|ovt|1of2of3|ev__Aspect=Imp|Mood=Ind|Number=Sing|Subcat=Tran|Tense=Past|VerbForm=Fin": {POS: VERB},
"V|trans|ovt|1of2of3|mv__Aspect=Imp|Mood=Ind|Number=Plur|Subcat=Tran|Tense=Past|VerbForm=Fin": {POS: VERB},
"V|trans|tegdw|onverv__Subcat=Tran|Tense=Pres|VerbForm=Part": {POS: VERB},
"V|trans|tegdw|vervneut__Case=Nom|Subcat=Tran|Tense=Pres|VerbForm=Part": {POS: VERB},
"V|trans|verldw|onverv__Subcat=Tran|Tense=Past|VerbForm=Part": {POS: VERB},
"V|trans|verldw|vervmv__Number=Plur|Subcat=Tran|Tense=Past|VerbForm=Part": {POS: VERB},
"V|trans|verldw|vervneut__Case=Nom|Subcat=Tran|Tense=Past|VerbForm=Part": {POS: VERB},
"V|trans|verldw|vervvergr__Degree=Cmp|Subcat=Tran|Tense=Past|VerbForm=Part": {POS: VERB},
"X__Aspect=Imp|Definite=Def|Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin|VerbType=Mod": {POS: X},
"X__Aspect=Imp|Definite=Def|Mood=Ind|Number=Sing|Person=3|PronType=Ind|Tense=Pres|VerbForm=Fin": {POS: X},
"X__Aspect=Imp|Degree=Pos|Mood=Ind|Number=Sing|Person=2|Subcat=Tran|Tense=Pres|VerbForm=Fin": {POS: X},
"X__Aspect=Imp|Degree=Pos|Mood=Ind|Number=Sing|Person=2|Tense=Past|VerbForm=Part": {POS: X},
"X__Aspect=Imp|Degree=Pos|Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Inf": {POS: X},
"X__Aspect=Imp|Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {POS: X},
"X__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|PronType=Dem|Tense=Pres|VerbForm=Fin": {POS: X},
"X__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|PronType=Rel|Tense=Pres|VerbForm=Fin": {POS: X},
"X__Aspect=Imp|Mood=Ind|Number=Sing|Person=2|Subcat=Tran|Tense=Pres|VerbForm=Fin": {POS: X},
"X__Aspect=Imp|Mood=Ind|Number=Sing|Person=3|PronType=Ind|Tense=Pres|VerbForm=Fin": {POS: X},
"X__Aspect=Imp|Mood=Ind|Number=Sing|Person=3|Subcat=Tran|Tense=Pres|VerbForm=Fin": {POS: X},
"X__Aspect=Imp|Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: X},
"X__Aspect=Imp|Mood=Ind|Number=Sing|Subcat=Intr|Tense=Pres|VerbForm=Inf": {POS: X},
"X__Aspect=Imp|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin": {POS: X},
"X__Aspect=Imp|Mood=Ind|Person=3|PronType=Dem|Tense=Pres|VerbForm=Inf": {POS: X},
"X__Case=Dat|Degree=Pos|Number=Sing": {POS: X},
"X__Case=Dat|Number=Sing": {POS: X},
"X__Case=Gen|Definite=Def|Number=Sing": {POS: X},
"X__Case=Gen|Number=Plur|PronType=Dem": {POS: X},
"X__Case=Gen|Number=Plur|PronType=Ind": {POS: X},
"X__Case=Gen|Number=Sing": {POS: X},
"X__Case=Gen|Number=Sing|Person=3|Poss=Yes|PronType=Prs|VerbForm=Inf": {POS: X},
"X__Case=Gen|Number=Sing|PronType=Ind": {POS: X},
"X__Case=Nom|Definite=Def|Degree=Cmp|Gender=Neut": {POS: X},
"X__Case=Nom|Definite=Def|Degree=Sup": {POS: X},
"X__Case=Nom|Definite=Def|Degree=Sup|Gender=Neut": {POS: X},
"X__Case=Nom|Degree=Cmp": {POS: X},
"X__Case=Nom|Degree=Pos": {POS: X},
"X__Case=Nom|Degree=Pos|Gender=Neut": {POS: X},
"X__Case=Nom|Degree=Pos|Number=Plur": {POS: X},
"X__Case=Nom|Degree=Pos|Number=Sing": {POS: X},
"X__Case=Nom|Degree=Sup": {POS: X},
"X__Case=Nom|Degree=Sup|Number=Sing|Person=2|Poss=Yes|PronType=Prs": {POS: X},
"X__Case=Nom|Degree=Sup|PronType=Ind": {POS: X},
"X__Case=Nom|Number=Sing|Tense=Past|VerbForm=Part": {POS: X},
"X__Definite=Def": {POS: X},
"X__Definite=Def|Degree=Cmp|Gender=Neut": {POS: X},
"X__Definite=Def|Degree=Pos": {POS: X},
"X__Definite=Def|Degree=Pos|Number=Sing": {POS: X},
"X__Definite=Def|Degree=Pos|Variant=Short": {POS: X},
"X__Definite=Def|Degree=Sup|Gender=Neut": {POS: X},
"X__Definite=Def|Degree=Sup|Gender=Neut|Number=Sing": {POS: X},
"X__Definite=Def|Degree=Sup|Gender=Neut|PronType=Ind": {POS: X},
"X__Definite=Def|Gender=Neut": {POS: X},
"X__Definite=Def|Gender=Neut|Number=Plur|Person=3": {POS: X},
"X__Definite=Def|Gender=Neut|Number=Sing": {POS: X},
"X__Definite=Def|Number=Plur": {POS: X},
"X__Definite=Def|Number=Sing": {POS: X},
"X__Definite=Def|Number=Sing|Person=1": {POS: X},
"X__Definite=Def|Number=Sing|Tense=Past|VerbForm=Part": {POS: X},
"X__Definite=Def|Number=Sing|Tense=Pres|VerbForm=Part": {POS: X},
"X__Degree=Cmp": {POS: X},
"X__Degree=Cmp|Gender=Neut": {POS: X},
"X__Degree=Cmp|Number=Sing|Person=3": {POS: X},
"X__Degree=Cmp|PronType=Ind": {POS: X},
"X__Degree=Cmp|Variant=Short": {POS: X},
"X__Degree=Pos": {POS: X},
"X__Degree=Pos|Gender=Neut|Number=Sing": {POS: X},
"X__Degree=Pos|Mood=Imp|Variant=Short|VerbForm=Fin": {POS: X},
"X__Degree=Pos|Mood=Sub|VerbForm=Fin": {POS: X},
"X__Degree=Pos|Number=Plur": {POS: X},
"X__Degree=Pos|Number=Plur|Person=2|Subcat=Tran": {POS: X},
"X__Degree=Pos|Number=Plur|Variant=Short": {POS: X},
"X__Degree=Pos|Number=Sing": {POS: X},
"X__Degree=Pos|Number=Sing|Person=1|Poss=Yes|PronType=Prs": {POS: X},
"X__Degree=Pos|Number=Sing|Person=2": {POS: X},
"X__Degree=Pos|Number=Sing|Person=3": {POS: X},
"X__Degree=Pos|Number=Sing|PronType=Ind": {POS: X},
"X__Degree=Pos|Number=Sing|Subcat=Tran|Tense=Past|VerbForm=Part": {POS: X},
"X__Degree=Pos|Number=Sing|Tense=Past|VerbForm=Part": {POS: X},
"X__Degree=Pos|Number=Sing|Variant=Short": {POS: X},
"X__Degree=Pos|PronType=Dem": {POS: X},
"X__Degree=Pos|Subcat=Tran": {POS: X},
"X__Degree=Pos|Variant=Short": {POS: X},
"X__Degree=Pos|Variant=Short|VerbForm=Inf": {POS: X},
"X__Degree=Pos|VerbForm=Inf": {POS: X},
"X__Gender=Com|Number=Sing": {POS: X},
"X__Gender=Neut": {POS: X},
"X__Gender=Neut|Number=Sing": {POS: X},
"X__Gender=Neut|VerbForm=Inf": {POS: X},
"X__Mood=Sub|Number=Sing|VerbForm=Fin": {POS: X},
"X__Mood=Sub|VerbForm=Fin": {POS: X},
"X__Number=Plur": {POS: X},
"X__Number=Plur,Sing|Person=3": {POS: X},
"X__Number=Plur|Person=1|Poss=Yes|PronType=Prs|VerbForm=Inf": {POS: X},
"X__Number=Plur|PronType=Ind": {POS: X},
"X__Number=Plur|PronType=Int": {POS: X},
"X__Number=Plur|Subcat=Tran|Tense=Past|VerbForm=Part": {POS: X},
"X__Number=Plur|Tense=Past|VerbForm=Part": {POS: X},
"X__Number=Sing": {POS: X},
"X__Number=Sing|Person=3": {POS: X},
"X__Number=Sing|PronType=Dem": {POS: X},
"X__Number=Sing|PronType=Ind": {POS: X},
"X__Number=Sing|PronType=Int": {POS: X},
"X__Number=Sing|PronType=Rel": {POS: X},
"X__Number=Sing|Subcat=Intr|Tense=Pres|VerbForm=Part": {POS: X},
"X__Number=Sing|Subcat=Tran": {POS: X},
"X__Number=Sing|Subcat=Tran|Tense=Past|VerbForm=Part": {POS: X},
"X__Number=Sing|Tense=Past|VerbForm=Part": {POS: X},
"X__Number=Sing|Tense=Pres|VerbForm=Part": {POS: X},
"X__Person=3|PronType=Prs|Reflex=Yes": {POS: X},
"X__PronType=Dem": {POS: X},
"X__PronType=Ind": {POS: X},
"X__PronType=Int": {POS: X},
"X__PronType=Rel": {POS: X},
"X__Subcat=Intr|Tense=Past|VerbForm=Part": {POS: X},
"X__Subcat=Tran|Tense=Past|VerbForm=Part": {POS: X},
"X__VerbForm=Inf": {POS: X},
"X__VerbForm=Inf|VerbType=Mod": {POS: X},
"X__VerbType=Aux,Cop": {POS: X},
"X___": {POS: X},
"_SP": {POS: SPACE}
}
|
mit
|
ayepezv/GAD_ERP
|
openerp/addons/base/tests/test_res_partner_bank.py
|
60
|
2015
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
from odoo.tests.common import TransactionCase
class TestResPartnerBank(TransactionCase):
"""Tests acc_number
"""
def test_sanitized_acc_number(self):
partner_bank_model = self.env['res.partner.bank']
acc_number = " BE-001 2518823 03 "
vals = partner_bank_model.search([('acc_number', '=', acc_number)])
self.assertEquals(0, len(vals))
partner_bank = partner_bank_model.create({
'acc_number': acc_number,
'partner_id': self.ref('base.res_partner_2'),
'acc_type': 'bank',
})
vals = partner_bank_model.search([('acc_number', '=', acc_number)])
self.assertEquals(1, len(vals))
self.assertEquals(partner_bank, vals[0])
vals = partner_bank_model.search([('acc_number', 'in', [acc_number])])
self.assertEquals(1, len(vals))
self.assertEquals(partner_bank, vals[0])
self.assertEqual(partner_bank.acc_number, acc_number)
# sanitize the acc_number
sanitized_acc_number = 'BE001251882303'
vals = partner_bank_model.search(
[('acc_number', '=', sanitized_acc_number)])
self.assertEquals(1, len(vals))
self.assertEquals(partner_bank, vals[0])
vals = partner_bank_model.search(
[('acc_number', 'in', [sanitized_acc_number])])
self.assertEquals(1, len(vals))
self.assertEquals(partner_bank, vals[0])
self.assertEqual(partner_bank.sanitized_acc_number,
sanitized_acc_number)
# search is case insensitive
vals = partner_bank_model.search(
[('acc_number', '=', sanitized_acc_number.lower())])
self.assertEquals(1, len(vals))
vals = partner_bank_model.search(
[('acc_number', '=', acc_number.lower())])
self.assertEquals(1, len(vals))
|
gpl-3.0
|
tudennis/LeetCode---kamyu104-11-24-2015
|
Python/search-for-a-range.py
|
2
|
2020
|
from __future__ import print_function
# Time: O(log n)
# Space: O(1)
#
# Given a sorted array of integers, find the starting and ending position of a given target value.
#
# Your algorithm's runtime complexity must be in the order of O(log n).
#
# If the target is not found in the array, return [-1, -1].
#
# For example,
# Given [5, 7, 7, 8, 8, 10] and target value 8,
# return [3, 4].
#
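# A worked trace of the approach below on that sample (a sketch, not part of the
# original solution): the first binary search finds the leftmost index with
# nums[idx] >= 8, which is 3; the second finds the leftmost index with
# nums[idx] > 8, which is 5; so the answer is [3, 5 - 1] = [3, 4].
#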
class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
# Find the first idx where nums[idx] >= target
left = self.binarySearch(lambda x, y: x >= y, nums, target)
if left >= len(nums) or nums[left] != target:
return [-1, -1]
# Find the first idx where nums[idx] > target
right = self.binarySearch(lambda x, y: x > y, nums, target)
return [left, right - 1]
def binarySearch(self, compare, nums, target):
left, right = 0, len(nums)
while left < right:
mid = left + (right - left) // 2
if compare(nums[mid], target):
right = mid
else:
left = mid + 1
return left
def binarySearch2(self, compare, nums, target):
left, right = 0, len(nums) - 1
while left <= right:
mid = left + (right - left) // 2
if compare(nums[mid], target):
right = mid - 1
else:
left = mid + 1
return left
def binarySearch3(self, compare, nums, target):
left, right = -1, len(nums)
while left + 1 < right:
mid = left + (right - left) // 2
if compare(nums[mid], target):
right = mid
else:
left = mid
return left if left != -1 and compare(nums[left], target) else right
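# Note: the three helpers differ only in their loop invariant. binarySearch keeps
# a half-open [left, right) range, binarySearch2 a closed [left, right] range,
# and binarySearch3 an open (left, right) range; all three locate the same
# boundary, the first index whose element satisfies compare.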
if __name__ == "__main__":
print(Solution().searchRange([2, 2], 3))
print(Solution().searchRange([5, 7, 7, 8, 8, 10], 8))
|
mit
|
jhawkesworth/ansible
|
test/units/mock/yaml_helper.py
|
209
|
5267
|
import io
import yaml
from ansible.module_utils.six import PY3
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.dumper import AnsibleDumper
class YamlTestUtils(object):
"""Mixin class to combine with a unittest.TestCase subclass."""
def _loader(self, stream):
"""Vault related tests will want to override this.
Vault cases should set up an AnsibleLoader that has the vault password."""
return AnsibleLoader(stream)
def _dump_stream(self, obj, stream, dumper=None):
"""Dump to a py2-unicode or py3-string stream."""
if PY3:
return yaml.dump(obj, stream, Dumper=dumper)
else:
return yaml.dump(obj, stream, Dumper=dumper, encoding=None)
def _dump_string(self, obj, dumper=None):
"""Dump to a py2-unicode or py3-string"""
if PY3:
return yaml.dump(obj, Dumper=dumper)
else:
return yaml.dump(obj, Dumper=dumper, encoding=None)
def _dump_load_cycle(self, obj):
# Each pass through a dump or load revs the 'generation'
# obj to yaml string
string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper)
# wrap a stream/file like StringIO around that yaml
stream_from_object_dump = io.StringIO(string_from_object_dump)
loader = self._loader(stream_from_object_dump)
# load the yaml stream to create a new instance of the object (gen 2)
obj_2 = loader.get_data()
# dump the gen 2 objects directly to strings
string_from_object_dump_2 = self._dump_string(obj_2,
dumper=AnsibleDumper)
# The gen 1 and gen 2 yaml strings should be identical
self.assertEquals(string_from_object_dump, string_from_object_dump_2)
# the gen 1 (orig) and gen 2 py objects should be equal
self.assertEquals(obj, obj_2)
# again! gen 3... load strings into py objects
stream_3 = io.StringIO(string_from_object_dump_2)
loader_3 = self._loader(stream_3)
obj_3 = loader_3.get_data()
string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper)
self.assertEquals(obj, obj_3)
# should be transitive, but...
self.assertEquals(obj_2, obj_3)
self.assertEquals(string_from_object_dump, string_from_object_dump_3)
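# Minimal usage sketch (hypothetical values): a TestCase that mixes in
# YamlTestUtils could call self._dump_load_cycle({'a': [1, 2]}) to check that
# dumping with AnsibleDumper and re-loading with AnsibleLoader round-trips both
# the object and its YAML representation unchanged.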
def _old_dump_load_cycle(self, obj):
'''Dump the passed in object to yaml, load it back up, dump again, compare.'''
stream = io.StringIO()
yaml_string = self._dump_string(obj, dumper=AnsibleDumper)
self._dump_stream(obj, stream, dumper=AnsibleDumper)
yaml_string_from_stream = stream.getvalue()
# reset stream
stream.seek(0)
loader = self._loader(stream)
# loader = AnsibleLoader(stream, vault_password=self.vault_password)
obj_from_stream = loader.get_data()
stream_from_string = io.StringIO(yaml_string)
loader2 = self._loader(stream_from_string)
# loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password)
obj_from_string = loader2.get_data()
stream_obj_from_stream = io.StringIO()
stream_obj_from_string = io.StringIO()
if PY3:
yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper)
yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper)
else:
yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None)
yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None)
yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue()
yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue()
stream_obj_from_stream.seek(0)
stream_obj_from_string.seek(0)
if PY3:
yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper)
yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper)
else:
yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None)
yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None)
assert yaml_string == yaml_string_obj_from_stream
assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream ==
yaml_string_stream_obj_from_string)
assert obj == obj_from_stream
assert obj == obj_from_string
assert obj == yaml_string_obj_from_stream
assert obj == yaml_string_obj_from_string
assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
return {'obj': obj,
'yaml_string': yaml_string,
'yaml_string_from_stream': yaml_string_from_stream,
'obj_from_stream': obj_from_stream,
'obj_from_string': obj_from_string,
'yaml_string_obj_from_string': yaml_string_obj_from_string}
|
gpl-3.0
|
cmosa/gitinspector
|
gitinspector/config.py
|
51
|
2939
|
# coding: utf-8
#
# Copyright © 2013 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import extensions
import filtering
import format
import interval
import optval
import os
import subprocess
def __read_git_config__(repo, variable):
previous_directory = os.getcwd()
os.chdir(repo)
setting = subprocess.Popen("git config inspector." + variable, shell=True, bufsize=1,
stdout=subprocess.PIPE).stdout
os.chdir(previous_directory)
try:
setting = setting.readlines()[0]
setting = setting.decode("utf-8", "replace").strip()
except IndexError:
setting = ""
return setting
def __read_git_config_bool__(repo, variable):
try:
variable = __read_git_config__(repo, variable)
return optval.get_boolean_argument(False if variable == "" else variable)
except optval.InvalidOptionArgument:
return False
def __read_git_config_string__(repo, variable):
string = __read_git_config__(repo, variable)
return (True, string) if len(string) > 0 else (False, None)
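# Usage sketch (hypothetical config values): if a user has run
# "git config inspector.file-types java" inside the repository, then
# __read_git_config_string__(repo, "file-types") returns (True, "java"), and
# __read_git_config_bool__(repo, "weeks") returns True only if
# "git config inspector.weeks true" (or a similar truthy value) has been set.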
def init(run):
var = __read_git_config_string__(run.repo, "file-types")
if var[0]:
extensions.define(var[1])
var = __read_git_config_string__(run.repo, "exclude")
if var[0]:
filtering.add(var[1])
var = __read_git_config_string__(run.repo, "format")
if var[0] and not format.select(var[1]):
raise format.InvalidFormatError(_("specified output format not supported."))
run.hard = __read_git_config_bool__(run.repo, "hard")
run.list_file_types = __read_git_config_bool__(run.repo, "list-file-types")
run.localize_output = __read_git_config_bool__(run.repo, "localize-output")
run.metrics = __read_git_config_bool__(run.repo, "metrics")
run.responsibilities = __read_git_config_bool__(run.repo, "responsibilities")
run.useweeks = __read_git_config_bool__(run.repo, "weeks")
var = __read_git_config_string__(run.repo, "since")
if var[0]:
interval.set_since(var[1])
var = __read_git_config_string__(run.repo, "until")
if var[0]:
interval.set_until(var[1])
run.timeline = __read_git_config_bool__(run.repo, "timeline")
if __read_git_config_bool__(run.repo, "grading"):
run.hard = True
run.list_file_types = True
run.metrics = True
run.responsibilities = True
run.timeline = True
run.useweeks = True
|
gpl-3.0
|
phil-lopreiato/the-blue-alliance
|
database/dict_converters/team_converter.py
|
3
|
1224
|
from database.dict_converters.converter_base import ConverterBase
class TeamConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 4,
}
@classmethod
def _convert(cls, teams, dict_version):
CONVERTERS = {
3: cls.teamsConverter_v3,
}
return CONVERTERS[dict_version](teams)
@classmethod
def teamsConverter_v3(cls, teams):
return map(cls.teamConverter_v3, teams)
@classmethod
def teamConverter_v3(cls, team):
has_nl = team.nl and team.nl.city and team.nl.state_prov and team.nl.country
default_name = "Team {}".format(team.team_number)
team_dict = {
'key': team.key.id(),
'team_number': team.team_number,
'nickname': team.nickname if team.nickname else default_name,
'name': team.name if team.name else default_name,
'website': team.website,
'rookie_year': team.rookie_year,
'motto': None,
'home_championship': team.championship_location,
'school_name': team.school_name,
}
team_dict.update(cls.constructLocation_v3(team))
return team_dict
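# Illustrative output (hypothetical team record): teamConverter_v3 would return
# something like {'key': 'frc254', 'team_number': 254,
# 'nickname': 'The Cheesy Poofs', ...} plus whatever location fields
# constructLocation_v3 contributes.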
|
mit
|
togaurav1981/Hello-World
|
node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/_mapping.py
|
94
|
5509
|
# -*- coding: utf-8 -*-
"""
pygments.formatters._mapping
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter mapping definitions. This file is generated by itself. Every time
you change something on a builtin formatter definition, run this script from
the formatters folder to update it.
Do not alter the FORMATTERS dictionary by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# start
from pygments.formatters.bbcode import BBCodeFormatter
from pygments.formatters.html import HtmlFormatter
from pygments.formatters.img import BmpImageFormatter
from pygments.formatters.img import GifImageFormatter
from pygments.formatters.img import ImageFormatter
from pygments.formatters.img import JpgImageFormatter
from pygments.formatters.latex import LatexFormatter
from pygments.formatters.other import NullFormatter
from pygments.formatters.other import RawTokenFormatter
from pygments.formatters.rtf import RtfFormatter
from pygments.formatters.svg import SvgFormatter
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
FORMATTERS = {
BBCodeFormatter: ('BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
BmpImageFormatter: ('img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
GifImageFormatter: ('img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
HtmlFormatter: ('HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."),
ImageFormatter: ('img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
JpgImageFormatter: ('img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
LatexFormatter: ('LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
NullFormatter: ('Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
RawTokenFormatter: ('Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
RtfFormatter: ('RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft\xae Word\xae documents.'),
SvgFormatter: ('SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
Terminal256Formatter: ('Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
TerminalFormatter: ('Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.')
}
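# Usage sketch: callers can look up a formatter's metadata directly from this
# mapping; for example, FORMATTERS[HtmlFormatter][1] is the alias tuple
# ('html',) and FORMATTERS[HtmlFormatter][2] the filename patterns
# ('*.html', '*.htm'), per the entries above.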
if __name__ == '__main__':
import sys
import os
# lookup formatters
found_formatters = []
imports = []
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from pygments.util import docstring_headline
for filename in os.listdir('.'):
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.formatters.%s' % filename[:-3]
print(module_name)
module = __import__(module_name, None, None, [''])
for formatter_name in module.__all__:
imports.append((module_name, formatter_name))
formatter = getattr(module, formatter_name)
found_formatters.append(
'%s: %r' % (formatter_name,
(formatter.name,
tuple(formatter.aliases),
tuple(formatter.filenames),
docstring_headline(formatter))))
# sort them, that should make the diff files for svn smaller
found_formatters.sort()
imports.sort()
# extract useful sourcecode from this file
f = open(__file__)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('# start')]
footer = content[content.find("if __name__ == '__main__':"):]
# write new file
f = open(__file__, 'w')
f.write(header)
f.write('# start\n')
f.write('\n'.join(['from %s import %s' % imp for imp in imports]))
f.write('\n\n')
f.write('FORMATTERS = {\n %s\n}\n\n' % ',\n '.join(found_formatters))
f.write(footer)
f.close()
|
mit
|
cchurch/ansible
|
lib/ansible/modules/network/f5/bigip_firewall_rule.py
|
3
|
43681
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_firewall_rule
short_description: Manage AFM Firewall rules
description:
- Manages firewall rules in an AFM firewall policy. New rules will always be added to the
end of the policy. Rules can be re-ordered using the C(bigip_security_policy) module.
Rules can also be pre-ordered using the C(bigip_security_policy) module and then later
updated using the C(bigip_firewall_rule) module.
version_added: 2.7
options:
name:
description:
- Specifies the name of the rule.
type: str
required: True
parent_policy:
description:
- The policy which contains the rule to be managed.
- One of either C(parent_policy) or C(parent_rule_list) is required.
type: str
parent_rule_list:
description:
- The rule list which contains the rule to be managed.
- One of either C(parent_policy) or C(parent_rule_list) is required.
type: str
action:
description:
- Specifies the action for the firewall rule.
- When C(accept), allows packets with the specified source, destination,
and protocol to pass through the firewall. Packets that match the rule,
and are accepted, traverse the system as if the firewall is not present.
- When C(drop), drops packets with the specified source, destination, and
protocol. Dropping a packet is a silent action with no notification to
the source or destination systems. Dropping the packet causes the connection
to be retried until the retry threshold is reached.
- When C(reject), rejects packets with the specified source, destination,
and protocol. When a packet is rejected the firewall sends a destination
unreachable message to the sender.
- When C(accept-decisively), allows packets with the specified source,
destination, and protocol to pass through the firewall, and does not require
any further processing by any of the further firewalls. Packets that match
the rule, and are accepted, traverse the system as if the firewall is not
present. If the Rule List is applied to a virtual server, management IP,
or self IP firewall rule, then Accept Decisively is equivalent to Accept.
- When creating a new rule, if this parameter is not provided, the default is
C(reject).
type: str
choices:
- accept
- drop
- reject
- accept-decisively
status:
description:
- Indicates the activity state of the rule or rule list.
- When C(disabled), specifies that the rule or rule list does not apply at all.
- When C(enabled), specifies that the system applies the firewall rule or rule
list to the given context and addresses.
- When C(scheduled), specifies that the system applies the rule or rule list
according to the specified schedule.
- When creating a new rule, if this parameter is not provided, the default
is C(enabled).
type: str
choices:
- enabled
- disabled
- scheduled
schedule:
description:
- Specifies a schedule for the firewall rule.
- You configure schedules to define days and times when the firewall rule is
made active.
type: str
description:
description:
- The rule description.
type: str
irule:
description:
- Specifies an iRule that is applied to the firewall rule.
- An iRule can be started when the firewall rule matches traffic.
type: str
protocol:
description:
- Specifies the protocol to which the rule applies.
- Protocols may be specified by either their name or numeric value.
- A special protocol value C(any) can be specified to match any protocol. The
numeric equivalent of this protocol is C(255).
type: str
source:
description:
- Specifies packet sources to which the rule applies.
- Leaving this field blank applies the rule to all addresses and all ports.
    - You can specify the following source items. An IPv4 or IPv6 address, an IPv4
      or IPv6 address range, a geographic location, a VLAN, an address list, a port,
      a port range, or a port list.
- You can specify a mix of different types of items for the source address.
suboptions:
address:
description:
- Specifies a specific IP address.
type: str
address_list:
description:
- Specifies an existing address list.
type: str
address_range:
description:
- Specifies an address range.
type: str
country:
description:
- Specifies a country code.
type: str
port:
description:
- Specifies a single numeric port.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: int
port_list:
description:
          - Specifies an existing port list.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
port_range:
description:
- Specifies a range of ports, which is two port values separated by
a hyphen. The port to the left of the hyphen should be less than the
port to the right.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
vlan:
description:
- Specifies VLANs to which the rule applies.
- The VLAN source refers to the packet's source.
type: str
type: list
destination:
description:
- Specifies packet destinations to which the rule applies.
- Leaving this field blank applies the rule to all addresses and all ports.
    - You can specify the following destination items. An IPv4 or IPv6 address,
      an IPv4 or IPv6 address range, a geographic location, a VLAN, an address list,
      a port, a port range, or a port list.
    - You can specify a mix of different types of items for the destination address.
suboptions:
address:
description:
- Specifies a specific IP address.
type: str
address_list:
description:
- Specifies an existing address list.
type: str
address_range:
description:
- Specifies an address range.
type: str
country:
description:
- Specifies a country code.
type: str
port:
description:
- Specifies a single numeric port.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: int
port_list:
description:
          - Specifies an existing port list.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
port_range:
description:
- Specifies a range of ports, which is two port values separated by
a hyphen. The port to the left of the hyphen should be less than the
port to the right.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
type: list
logging:
description:
- Specifies whether logging is enabled or disabled for the firewall rule.
- When creating a new rule, if this parameter is not specified, the default
      is C(no).
type: bool
rule_list:
description:
- Specifies an existing rule list to use in the rule.
- This parameter is mutually exclusive with many of the other individual-rule
      specific settings. This includes C(action), C(source),
      C(destination), C(irule), C(protocol) and C(logging).
- This parameter is only used when C(parent_policy) is specified, otherwise it is ignored.
type: str
icmp_message:
description:
- Specifies the Internet Control Message Protocol (ICMP) or ICMPv6 message
C(type) and C(code) that the rule uses.
- This parameter is only relevant when C(protocol) is either C(icmp)(1) or
C(icmpv6)(58).
suboptions:
type:
description:
- Specifies the type of ICMP message.
- You can specify control messages, such as Echo Reply (0) and Destination
Unreachable (3), or you can specify C(any) to indicate that the system
applies the rule for all ICMP messages.
- You can also specify an arbitrary ICMP message.
- The ICMP protocol contains definitions for the existing message type and
number pairs.
type: str
code:
description:
- Specifies the code returned in response to the specified ICMP message type.
- You can specify codes, each set appropriate to the associated type, such
as No Code (0) (associated with Echo Reply (0)) and Host Unreachable (1)
(associated with Destination Unreachable (3)), or you can specify C(any)
to indicate that the system applies the rule for all codes in response to
that specific ICMP message.
- You can also specify an arbitrary code.
- The ICMP protocol contains definitions for the existing message code and
number pairs.
type: str
type: list
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(state) is C(present), ensures that the rule exists.
- When C(state) is C(absent), ensures that the rule is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a new rule in the foo firewall policy
bigip_firewall_rule:
name: foo
parent_policy: policy1
protocol: tcp
source:
- address: 1.2.3.4
- address: "::1"
- address_list: foo-list1
- address_range: 1.1.1.1-2.2.2.2
- vlan: vlan1
- country: US
- port: 22
- port_list: port-list1
- port_range: 80-443
destination:
- address: 1.2.3.4
- address: "::1"
- address_list: foo-list1
- address_range: 1.1.1.1-2.2.2.2
- country: US
- port: 22
- port_list: port-list1
- port_range: 80-443
irule: irule1
action: accept
logging: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create an ICMP specific rule
bigip_firewall_rule:
name: foo
protocol: icmp
icmp_message:
type: 0
source:
- country: US
action: drop
logging: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Add a new policy rule that uses an existing rule list
bigip_firewall_rule:
name: foo
parent_policy: foo_policy
rule_list: rule-list1
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
name:
description: Name of the rule.
returned: changed
type: str
sample: FooRule
parent_policy:
description: The policy which contains the rule to be managed.
returned: changed
type: str
sample: FooPolicy
parent_rule_list:
description: The rule list which contains the rule to be managed.
returned: changed
type: str
sample: FooRuleList
action:
description: The action for the firewall rule.
returned: changed
type: str
sample: drop
status:
description: The activity state of the rule or rule list.
returned: changed
type: str
sample: scheduled
schedule:
description: The schedule for the firewall rule.
returned: changed
type: str
sample: Foo_schedule
description:
description: The rule description.
returned: changed
type: str
sample: MyRule
irule:
description: The iRule that is applied to the firewall rule.
returned: changed
type: str
sample: _sys_auth_radius
protocol:
description: The protocol to which the rule applies.
returned: changed
type: str
sample: any
source:
  description: The packet sources to which the rule applies.
returned: changed
type: complex
contains:
address:
description: A specific IP address.
returned: changed
type: str
sample: 192.168.1.1
address_list:
description: An existing address list.
returned: changed
type: str
sample: foo-list1
address_range:
description: The address range.
returned: changed
type: str
sample: 1.1.1.1-2.2.2.2
country:
description: A country code.
returned: changed
type: str
sample: US
port:
description: Single numeric port.
returned: changed
type: int
sample: 8080
port_list:
description: An existing port list.
returned: changed
type: str
sample: port-list1
port_range:
description: The port range.
returned: changed
type: str
sample: 80-443
vlan:
description: Source VLANs for the packets.
returned: changed
type: str
sample: vlan1
sample: hash/dictionary of values
destination:
description: The packet destinations to which the rule applies.
returned: changed
type: complex
contains:
address:
description: A specific IP address.
returned: changed
type: str
sample: 192.168.1.1
address_list:
description: An existing address list.
returned: changed
type: str
sample: foo-list1
address_range:
description: The address range.
returned: changed
type: str
sample: 1.1.1.1-2.2.2.2
country:
description: A country code.
returned: changed
type: str
sample: US
port:
description: Single numeric port.
returned: changed
type: int
sample: 8080
port_list:
description: An existing port list.
returned: changed
type: str
sample: port-list1
port_range:
description: The port range.
returned: changed
type: str
sample: 80-443
sample: hash/dictionary of values
logging:
  description: Enable or disable logging for the firewall rule.
returned: changed
type: bool
sample: yes
rule_list:
description: An existing rule list to use in the parent policy.
returned: changed
type: str
sample: rule-list-1
icmp_message:
  description: The ICMP or ICMPv6 message C(type) and C(code) that the rule uses.
returned: changed
type: complex
contains:
type:
description: The type of ICMP message.
returned: changed
type: str
sample: 0
code:
description: The code returned in response to the specified ICMP message type.
returned: changed
type: str
sample: 1
sample: hash/dictionary of values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
api_map = {
'ipProtocol': 'protocol',
'log': 'logging',
'icmp': 'icmp_message',
'ruleList': 'rule_list'
}
api_attributes = [
'irule',
'ipProtocol',
'log',
'schedule',
'status',
'destination',
'source',
'icmp',
'action',
'description',
'ruleList',
]
returnables = [
'logging',
'protocol',
'irule',
'source',
'destination',
'action',
'status',
'schedule',
'description',
'icmp_message',
'rule_list',
]
updatables = [
'logging',
'protocol',
'irule',
'source',
'destination',
'action',
'status',
'schedule',
'description',
'icmp_message',
'rule_list',
]
protocol_map = {
'1': 'icmp',
'6': 'tcp',
'17': 'udp',
'58': 'icmpv6',
'255': 'any',
}
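    # Note: protocol_map translates the numeric protocol values returned by
    # the REST API (for example '6') into the names this module exposes
    # (for example 'tcp'); see ApiParameters.protocol below.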
class ApiParameters(Parameters):
@property
def logging(self):
if self._values['logging'] is None:
return None
if self._values['logging'] == 'yes':
return True
return False
@property
def protocol(self):
if self._values['protocol'] is None:
return None
if self._values['protocol'] in self.protocol_map:
return self.protocol_map[self._values['protocol']]
return self._values['protocol']
@property
def source(self):
result = []
if self._values['source'] is None:
return None
v = self._values['source']
if 'addressLists' in v:
result += [('address_list', x) for x in v['addressLists']]
if 'vlans' in v:
result += [('vlan', x) for x in v['vlans']]
if 'geo' in v:
result += [('geo', x['name']) for x in v['geo']]
if 'addresses' in v:
result += [('address', x['name']) for x in v['addresses']]
if 'ports' in v:
result += [('port', str(x['name'])) for x in v['ports']]
if 'portLists' in v:
result += [('port_list', x) for x in v['portLists']]
if result:
return result
return None
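    # The nested source structure returned by the API is flattened above into
    # a list of (kind, value) tuples, for example [('address', '1.2.3.4'),
    # ('port', '80')], so it can be diffed against ModuleParameters.source.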
@property
def destination(self):
result = []
if self._values['destination'] is None:
return None
v = self._values['destination']
if 'addressLists' in v:
result += [('address_list', x) for x in v['addressLists']]
if 'geo' in v:
result += [('geo', x['name']) for x in v['geo']]
if 'addresses' in v:
result += [('address', x['name']) for x in v['addresses']]
if 'ports' in v:
result += [('port', x['name']) for x in v['ports']]
if 'portLists' in v:
result += [('port_list', x) for x in v['portLists']]
if result:
return result
return None
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = [x['name'] for x in self._values['icmp_message']]
return result
class ModuleParameters(Parameters):
@property
def irule(self):
if self._values['irule'] is None:
return None
if self._values['irule'] == '':
return ''
return fq_name(self.partition, self._values['irule'])
@property
def description(self):
if self._values['description'] is None:
return None
if self._values['description'] == '':
return ''
return self._values['description']
@property
def schedule(self):
if self._values['schedule'] is None:
return None
if self._values['schedule'] == '':
return ''
return fq_name(self.partition, self._values['schedule'])
@property
def source(self):
result = []
if self._values['source'] is None:
return None
for x in self._values['source']:
if 'address' in x and x['address'] is not None:
result += [('address', x['address'])]
elif 'address_range' in x and x['address_range'] is not None:
result += [('address', x['address_range'])]
elif 'address_list' in x and x['address_list'] is not None:
result += [('address_list', x['address_list'])]
elif 'country' in x and x['country'] is not None:
result += [('geo', x['country'])]
elif 'vlan' in x and x['vlan'] is not None:
result += [('vlan', fq_name(self.partition, x['vlan']))]
elif 'port' in x and x['port'] is not None:
result += [('port', str(x['port']))]
elif 'port_range' in x and x['port_range'] is not None:
result += [('port', x['port_range'])]
elif 'port_list' in x and x['port_list'] is not None:
result += [('port_list', fq_name(self.partition, x['port_list']))]
if result:
return result
return None
@property
def destination(self):
result = []
if self._values['destination'] is None:
return None
for x in self._values['destination']:
if 'address' in x and x['address'] is not None:
result += [('address', x['address'])]
elif 'address_range' in x and x['address_range'] is not None:
result += [('address', x['address_range'])]
elif 'address_list' in x and x['address_list'] is not None:
result += [('address_list', x['address_list'])]
elif 'country' in x and x['country'] is not None:
result += [('geo', x['country'])]
elif 'port' in x and x['port'] is not None:
result += [('port', str(x['port']))]
elif 'port_range' in x and x['port_range'] is not None:
result += [('port', x['port_range'])]
elif 'port_list' in x and x['port_list'] is not None:
result += [('port_list', fq_name(self.partition, x['port_list']))]
if result:
return result
return None
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = []
for x in self._values['icmp_message']:
type = x.get('type', '255')
code = x.get('code', '255')
if type is None or type == 'any':
type = '255'
if code is None or code == 'any':
code = '255'
if type == '255' and code == '255':
result.append("255")
elif type == '255' and code != '255':
raise F5ModuleError(
"A type of 'any' (255) requires a code of 'any'."
)
elif code == '255':
result.append(type)
else:
result.append('{0}:{1}'.format(type, code))
result = list(set(result))
return result
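    # For illustration: icmp_message entries such as [{'type': '8', 'code': '0'}]
    # are normalised above to ['8:0'], and a type or code of 'any' becomes '255',
    # the wildcard value the BIG-IP API expects.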
@property
def rule_list(self):
if self._values['rule_list'] is None:
return None
if self._values['parent_policy'] is not None:
return fq_name(self.partition, self._values['rule_list'])
return None
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def logging(self):
if self._values['logging'] is None:
return None
if self._values['logging'] is True:
return "yes"
return "no"
@property
def source(self):
if self._values['source'] is None:
return None
result = dict(
addresses=[],
addressLists=[],
vlans=[],
geo=[],
ports=[],
portLists=[]
)
for x in self._values['source']:
if x[0] == 'address':
result['addresses'].append({'name': x[1]})
elif x[0] == 'address_list':
result['addressLists'].append(x[1])
elif x[0] == 'vlan':
result['vlans'].append(x[1])
elif x[0] == 'geo':
result['geo'].append({'name': x[1]})
elif x[0] == 'port':
result['ports'].append({'name': str(x[1])})
elif x[0] == 'port_list':
result['portLists'].append(x[1])
return result
@property
def destination(self):
if self._values['destination'] is None:
return None
result = dict(
addresses=[],
addressLists=[],
vlans=[],
geo=[],
ports=[],
portLists=[]
)
for x in self._values['destination']:
if x[0] == 'address':
result['addresses'].append({'name': x[1]})
elif x[0] == 'address_list':
result['addressLists'].append(x[1])
elif x[0] == 'geo':
result['geo'].append({'name': x[1]})
elif x[0] == 'port':
result['ports'].append({'name': str(x[1])})
elif x[0] == 'port_list':
result['portLists'].append(x[1])
return result
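    # The source and destination properties above convert the internal
    # (kind, value) tuples back into the nested dict structure (addresses,
    # addressLists, vlans, geo, ports, portLists) that the REST API expects.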
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = []
for x in self._values['icmp_message']:
result.append({'name': x})
return result
class ReportableChanges(Changes):
@property
def source(self):
if self._values['source'] is None:
return None
result = []
v = self._values['source']
if v['addressLists']:
result += [('address_list', x) for x in v['addressLists']]
if v['vlans']:
result += [('vlan', x) for x in v['vlans']]
if v['geo']:
result += [('geo', x['name']) for x in v['geo']]
if v['addresses']:
result += [('address', x['name']) for x in v['addresses']]
if v['ports']:
result += [('port', str(x)) for x in v['ports']]
if v['portLists']:
result += [('port_list', x['name']) for x in v['portLists']]
if result:
return dict(result)
return None
@property
def destination(self):
if self._values['destination'] is None:
return None
result = []
v = self._values['destination']
if v['addressLists']:
result += [('address_list', x) for x in v['addressLists']]
if v['geo']:
result += [('geo', x['name']) for x in v['geo']]
if v['addresses']:
result += [('address', x['name']) for x in v['addresses']]
if v['ports']:
result += [('port', str(x)) for x in v['ports']]
if v['portLists']:
result += [('port_list', x['name']) for x in v['portLists']]
if result:
return dict(result)
return None
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
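    # The properties below override the default comparison where an empty
    # string means "clear the current value" (irule, description) or where an
    # order-insensitive set comparison is needed (source, destination,
    # icmp_message).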
@property
def irule(self):
if self.want.irule is None:
return None
if self.have.irule is None and self.want.irule == '':
return None
if self.have.irule is None:
return self.want.irule
if self.want.irule != self.have.irule:
return self.want.irule
@property
def description(self):
if self.want.description is None:
return None
if self.have.description is None and self.want.description == '':
return None
if self.have.description is None:
return self.want.description
if self.want.description != self.have.description:
return self.want.description
@property
def source(self):
if self.want.source is None:
return None
if self.want.source is None and self.have.source is None:
return None
if self.have.source is None:
return self.want.source
if set(self.want.source) != set(self.have.source):
return self.want.source
@property
def destination(self):
if self.want.destination is None:
return None
if self.want.destination is None and self.have.destination is None:
return None
if self.have.destination is None:
return self.want.destination
if set(self.want.destination) != set(self.have.destination):
return self.want.destination
@property
def icmp_message(self):
if self.want.icmp_message is None:
return None
if self.want.icmp_message is None and self.have.icmp_message is None:
return None
if self.have.icmp_message is None:
return self.want.icmp_message
if set(self.want.icmp_message) != set(self.have.icmp_message):
return self.want.icmp_message
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
name = self.want.name
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
name.replace('/', '_')
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
name.replace('/', '_')
)
resp = self.client.api.get(uri)
if resp.ok:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
self.set_reasonable_creation_defaults()
if self.want.status == 'scheduled' and self.want.schedule is None:
raise F5ModuleError(
"A 'schedule' must be specified when 'status' is 'scheduled'."
)
if self.module.check_mode:
return True
self.create_on_device()
return True
def set_reasonable_creation_defaults(self):
if self.want.action is None:
self.changes.update({'action': 'reject'})
if self.want.logging is None:
self.changes.update({'logging': False})
if self.want.status is None:
self.changes.update({'status': 'enabled'})
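        # These defaults mirror the documented module behaviour: a new rule
        # rejects traffic, has logging disabled and is enabled, unless the
        # task specifies otherwise.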
def create_on_device(self):
params = self.changes.api_params()
name = self.want.name
params['name'] = name.replace('/', '_')
params['partition'] = self.want.partition
params['placeAfter'] = 'last'
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
)
if self.changes.protocol not in ['icmp', 'icmpv6']:
if self.changes.icmp_message is not None:
raise F5ModuleError(
"The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'."
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
name = self.want.name
        if self.want.parent_policy:
            uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.want.partition, self.want.parent_policy),
                name.replace('/', '_')
            )
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
name.replace('/', '_')
)
if self.have.protocol not in ['icmp', 'icmpv6'] and self.changes.protocol not in ['icmp', 'icmpv6']:
if self.changes.icmp_message is not None:
raise F5ModuleError(
"The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'."
)
if self.changes.protocol in ['icmp', 'icmpv6']:
self.changes.update({'source': {}})
self.changes.update({'destination': {}})
params = self.changes.api_params()
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
name = self.want.name
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
name.replace('/', '_')
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
name.replace('/', '_')
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent_policy=dict(),
parent_rule_list=dict(),
logging=dict(type='bool'),
protocol=dict(),
irule=dict(),
description=dict(),
source=dict(
type='list',
elements='dict',
options=dict(
address=dict(),
address_list=dict(),
address_range=dict(),
country=dict(),
port=dict(type='int'),
port_list=dict(),
port_range=dict(),
vlan=dict(),
),
mutually_exclusive=[[
'address', 'address_list', 'address_range', 'country', 'vlan',
'port', 'port_range', 'port_list'
]]
),
destination=dict(
type='list',
elements='dict',
options=dict(
address=dict(),
address_list=dict(),
address_range=dict(),
country=dict(),
port=dict(type='int'),
port_list=dict(),
port_range=dict(),
),
mutually_exclusive=[[
'address', 'address_list', 'address_range', 'country',
'port', 'port_range', 'port_list'
]]
),
action=dict(
choices=['accept', 'drop', 'reject', 'accept-decisively']
),
status=dict(
choices=['enabled', 'disabled', 'scheduled']
),
schedule=dict(),
rule_list=dict(),
icmp_message=dict(
type='list',
elements='dict',
options=dict(
type=dict(),
code=dict(),
)
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
state=dict(
default='present',
choices=['present', 'absent']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['rule_list', 'action'],
['rule_list', 'source'],
['rule_list', 'destination'],
['rule_list', 'irule'],
['rule_list', 'protocol'],
['rule_list', 'logging'],
['parent_policy', 'parent_rule_list']
]
self.required_one_of = [
['parent_policy', 'parent_rule_list']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive,
required_one_of=spec.required_one_of
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
gpl-3.0
|
ihsanudin/odoo
|
addons/website_blog/__init__.py
|
373
|
1036
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import models
import wizard
|
agpl-3.0
|
vinayan3/clpricehistory
|
django/db/backends/sqlite3/introspection.py
|
160
|
5815
|
import re
from django.db.backends import BaseDatabaseIntrospection
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower()
try:
return self.base_data_types_reverse[key]
except KeyError:
m = re.search(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$', key)
if m:
return ('CharField', {'max_length': int(m.group(1))})
raise KeyError
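    # Illustrative lookups: FlexibleFieldLookupDict()['integer'] returns
    # 'IntegerField', 'varchar(30)' falls through to the regex and returns
    # ('CharField', {'max_length': 30}), and an unknown type raises KeyError.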
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name FROM sqlite_master
WHERE type='table' AND NOT name='sqlite_sequence'
ORDER BY name""")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
return [(info['name'], info['type'], None, None, None, None,
info['null_ok']) for info in self._table_info(cursor, table_name)]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(')+1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
            m = re.search(r'references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li+1:ri]
for other_index, other_desc in enumerate(other_table_results.split(',')):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
name = other_desc.split(' ', 1)[0].strip('"')
if name == column:
relations[field_index] = (other_index, table)
break
return relations
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
indexes = {}
for info in self._table_info(cursor, table_name):
indexes[info['name']] = {'primary_key': info['pk'] != 0,
'unique': False}
cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
# seq, name, unique
for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
if not unique:
continue
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
info = cursor.fetchall()
# Skip indexes across multiple fields
if len(info) != 1:
continue
name = info[0][2] # seqno, cid, name
indexes[name]['unique'] = True
return indexes
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, dflt_value, pk
return [{'name': field[1],
'type': field[2],
'null_ok': not field[3],
'pk': field[5] # undocumented
} for field in cursor.fetchall()]
|
bsd-3-clause
|
drtuxwang/system-config
|
bin/battery.py
|
1
|
3916
|
#!/usr/bin/env python3
"""
Monitor laptop battery
"""
import argparse
import signal
import sys
from typing import List
import power_mod
class Options:
"""
Options class
"""
def __init__(self) -> None:
self._args: argparse.Namespace = None
self.parse(sys.argv)
def get_summary_flag(self) -> bool:
"""
Return summary flag.
"""
return self._args.summary_flag
def _parse_args(self, args: List[str]) -> None:
parser = argparse.ArgumentParser(description='Monitor laptop battery.')
parser.add_argument(
'-s',
action='store_true',
dest='summary_flag',
help='Show summary'
)
self._args = parser.parse_args(args)
def parse(self, args: List[str]) -> None:
"""
Parse arguments
"""
self._parse_args(args[1:])
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
sys.exit(exception)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
@staticmethod
def _show_battery(battery: power_mod.Battery) -> None:
model = (
battery.get_oem() + ' ' + battery.get_name() + ' ' +
battery.get_type() + ' ' + str(battery.get_capacity_max()) +
'mAh/' + str(battery.get_voltage()) + 'mV'
)
if battery.get_charge() == '-':
state = '-'
if battery.get_rate() > 0:
state += str(battery.get_rate()) + 'mA'
if battery.get_voltage() > 0:
power = '{0:4.2f}'.format(float(
battery.get_rate()*battery.get_voltage()) / 1000000)
state += ', ' + str(power) + 'W'
hours = '{0:3.1f}'.format(float(
battery.get_capacity()) / battery.get_rate())
state += ', ' + str(hours) + 'h'
elif battery.get_charge() == '+':
state = '+'
if battery.get_rate() > 0:
state += str(battery.get_rate()) + 'mA'
if battery.get_voltage() > 0:
power = '{0:4.2f}'.format(float(
battery.get_rate()*battery.get_voltage()) / 1000000)
state += ', ' + str(power) + 'W'
else:
state = 'Unused'
print(
model + " = ", battery.get_capacity(),
"mAh [" + state + "]",
sep=""
)
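    # Note: in _show_battery above, the rate (mA) multiplied by the voltage
    # (mV) gives microwatts, hence the division by 1000000 to report watts.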
@staticmethod
def _show_summary(batteries: List[power_mod.Battery]) -> None:
capacity = 0
rate = 0
for battery in batteries:
if battery.is_exist():
capacity += battery.get_capacity()
if battery.get_charge() == '-':
rate -= battery.get_rate()
elif battery.get_charge() == '+':
rate += battery.get_rate()
if capacity:
if rate:
print("{0:d}mAh [{1:+d}mAh]".format(capacity, rate))
else:
print("{0:d}mAh [Unused]".format(capacity))
def run(self) -> int:
"""
Start program
"""
options = Options()
batteries = power_mod.Battery.factory()
if options.get_summary_flag():
self._show_summary(batteries)
else:
for battery in batteries:
if battery.is_exist():
self._show_battery(battery)
return 0
if __name__ == '__main__':
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
|
gpl-2.0
|
jounex/hue
|
desktop/core/ext-py/Django-1.6.10/django/template/response.py
|
221
|
6214
|
from django.http import HttpResponse
from django.template import loader, Context, RequestContext
from django.utils import six
class ContentNotRenderedError(Exception):
pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
mimetype=None):
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
        # API. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self._post_render_callbacks = []
        # The content argument doesn't make sense here because it will be
        # replaced with the rendered template, so we always pass an empty
        # string in order to prevent errors and provide a shorter signature.
super(SimpleTemplateResponse, self).__init__('', content_type, status,
mimetype)
# _is_rendered tracks whether the template and context has been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""Pickling support function.
Ensures that the object can't be pickled before it has been
rendered, and that the pickled state only includes rendered
data, not the data used to construct the response.
"""
obj_dict = super(SimpleTemplateResponse, self).__getstate__()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"Accepts a template object, path-to-template or list of paths"
if isinstance(template, (list, tuple)):
return loader.select_template(template)
elif isinstance(template, six.string_types):
return loader.get_template(template)
else:
return template
def resolve_context(self, context):
"""Converts context data into a full Context object
(assuming it isn't already a Context object).
"""
if isinstance(context, Context):
return context
else:
return Context(context)
@property
def rendered_content(self):
"""Returns the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self.resolve_template(self.template_name)
context = self.resolve_context(self.context_data)
content = template.render(context)
return content
def add_post_render_callback(self, callback):
"""Adds a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
def render(self):
"""Renders (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Returns the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be iterated over.')
return super(SimpleTemplateResponse, self).__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be accessed.')
return super(SimpleTemplateResponse, self).content
@content.setter
def content(self, value):
"""Sets the content for the response
"""
HttpResponse.content.fset(self, value)
self._is_rendered = True
class TemplateResponse(SimpleTemplateResponse):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + \
['_request', '_current_app']
def __init__(self, request, template, context=None, content_type=None,
status=None, mimetype=None, current_app=None):
# self.request gets over-written by django.test.client.Client - and
# unlike context_data and template_name the _request should not
# be considered part of the public API.
self._request = request
        # As a convenience we'll allow callers to provide current_app without
        # having to create the RequestContext directly.
self._current_app = current_app
super(TemplateResponse, self).__init__(
template, context, content_type, status, mimetype)
def resolve_context(self, context):
"""Convert context data into a full RequestContext object
(assuming it isn't already a Context object).
"""
if isinstance(context, Context):
return context
return RequestContext(self._request, context, current_app=self._current_app)
|
apache-2.0
|
bonitadecker77/python-for-android
|
python3-alpha/python3-src/Lib/encodings/cp1006.py
|
272
|
13568
|
""" Python Character Mapping Codec cp1006 generated from 'MAPPINGS/VENDORS/MISC/CP1006.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1006',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
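# Illustrative round trip, assuming the codec is registered as 'cp1006' (it
# ships with CPython): b'\xb0'.decode('cp1006') yields '\ufe8d' (ARABIC LETTER
# ALEF ISOLATED FORM) and '\ufe8d'.encode('cp1006') yields b'\xb0'.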
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u06f0' # 0xA1 -> EXTENDED ARABIC-INDIC DIGIT ZERO
'\u06f1' # 0xA2 -> EXTENDED ARABIC-INDIC DIGIT ONE
'\u06f2' # 0xA3 -> EXTENDED ARABIC-INDIC DIGIT TWO
'\u06f3' # 0xA4 -> EXTENDED ARABIC-INDIC DIGIT THREE
'\u06f4' # 0xA5 -> EXTENDED ARABIC-INDIC DIGIT FOUR
'\u06f5' # 0xA6 -> EXTENDED ARABIC-INDIC DIGIT FIVE
'\u06f6' # 0xA7 -> EXTENDED ARABIC-INDIC DIGIT SIX
'\u06f7' # 0xA8 -> EXTENDED ARABIC-INDIC DIGIT SEVEN
'\u06f8' # 0xA9 -> EXTENDED ARABIC-INDIC DIGIT EIGHT
'\u06f9' # 0xAA -> EXTENDED ARABIC-INDIC DIGIT NINE
'\u060c' # 0xAB -> ARABIC COMMA
'\u061b' # 0xAC -> ARABIC SEMICOLON
'\xad' # 0xAD -> SOFT HYPHEN
'\u061f' # 0xAE -> ARABIC QUESTION MARK
'\ufe81' # 0xAF -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
'\ufe8d' # 0xB0 -> ARABIC LETTER ALEF ISOLATED FORM
'\ufe8e' # 0xB1 -> ARABIC LETTER ALEF FINAL FORM
'\ufe8e' # 0xB2 -> ARABIC LETTER ALEF FINAL FORM
'\ufe8f' # 0xB3 -> ARABIC LETTER BEH ISOLATED FORM
'\ufe91' # 0xB4 -> ARABIC LETTER BEH INITIAL FORM
'\ufb56' # 0xB5 -> ARABIC LETTER PEH ISOLATED FORM
'\ufb58' # 0xB6 -> ARABIC LETTER PEH INITIAL FORM
'\ufe93' # 0xB7 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
'\ufe95' # 0xB8 -> ARABIC LETTER TEH ISOLATED FORM
'\ufe97' # 0xB9 -> ARABIC LETTER TEH INITIAL FORM
'\ufb66' # 0xBA -> ARABIC LETTER TTEH ISOLATED FORM
'\ufb68' # 0xBB -> ARABIC LETTER TTEH INITIAL FORM
'\ufe99' # 0xBC -> ARABIC LETTER THEH ISOLATED FORM
'\ufe9b' # 0xBD -> ARABIC LETTER THEH INITIAL FORM
'\ufe9d' # 0xBE -> ARABIC LETTER JEEM ISOLATED FORM
'\ufe9f' # 0xBF -> ARABIC LETTER JEEM INITIAL FORM
'\ufb7a' # 0xC0 -> ARABIC LETTER TCHEH ISOLATED FORM
'\ufb7c' # 0xC1 -> ARABIC LETTER TCHEH INITIAL FORM
'\ufea1' # 0xC2 -> ARABIC LETTER HAH ISOLATED FORM
'\ufea3' # 0xC3 -> ARABIC LETTER HAH INITIAL FORM
'\ufea5' # 0xC4 -> ARABIC LETTER KHAH ISOLATED FORM
'\ufea7' # 0xC5 -> ARABIC LETTER KHAH INITIAL FORM
'\ufea9' # 0xC6 -> ARABIC LETTER DAL ISOLATED FORM
    '\ufb84' # 0xC7 -> ARABIC LETTER DAHAL ISOLATED FORM
'\ufeab' # 0xC8 -> ARABIC LETTER THAL ISOLATED FORM
'\ufead' # 0xC9 -> ARABIC LETTER REH ISOLATED FORM
'\ufb8c' # 0xCA -> ARABIC LETTER RREH ISOLATED FORM
'\ufeaf' # 0xCB -> ARABIC LETTER ZAIN ISOLATED FORM
'\ufb8a' # 0xCC -> ARABIC LETTER JEH ISOLATED FORM
'\ufeb1' # 0xCD -> ARABIC LETTER SEEN ISOLATED FORM
'\ufeb3' # 0xCE -> ARABIC LETTER SEEN INITIAL FORM
'\ufeb5' # 0xCF -> ARABIC LETTER SHEEN ISOLATED FORM
'\ufeb7' # 0xD0 -> ARABIC LETTER SHEEN INITIAL FORM
'\ufeb9' # 0xD1 -> ARABIC LETTER SAD ISOLATED FORM
'\ufebb' # 0xD2 -> ARABIC LETTER SAD INITIAL FORM
'\ufebd' # 0xD3 -> ARABIC LETTER DAD ISOLATED FORM
'\ufebf' # 0xD4 -> ARABIC LETTER DAD INITIAL FORM
'\ufec1' # 0xD5 -> ARABIC LETTER TAH ISOLATED FORM
'\ufec5' # 0xD6 -> ARABIC LETTER ZAH ISOLATED FORM
'\ufec9' # 0xD7 -> ARABIC LETTER AIN ISOLATED FORM
'\ufeca' # 0xD8 -> ARABIC LETTER AIN FINAL FORM
'\ufecb' # 0xD9 -> ARABIC LETTER AIN INITIAL FORM
'\ufecc' # 0xDA -> ARABIC LETTER AIN MEDIAL FORM
'\ufecd' # 0xDB -> ARABIC LETTER GHAIN ISOLATED FORM
'\ufece' # 0xDC -> ARABIC LETTER GHAIN FINAL FORM
'\ufecf' # 0xDD -> ARABIC LETTER GHAIN INITIAL FORM
'\ufed0' # 0xDE -> ARABIC LETTER GHAIN MEDIAL FORM
'\ufed1' # 0xDF -> ARABIC LETTER FEH ISOLATED FORM
'\ufed3' # 0xE0 -> ARABIC LETTER FEH INITIAL FORM
'\ufed5' # 0xE1 -> ARABIC LETTER QAF ISOLATED FORM
'\ufed7' # 0xE2 -> ARABIC LETTER QAF INITIAL FORM
'\ufed9' # 0xE3 -> ARABIC LETTER KAF ISOLATED FORM
'\ufedb' # 0xE4 -> ARABIC LETTER KAF INITIAL FORM
'\ufb92' # 0xE5 -> ARABIC LETTER GAF ISOLATED FORM
'\ufb94' # 0xE6 -> ARABIC LETTER GAF INITIAL FORM
'\ufedd' # 0xE7 -> ARABIC LETTER LAM ISOLATED FORM
'\ufedf' # 0xE8 -> ARABIC LETTER LAM INITIAL FORM
'\ufee0' # 0xE9 -> ARABIC LETTER LAM MEDIAL FORM
'\ufee1' # 0xEA -> ARABIC LETTER MEEM ISOLATED FORM
'\ufee3' # 0xEB -> ARABIC LETTER MEEM INITIAL FORM
'\ufb9e' # 0xEC -> ARABIC LETTER NOON GHUNNA ISOLATED FORM
'\ufee5' # 0xED -> ARABIC LETTER NOON ISOLATED FORM
'\ufee7' # 0xEE -> ARABIC LETTER NOON INITIAL FORM
'\ufe85' # 0xEF -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
'\ufeed' # 0xF0 -> ARABIC LETTER WAW ISOLATED FORM
'\ufba6' # 0xF1 -> ARABIC LETTER HEH GOAL ISOLATED FORM
'\ufba8' # 0xF2 -> ARABIC LETTER HEH GOAL INITIAL FORM
'\ufba9' # 0xF3 -> ARABIC LETTER HEH GOAL MEDIAL FORM
'\ufbaa' # 0xF4 -> ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM
'\ufe80' # 0xF5 -> ARABIC LETTER HAMZA ISOLATED FORM
'\ufe89' # 0xF6 -> ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM
'\ufe8a' # 0xF7 -> ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM
'\ufe8b' # 0xF8 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
'\ufef1' # 0xF9 -> ARABIC LETTER YEH ISOLATED FORM
'\ufef2' # 0xFA -> ARABIC LETTER YEH FINAL FORM
'\ufef3' # 0xFB -> ARABIC LETTER YEH INITIAL FORM
'\ufbb0' # 0xFC -> ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM
'\ufbae' # 0xFD -> ARABIC LETTER YEH BARREE ISOLATED FORM
'\ufe7c' # 0xFE -> ARABIC SHADDA ISOLATED FORM
'\ufe7d' # 0xFF -> ARABIC SHADDA MEDIAL FORM
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
apache-2.0
|
tafaRU/odoo
|
openerp/workflow/__init__.py
|
378
|
3793
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.workflow.service import WorkflowService
# The new API is in openerp.workflow.workflow_service
# OLD API of the Workflow
def clear_cache(cr, uid):
WorkflowService.clear_cache(cr.dbname)
def trg_write(uid, res_type, res_id, cr):
"""
    Reevaluates the specified workflow instance. Thus, if any condition for
    a transition has been changed in the backend, then running ``trg_write``
    will move the workflow over that transition.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
return WorkflowService.new(cr, uid, res_type, res_id).write()
def trg_trigger(uid, res_type, res_id, cr):
"""
Activate a trigger.
If a workflow instance is waiting for a trigger from another model, then this
trigger can be activated if its conditions are met.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
return WorkflowService.new(cr, uid, res_type, res_id).trigger()
def trg_delete(uid, res_type, res_id, cr):
"""
Delete a workflow instance
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
return WorkflowService.new(cr, uid, res_type, res_id).delete()
def trg_create(uid, res_type, res_id, cr):
"""
Create a new workflow instance
:param res_type: the model name
:param res_id: the model instance id that will own the created workflow instance
:param cr: a database cursor
"""
return WorkflowService.new(cr, uid, res_type, res_id).create()
def trg_validate(uid, res_type, res_id, signal, cr):
"""
Fire a signal on a given workflow instance
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param signal: the signal name to be fired
:param cr: a database cursor
"""
assert isinstance(signal, basestring)
return WorkflowService.new(cr, uid, res_type, res_id).validate(signal)
def trg_redirect(uid, res_type, res_id, new_rid, cr):
"""
Re-bind a workflow instance to another instance of the same model.
Make all workitems which are waiting for a (subflow) workflow instance
for the old resource point to the (first active) workflow instance for
the new resource.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param new_rid: the model instance id that will own the workflow instance
:param cr: a database cursor
"""
assert isinstance(new_rid, (long, int))
return WorkflowService.new(cr, uid, res_type, res_id).redirect(new_rid)
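# A hedged usage sketch (the model name and record id below are hypothetical,
# for illustration only): the legacy helpers above are thin wrappers around
# WorkflowService, so a typical call sequence looks like this:
#
#   trg_create(uid, 'sale.order', 42, cr)                      # instantiate the workflow
#   trg_validate(uid, 'sale.order', 42, 'order_confirm', cr)   # fire a named signal
#   trg_write(uid, 'sale.order', 42, cr)                       # re-evaluate transitions after a write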
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
stvstnfrd/edx-platform
|
openedx/core/djangoapps/user_api/management/tests/test_cancel_retirement.py
|
5
|
3047
|
"""
Test the cancel_user_retirement_request management command
"""
import pytest
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.management import CommandError, call_command
from openedx.core.djangoapps.user_api.accounts.tests.retirement_helpers import ( # pylint: disable=unused-import
logged_out_retirement_request,
setup_retirement_states
)
from openedx.core.djangoapps.user_api.models import RetirementState, UserRetirementRequest, UserRetirementStatus
from common.djangoapps.student.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
def test_successful_cancellation(setup_retirement_states, logged_out_retirement_request, capsys): # pylint: disable=redefined-outer-name, unused-argument
"""
Test a successfully cancelled retirement request.
"""
call_command('cancel_user_retirement_request', logged_out_retirement_request.original_email)
output = capsys.readouterr().out
# Confirm that no retirement status exists for the user.
with pytest.raises(UserRetirementStatus.DoesNotExist):
UserRetirementStatus.objects.get(original_email=logged_out_retirement_request.user.email)
# Confirm that no retirement request exists for the user.
with pytest.raises(UserRetirementRequest.DoesNotExist):
UserRetirementRequest.objects.get(user=logged_out_retirement_request.user)
# Ensure user can be retrieved using the original email address.
user = User.objects.get(email=logged_out_retirement_request.original_email)
# Ensure the user has a usable password so they can go through the reset flow
assert not user.password.startswith(UNUSABLE_PASSWORD_PREFIX)
assert "Successfully cancelled retirement request for user with email address" in output
assert logged_out_retirement_request.original_email in output
def test_cancellation_in_unrecoverable_state(setup_retirement_states, logged_out_retirement_request): # pylint: disable=redefined-outer-name, unused-argument
"""
Test a failed cancellation of a retirement request due to the retirement already beginning.
"""
retiring_lms_state = RetirementState.objects.get(state_name='RETIRING_LMS')
logged_out_retirement_request.current_state = retiring_lms_state
logged_out_retirement_request.save()
with pytest.raises(CommandError, match=r'Retirement requests can only be cancelled for users in the PENDING state'):
call_command('cancel_user_retirement_request', logged_out_retirement_request.original_email)
def test_cancellation_unknown_email_address(setup_retirement_states, logged_out_retirement_request): # pylint: disable=redefined-outer-name, unused-argument
"""
Test attempting to cancel a non-existent request of a user.
"""
user = UserFactory()
with pytest.raises(CommandError, match=r'No retirement request with email address'):
call_command('cancel_user_retirement_request', user.email)
|
agpl-3.0
|
karllessard/tensorflow
|
tensorflow/python/keras/layers/preprocessing/text_vectorization.py
|
1
|
29394
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras text vectorization preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.layers.preprocessing import category_encoding
from tensorflow.python.keras.layers.preprocessing import string_lookup
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.util.tf_export import keras_export
LOWER_AND_STRIP_PUNCTUATION = "lower_and_strip_punctuation"
SPLIT_ON_WHITESPACE = "whitespace"
TFIDF = category_encoding.TFIDF
INT = category_encoding.INT
BINARY = category_encoding.BINARY
COUNT = category_encoding.COUNT
# This is an explicit regex of all the tokens that will be stripped if
# LOWER_AND_STRIP_PUNCTUATION is set. If an application requires other
# stripping, a Callable should be passed into the 'standardize' arg.
DEFAULT_STRIP_REGEX = r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']'
# The string tokens in the extracted vocabulary
_VOCAB_NAME = "vocab"
# The inverse-document-frequency weights
_IDF_NAME = "idf"
# The IDF data for the OOV token
_OOV_IDF_NAME = "oov_idf"
# The string tokens in the full vocabulary
_ACCUMULATOR_VOCAB_NAME = "vocab"
# The total counts of each token in the vocabulary
_ACCUMULATOR_COUNTS_NAME = "counts"
# The number of documents / examples that each token appears in.
_ACCUMULATOR_DOCUMENT_COUNTS = "document_counts"
# The total number of documents / examples in the dataset.
_ACCUMULATOR_NUM_DOCUMENTS = "num_documents"
@keras_export(
"keras.layers.experimental.preprocessing.TextVectorization", v1=[])
class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer):
"""Text vectorization layer.
This layer has basic options for managing text in a Keras model. It
transforms a batch of strings (one sample = one string) into either a list of
token indices (one sample = 1D tensor of integer token indices) or a dense
representation (one sample = 1D tensor of float values representing data about
the sample's tokens).
If desired, the user can call this layer's adapt() method on a dataset.
When this layer is adapted, it will analyze the dataset, determine the
frequency of individual string values, and create a 'vocabulary' from them.
This vocabulary can have unlimited size or be capped, depending on the
configuration options for this layer; if there are more unique values in the
input than the maximum vocabulary size, the most frequent terms will be used
to create the vocabulary.
The processing of each sample contains the following steps:
1. standardize each sample (usually lowercasing + punctuation stripping)
2. split each sample into substrings (usually words)
3. recombine substrings into tokens (usually ngrams)
4. index tokens (associate a unique int value with each token)
5. transform each sample using this index, either into a vector of ints or
a dense float vector.
Some notes on passing Callables to customize splitting and normalization for
this layer:
1. Any callable can be passed to this Layer, but if you want to serialize
this object you should only pass functions that are registered Keras
serializables (see `tf.keras.utils.register_keras_serializable` for more
details).
2. When using a custom callable for `standardize`, the data received
by the callable will be exactly as passed to this layer. The callable
should return a tensor of the same shape as the input.
3. When using a custom callable for `split`, the data received by the
callable will have the 1st dimension squeezed out - instead of
`[["string to split"], ["another string to split"]]`, the Callable will
see `["string to split", "another string to split"]`. The callable should
return a Tensor with the first dimension containing the split tokens -
in this example, we should see something like `[["string", "to", "split"],
["another", "string", "to", "split"]]`. This makes the callable site
natively compatible with `tf.strings.split()`.
Attributes:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this vocabulary
contains 1 OOV token, so the effective number of tokens is `(max_tokens -
1 - (1 if output == "int" else 0))`.
standardize: Optional specification for standardization to apply to the
input text. Values can be None (no standardization),
'lower_and_strip_punctuation' (lowercase and remove punctuation) or a
Callable. Default is 'lower_and_strip_punctuation'.
split: Optional specification for splitting the input text. Values can be
None (no splitting), 'whitespace' (split on ASCII whitespace), or a
Callable. The default is 'whitespace'.
ngrams: Optional specification for ngrams to create from the possibly-split
input text. Values can be None, an integer or tuple of integers; passing
an integer will create ngrams up to that integer, and passing a tuple of
integers will create ngrams for the specified values in the tuple. Passing
None means that no ngrams will be created.
output_mode: Optional specification for the output of the layer. Values can
be "int", "binary", "count" or "tf-idf", configuring the layer as follows:
"int": Outputs integer indices, one integer index per split string
token. When output == "int", 0 is reserved for masked locations;
this reduces the vocab size to max_tokens-2 instead of max_tokens-1
"binary": Outputs a single int array per batch, of either vocab_size or
max_tokens size, containing 1s in all elements where the token mapped
to that index exists at least once in the batch item.
"count": As "binary", but the int array contains a count of the number
of times the token at that index appeared in the batch item.
"tf-idf": As "binary", but the TF-IDF algorithm is applied to find the
value in each token slot.
output_sequence_length: Only valid in INT mode. If set, the output will have
its time dimension padded or truncated to exactly `output_sequence_length`
values, resulting in a tensor of shape [batch_size,
output_sequence_length] regardless of how many tokens resulted from the
splitting step. Defaults to None.
pad_to_max_tokens: Only valid in "binary", "count", and "tf-idf" modes. If
True, the output will have its feature axis padded to `max_tokens` even if
the number of unique tokens in the vocabulary is less than max_tokens,
resulting in a tensor of shape [batch_size, max_tokens] regardless of
vocabulary size. Defaults to True.
vocabulary: An optional list of vocabulary terms, or a path to a text file
containing a vocabulary to load into this layer. The file should contain
one token per line. If the list or file contains the same token multiple
times, an error will be thrown.
Example:
This example instantiates a TextVectorization layer that lowercases text,
splits on whitespace, strips punctuation, and outputs integer vocab indices.
>>> text_dataset = tf.data.Dataset.from_tensor_slices(["foo", "bar", "baz"])
>>> max_features = 5000 # Maximum vocab size.
>>> max_len = 4 # Sequence length to pad the outputs to.
>>> embedding_dims = 2
>>>
>>> # Create the layer.
>>> vectorize_layer = TextVectorization(
... max_tokens=max_features,
... output_mode='int',
... output_sequence_length=max_len)
>>>
>>> # Now that the vocab layer has been created, call `adapt` on the text-only
>>> # dataset to create the vocabulary. You don't have to batch, but for large
>>> # datasets this means we're not keeping spare copies of the dataset.
>>> vectorize_layer.adapt(text_dataset.batch(64))
>>>
>>> # Create the model that uses the vectorize text layer
>>> model = tf.keras.models.Sequential()
>>>
>>> # Start by creating an explicit input layer. It needs to have a shape of
>>> # (1,) (because we need to guarantee that there is exactly one string
>>> # input per batch), and the dtype needs to be 'string'.
>>> model.add(tf.keras.Input(shape=(1,), dtype=tf.string))
>>>
>>> # The first layer in our model is the vectorization layer. After this
>>> # layer, we have a tensor of shape (batch_size, max_len) containing vocab
>>> # indices.
>>> model.add(vectorize_layer)
>>>
>>> # Now, the model can map strings to integers, and you can add an embedding
>>> # layer to map these integers to learned embeddings.
>>> input_data = [["foo qux bar"], ["qux baz"]]
>>> model.predict(input_data)
array([[2, 1, 4, 0],
[1, 3, 0, 0]])
Example:
This example instantiates a TextVectorization layer by passing a list
of vocabulary terms to the layer's __init__ method.
>>> vocab_data = ["earth", "wind", "and", "fire"]
>>> max_len = 4 # Sequence length to pad the outputs to.
>>>
>>> # Create the layer, passing the vocab directly. You can also pass the
>>> # vocabulary arg a path to a file containing one vocabulary word per
>>> # line.
>>> vectorize_layer = TextVectorization(
... max_tokens=max_features,
... output_mode='int',
... output_sequence_length=max_len,
... vocabulary=vocab_data)
>>>
>>> # Because we've passed the vocabulary directly, we don't need to adapt
>>> # the layer - the vocabulary is already set. The vocabulary contains the
>>> # padding token ('') and OOV token ('[UNK]') as well as the passed tokens.
>>> vectorize_layer.get_vocabulary()
['', '[UNK]', 'earth', 'wind', 'and', 'fire']
"""
# TODO(momernick): Add an examples section to the docstring.
def __init__(self,
max_tokens=None,
standardize=LOWER_AND_STRIP_PUNCTUATION,
split=SPLIT_ON_WHITESPACE,
ngrams=None,
output_mode=INT,
output_sequence_length=None,
pad_to_max_tokens=True,
vocabulary=None,
**kwargs):
# This layer only applies to string processing, and so should only have
# a dtype of 'string'.
if "dtype" in kwargs and kwargs["dtype"] != dtypes.string:
raise ValueError("TextVectorization may only have a dtype of string.")
elif "dtype" not in kwargs:
kwargs["dtype"] = dtypes.string
# 'standardize' must be one of (None, LOWER_AND_STRIP_PUNCTUATION, callable)
layer_utils.validate_string_arg(
standardize,
allowable_strings=(LOWER_AND_STRIP_PUNCTUATION),
layer_name="TextVectorization",
arg_name="standardize",
allow_none=True,
allow_callables=True)
# 'split' must be one of (None, SPLIT_ON_WHITESPACE, callable)
layer_utils.validate_string_arg(
split,
allowable_strings=(SPLIT_ON_WHITESPACE),
layer_name="TextVectorization",
arg_name="split",
allow_none=True,
allow_callables=True)
# 'output_mode' must be one of (None, INT, COUNT, BINARY, TFIDF)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(INT, COUNT, BINARY, TFIDF),
layer_name="TextVectorization",
arg_name="output_mode",
allow_none=True)
# 'ngrams' must be one of (None, int, tuple(int))
if not (ngrams is None or
isinstance(ngrams, int) or
isinstance(ngrams, tuple) and
all(isinstance(item, int) for item in ngrams)):
raise ValueError(("`ngrams` must be None, an integer, or a tuple of "
"integers. Got %s") % (ngrams,))
# 'output_sequence_length' must be one of (None, int) and is only
# set if output_mode is INT.
if (output_mode == INT and not (isinstance(output_sequence_length, int) or
(output_sequence_length is None))):
raise ValueError("`output_sequence_length` must be either None or an "
"integer when `output_mode` is 'int'. "
"Got %s" % output_sequence_length)
if output_mode != INT and output_sequence_length is not None:
raise ValueError("`output_sequence_length` must not be set if "
"`output_mode` is not 'int'.")
# If max_tokens is set, the value must be greater than 1 - otherwise we
# are creating a 0-element vocab, which doesn't make sense.
if max_tokens is not None and max_tokens < 1:
raise ValueError("max_tokens must be > 1.")
self._max_tokens = max_tokens
# In INT mode, the zero value is reserved for padding (per Keras standard
# padding approaches). In non-INT modes, there is no padding so we can set
# the OOV value to zero instead of one.
self._oov_value = 1 if output_mode == INT else 0
self._standardize = standardize
self._split = split
self._ngrams_arg = ngrams
if isinstance(ngrams, int):
self._ngrams = tuple(range(1, ngrams + 1))
else:
self._ngrams = ngrams
self._output_mode = output_mode
self._output_sequence_length = output_sequence_length
self._pad_to_max = pad_to_max_tokens
self._vocab_size = 0
self._called = False
super(TextVectorization, self).__init__(
combiner=None,
**kwargs)
base_preprocessing_layer._kpl_gauge.get_cell("V2").set("TextVectorization")
mask_token = "" if output_mode in [None, INT] else None
self._index_lookup_layer = self._get_index_lookup_class()(
max_tokens=max_tokens, mask_token=mask_token, vocabulary=vocabulary)
# If this layer is configured for string or integer output, we do not
# create a vectorization layer (as the output is not vectorized).
if self._output_mode in [None, INT]:
self._vectorize_layer = None
else:
if max_tokens is not None and self._pad_to_max:
max_elements = max_tokens
else:
max_elements = None
self._vectorize_layer = self._get_vectorization_class()(
max_tokens=max_elements, output_mode=self._output_mode)
# These are V1/V2 shim points. There are V1 implementations in the V1 class.
def _get_vectorization_class(self):
return category_encoding.CategoryEncoding
def _get_index_lookup_class(self):
return string_lookup.StringLookup
# End of V1/V2 shim points.
def _assert_same_type(self, expected_type, values, value_name):
if dtypes.as_dtype(expected_type) != dtypes.as_dtype(values.dtype):
raise RuntimeError("Expected %s type %s, got %s" %
(value_name, expected_type, values.dtype))
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
def compute_output_shape(self, input_shape):
if self._output_mode != INT:
return tensor_shape.TensorShape([input_shape[0], self._max_tokens])
if self._output_mode == INT and self._split is None:
if len(input_shape) == 1:
input_shape = tuple(input_shape) + (1,)
return tensor_shape.TensorShape(input_shape)
if self._output_mode == INT and self._split is not None:
input_shape = list(input_shape)
if len(input_shape) == 1:
input_shape = input_shape + [self._output_sequence_length]
else:
input_shape[1] = self._output_sequence_length
return tensor_shape.TensorShape(input_shape)
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = dtypes.int64 if self._output_mode == INT else K.floatx()
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def adapt(self, data, reset_state=True):
"""Fits the state of the preprocessing layer to the dataset.
Overrides the default adapt method to apply relevant preprocessing to the
inputs before passing to the combiner.
Arguments:
data: The data to train on. It can be passed either as a tf.data Dataset,
as a NumPy array, a string tensor, or as a list of texts.
reset_state: Optional argument specifying whether to clear the state of
the layer at the start of the call to `adapt`. This must be True for
this layer, which does not support repeated calls to `adapt`.
"""
if not reset_state:
raise ValueError("TextVectorization does not support streaming adapts.")
# Build the layer explicitly with the original data shape instead of relying
# on an implicit call to `build` in the base layer's `adapt`, since
# preprocessing changes the input shape.
if isinstance(data, (list, tuple, np.ndarray)):
data = ops.convert_to_tensor_v2_with_dispatch(data)
if isinstance(data, ops.Tensor):
if data.shape.rank == 1:
data = array_ops.expand_dims(data, axis=-1)
self.build(data.shape)
preprocessed_inputs = self._preprocess(data)
elif isinstance(data, dataset_ops.DatasetV2):
# TODO(momernick): Replace this with a more V2-friendly API.
shape = dataset_ops.get_legacy_output_shapes(data)
if not isinstance(shape, tensor_shape.TensorShape):
raise ValueError("The dataset passed to 'adapt' must contain a single "
"tensor value.")
if shape.rank == 0:
data = data.map(lambda tensor: array_ops.expand_dims(tensor, 0))
shape = dataset_ops.get_legacy_output_shapes(data)
if shape.rank == 1:
data = data.map(lambda tensor: array_ops.expand_dims(tensor, -1))
self.build(dataset_ops.get_legacy_output_shapes(data))
preprocessed_inputs = data.map(self._preprocess)
else:
raise ValueError(
"adapt() requires a Dataset or an array as input, got {}".format(
type(data)))
self._index_lookup_layer.adapt(preprocessed_inputs)
if self._vectorize_layer:
if isinstance(data, ops.Tensor):
integer_data = self._index_lookup_layer(preprocessed_inputs)
else:
integer_data = preprocessed_inputs.map(self._index_lookup_layer)
self._vectorize_layer.adapt(integer_data)
def get_vocabulary(self):
return self._index_lookup_layer.get_vocabulary()
def get_config(self):
# This does not include the 'vocabulary' arg, since if the vocab was passed
# at init time it's now stored in variable state - we don't need to
# pull it off disk again.
config = {
"max_tokens": self._max_tokens,
"standardize": self._standardize,
"split": self._split,
"ngrams": self._ngrams_arg,
"output_mode": self._output_mode,
"output_sequence_length": self._output_sequence_length,
"pad_to_max_tokens": self._pad_to_max,
}
base_config = super(TextVectorization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def count_params(self):
# This method counts the number of scalars in the weights of this layer.
# Since this layer doesn't have any /actual/ weights (in that there's
# nothing in this layer that can be trained - we only use the weight
# abstraction for ease of saving!) we return 0.
return 0
def set_vocabulary(self,
vocab,
df_data=None,
oov_df_value=None):
"""Sets vocabulary (and optionally document frequency) data for this layer.
This method sets the vocabulary and DF data for this layer directly, instead
of analyzing a dataset through 'adapt'. It should be used whenever the vocab
(and optionally document frequency) information is already known. If
vocabulary data is already present in the layer, this method will replace
it.
Arguments:
vocab: An array of string tokens.
df_data: An array of document frequency data. Only necessary if the layer
output_mode is TFIDF.
oov_df_value: The document frequency of the OOV token. Only necessary if
output_mode is TFIDF.
Raises:
ValueError: If there are too many inputs, the inputs do not match, or
input data is missing.
RuntimeError: If the vocabulary cannot be set when this function is
called. This happens in "binary", "count", and "tfidf" modes
if "pad_to_max_tokens" is False and the layer itself has already been
called.
"""
if self._output_mode != TFIDF and df_data is not None:
raise ValueError("df_data should only be set if output_mode is TFIDF. "
"output_mode is %s." % self._output_mode)
if (self._output_mode in [BINARY, COUNT, TFIDF] and self._called and
not self._pad_to_max):
raise RuntimeError(("When using TextVectorization in {mode} mode and "
"pad_to_max_tokens is False, the vocabulary cannot "
"be changed after the layer is "
"called.").format(mode=self._output_mode))
self._index_lookup_layer.set_vocabulary(vocab)
# When doing raw or integer output, we don't have a Vectorize layer to
# manage. In this case, we can return directly.
if self._output_mode in [None, INT]:
return
if not self._pad_to_max or self._max_tokens is None:
num_tokens = self._index_lookup_layer.vocab_size()
self._vectorize_layer.set_num_elements(num_tokens)
if self._output_mode == TFIDF:
if df_data is None:
raise ValueError("df_data must be set if output_mode is TFIDF")
if len(vocab) != len(df_data):
raise ValueError("df_data must be the same length as vocab. "
"len(df_data) is %s, len(vocab) is %s" %
(len(vocab), len(df_data)))
if oov_df_value is None:
raise ValueError("You must pass an oov_df_value when output_mode is "
"TFIDF.")
df_data = self._convert_to_ndarray(df_data)
if not isinstance(oov_df_value, np.ndarray):
oov_df_value = np.array([oov_df_value])
df_data = np.insert(df_data, 0, oov_df_value)
self._vectorize_layer.set_tfidf_data(df_data)
def build(self, input_shape):
# We have to use 'and not ==' here, because `input_shape[-1] != 1` can
# evaluate to None for undefined shape axes. If we used 'and !=', the
# expression would evaluate to False instead of True when the shape is
# undefined, and it needs to evaluate to True in that case.
if self._split is not None:
if input_shape.ndims > 1 and not input_shape[-1] == 1: # pylint: disable=g-comparison-negation
raise RuntimeError(
"When using TextVectorization to tokenize strings, the innermost "
"dimension of the input array must be 1, got shape "
"{}".format(input_shape))
super(TextVectorization, self).build(input_shape)
def _set_state_variables(self, updates):
if not self.built:
raise RuntimeError("_set_state_variables() must be called after build().")
if self._output_mode == TFIDF:
self.set_vocabulary(
updates[_VOCAB_NAME],
updates[_IDF_NAME],
updates[_OOV_IDF_NAME])
else:
self.set_vocabulary(updates[_VOCAB_NAME])
def _preprocess(self, inputs):
if self._standardize == LOWER_AND_STRIP_PUNCTUATION:
if tf_utils.is_ragged(inputs):
lowercase_inputs = ragged_functional_ops.map_flat_values(
gen_string_ops.string_lower, inputs)
# Depending on configuration, we may never touch the non-data tensor
# in the ragged inputs tensor. If that is the case, and this is the
# only layer in the keras model, running it will throw an error.
# To get around this, we wrap the result in an identity.
lowercase_inputs = array_ops.identity(lowercase_inputs)
else:
lowercase_inputs = gen_string_ops.string_lower(inputs)
inputs = string_ops.regex_replace(lowercase_inputs, DEFAULT_STRIP_REGEX,
"")
elif callable(self._standardize):
inputs = self._standardize(inputs)
elif self._standardize is not None:
raise ValueError(("%s is not a supported standardization. "
"TextVectorization supports the following options "
"for `standardize`: None, "
"'lower_and_strip_punctuation', or a "
"Callable.") % self._standardize)
if self._split is not None:
# If we are splitting, we validate that the innermost (last) axis has
# dimension 1 so that it can be squeezed out. We do this here instead of
# after splitting for performance reasons - it's more expensive to squeeze
# a ragged tensor.
if inputs.shape.ndims > 1:
inputs = array_ops.squeeze(inputs, axis=-1)
if self._split == SPLIT_ON_WHITESPACE:
# This treats multiple whitespaces as one whitespace, and strips leading
# and trailing whitespace.
inputs = ragged_string_ops.string_split_v2(inputs)
elif callable(self._split):
inputs = self._split(inputs)
else:
raise ValueError(
("%s is not a supported splitting."
"TextVectorization supports the following options "
"for `split`: None, 'whitespace', or a Callable.") % self._split)
# Note that 'inputs' here can be either ragged or dense depending on the
# configuration choices for this Layer. The strings.ngrams op, however, does
# support both ragged and dense inputs.
if self._ngrams is not None:
inputs = ragged_string_ops.ngrams(
inputs, ngram_width=self._ngrams, separator=" ")
return inputs
def call(self, inputs):
if isinstance(inputs, (list, tuple, np.ndarray)):
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
self._called = True
inputs = self._preprocess(inputs)
# If we're not doing any output processing, return right away.
if self._output_mode is None:
return inputs
indexed_data = self._index_lookup_layer(inputs)
if self._output_mode == INT:
# Once we have the dense tensor, we can return it if we weren't given a
# fixed output sequence length. If we were, though, we have to dynamically
# choose whether to pad or trim it based on each tensor.
# We need to convert to dense if we have a ragged tensor.
if tf_utils.is_ragged(indexed_data):
dense_data = indexed_data.to_tensor(default_value=0)
else:
dense_data = indexed_data
if self._output_sequence_length is None:
return dense_data
else:
sequence_len = K.shape(dense_data)[1]
pad_amt = self._output_sequence_length - sequence_len
pad_fn = lambda: array_ops.pad(dense_data, [[0, 0], [0, pad_amt]])
slice_fn = lambda: dense_data[:, :self._output_sequence_length]
output_tensor = control_flow_ops.cond(
sequence_len < self._output_sequence_length,
true_fn=pad_fn,
false_fn=slice_fn)
output_shape = output_tensor.shape.as_list()
output_shape[-1] = self._output_sequence_length
output_tensor.set_shape(tensor_shape.TensorShape(output_shape))
return output_tensor
# If we're not returning integers here, we rely on the vectorization layer
# to create the output.
return self._vectorize_layer(indexed_data)
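# A hedged usage sketch (the toy vocabulary and document-frequency values are
# assumptions for illustration): in "tf-idf" mode the vocabulary and DF data
# can be supplied directly through `set_vocabulary` instead of calling `adapt`:
#
#   layer = TextVectorization(max_tokens=10, output_mode=TFIDF)
#   layer.set_vocabulary(
#       vocab=["earth", "wind", "and", "fire"],
#       df_data=[0.8, 0.6, 0.9, 0.3],   # one DF value per vocabulary token
#       oov_df_value=0.1)               # DF weight applied to OOV tokens
#   # layer(["earth and wind"]) now returns a dense TF-IDF weighted vector.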
|
apache-2.0
|
ofermend/medicare-demo
|
socialite/jython/Lib/test/test_structseq.py
|
34
|
2972
|
import unittest
from test import test_support
import time
class StructSeqTest(unittest.TestCase):
def test_tuple(self):
t = time.gmtime()
astuple = tuple(t)
self.assertEqual(len(t), len(astuple))
self.assertEqual(t, astuple)
# Check that slicing works the same way; at one point, slicing t[i:j] with
# 0 < i < j could produce NULLs in the result.
for i in xrange(-len(t), len(t)):
self.assertEqual(t[i:], astuple[i:])
for j in xrange(-len(t), len(t)):
self.assertEqual(t[i:j], astuple[i:j])
for j in xrange(-len(t), len(t)):
self.assertEqual(t[:j], astuple[:j])
self.assertRaises(IndexError, t.__getitem__, -len(t)-1)
self.assertRaises(IndexError, t.__getitem__, len(t))
for i in xrange(-len(t), len(t)-1):
self.assertEqual(t[i], astuple[i])
def test_repr(self):
t = time.gmtime()
repr(t)
def test_concat(self):
t1 = time.gmtime()
t2 = t1 + tuple(t1)
for i in xrange(len(t1)):
self.assertEqual(t2[i], t2[i+len(t1)])
def test_repeat(self):
t1 = time.gmtime()
t2 = 3 * t1
for i in xrange(len(t1)):
self.assertEqual(t2[i], t2[i+len(t1)])
self.assertEqual(t2[i], t2[i+2*len(t1)])
def test_contains(self):
t1 = time.gmtime()
for item in t1:
self.assert_(item in t1)
self.assert_(-42 not in t1)
def test_hash(self):
t1 = time.gmtime()
self.assertEqual(hash(t1), hash(tuple(t1)))
def test_cmp(self):
t1 = time.gmtime()
t2 = type(t1)(t1)
self.assertEqual(t1, t2)
self.assert_(not (t1 < t2))
self.assert_(t1 <= t2)
self.assert_(not (t1 > t2))
self.assert_(t1 >= t2)
self.assert_(not (t1 != t2))
def test_fields(self):
t = time.gmtime()
self.assertEqual(len(t), t.n_fields)
self.assertEqual(t.n_fields, t.n_sequence_fields+t.n_unnamed_fields)
def test_constructor(self):
t = time.struct_time
self.assertRaises(TypeError, t)
self.assertRaises(TypeError, t, None)
self.assertRaises(TypeError, t, "123")
self.assertRaises(TypeError, t, "123", dict={})
self.assertRaises(TypeError, t, "123456789", dict=None)
s = "123456789"
self.assertEqual("".join(t(s)), s)
def test_eviltuple(self):
class Exc(Exception):
pass
# Devious code could crash structseqs' constructors
class C:
def __getitem__(self, i):
raise Exc
def __len__(self):
return 9
self.assertRaises(Exc, time.struct_time, C())
def test_reduce(self):
t = time.gmtime()
x = t.__reduce__()
def test_main():
test_support.run_unittest(StructSeqTest)
if __name__ == "__main__":
test_main()
|
apache-2.0
|
gclenaghan/scikit-learn
|
sklearn/datasets/california_housing.py
|
28
|
3864
|
"""California housing dataset.
The original database is available from StatLib
http://lib.stat.cmu.edu/
The data contains 20,640 observations on 9 variables.
This dataset contains the average house value as target variable
and the following input variables (features): average income,
housing average age, average rooms, average bedrooms, population,
average occupation, latitude, and longitude in that order.
References
----------
Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
Statistics and Probability Letters, 33 (1997) 291-297.
"""
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from io import BytesIO
from os.path import exists
from os import makedirs
from zipfile import ZipFile
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.request import urlopen
import numpy as np
from .base import get_data_home, Bunch
from .base import _pkl_filepath
from ..externals import joblib
DATA_URL = "http://lib.stat.cmu.edu/modules.php?op=modload&name=Downloads&"\
"file=index&req=getit&lid=83"
TARGET_FILENAME = "cal_housing.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_california_housing(data_home=None, download_if_missing=True):
"""Loader for the California housing dataset from StatLib.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : ndarray, shape [20640, 8]
Each row corresponding to the 8 feature values in order.
dataset.target : numpy array of shape (20640,)
Each value corresponds to the average house value in units of 100,000.
dataset.feature_names : array of length 8
Array of ordered feature names used in the dataset.
dataset.DESCR : string
Description of the California housing dataset.
Notes
-----
This dataset consists of 20,640 samples and 8 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
filepath = _pkl_filepath(data_home, TARGET_FILENAME)
if not exists(filepath):
print('downloading Cal. housing from %s to %s' % (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
zip_file = ZipFile(buf)
try:
cadata_fd = zip_file.open('cadata.txt', 'r')
cadata = BytesIO(cadata_fd.read())
# skip the first 27 lines (documentation)
cal_housing = np.loadtxt(cadata, skiprows=27)
joblib.dump(cal_housing, filepath, compress=6)
finally:
zip_file.close()
else:
cal_housing = joblib.load(filepath)
feature_names = ["MedInc", "HouseAge", "AveRooms", "AveBedrms",
"Population", "AveOccup", "Latitude", "Longitude"]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
# avg occupancy = population / households
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=MODULE_DOCS)
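# A minimal usage sketch (assuming the StatLib download succeeds or the data
# is already cached under `data_home`); `fetch_california_housing` is the
# loader defined above:
#
#   dataset = fetch_california_housing()
#   print(dataset.data.shape)        # (20640, 8)
#   print(dataset.feature_names)     # ['MedInc', 'HouseAge', ..., 'Longitude']
#   print(dataset.target[:3])        # house values in units of 100,000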
|
bsd-3-clause
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/contrib/gis/geos/prototypes/__init__.py
|
244
|
1319
|
"""
This module contains all of the GEOS ctypes function prototypes. Each
prototype handles the interaction between the GEOS library and Python
via ctypes.
"""
# Coordinate sequence routines.
from django.contrib.gis.geos.prototypes.coordseq import create_cs, get_cs, \
cs_clone, cs_getordinate, cs_setordinate, cs_getx, cs_gety, cs_getz, \
cs_setx, cs_sety, cs_setz, cs_getsize, cs_getdims
# Geometry routines.
from django.contrib.gis.geos.prototypes.geom import from_hex, from_wkb, from_wkt, \
create_point, create_linestring, create_linearring, create_polygon, create_collection, \
destroy_geom, get_extring, get_intring, get_nrings, get_geomn, geom_clone, \
geos_normalize, geos_type, geos_typeid, geos_get_srid, geos_set_srid, \
get_dims, get_num_coords, get_num_geoms, \
to_hex, to_wkb, to_wkt
# Miscellaneous routines.
from django.contrib.gis.geos.prototypes.misc import *
# Predicates
from django.contrib.gis.geos.prototypes.predicates import geos_hasz, geos_isempty, \
geos_isring, geos_issimple, geos_isvalid, geos_contains, geos_crosses, \
geos_disjoint, geos_equals, geos_equalsexact, geos_intersects, \
geos_overlaps, geos_relatepattern, geos_touches, geos_within
# Topology routines
from django.contrib.gis.geos.prototypes.topology import *
|
bsd-3-clause
|
j-fuentes/werkzeug
|
tests/contrib/test_sessions.py
|
30
|
1698
|
# -*- coding: utf-8 -*-
"""
tests.sessions
~~~~~~~~~~~~~~
Tests for the sessions contrib module.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
from tempfile import gettempdir
from werkzeug.contrib.sessions import FilesystemSessionStore
def test_default_tempdir():
store = FilesystemSessionStore()
assert store.path == gettempdir()
def test_basic_fs_sessions(tmpdir):
store = FilesystemSessionStore(str(tmpdir))
x = store.new()
assert x.new
assert not x.modified
x['foo'] = [1, 2, 3]
assert x.modified
store.save(x)
x2 = store.get(x.sid)
assert not x2.new
assert not x2.modified
assert x2 is not x
assert x2 == x
x2['test'] = 3
assert x2.modified
assert not x2.new
store.save(x2)
x = store.get(x.sid)
store.delete(x)
x2 = store.get(x.sid)
# the session is not new when it was used previously.
assert not x2.new
def test_non_urandom(tmpdir):
urandom = os.urandom
del os.urandom
try:
store = FilesystemSessionStore(str(tmpdir))
store.new()
finally:
os.urandom = urandom
def test_renewing_fs_session(tmpdir):
store = FilesystemSessionStore(str(tmpdir), renew_missing=True)
x = store.new()
store.save(x)
store.delete(x)
x2 = store.get(x.sid)
assert x2.new
def test_fs_session_listing(tmpdir):
store = FilesystemSessionStore(str(tmpdir), renew_missing=True)
sessions = set()
for x in range(10):
sess = store.new()
store.save(sess)
sessions.add(sess.sid)
listed_sessions = set(store.list())
assert sessions == listed_sessions
|
bsd-3-clause
|
KyleJamesWalker/ansible
|
lib/ansible/modules/network/f5/bigip_pool.py
|
41
|
20012
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Matt Hite <mhite@hotmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_pool
short_description: "Manages F5 BIG-IP LTM pools"
description:
- Manages F5 BIG-IP LTM pools via iControl SOAP API
version_added: 1.2
author:
- Matt Hite (@mhite)
- Tim Rupp (@caphrim007)
notes:
- Requires BIG-IP software version >= 11
- F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
- Best run as a local_action in your playbook
requirements:
- bigsuds
options:
description:
description:
- Specifies descriptive text that identifies the pool.
required: false
version_added: "2.3"
state:
description:
- Pool/pool member state
required: false
default: present
choices:
- present
- absent
aliases: []
name:
description:
- Pool name
required: true
default: null
choices: []
aliases:
- pool
partition:
description:
- Partition of pool/pool member
required: false
default: 'Common'
choices: []
aliases: []
lb_method:
description:
- Load balancing method
version_added: "1.3"
required: False
default: 'round_robin'
choices:
- round_robin
- ratio_member
- least_connection_member
- observed_member
- predictive_member
- ratio_node_address
- least_connection_node_address
- fastest_node_address
- observed_node_address
- predictive_node_address
- dynamic_ratio
- fastest_app_response
- least_sessions
- dynamic_ratio_member
- l3_addr
- weighted_least_connection_member
- weighted_least_connection_node_address
- ratio_session
- ratio_least_connection_member
- ratio_least_connection_node_address
aliases: []
monitor_type:
description:
- Monitor rule type when monitors > 1
version_added: "1.3"
required: False
default: null
choices: ['and_list', 'm_of_n']
aliases: []
quorum:
description:
- Monitor quorum value when monitor_type is m_of_n
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
monitors:
description:
- Monitor template name list. Always use the full path to the monitor.
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
slow_ramp_time:
description:
- Sets the ramp-up time (in seconds) to gradually ramp up the load on
newly added pool members or members freshly detected as up
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
reselect_tries:
description:
- Sets the number of times the system tries to contact a pool member
after a passive failure
version_added: "2.2"
required: False
default: null
choices: []
aliases: []
service_down_action:
description:
- Sets the action to take when a node goes down in the pool
version_added: "1.3"
required: False
default: null
choices:
- none
- reset
- drop
- reselect
aliases: []
host:
description:
- "Pool member IP"
required: False
default: null
choices: []
aliases:
- address
port:
description:
- Pool member port
required: False
default: null
choices: []
aliases: []
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Create pool
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
name: "my-pool"
partition: "Common"
lb_method: "least_connection_member"
slow_ramp_time: 120
delegate_to: localhost
- name: Modify load balancer method
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
name: "my-pool"
partition: "Common"
lb_method: "round_robin"
- name: Add pool member
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
name: "my-pool"
partition: "Common"
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
- name: Remove pool member from pool
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
name: "my-pool"
partition: "Common"
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
- name: Delete pool
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
name: "my-pool"
partition: "Common"
'''
RETURN = '''
'''
def pool_exists(api, pool):
# hack to determine if pool exists
result = False
try:
api.LocalLB.Pool.get_object_status(pool_names=[pool])
result = True
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_pool(api, pool, lb_method):
# create requires lb_method but we don't want to default
# to a value on subsequent runs
if not lb_method:
lb_method = 'round_robin'
lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
api.LocalLB.Pool.create_v2(pool_names=[pool], lb_methods=[lb_method],
members=[[]])
def remove_pool(api, pool):
api.LocalLB.Pool.delete_pool(pool_names=[pool])
def get_lb_method(api, pool):
lb_method = api.LocalLB.Pool.get_lb_method(pool_names=[pool])[0]
lb_method = lb_method.strip().replace('LB_METHOD_', '').lower()
return lb_method
def set_lb_method(api, pool, lb_method):
lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
api.LocalLB.Pool.set_lb_method(pool_names=[pool], lb_methods=[lb_method])
def get_monitors(api, pool):
result = api.LocalLB.Pool.get_monitor_association(pool_names=[pool])[0]['monitor_rule']
monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
quorum = result['quorum']
monitor_templates = result['monitor_templates']
return (monitor_type, quorum, monitor_templates)
def set_monitors(api, pool, monitor_type, quorum, monitor_templates):
monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper()
monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates}
monitor_association = {'pool_name': pool, 'monitor_rule': monitor_rule}
api.LocalLB.Pool.set_monitor_association(monitor_associations=[monitor_association])
def get_slow_ramp_time(api, pool):
result = api.LocalLB.Pool.get_slow_ramp_time(pool_names=[pool])[0]
return result
def set_slow_ramp_time(api, pool, seconds):
api.LocalLB.Pool.set_slow_ramp_time(pool_names=[pool], values=[seconds])
def get_reselect_tries(api, pool):
result = api.LocalLB.Pool.get_reselect_tries(pool_names=[pool])[0]
return result
def set_reselect_tries(api, pool, tries):
api.LocalLB.Pool.set_reselect_tries(pool_names=[pool], values=[tries])
def get_action_on_service_down(api, pool):
result = api.LocalLB.Pool.get_action_on_service_down(pool_names=[pool])[0]
result = result.split("SERVICE_DOWN_ACTION_")[-1].lower()
return result
def set_action_on_service_down(api, pool, action):
action = "SERVICE_DOWN_ACTION_%s" % action.strip().upper()
api.LocalLB.Pool.set_action_on_service_down(pool_names=[pool], actions=[action])
def member_exists(api, pool, address, port):
# hack to determine if member exists
result = False
try:
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.get_member_object_status(pool_names=[pool],
members=[members])
result = True
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def delete_node_address(api, address):
result = False
try:
api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
result = True
except bigsuds.OperationFailed as e:
if "is referenced by a member of pool" in str(e):
result = False
else:
# genuine exception
raise
return result
def remove_pool_member(api, pool, address, port):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members])
def add_pool_member(api, pool, address, port):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members])
def set_description(api, pool, description):
api.LocalLB.Pool.set_description(
pool_names=[pool], descriptions=[description]
)
def get_description(api, pool):
return api.LocalLB.Pool.get_description(pool_names=[pool])[0]
def main():
lb_method_choices = ['round_robin', 'ratio_member',
'least_connection_member', 'observed_member',
'predictive_member', 'ratio_node_address',
'least_connection_node_address',
'fastest_node_address', 'observed_node_address',
'predictive_node_address', 'dynamic_ratio',
'fastest_app_response', 'least_sessions',
'dynamic_ratio_member', 'l3_addr',
'weighted_least_connection_member',
'weighted_least_connection_node_address',
'ratio_session', 'ratio_least_connection_member',
'ratio_least_connection_node_address']
monitor_type_choices = ['and_list', 'm_of_n']
service_down_choices = ['none', 'reset', 'drop', 'reselect']
argument_spec = f5_argument_spec()
meta_args = dict(
name=dict(type='str', required=True, aliases=['pool']),
lb_method=dict(type='str', choices=lb_method_choices),
monitor_type=dict(type='str', choices=monitor_type_choices),
quorum=dict(type='int'),
monitors=dict(type='list'),
slow_ramp_time=dict(type='int'),
reselect_tries=dict(type='int'),
service_down_action=dict(type='str', choices=service_down_choices),
host=dict(type='str', aliases=['address']),
port=dict(type='int'),
description=dict(type='str')
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(
msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task'
)
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
state = module.params['state']
partition = module.params['partition']
validate_certs = module.params['validate_certs']
description = module.params['description']
name = module.params['name']
pool = fq_name(partition, name)
lb_method = module.params['lb_method']
if lb_method:
lb_method = lb_method.lower()
monitor_type = module.params['monitor_type']
if monitor_type:
monitor_type = monitor_type.lower()
quorum = module.params['quorum']
monitors = module.params['monitors']
if monitors:
monitors = []
for monitor in module.params['monitors']:
monitors.append(fq_name(partition, monitor))
slow_ramp_time = module.params['slow_ramp_time']
reselect_tries = module.params['reselect_tries']
service_down_action = module.params['service_down_action']
if service_down_action:
service_down_action = service_down_action.lower()
host = module.params['host']
address = fq_name(partition, host)
port = module.params['port']
# sanity check user supplied values
if (host and port is None) or (port is not None and not host):
module.fail_json(msg="both host and port must be supplied")
if port is not None and (0 > port or port > 65535):
module.fail_json(msg="valid ports must be in range 0 - 65535")
if monitors:
if len(monitors) == 1:
# set default required values for single monitor
quorum = 0
monitor_type = 'single'
elif len(monitors) > 1:
if not monitor_type:
module.fail_json(msg="monitor_type required for monitors > 1")
if monitor_type == 'm_of_n' and not quorum:
module.fail_json(msg="quorum value required for monitor_type m_of_n")
if monitor_type != 'm_of_n':
quorum = 0
elif monitor_type:
# no monitors specified but monitor_type exists
module.fail_json(msg="monitor_type requires the monitors parameter")
elif quorum is not None:
# no monitors specified but quorum exists
module.fail_json(msg="quorum requires monitors parameter")
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
result = {'changed': False} # default
if state == 'absent':
if host and port and pool:
# member removal takes precedent
if pool_exists(api, pool) and member_exists(api, pool, address, port):
if not module.check_mode:
remove_pool_member(api, pool, address, port)
deleted = delete_node_address(api, address)
result = {'changed': True, 'deleted': deleted}
else:
result = {'changed': True}
elif pool_exists(api, pool):
# no host/port supplied, must be pool removal
if not module.check_mode:
# hack to handle concurrent runs of module
# pool might be gone before we actually remove it
try:
remove_pool(api, pool)
result = {'changed': True}
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = {'changed': False}
else:
# genuine exception
raise
else:
# check-mode return value
result = {'changed': True}
elif state == 'present':
update = False
if not pool_exists(api, pool):
# pool does not exist -- need to create it
if not module.check_mode:
# a bit of a hack to handle concurrent runs of this module.
# even though we've checked the pool doesn't exist,
# it may exist by the time we run create_pool().
# this catches the exception and does something smart
# about it!
try:
create_pool(api, pool, lb_method)
result = {'changed': True}
except bigsuds.OperationFailed as e:
if "already exists" in str(e):
update = True
else:
# genuine exception
raise
else:
if monitors:
set_monitors(api, pool, monitor_type, quorum, monitors)
if slow_ramp_time:
set_slow_ramp_time(api, pool, slow_ramp_time)
if reselect_tries:
set_reselect_tries(api, pool, reselect_tries)
if service_down_action:
set_action_on_service_down(api, pool, service_down_action)
if host and port:
add_pool_member(api, pool, address, port)
if description:
set_description(api, pool, description)
else:
# check-mode return value
result = {'changed': True}
else:
# pool exists -- potentially modify attributes
update = True
if update:
if lb_method and lb_method != get_lb_method(api, pool):
if not module.check_mode:
set_lb_method(api, pool, lb_method)
result = {'changed': True}
if monitors:
t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, pool)
if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)):
if not module.check_mode:
set_monitors(api, pool, monitor_type, quorum, monitors)
result = {'changed': True}
if slow_ramp_time and slow_ramp_time != get_slow_ramp_time(api, pool):
if not module.check_mode:
set_slow_ramp_time(api, pool, slow_ramp_time)
result = {'changed': True}
if reselect_tries and reselect_tries != get_reselect_tries(api, pool):
if not module.check_mode:
set_reselect_tries(api, pool, reselect_tries)
result = {'changed': True}
if service_down_action and service_down_action != get_action_on_service_down(api, pool):
if not module.check_mode:
set_action_on_service_down(api, pool, service_down_action)
result = {'changed': True}
if (host and port) and not member_exists(api, pool, address, port):
if not module.check_mode:
add_pool_member(api, pool, address, port)
result = {'changed': True}
if (host and port == 0) and not member_exists(api, pool, address, port):
if not module.check_mode:
add_pool_member(api, pool, address, port)
result = {'changed': True}
if description and description != get_description(api, pool):
if not module.check_mode:
set_description(api, pool, description)
result = {'changed': True}
except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
iAMr00t/opencog
|
opencog/python/learning/bayesian_learning/dynamics.py
|
34
|
2843
|
from decimal import Decimal
from random import random as rand
from learning.bayesian_learning.network import Row
from sample_data.uci_adult_dataset import main
__author__ = 'keyvan'
def _givens_and_negations_from(kwargs):
if kwargs.has_key('givens'):
givens = set(kwargs['givens'])
else:
givens=set()
if kwargs.has_key('negations'):
negations = set(kwargs['negations'])
else:
negations=set()
return givens, negations
class DataObserver(list):
"""
Container with the ability of calculating probabilities
In each time-step, only add the set of the names of the variables
that had value of True in that time-step (all the other variables
are considered false for this step, open-world assumption)
"""
epsilon = 0.00001
def probability_of(self, variable, value=True, **kwargs):
"""
Calculates probability for value of True or False by
having given and negation parameters, based on perceived
data.
e.g. P(A|B,C) : probability_of('A', givens=['B','C'])
P(~A|B,~C) : probability_of('A', False, givens=['B'], negations=['C'])
"""
givens, negations = _givens_and_negations_from(kwargs)
matched, total = 0, Decimal(0)
for record in self:
if givens.issubset(record) and len(negations & record) == 0:
total += 1
if record[variable] is value:
matched += 1
if not total:
return total
return matched / total
TEST_VARIABLES = ['A','B','C','D','E']
A,B,C,D,E = TEST_VARIABLES
def generate_test_record():
"""
Bayes net: A->C , B->C, B->D, E is independent
By current probability distribution:
    P(C) = 0.46 (by marginalising over all possible configurations)
C and D are 'conditionally independent' given B
"""
record = Row()
if rand() < 0.5: # P(A) = 0.5
record.add(A)
if rand() < 0.6: # P(B) = 0.6
record.add(B)
if record == set([A,B]):
if rand() < 0.9: # P(C|A,B) = 0.9
record.add(C)
    elif record == set([A]):
        if rand() < 0.4: # P(C|A,~B) = 0.4
            record.add(C)
    elif record == set([B]):
        if rand() < 0.3: # P(C|~A,B) = 0.3
            record.add(C)
else:
if rand() < 0.1: # P(C|~A,~B) = 0.1
record.add(C)
if B in record:
if rand() < 0.6: # P(D|B) = 0.6
record.add(D)
if rand() < 0.4:
record.add(E) # P(E) = 0.4, independent of the rest
return record
def generate_test_dataset(dataset_size):
data = DataObserver()
for i in range(dataset_size):
data.append(main.uci_adult())
return data
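# A minimal usage sketch (hedged): build an observer from generated records and
# query conditional probabilities. It assumes Row supports the membership-style
# indexing used by probability_of() above; the sample size is illustrative.
#
# data = DataObserver()
# for _ in range(1000):
#     data.append(generate_test_record())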
#print data.probability_of('C', givens=[A], negations=[B])
# print data.probability_of('D', givens=['A','B'])
|
agpl-3.0
|
entropyx/callme
|
callme/proxy.py
|
1
|
9608
|
# Copyright (c) 2009-2014, Christian Haintz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of callme nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import socket
import time
import uuid
import kombu
from callme import base
from callme import exceptions as exc
from callme import protocol as pr
LOG = logging.getLogger(__name__)
REQUEST_TIMEOUT = 60
class Proxy(base.Base):
"""This Proxy class is used to handle the communication with the rpc
server.
:keyword server_id: default id of the Server (can be declared later
see :func:`use_server`)
:keyword amqp_host: the host of where the AMQP Broker is running
:keyword amqp_user: the username for the AMQP Broker
:keyword amqp_password: the password for the AMQP Broker
:keyword amqp_vhost: the virtual host of the AMQP Broker
:keyword amqp_port: the port of the AMQP Broker
:keyword ssl: use SSL connection for the AMQP Broker
:keyword timeout: default timeout for calls in seconds
:keyword durable: make all exchanges and queues durable
:keyword auto_delete: delete server queues after all connections are closed
not applicable for client queues
"""
def __init__(self,
server_exchange_name,
server_queue_name=None,
server_routing_key=None,
amqp_host='localhost',
amqp_user='guest',
amqp_password='guest',
amqp_vhost='/',
amqp_port=5672,
ssl=False,
timeout=REQUEST_TIMEOUT,
durable=False,
auto_delete=True,
):
super(Proxy, self).__init__(amqp_host, amqp_user, amqp_password,
amqp_vhost, amqp_port, ssl)
self._uuid = str(uuid.uuid4())
self._server_exchange_name = server_exchange_name
self._server_queue_name = server_queue_name
self._server_routing_key = server_routing_key
self._timeout = timeout
self._is_received = False
self._corr_id = None
self._response = None
self._exchange_name = 'client_{0}_ex_{1}'.format(self._server_exchange_name, self._uuid)
self._queue_name = 'client_{0}_queue_{1}'.format(self._server_queue_name, self._uuid) if self._server_queue_name else ''
self._durable = durable
self._auto_delete = auto_delete
# create queue
queue = self._make_queue(self._queue_name, None,
durable=self._durable,
auto_delete=True)
# create consumer
consumer = kombu.Consumer(channel=self._conn,
queues=queue,
callbacks=[self._on_response],
accept=['pickle'])
consumer.consume()
def use_server(self, exchange_name=None, queue_name=None, timeout=None):
"""Use the specified server and set an optional timeout for the method
call.
Typical use:
>> my_proxy.use_server('foo_exchange','foo.receive').a_remote_func()
:keyword exchange_name: the exchange_name where the call will be made
:keyword queue_name: the queue_name where the call will be made
:keyword timeout: set or overrides the call timeout in seconds
:rtype: return `self` to cascade further calls
"""
        if exchange_name is not None:
            self._server_exchange_name = exchange_name
        if queue_name is not None:
            self._server_queue_name = queue_name
if timeout is not None:
self._timeout = timeout
return self
def _on_response(self, response, message):
"""This method is automatically called when a response is incoming and
decides if it is the message we are waiting for - the message with the
result.
:param response: the body of the amqp message already deserialized
by kombu
:param message: the plain amqp kombu.message with additional
information
"""
LOG.debug("Got response: {0}".format(response))
try:
message.ack()
except Exception:
LOG.exception("Failed to acknowledge AMQP message.")
else:
LOG.debug("AMQP message acknowledged.")
# check response type
if not isinstance(response, pr.RpcResponse):
LOG.warning("Response is not a `RpcResponse` instance.")
return
# process response
try:
if self._corr_id == message.properties['correlation_id']:
self._response = response
self._is_received = True
except KeyError:
LOG.error("Message has no `correlation_id` property.")
def __request(self, func_name, func_args, func_kwargs):
"""The remote-method-call execution function.
:param func_name: name of the method that should be executed
:param func_args: arguments for the remote-method
:param func_kwargs: keyword arguments for the remote-method
:type func_name: string
:type func_args: list of parameters
:rtype: result of the method
"""
self._corr_id = str(uuid.uuid4())
request = pr.RpcRequest(func_name, func_args, func_kwargs)
LOG.debug("Publish request: {0}".format(request))
# publish request
with kombu.producers[self._conn].acquire(block=True) as producer:
type = 'topic'
exchange = self._make_exchange(
self._server_exchange_name,
type=type,
durable=self._durable,
auto_delete=self._auto_delete)
producer.publish(body=request,
serializer='pickle',
exchange=exchange,
reply_to=self._queue_name,
correlation_id=self._corr_id,
routing_key=self._server_routing_key)
# start waiting for the response
self._wait_for_result()
self._is_received = False
        # handle response
result = self._response.result
LOG.debug("Result: {!r}".format(result))
if self._response.is_exception:
raise result
return result
def _wait_for_result(self):
"""Waits for the result from the server, checks every second if
a timeout occurred. If a timeout occurred - the `RpcTimeout` exception
will be raised.
"""
start_time = time.time()
while not self._is_received:
try:
self._conn.drain_events(timeout=1)
except socket.timeout:
if self._timeout > 0:
if time.time() - start_time > self._timeout:
raise exc.RpcTimeout("RPC Request timeout")
def __getattr__(self, name):
"""This method is invoked, if a method is being called, which doesn't
exist on Proxy. It is used for RPC, to get the function which should
be called on the Server.
"""
# magic method dispatcher
LOG.debug("Recursion: {0}".format(name))
return _Method(self.__request, name)
# ===========================================================================
class _Method(object):
"""This class is used to realize remote-method-calls.
:param send: name of the function that should be executed on Proxy
:param name: name of the method which should be called on the Server
"""
# some magic to bind an XML-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
self._send = send
self._name = name
def __getattr__(self, name):
return _Method(self._send, "{0}.{1}".format(self._name, name))
def __call__(self, *args, **kw):
return self._send(self._name, args, kw)
# ===========================================================================
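# A minimal usage sketch (hedged; the broker settings and the remote method
# name `add` are illustrative assumptions, not part of this module):
#
# proxy = Proxy('fooserver_ex', server_queue_name='fooserver_queue',
#               amqp_host='localhost', timeout=10)
# print(proxy.use_server('fooserver_ex', 'fooserver_queue').add(1, 2))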
|
bsd-3-clause
|
bitxbay/BitXBay
|
electrum/socks.py
|
65
|
16177
|
"""SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import socket
import struct
import sys
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass
_generalerrors = ("success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input")
_socks5errors = ("succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error")
_socks5autherrors = ("succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error")
_socks4errors = ("request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different user-ids",
"unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
"""
global _defaultproxy
_defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module):
"""wrapmodule(module)
Attempts to replace a module's socket library with a SOCKS socket. Must set
a default proxy using setdefaultproxy(...) first.
This will only work on modules that import socket directly into the namespace;
most of the Python Standard Library falls into this category.
"""
if _defaultproxy != None:
module.socket.socket = socksocket
else:
raise GeneralProxyError((4, "no proxy specified"))
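# A minimal usage sketch (hedged; urllib2 is just an example of a module that
# imports socket directly into its namespace, and the proxy address is an
# illustrative assumption):
#
# setdefaultproxy(PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
# import urllib2
# wrapmodule(urllib2)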
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
_orgsocket.__init__(self, family, type, proto, _sock)
if _defaultproxy != None:
self.__proxy = _defaultproxy
else:
self.__proxy = (None, None, None, None, None, None)
self.__proxysockname = None
self.__proxypeername = None
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = self.recv(count)
while len(data) < count:
d = self.recv(count-len(data))
if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
data = data + d
return data
def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxytype - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be preformed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.__proxy = (proxytype, addr, port, rdns, username, password)
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
Negotiates a connection through a SOCKS5 server.
"""
# First we'll send the authentication packages we support.
if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
# The username/password details were supplied to the
# setproxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
# We'll receive the server's response to determine which
# method was selected
chosenauth = self.__recvall(2)
if chosenauth[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
# Check the chosen authentication method
if chosenauth[1:2] == chr(0x00).encode():
# No authentication is required
pass
elif chosenauth[1:2] == chr(0x02).encode():
# Okay, we need to perform a basic username/password
# authentication.
self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
authstat = self.__recvall(2)
if authstat[0:1] != chr(0x01).encode():
# Bad response
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if authstat[1:2] != chr(0x00).encode():
# Authentication failed
self.close()
raise Socks5AuthError((3, _socks5autherrors[3]))
# Authentication succeeded
else:
# Reaching here is always bad
self.close()
            if chosenauth[1:2] == chr(0xFF).encode():
raise Socks5AuthError((2, _socks5autherrors[2]))
else:
raise GeneralProxyError((1, _generalerrors[1]))
# Now we can request the actual connection
req = struct.pack('BBB', 0x05, 0x01, 0x00)
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
ipaddr = socket.inet_aton(destaddr)
req = req + chr(0x01).encode() + ipaddr
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if self.__proxy[3]:
# Resolve remotely
ipaddr = None
req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
else:
# Resolve locally
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
req = req + chr(0x01).encode() + ipaddr
req = req + struct.pack(">H", destport)
self.sendall(req)
# Get the response
resp = self.__recvall(4)
if resp[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
elif resp[1:2] != chr(0x00).encode():
# Connection failed
self.close()
if ord(resp[1:2])<=8:
raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
else:
raise Socks5Error((9, _socks5errors[9]))
# Get the bound address/port
elif resp[3:4] == chr(0x01).encode():
boundaddr = self.__recvall(4)
elif resp[3:4] == chr(0x03).encode():
resp = resp + self.recv(1)
boundaddr = self.__recvall(ord(resp[4:5]))
else:
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
boundport = struct.unpack(">H", self.__recvall(2))[0]
self.__proxysockname = (boundaddr, boundport)
if ipaddr != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def getproxysockname(self):
"""getsockname() -> address info
Returns the bound IP address and port number at the proxy.
"""
return self.__proxysockname
def getproxypeername(self):
"""getproxypeername() -> address info
Returns the IP and port number of the proxy.
"""
return _orgsocket.getpeername(self)
def getpeername(self):
"""getpeername() -> address info
Returns the IP address and port number of the destination
machine (note: getproxypeername returns the proxy)
"""
return self.__proxypeername
def __negotiatesocks4(self,destaddr,destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
req = req + destaddr + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
self.close()
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
        if not rmtrslv:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def __negotiatehttp(self, destaddr, destport):
"""__negotiatehttp(self,destaddr,destport)
Negotiates a connection through an HTTP server.
"""
# If we need to resolve locally, we do this now
if not self.__proxy[3]:
addr = socket.gethostbyname(destaddr)
else:
addr = destaddr
self.sendall(("CONNECT " + addr + ":" + str(destport) + " HTTP/1.1\r\n" + "Host: " + destaddr + "\r\n\r\n").encode())
# We read the response until we get the string "\r\n\r\n"
resp = self.recv(1)
while resp.find("\r\n\r\n".encode()) == -1:
resp = resp + self.recv(1)
# We just need the first line to check if the connection
# was successful
statusline = resp.splitlines()[0].split(" ".encode(), 2)
if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
try:
statuscode = int(statusline[1])
except ValueError:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if statuscode != 200:
self.close()
raise HTTPError((statuscode, statusline[2]))
self.__proxysockname = ("0.0.0.0", 0)
self.__proxypeername = (addr, destport)
def connect(self, destpair):
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (type(destpair[0]) != type('')) or (type(destpair[1]) != int):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
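# A minimal usage sketch (hedged; the SOCKS5 proxy address below is an
# illustrative assumption):
#
# s = socksocket()
# s.setproxy(PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
# s.connect(("example.com", 80))
# s.sendall("GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")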
|
gpl-3.0
|
GabrielBrascher/cloudstack
|
test/selenium/smoke/TemplatesAndISO.py
|
8
|
6757
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
ISO PART YET TO BE ADDED:: remove this after adding it.
'''
import sys, os
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib'))
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import unittest, time
import initialize
import Global_Locators
class Template_Add(unittest.TestCase):
def setUp(self):
self.driver = initialize.getOrCreateWebdriver()
self.verificationErrors = []
def test_templateadd(self):
driver = self.driver
## Action part
#Make sure you are on Dashboard
driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click()
time.sleep(2)
# Go to Templates
driver.find_element_by_xpath(Global_Locators.templates_xpath).click()
#Select Template from drop down list
driver.find_element_by_xpath(Global_Locators.template_xpath).click()
# Add Template
driver.find_element_by_xpath(Global_Locators.AddTemplate_xpath).click()
# Following have names.. so they do not have their global entries.
driver.find_element_by_id("label_name").clear()
driver.find_element_by_id("label_name").send_keys("Test Template Ubuntu")
driver.find_element_by_id("label_description").clear()
driver.find_element_by_id("label_description").send_keys("Ubuntu 10.04")
driver.find_element_by_id("URL").clear()
driver.find_element_by_id("URL").send_keys("http://nfs1.lab.vmops.com/templates/Ubuntu/Ubuntuu-10-04-64bit-server.vhd")
Select(driver.find_element_by_id("label_os_type")).select_by_visible_text("Ubuntu 10.04 (64-bit)")
driver.find_element_by_id("label_public").click()
driver.find_element_by_id("label_featured").click()
driver.find_element_by_xpath("//button[@type='button']").click()
time.sleep(2)
# Go to Dash Board
driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click()
time.sleep(600)
##Verification will be if this offering shows up into table and we can actually edit it.
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def tearDown(self):
self.assertEqual([], self.verificationErrors)
class Template_Edit(unittest.TestCase):
def setUp(self):
self.driver = initialize.getOrCreateWebdriver()
self.verificationErrors = []
def test_templateedit(self):
driver = self.driver
## Action part
#Make sure you are on Dashboard
driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click()
time.sleep(2)
# Go to Templates
driver.find_element_by_xpath(Global_Locators.templates_xpath).click()
#Select Template from drop down list
driver.find_element_by_xpath(Global_Locators.template_xpath).click()
linkclass = None
linkclass = driver.find_elements_by_xpath(Global_Locators.template_table_xpath) # This returns a list
for link in linkclass:
if link.text == "Test Template Ubuntu": # We will search for our VM in this table
link.click()
time.sleep(2)
# Change name
driver.find_element_by_name("name").clear()
driver.find_element_by_name("name").send_keys("Test template")
# Change Description
driver.find_element_by_name("displaytext").clear()
driver.find_element_by_name("displaytext").send_keys("ubuntu")
driver.find_element_by_css_selector(Global_Locators.template_editdone_css).click()
time.sleep(2)
#Dashboard
driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click()
time.sleep(10)
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def tearDown(self):
self.assertEqual([], self.verificationErrors)
# Now we will find this offering and delete it!!
class Template_Delete(unittest.TestCase):
def setUp(self):
self.driver = initialize.getOrCreateWebdriver()
self.verificationErrors = []
def test_templatedelete(self):
driver = self.driver
## Action part
#Make sure you are on Dashboard
driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click()
time.sleep(2)
# Go to Templates
driver.find_element_by_xpath(Global_Locators.templates_xpath).click()
#Select Template from drop down list
driver.find_element_by_xpath(Global_Locators.template_xpath).click()
linkclass = None
linkclass = driver.find_elements_by_xpath(Global_Locators.template_table_xpath) # This returns a list
for link in linkclass:
if link.text == "Test Template": # We will search for our VM in this table
link.click()
time.sleep(2)
        driver.find_element_by_css_selector(Global_Locators.template_delete_css).click()
driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click()
time.sleep(2)
#Dashboard
driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click()
time.sleep(20)
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def tearDown(self):
self.assertEqual([], self.verificationErrors)
|
apache-2.0
|
etamponi/resilient-protocol
|
resilient/ensemble.py
|
1
|
6786
|
import hashlib
import numpy
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.tree.tree import DecisionTreeClassifier
from sklearn.utils.fixes import unique
from sklearn import preprocessing
from sklearn.utils.random import check_random_state
from resilient.logger import Logger
from resilient.selection_strategies import SelectBestPercent
from resilient.train_set_generators import RandomCentroidPDFTrainSetGenerator
from resilient.weighting_strategies import CentroidBasedWeightingStrategy
__author__ = 'Emanuele Tamponi <emanuele.tamponi@diee.unica.it>'
MAX_INT = numpy.iinfo(numpy.int32).max
class TrainingStrategy(BaseEstimator):
def __init__(self,
base_estimator=DecisionTreeClassifier(max_features='auto'),
train_set_generator=RandomCentroidPDFTrainSetGenerator(),
random_sample=None):
self.base_estimator = base_estimator
self.train_set_generator = train_set_generator
self.random_sample = random_sample
def train_estimators(self, n, inp, y, weighting_strategy, random_state):
classifiers = []
weight_generator = self.train_set_generator.get_sample_weights(
n, inp, y, random_state
)
for i, weights in enumerate(weight_generator):
if self.random_sample is not None:
ix = random_state.choice(
len(y),
size=int(self.random_sample*len(y)),
p=weights, replace=True
)
weights = numpy.bincount(ix, minlength=len(y))
s = weights.sum()
weights = numpy.array([float(w) / s for w in weights])
Logger.get().write("!Training estimator:", (i+1))
est = self._make_estimator(inp, y, weights, random_state)
weighting_strategy.add_estimator(est, inp, y, weights)
classifiers.append(est)
return classifiers
def _make_estimator(self, inp, y, sample_weights, random_state):
seed = random_state.randint(MAX_INT)
est = clone(self.base_estimator)
est.set_params(random_state=check_random_state(seed))
est.fit(inp, y, sample_weight=sample_weights)
return est
class ResilientEnsemble(BaseEstimator, ClassifierMixin):
def __init__(self,
pipeline=None,
n_estimators=10,
training_strategy=TrainingStrategy(),
weighting_strategy=CentroidBasedWeightingStrategy(),
selection_strategy=SelectBestPercent(),
multiply_by_weight=False,
use_prob=True,
random_state=None):
self.pipeline = pipeline
self.n_estimators = n_estimators
self.training_strategy = training_strategy
self.weighting_strategy = weighting_strategy
self.selection_strategy = selection_strategy
self.multiply_by_weight = multiply_by_weight
self.use_prob = use_prob
self.random_state = random_state
# Training time attributes
self.classes_ = None
self.n_classes_ = None
self.classifiers_ = None
self.precomputed_probs_ = None
self.precomputed_weights_ = None
self.random_state_ = None
def fit(self, inp, y):
self.precomputed_probs_ = None
self.precomputed_weights_ = None
self.classes_, y = unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
self.random_state_ = check_random_state(self.random_state)
if self.pipeline is not None:
inp = self.pipeline.fit_transform(inp)
self.weighting_strategy.prepare(inp, y)
self.classifiers_ = self.training_strategy.train_estimators(
self.n_estimators, inp, y,
self.weighting_strategy, self.random_state_
)
# Reset it to null because the previous line uses self.predict
self.precomputed_probs_ = None
self.precomputed_weights_ = None
return self
def predict_proba(self, inp):
# inp is array-like, (N, D), one instance per row
# output is array-like, (N, n_classes_), each row sums to one
if self.precomputed_probs_ is None:
self._precompute(inp)
prob = numpy.zeros((len(inp), self.n_classes_))
for i in range(len(inp)):
active_indices = self.selection_strategy.get_indices(
self.precomputed_weights_[i], self.random_state_
)
prob[i] = self.precomputed_probs_[i][active_indices].sum(axis=0)
preprocessing.normalize(prob, norm='l1', copy=False)
return prob
def predict(self, inp):
# inp is array-like, (N, D), one instance per row
# output is array-like, N, one label per instance
if self.pipeline is not None:
inp = self.pipeline.transform(inp)
p = self.predict_proba(inp)
return self.classes_[numpy.argmax(p, axis=1)]
def _precompute(self, inp):
self.precomputed_probs_ = numpy.zeros(
(len(inp), len(self.classifiers_), self.n_classes_)
)
self.precomputed_weights_ = numpy.zeros(
(len(inp), len(self.classifiers_))
)
for i, x in enumerate(inp):
Logger.get().write(
"!Computing", len(inp), "probabilities and weights:", (i+1)
)
for j, cls in enumerate(self.classifiers_):
prob = cls.predict_proba(x)[0]
if not self.use_prob:
max_index = prob.argmax()
prob = numpy.zeros_like(prob)
prob[max_index] = 1
self.precomputed_probs_[i][j] = prob
self.precomputed_weights_[i] = (
self.weighting_strategy.weight_estimators(x)
)
if self.multiply_by_weight:
for j in range(len(self.classifiers_)):
self.precomputed_probs_[i][j] *= (
self.precomputed_weights_[i][j]
)
def get_directory(self):
current_state = self.random_state
current_selection = self.selection_strategy
self.random_state = None
self.selection_strategy = None
filename = hashlib.md5(str(self)).hexdigest()
self.random_state = current_state
self.selection_strategy = current_selection
return filename
def get_filename(self):
return self.get_directory() + "/ensemble"
def __eq__(self, other):
return isinstance(other, ResilientEnsemble) and (
self.get_directory() == other.get_directory()
)
def __hash__(self):
return hash(self.get_directory())
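# A minimal usage sketch (hedged; the synthetic data below is an illustrative
# assumption, not part of this module):
#
# inp = numpy.random.rand(100, 4)
# y = (inp[:, 0] > 0.5).astype(int)
# ensemble = ResilientEnsemble(n_estimators=5, random_state=42)
# ensemble.fit(inp, y)
# print(ensemble.predict(inp[:10]))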
|
gpl-2.0
|
mnubo/kubernetes-py
|
kubernetes_py/models/v1/Volume.py
|
3
|
9297
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes_py.models.v1.AWSElasticBlockStoreVolumeSource import AWSElasticBlockStoreVolumeSource
from kubernetes_py.models.v1.EmptyDirVolumeSource import EmptyDirVolumeSource
from kubernetes_py.models.v1.GCEPersistentDiskVolumeSource import GCEPersistentDiskVolumeSource
from kubernetes_py.models.v1.GitRepoVolumeSource import GitRepoVolumeSource
from kubernetes_py.models.v1.HostPathVolumeSource import HostPathVolumeSource
from kubernetes_py.models.v1.NFSVolumeSource import NFSVolumeSource
from kubernetes_py.models.v1.SecretVolumeSource import SecretVolumeSource
from kubernetes_py.models.v1.PersistentVolumeClaimVolumeSource import PersistentVolumeClaimVolumeSource
from kubernetes_py.models.v1.ConfigMapVolumeSource import ConfigMapVolumeSource
from kubernetes_py.utils import is_valid_string, filter_model
class Volume(object):
"""
http://kubernetes.io/docs/api-reference/v1/definitions/#_v1_volume
"""
VOLUME_TYPES_TO_SOURCE_MAP = {
"awsElasticBlockStore": AWSElasticBlockStoreVolumeSource,
"emptyDir": EmptyDirVolumeSource,
"gcePersistentDisk": GCEPersistentDiskVolumeSource,
"gitRepo": GitRepoVolumeSource,
"hostPath": HostPathVolumeSource,
"nfs": NFSVolumeSource,
"secret": SecretVolumeSource,
"persistentVolumeClaim": PersistentVolumeClaimVolumeSource,
"configMap": ConfigMapVolumeSource,
}
def __init__(self, model=None):
# TODO(froch): add support for the below
# self._iscsi = None
# self._glusterfs = None
# self._rbd = None
# self._flex_volume = None
# self._cinder = None
# self._cephfs = None
# self._flocker = None
# self._downward_api = None
# self._fc = None
# self._azure_file = None
# self._vsphere_volume
# self._quobyte = None
# self._azuredisk = None
self._awsElasticBlockStore = None
self._emptyDir = None
self._gcePersistentDisk = None
self._gitRepo = None
self._hostPath = None
self._name = None
self._nfs = None
self._persistentVolumeClaim = None
self._secret = None
self._config_map = None
if model is not None:
m = filter_model(model)
self._build_with_model(m)
def __eq__(self, other):
# see https://github.com/kubernetes/kubernetes/blob/master/docs/design/identifiers.md
if isinstance(other, self.__class__):
return self.name == other.name
return NotImplemented
def _build_with_model(self, model=None):
if "awsElasticBlockStore" in model:
self.awsElasticBlockStore = AWSElasticBlockStoreVolumeSource(model["awsElasticBlockStore"])
if "emptyDir" in model:
self.emptyDir = EmptyDirVolumeSource(model["emptyDir"])
if "gcePersistentDisk" in model:
self.gcePersistentDisk = GCEPersistentDiskVolumeSource(model["gcePersistentDisk"])
if "gitRepo" in model:
self.gitRepo = GitRepoVolumeSource(model["gitRepo"])
if "hostPath" in model:
self.hostPath = HostPathVolumeSource(model["hostPath"])
if "name" in model:
self.name = model["name"]
if "nfs" in model:
self.nfs = NFSVolumeSource(model["nfs"])
if "secret" in model:
self.secret = SecretVolumeSource(model["secret"])
if "persistentVolumeClaim" in model:
self.persistentVolumeClaim = PersistentVolumeClaimVolumeSource(model["persistentVolumeClaim"])
if "configMap" in model:
self.configMap = ConfigMapVolumeSource(model["configMap"])
@staticmethod
def vol_type_to_source(vol_type=None):
return Volume.VOLUME_TYPES_TO_SOURCE_MAP[vol_type]()
# ------------------------------------------------------------------------------------- aws ebs
@property
def awsElasticBlockStore(self):
return self._awsElasticBlockStore
@awsElasticBlockStore.setter
def awsElasticBlockStore(self, ebs=None):
if not isinstance(ebs, AWSElasticBlockStoreVolumeSource):
raise SyntaxError("Volume: aws_elastic_block_store: [ {0} ] is invalid.".format(ebs))
self._awsElasticBlockStore = ebs
# ------------------------------------------------------------------------------------- configMap
@property
def configMap(self):
return self._config_map
@configMap.setter
def configMap(self, config_map=None):
if not isinstance(config_map, ConfigMapVolumeSource):
raise SyntaxError("Volume: config_map: [ {0} ] is invalid.".format(config_map))
self._config_map = config_map
# ------------------------------------------------------------------------------------- emptyDir
@property
def emptyDir(self):
return self._emptyDir
@emptyDir.setter
def emptyDir(self, edir=None):
if not isinstance(edir, EmptyDirVolumeSource):
raise SyntaxError("Volume: empty_dir: [ {0} ] is invalid.".format(edir))
self._emptyDir = edir
# ------------------------------------------------------------------------------------- gce pd
@property
def gcePersistentDisk(self):
return self._gcePersistentDisk
@gcePersistentDisk.setter
def gcePersistentDisk(self, pd=None):
if not isinstance(pd, GCEPersistentDiskVolumeSource):
raise SyntaxError("Volume: gce_persistent_disk: [ {0} ] is invalid.".format(pd))
self._gcePersistentDisk = pd
# ------------------------------------------------------------------------------------- gitRepo
@property
def gitRepo(self):
return self._gitRepo
@gitRepo.setter
def gitRepo(self, repo=None):
if not isinstance(repo, GitRepoVolumeSource):
raise SyntaxError("Volume: git_repo: [ {0} ] is invalid.".format(repo))
self._gitRepo = repo
# ------------------------------------------------------------------------------------- hostPath
@property
def hostPath(self):
return self._hostPath
@hostPath.setter
def hostPath(self, hp=None):
if not isinstance(hp, HostPathVolumeSource):
raise SyntaxError("Volume: host_path: [ {0} ] is invalid.".format(hp))
self._hostPath = hp
# ------------------------------------------------------------------------------------- name
@property
def name(self):
return self._name
@name.setter
def name(self, name=None):
if not is_valid_string(name):
raise SyntaxError("Volume: name: [ {0} ] is invalid.".format(name))
self._name = name
# ------------------------------------------------------------------------------------- nfs
@property
def nfs(self):
return self._nfs
@nfs.setter
def nfs(self, nfs=None):
if not isinstance(nfs, NFSVolumeSource):
raise SyntaxError("Volume: nfs: [ {0} ] is invalid.".format(nfs))
self._nfs = nfs
# ------------------------------------------------------------------------------------- secret
@property
def secret(self):
return self._secret
@secret.setter
def secret(self, secret=None):
if not isinstance(secret, SecretVolumeSource):
raise SyntaxError("Volume: secret: [ {0} ] is invalid.".format(secret))
self._secret = secret
# ------------------------------------------------------------------------------------- persistentVolumeClaim
@property
def persistentVolumeClaim(self):
return self._persistentVolumeClaim
@persistentVolumeClaim.setter
def persistentVolumeClaim(self, pvc=None):
if not isinstance(pvc, PersistentVolumeClaimVolumeSource):
raise SyntaxError("Volume: persistentVolumeClaim: [ {0} ] is invalid.".format(pvc))
self._persistentVolumeClaim = pvc
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.awsElasticBlockStore is not None:
data["awsElasticBlockStore"] = self.awsElasticBlockStore.serialize()
if self.emptyDir is not None:
data["emptyDir"] = self.emptyDir.serialize()
if self.gcePersistentDisk is not None:
data["gcePersistentDisk"] = self.gcePersistentDisk.serialize()
if self.gitRepo is not None:
data["gitRepo"] = self.gitRepo.serialize()
if self.hostPath is not None:
data["hostPath"] = self.hostPath.serialize()
if self.name is not None:
data["name"] = self.name
if self.nfs is not None:
data["nfs"] = self.nfs.serialize()
if self.secret is not None:
data["secret"] = self.secret.serialize()
if self.persistentVolumeClaim is not None:
data["persistentVolumeClaim"] = self.persistentVolumeClaim.serialize()
if self.configMap is not None:
data["configMap"] = self.configMap.serialize()
return data
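# A minimal usage sketch (hedged; the emptyDir model below is an illustrative
# assumption, not part of this module):
#
# vol = Volume(model={"name": "scratch", "emptyDir": {}})
# assert vol.name == "scratch"
# print(vol.serialize())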
|
apache-2.0
|
mearleycf/mwepd
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/input_test.py
|
604
|
3207
|
#!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
def setUp(self):
self.nodes = {}
for x in ('a', 'b', 'c', 'd', 'e'):
self.nodes[x] = gyp.input.DependencyGraphNode(x)
def _create_dependency(self, dependent, dependency):
dependent.dependencies.append(dependency)
dependency.dependents.append(dependent)
def test_no_cycle_empty_graph(self):
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_line(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_dag(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['a'], self.nodes['c'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_cycle_self_reference(self):
self._create_dependency(self.nodes['a'], self.nodes['a'])
self.assertEquals([(self.nodes['a'], self.nodes['a'])],
self.nodes['a'].FindCycles())
def test_cycle_two_nodes(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self.assertEquals([(self.nodes['a'], self.nodes['b'], self.nodes['a'])],
self.nodes['a'].FindCycles())
self.assertEquals([(self.nodes['b'], self.nodes['a'], self.nodes['b'])],
self.nodes['b'].FindCycles())
def test_two_cycles(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['b'])
cycles = self.nodes['a'].FindCycles()
self.assertTrue(
(self.nodes['a'], self.nodes['b'], self.nodes['a']) in cycles)
self.assertTrue(
(self.nodes['b'], self.nodes['c'], self.nodes['b']) in cycles)
self.assertEquals(2, len(cycles))
def test_big_cycle(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
self._create_dependency(self.nodes['d'], self.nodes['e'])
self._create_dependency(self.nodes['e'], self.nodes['a'])
self.assertEquals([(self.nodes['a'],
self.nodes['b'],
self.nodes['c'],
self.nodes['d'],
self.nodes['e'],
self.nodes['a'])],
self.nodes['a'].FindCycles())
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
arbrandes/edx-platform
|
common/djangoapps/student/management/tests/test_bulk_change_enrollment.py
|
4
|
10533
|
"""Tests for the bulk_change_enrollment command."""
from unittest.mock import call, patch
import ddt
import pytest
from django.core.management import call_command
from django.core.management.base import CommandError
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.student.models import EVENT_NAME_ENROLLMENT_MODE_CHANGED, CourseEnrollment
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
class BulkChangeEnrollmentTests(SharedModuleStoreTestCase):
"""Tests for the bulk_change_enrollment command."""
def setUp(self):
super().setUp()
self.org = 'testX'
self.course = CourseFactory.create(org=self.org)
self.users = UserFactory.create_batch(5)
CourseOverview.load_from_module_store(self.course.id)
@patch('common.djangoapps.student.models.tracker')
@ddt.data(('audit', 'honor'), ('honor', 'audit'))
@ddt.unpack
def test_bulk_convert(self, from_mode, to_mode, mock_tracker):
"""Verify that enrollments are changed correctly."""
self._enroll_users(self.course, self.users, from_mode)
CourseModeFactory(course_id=self.course.id, mode_slug=to_mode)
        # Verify that no users are in the `to` mode yet.
assert len(CourseEnrollment.objects.filter(mode=to_mode, course_id=self.course.id)) == 0
args = '--course {course} --from_mode {from_mode} --to_mode {to_mode} --commit'.format(
course=str(self.course.id),
from_mode=from_mode,
to_mode=to_mode
)
call_command(
'bulk_change_enrollment',
*args.split(' ')
)
# Verify that all users have been moved -- if not, this will
# raise CourseEnrollment.DoesNotExist
for user in self.users:
CourseEnrollment.objects.get(mode=to_mode, course_id=self.course.id, user=user)
self._assert_mode_changed(mock_tracker, self.course, user, to_mode)
@patch('common.djangoapps.student.models.tracker')
@ddt.data(('audit', 'no-id-professional'), ('no-id-professional', 'audit'))
@ddt.unpack
def test_bulk_convert_with_org(self, from_mode, to_mode, mock_tracker):
"""Verify that enrollments are changed correctly when org was given."""
self._enroll_users(self.course, self.users, from_mode)
CourseModeFactory(course_id=self.course.id, mode_slug=to_mode)
# Create a second course under the same org
course_2 = CourseFactory.create(org=self.org)
CourseModeFactory(course_id=course_2.id, mode_slug=to_mode)
CourseOverview.load_from_module_store(course_2.id)
self._enroll_users(course_2, self.users, from_mode)
# Verify that no users are in the `to` mode yet.
assert len(CourseEnrollment.objects.filter(mode=to_mode, course_id=self.course.id)) == 0
assert len(CourseEnrollment.objects.filter(mode=to_mode, course_id=course_2.id)) == 0
args = '--org {org} --from_mode {from_mode} --to_mode {to_mode} --commit'.format(
org=self.org,
from_mode=from_mode,
to_mode=to_mode
)
call_command(
'bulk_change_enrollment',
*args.split(' ')
)
# Verify that all users have been moved -- if not, this will
# raise CourseEnrollment.DoesNotExist
for user in self.users:
for course in [self.course, course_2]:
CourseEnrollment.objects.get(mode=to_mode, course_id=course.id, user=user)
self._assert_mode_changed(mock_tracker, course, user, to_mode)
def test_with_org_and_course_key(self):
"""Verify that command raises CommandError when `org` and `course_key` both are given."""
self._enroll_users(self.course, self.users, 'audit')
CourseModeFactory(course_id=self.course.id, mode_slug='no-id-professional')
with pytest.raises(CommandError) as err:
call_command(
'bulk_change_enrollment',
org=self.org,
course=str(self.course.id),
from_mode='audit',
to_mode='no-id-professional',
commit=True,
)
assert 'Error: one of the arguments -c/--course -o/--org is required' == str(err.value)
@patch('common.djangoapps.student.models.tracker')
def test_with_org_and_invalid_to_mode(self, mock_tracker):
"""Verify that enrollments are changed correctly when org was given."""
from_mode = 'audit'
to_mode = 'no-id-professional'
self._enroll_users(self.course, self.users, from_mode)
# Create a second course under the same org
course_2 = CourseFactory.create(org=self.org)
CourseModeFactory(course_id=course_2.id, mode_slug=to_mode)
CourseOverview.load_from_module_store(course_2.id)
self._enroll_users(course_2, self.users, from_mode)
# Verify that no users are in the `to` mode yet.
assert len(CourseEnrollment.objects.filter(mode=to_mode, course_id=self.course.id)) == 0
assert len(CourseEnrollment.objects.filter(mode=to_mode, course_id=course_2.id)) == 0
args = '--org {org} --from_mode {from_mode} --to_mode {to_mode} --commit'.format(
org=self.org,
from_mode=from_mode,
to_mode=to_mode
)
call_command(
'bulk_change_enrollment',
*args.split(' ')
)
# Verify that users were not moved for the invalid course/mode combination
for user in self.users:
with pytest.raises(CourseEnrollment.DoesNotExist):
CourseEnrollment.objects.get(mode=to_mode, course_id=self.course.id, user=user)
# Verify that all users have been moved -- if not, this will
# raise CourseEnrollment.DoesNotExist
for user in self.users:
CourseEnrollment.objects.get(mode=to_mode, course_id=course_2.id, user=user)
self._assert_mode_changed(mock_tracker, course_2, user, to_mode)
def test_with_invalid_org(self):
"""Verify that command raises CommandError when invalid `org` is given."""
self._enroll_users(self.course, self.users, 'audit')
CourseModeFactory(course_id=self.course.id, mode_slug='no-id-professional')
with pytest.raises(CommandError) as err:
args = '--org {org} --from_mode {from_mode} --to_mode {to_mode} --commit'.format(
org='fakeX',
from_mode='audit',
to_mode='no-id-professional',
)
call_command(
'bulk_change_enrollment', *args.split(' ')
)
assert 'No courses exist for the org "fakeX".' == str(err.value)
def test_without_commit(self):
"""Verify that nothing happens when the `commit` flag is not given."""
self._enroll_users(self.course, self.users, 'audit')
CourseModeFactory(course_id=self.course.id, mode_slug='honor')
args = '--course {course} --from_mode {from_mode} --to_mode {to_mode}'.format(
course=str(self.course.id),
from_mode='audit',
to_mode='honor'
)
call_command(
'bulk_change_enrollment',
*args.split(' ')
)
# Verify that no users are in the honor mode.
assert len(CourseEnrollment.objects.filter(mode='honor', course_id=self.course.id)) == 0
def test_without_to_mode(self):
"""Verify that the command fails when the `to_mode` argument does not exist."""
self._enroll_users(self.course, self.users, 'audit')
CourseModeFactory(course_id=self.course.id, mode_slug='audit')
args = '--course {course} --from_mode {from_mode}'.format(
course='yolo',
from_mode='audit'
)
with pytest.raises(CommandError) as err:
call_command(
'bulk_change_enrollment',
*args.split(' ')
)
assert 'Error: the following arguments are required: -t/--to_mode' == str(err.value)
@ddt.data('from_mode', 'to_mode', 'course')
def test_without_options(self, option):
"""Verify that the command fails when some options are not given."""
command_options = {
'from_mode': 'audit',
'to_mode': 'honor',
'course': str(self.course.id),
}
command_options.pop(option)
with pytest.raises(CommandError):
call_command('bulk_change_enrollment', **command_options)
def test_bad_course_id(self):
"""Verify that the command fails when the given course ID does not parse."""
args = '--course {course} --from_mode {from_mode} --to_mode {to_mode}'.format(
course='yolo',
from_mode='audit',
to_mode='honor'
)
with pytest.raises(CommandError) as err:
call_command('bulk_change_enrollment', *args.split(' '))
assert 'Course ID yolo is invalid.' == str(err.value)
def test_nonexistent_course_id(self):
"""Verify that the command fails when the given course does not exist."""
args = '--course {course} --from_mode {from_mode} --to_mode {to_mode}'.format(
course='course-v1:testX+test+2016',
from_mode='audit',
to_mode='honor'
)
with pytest.raises(CommandError) as err:
call_command(
'bulk_change_enrollment',
*args.split(' ')
)
assert 'The given course course-v1:testX+test+2016 does not exist.' == str(err.value)
def _assert_mode_changed(self, mock_tracker, course, user, to_mode):
"""Confirm the analytics event was emitted."""
mock_tracker.emit.assert_has_calls(
[
call(
EVENT_NAME_ENROLLMENT_MODE_CHANGED,
{'course_id': str(course.id), 'user_id': user.id, 'mode': to_mode}
),
]
)
def _enroll_users(self, course, users, mode):
"""Enroll users in the given mode."""
for user in users:
CourseEnrollmentFactory(mode=mode, course_id=course.id, user=user)
|
agpl-3.0
|
singingwolfboy/edx-ora2
|
docs/en_us/course_authors/source/conf.py
|
48
|
1200
|
# -*- coding: utf-8 -*-
#
import sys, os
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
sys.path.append(os.path.abspath('../../../'))
sys.path.append(os.path.abspath('../../'))
#from docs.shared.conf import *
sys.path.insert(0, os.path.abspath('.'))
master_doc = 'index'
# Add any paths that contain templates here, relative to this directory.
#templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path.append('source/_static')
# General information about the project.
project = u'Creating a Peer Assessment'
copyright = u'2014, edX'
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
|
agpl-3.0
|
sirex/Misago
|
misago/users/views/admin/users.py
|
8
|
13970
|
from django.contrib import messages
from django.contrib.auth import get_user_model, update_session_auth_hash
from django.db import transaction
from django.http import JsonResponse
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from misago.admin.auth import start_admin_session
from misago.admin.views import generic
from misago.conf import settings
from misago.core.mail import mail_users
from misago.core.pgutils import batch_update
from misago.forums.models import Forum
from misago.threads.models import Thread
from misago.users.avatars.dynamic import set_avatar as set_dynamic_avatar
from misago.users.forms.admin import (StaffFlagUserFormFactory, NewUserForm,
EditUserForm, SearchUsersForm,
BanUsersForm)
from misago.users.models import ACTIVATION_REQUIRED_NONE, User, Ban
from misago.users.models.ban import BAN_USERNAME, BAN_EMAIL, BAN_IP
from misago.users.signatures import set_user_signature
class UserAdmin(generic.AdminBaseMixin):
root_link = 'misago:admin:users:accounts:index'
templates_dir = 'misago/admin/users'
def get_model(self):
return get_user_model()
def create_form_type(self, request, target):
if request.user.is_superuser:
add_staff_field = request.user.pk != target.id
else:
add_staff_field = False
return StaffFlagUserFormFactory(
self.Form, target, add_staff_field=add_staff_field)
class UsersList(UserAdmin, generic.ListView):
items_per_page = 24
ordering = (
('-id', _("From newest")),
('id', _("From oldest")),
('slug', _("A to z")),
('-slug', _("Z to a")),
('posts', _("Biggest posters")),
('-posts', _("Smallest posters")),
)
selection_label = _('With users: 0')
empty_selection_label = _('Select users')
mass_actions = [
{
'action': 'activate',
'name': _("Activate accounts"),
'icon': 'fa fa-check-square-o',
},
{
'action': 'ban',
'name': _("Ban users"),
'icon': 'fa fa-lock',
},
{
'action': 'delete_accounts',
'name': _("Delete accounts"),
'icon': 'fa fa-times-circle',
'confirmation': _("Are you sure you want "
"to delete selected users?"),
},
{
'action': 'delete_all',
'name': _("Delete all"),
'icon': 'fa fa-eraser',
'confirmation': _("Are you sure you want to delete selected "
"users? This will also delete all content "
"associated with their accounts."),
'is_atomic': False,
}
]
def get_queryset(self):
qs = super(UsersList, self).get_queryset()
return qs.select_related('rank')
def get_search_form(self, request):
return SearchUsersForm
def action_activate(self, request, users):
inactive_users = []
for user in users:
if user.requires_activation:
inactive_users.append(user)
if not inactive_users:
message = _("You have to select inactive users.")
raise generic.MassActionError(message)
else:
activated_users_pks = [u.pk for u in inactive_users]
queryset = User.objects.filter(pk__in=activated_users_pks)
queryset.update(requires_activation=ACTIVATION_REQUIRED_NONE)
mail_subject = _("Your account on %(forum_title)s "
"forums has been activated")
subject_formats = {'forum_title': settings.forum_name}
mail_subject = mail_subject % subject_formats
mail_users(request, inactive_users, mail_subject,
'misago/emails/activation/by_admin')
message = _("Selected users accounts have been activated.")
messages.success(request, message)
def action_ban(self, request, users):
users = users.order_by('slug')
for user in users:
if user.is_superuser:
message = _("%(user)s is super admin and can't be banned.")
                message = message % {'user': user.username}
                raise generic.MassActionError(message)
form = BanUsersForm()
if 'finalize' in request.POST:
form = BanUsersForm(request.POST)
if form.is_valid():
cleaned_data = form.cleaned_data
banned_values = []
ban_kwargs = {
'user_message': cleaned_data.get('user_message'),
'staff_message': cleaned_data.get('staff_message'),
'expires_on': cleaned_data.get('expires_on')
}
for user in users:
for ban in cleaned_data['ban_type']:
if ban == 'usernames':
check_type = BAN_USERNAME
banned_value = user.username.lower()
if ban == 'emails':
check_type = BAN_EMAIL
banned_value = user.email.lower()
if ban == 'domains':
check_type = BAN_EMAIL
banned_value = user.email.lower()
at_pos = banned_value.find('@')
banned_value = '*%s' % banned_value[at_pos:]
if ban == 'ip':
check_type = BAN_IP
banned_value = user.joined_from_ip
if ban in ('ip_first', 'ip_two'):
check_type = BAN_IP
if ':' in user.joined_from_ip:
ip_separator = ':'
if '.' in user.joined_from_ip:
ip_separator = '.'
bits = user.joined_from_ip.split(ip_separator)
if ban == 'ip_first':
formats = (bits[0], ip_separator)
if ban == 'ip_two':
formats = (
bits[0], ip_separator,
bits[1], ip_separator
)
banned_value = '%s*' % (''.join(formats))
if banned_value not in banned_values:
ban_kwargs.update({
'check_type': check_type,
'banned_value': banned_value
})
Ban.objects.create(**ban_kwargs)
banned_values.append(banned_value)
Ban.objects.invalidate_cache()
message = _("Selected users have been banned.")
messages.success(request, message)
return None
return self.render(
request, template='misago/admin/users/ban.html', context={
'users': users,
'form': form,
})
def action_delete_accounts(self, request, users):
for user in users:
if user.is_staff or user.is_superuser:
message = _("%(user)s is admin and can't be deleted.")
                message = message % {'user': user.username}
                raise generic.MassActionError(message)
for user in users:
user.delete()
message = _("Selected users have been deleted.")
messages.success(request, message)
def action_delete_all(self, request, users):
return self.render(
request, template='misago/admin/users/delete.html', context={
'users': users,
})
for user in users:
if user.is_staff or user.is_superuser:
message = _("%(user)s is admin and can't be deleted.")
                message = message % {'user': user.username}
                raise generic.MassActionError(message)
for user in users:
user.delete(delete_content=True)
message = _("Selected users and their content has been deleted.")
messages.success(request, message)
class NewUser(UserAdmin, generic.ModelFormView):
Form = NewUserForm
template = 'new.html'
message_submit = _('New user "%(user)s" has been registered.')
def handle_form(self, form, request, target):
User = get_user_model()
new_user = User.objects.create_user(
form.cleaned_data['username'],
form.cleaned_data['email'],
form.cleaned_data['new_password'],
title=form.cleaned_data['title'],
rank=form.cleaned_data.get('rank'),
joined_from_ip=request.user_ip,
set_default_avatar=True)
if form.cleaned_data.get('staff_level'):
new_user.staff_level = form.cleaned_data['staff_level']
if form.cleaned_data.get('roles'):
new_user.roles.add(*form.cleaned_data['roles'])
new_user.update_acl_key()
new_user.save()
messages.success(
request, self.message_submit % {'user': target.username})
return redirect('misago:admin:users:accounts:edit',
user_id=new_user.id)
class EditUser(UserAdmin, generic.ModelFormView):
Form = EditUserForm
template = 'edit.html'
message_submit = _('User "%(user)s" has been edited.')
def real_dispatch(self, request, target):
target.old_username = target.username
target.old_is_avatar_locked = target.is_avatar_locked
return super(EditUser, self).real_dispatch(request, target)
def handle_form(self, form, request, target):
target.username = target.old_username
if target.username != form.cleaned_data.get('username'):
target.set_username(form.cleaned_data.get('username'),
changed_by=request.user)
if form.cleaned_data.get('new_password'):
target.set_password(form.cleaned_data['new_password'])
if target.pk == request.user.pk:
start_admin_session(request, target)
update_session_auth_hash(request, target)
if form.cleaned_data.get('email'):
target.set_email(form.cleaned_data['email'])
if target.pk == request.user.pk:
start_admin_session(request, target)
if form.cleaned_data.get('is_avatar_locked'):
if not target.old_is_avatar_locked:
set_dynamic_avatar(target)
if 'staff_level' in form.cleaned_data:
target.staff_level = form.cleaned_data['staff_level']
target.rank = form.cleaned_data.get('rank')
if form.cleaned_data.get('roles'):
target.roles.add(*form.cleaned_data['roles'])
set_user_signature(request, target, form.cleaned_data.get('signature'))
target.update_acl_key()
target.save()
messages.success(
request, self.message_submit % {'user': target.username})
class DeletionStep(UserAdmin, generic.ButtonView):
is_atomic = False
def check_permissions(self, request, target):
if not request.is_ajax():
return _("This action can't be accessed directly")
if target.is_staff or target.is_superuser:
message = _("%(user)s is admin and can't be deleted.")
            return message % {'user': target.username}
def execute_step(self, user):
raise NotImplementedError("execute_step method should return dict "
"with number of deleted_count and "
"is_completed keys")
def button_action(self, request, target):
return JsonResponse(self.execute_step(target))
class DeleteThreadsStep(DeletionStep):
def execute_step(self, user):
recount_forums = set()
deleted_threads = 0
is_completed = False
for thread in user.thread_set.order_by('-id')[:50]:
recount_forums.add(thread.forum_id)
with transaction.atomic():
thread.delete()
deleted_threads += 1
if recount_forums:
for forum in Forum.objects.filter(id__in=recount_forums):
forum.synchronize()
forum.save()
else:
is_completed = True
return {
'deleted_count': deleted_threads,
'is_completed': is_completed
}
class DeletePostsStep(DeletionStep):
def execute_step(self, user):
recount_forums = set()
recount_threads = set()
deleted_posts = 0
is_completed = False
for post in user.post_set.order_by('-id')[:50]:
recount_forums.add(post.forum_id)
recount_threads.add(post.thread_id)
with transaction.atomic():
post.delete()
deleted_posts += 1
if recount_forums:
changed_threads_qs = Thread.objects.filter(id__in=recount_threads)
for thread in batch_update(changed_threads_qs, 50):
thread.synchronize()
thread.save()
for forum in Forum.objects.filter(id__in=recount_forums):
forum.synchronize()
forum.save()
else:
is_completed = True
return {
'deleted_count': deleted_posts,
'is_completed': is_completed
}
class DeleteAccountStep(DeletionStep):
def execute_step(self, user):
user.delete(delete_content=True)
return {'is_completed': True}
|
gpl-2.0
|
andrewschaaf/pj-closure
|
js/goog/array.py
|
1
|
3829
|
#<pre>Copyright 2006 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.</pre>
# TODO the rest
from goog import bind, isString
ARRAY_PROTOTYPE_ = Array.prototype
def concat(var_args):
return ARRAY_PROTOTYPE_.concat.apply(ARRAY_PROTOTYPE_, arguments)
def forEach(arr, f, opt_obj):
#DIFF: goog runs this if-statement at load-time
if ARRAY_PROTOTYPE_.forEach:
# TODO assert(arr.length != None)
ARRAY_PROTOTYPE_.forEach.call(arr, f, opt_obj)
else:
arr2 = (arr.split('') if isString(arr) else arr)
for i in range(len(arr)):
if i in arr2:
f.call(opt_obj, arr2[i], i, arr)
def map(arr, f, opt_obj):
#DIFF: goog runs this if-statement at load-time
if ARRAY_PROTOTYPE_.map:
#TODO goog.asserts.assert(arr.length != null);
return ARRAY_PROTOTYPE_.map.call(arr, f, opt_obj)
else:
l = len(arr)
res = Array(l)
arr2 = (arr.split('') if isString(arr) else arr)
for i in range(l):
if i in arr2:
res[i] = f.call(opt_obj, arr2[i], i, arr)
return res
def reduce(arr, f, val, opt_obj):
if arr.reduce:
if opt_obj:
return arr.reduce(bind(f, opt_obj), val)
else:
return arr.reduce(f, val)
rval = val
    def reducer(val, index):
        rval = f.call(opt_obj, rval, val, index, arr)
    forEach(arr, reducer)
return rval
def slice(arr, start, opt_end):
#goog.asserts.assert(arr.length != null);
# passing 1 arg to slice is not the same as passing 2 where the second is
# null or undefined (in that case the second argument is treated as 0).
# we could use slice on the arguments object and then use apply instead of
# testing the length
if arguments.length <= 2:
return ARRAY_PROTOTYPE_.slice.call(arr, start)
else:
return ARRAY_PROTOTYPE_.slice.call(arr, start, opt_end)
def splice(arr, index, howMany, var_args):
#goog.asserts.assert(arr.length != null)
return ARRAY_PROTOTYPE_.splice.apply(
arr, slice(arguments, 1))
def insertAt(arr, obj, opt_i):
splice(arr, opt_i, 0, obj)
def filter(arr, f, opt_obj):
if ARRAY_PROTOTYPE_.filter:
#goog.asserts.assert(arr.length != null);
return ARRAY_PROTOTYPE_.filter.call(arr, f, opt_obj)
else:
res = []
resLength = 0
arr2 = arr.split('') if isString(arr) else arr
for i in range(len(arr)):
if i in arr2:
val = arr2[i]
if f.call(opt_obj, val, i, arr):
# Is this better than .push?
                    res[resLength] = val
                    resLength += 1
return res
def indexOf(arr, obj, opt_fromIndex):
if ARRAY_PROTOTYPE_.indexOf:
#goog.asserts.assert(arr.length != null);
return ARRAY_PROTOTYPE_.indexOf.call(arr, obj, opt_fromIndex)
else:
fromIndex = (
0
if opt_fromIndex == None else
(
Math.max(0, arr.length + opt_fromIndex)
if opt_fromIndex < 0 else
opt_fromIndex))
if isString(arr):
# Array.prototype.indexOf uses === so only strings should be found.
if not isString(obj) or len(obj) != 1:
return -1
return arr.indexOf(obj, fromIndex)
for i in range(fromIndex, len(arr)):
if (i in arr) and (arr[i] == obj):
return i
return -1
|
apache-2.0
|
ihsanudin/odoo
|
addons/account/ir_sequence.py
|
336
|
2454
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp.osv import fields, osv
class ir_sequence_fiscalyear(osv.osv):
_name = 'account.sequence.fiscalyear'
_rec_name = "sequence_main_id"
_columns = {
"sequence_id": fields.many2one("ir.sequence", 'Sequence', required=True,
ondelete='cascade'),
"sequence_main_id": fields.many2one("ir.sequence", 'Main Sequence',
required=True, ondelete='cascade'),
"fiscalyear_id": fields.many2one('account.fiscalyear', 'Fiscal Year',
required=True, ondelete='cascade')
}
_sql_constraints = [
('main_id', 'CHECK (sequence_main_id != sequence_id)',
'Main Sequence must be different from current !'),
]
class ir_sequence(osv.osv):
_inherit = 'ir.sequence'
_columns = {
'fiscal_ids': fields.one2many('account.sequence.fiscalyear',
'sequence_main_id', 'Sequences', copy=True)
}
@api.cr_uid_ids_context
def _next(self, cr, uid, seq_ids, context=None):
if context is None:
context = {}
for seq in self.browse(cr, uid, seq_ids, context):
for line in seq.fiscal_ids:
if line.fiscalyear_id.id == context.get('fiscalyear_id'):
return super(ir_sequence, self)._next(cr, uid, [line.sequence_id.id], context)
return super(ir_sequence, self)._next(cr, uid, seq_ids, context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ilastik/ilastik-0.5
|
ilastik/modules/unsupervised_decomposition/core/unsupervisedMgr.py
|
1
|
7290
|
from ilastik.core.baseModuleMgr import BaseModuleDataItemMgr, BaseModuleMgr
import numpy
import traceback, sys
from ilastik.core import jobMachine
from PyQt4 import QtCore
import os
import algorithms
from ilastik.core.volume import DataAccessor
from ilastik.core.overlayMgr import OverlayItem
""" Import all algorithm plugins"""
pathext = os.path.dirname(__file__)
try:
for f in os.listdir(os.path.abspath(pathext + '/algorithms')):
module_name, ext = os.path.splitext(f) # Handles no-extension files, etc.
        if ext == '.py': # Important, ignore .pyc/other files.
module = __import__('ilastik.modules.unsupervised_decomposition.core.algorithms.' + module_name)
except Exception, e:
print e
traceback.print_exc()
pass
for i, c in enumerate(algorithms.unsupervisedDecompositionBase.UnsupervisedDecompositionBase.__subclasses__()):
print "Loaded unsupervised decomposition algorithm:", c.name
#*******************************************************************************
# U n s u p e r v i s e d I t e m M o d u l e M g r *
#*******************************************************************************
class UnsupervisedItemModuleMgr(BaseModuleDataItemMgr):
name = "Unsupervised_Decomposition"
def __init__(self, dataItemImage):
BaseModuleDataItemMgr.__init__(self, dataItemImage)
self.dataItemImage = dataItemImage
self.overlays = []
self.inputData = None
def setInputData(self, data):
self.inputData = data
#*******************************************************************************
# U n s u p e r v i s e d D e c o m p o s i t i o n M o d u l e M g r *
#*******************************************************************************
class UnsupervisedDecompositionModuleMgr(BaseModuleMgr):
name = "Unsupervised_Decomposition"
def __init__(self, dataMgr):
BaseModuleMgr.__init__(self, dataMgr)
self.dataMgr = dataMgr
self.unsupervisedMethod = algorithms.unsupervisedDecompositionPCA.UnsupervisedDecompositionPCA
if self.dataMgr.module["Unsupervised_Decomposition"] is None:
self.dataMgr.module["Unsupervised_Decomposition"] = self
def computeResults(self, inputOverlays):
self.decompThread = UnsupervisedDecompositionThread(self.dataMgr, inputOverlays, self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod)
self.decompThread.start()
return self.decompThread
def finalizeResults(self):
activeItem = self.dataMgr[self.dataMgr._activeImageNumber]
activeItem._dataVol.unsupervised = self.decompThread.result
#create overlays for unsupervised decomposition:
if self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr["Unsupervised/" + self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod.shortname] is None:
data = self.decompThread.result[:,:,:,:,:]
myColor = OverlayItem.qrgb(0, 0, 0)
for o in range(0, data.shape[4]):
data2 = OverlayItem.normalizeForDisplay(data[:,:,:,:,o:(o+1)])
# for some strange reason we have to invert the data before displaying it
ov = OverlayItem(255 - data2, color = myColor, alpha = 1.0, colorTable = None, autoAdd = True, autoVisible = True)
self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr["Unsupervised/" + self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod.shortname + " component %d" % (o+1)] = ov
# remove outdated overlays (like PCA components 5-10 if a decomposition with 4 components is done)
numOverlaysBefore = len(self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr.keys())
finished = False
            while not finished:
o = o + 1
# assumes consecutive numbering
key = "Unsupervised/" + self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod.shortname + " component %d" % (o+1)
self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr.remove(key)
numOverlaysAfter = len(self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr.keys())
                if numOverlaysBefore == numOverlaysAfter:
finished = True
else:
numOverlaysBefore = numOverlaysAfter
else:
self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr["Unsupervised/" + self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod.shortname]._data = DataAccessor(self.decompThread.result)
#*******************************************************************************
# U n s u p e r v i s e d D e c o m p o s i t i o n T h r e a d *
#*******************************************************************************
class UnsupervisedDecompositionThread(QtCore.QThread):
def __init__(self, dataMgr, overlays, unsupervisedMethod = algorithms.unsupervisedDecompositionPCA.UnsupervisedDecompositionPCA, unsupervisedMethodOptions = None):
QtCore.QThread.__init__(self, None)
self.reshapeToFeatures(overlays)
self.dataMgr = dataMgr
self.count = 0
self.numberOfJobs = 1
self.stopped = False
self.unsupervisedMethod = unsupervisedMethod
self.unsupervisedMethodOptions = unsupervisedMethodOptions
self.jobMachine = jobMachine.JobMachine()
self.result = []
def reshapeToFeatures(self, overlays):
# transform to feature matrix
# ...first find out how many columns and rows the feature matrix will have
numFeatures = 0
numPoints = overlays[0].shape[0] * overlays[0].shape[1] * overlays[0].shape[2] * overlays[0].shape[3]
for overlay in overlays:
numFeatures += overlay.shape[4]
# ... then copy the data
features = numpy.zeros((numPoints, numFeatures), dtype=numpy.float)
currFeature = 0
for overlay in overlays:
currData = overlay[:,:,:,:,:]
features[:, currFeature:currFeature+overlay.shape[4]] = currData.reshape(numPoints, (currData.shape[4]))
currFeature += currData.shape[4]
self.features = features
self.origshape = overlays[0].shape
def decompose(self):
# V contains the component spectra/scores, W contains the projected data
unsupervisedMethod = self.unsupervisedMethod()
V, W = unsupervisedMethod.decompose(self.features)
self.result = (W.T).reshape((self.origshape[0], self.origshape[1], self.origshape[2], self.origshape[3], W.shape[0]))
def run(self):
self.dataMgr.featureLock.acquire()
try:
jobs = []
job = jobMachine.IlastikJob(UnsupervisedDecompositionThread.decompose, [self])
jobs.append(job)
self.jobMachine.process(jobs)
self.dataMgr.featureLock.release()
except Exception, e:
print "######### Exception in UnsupervisedThread ##########"
print e
traceback.print_exc(file=sys.stdout)
self.dataMgr.featureLock.release()
|
bsd-2-clause
|
vperron/sentry
|
src/sentry/migrations/0130_auto__del_field_project_owner.py
|
36
|
26056
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Project.owner'
db.delete_column(u'sentry_project', 'owner_id')
def backwards(self, orm):
# Adding field 'Project.owner'
db.add_column(u'sentry_project', 'owner',
self.gf('sentry.db.models.fields.FlexibleForeignKey')(related_name='sentry_owned_project_set', null=True, to=orm['sentry.User']),
keep_default=False)
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
bsd-3-clause
|
shekkbuilder/rowhammer-test
|
dram_physaddr_mapping/analyse_addrs.py
|
6
|
3529
|
# Copyright 2015, Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script examines the physical aggressor/victim addresses
# outputted by rowhammer_test_ext, and it checks whether these
# addresses match a model of how physical addresses are mapped to DRAM
# row/bank/column numbers.
#
# This script explains the resulting addresses from a laptop with a
# Sandy Bridge CPU which has 2 * 4GB SO-DIMMs.
#
# For this laptop, decode-dimms reports:
# Size 4096 MB
# Banks x Rows x Columns x Bits 8 x 15 x 10 x 64
# Ranks 2
def GetResultAddrs(log_filename):
for line in open(log_filename):
if line.startswith('RESULT '):
parts = line[len('RESULT '):].strip('\n').split(',')
if parts[0] == 'PAIR':
yield [int(part, 16) for part in parts[1:]]
def FormatBits(val, bits):
got = []
for bit in xrange(bits - 1, -1, -1):
got.append(str((val >> bit) & 1))
return ''.join(got)
def ExtractBits(val, offset_in_bits, size_in_bits):
return [(val >> offset_in_bits) & ((1 << size_in_bits) - 1),
size_in_bits]
def Convert(phys):
fields = [
('col_lo', ExtractBits(phys, 0, 6)),
('channel', ExtractBits(phys, 6, 1)),
('col_hi', ExtractBits(phys, 7, 7)),
('bank', ExtractBits(phys, 14, 3)),
('rank', ExtractBits(phys, 17, 1)),
('row', ExtractBits(phys, 18, 14)),
]
d = dict(fields)
# The bottom 3 bits of the row number are XOR'd into the bank number.
d['bank'][0] ^= d['row'][0] & 7
return fields
def Format(fields):
return ' '.join('%s=%s' % (name, FormatBits(val, size))
for name, (val, size) in reversed(fields))
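# Illustrative example (not part of the original script): with the Sandy
# Bridge mapping hard-coded in Convert() above, an address with only physical
# bit 18 set decodes to row 1, and the row-to-bank XOR then makes the bank
# non-zero as well:
#
#   Format(Convert(1 << 18))
#   -> 'row=00000000000001 rank=0 bank=001 col_hi=0000000 channel=0 col_lo=000000'
#
# i.e. the bank reads back as 001 even though physical bits 14-16 are zero,
# purely because the low row bits are XOR'd into the bank number.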
def Main():
count = 0
count_fits = 0
for addrs in GetResultAddrs('bitflip_addrs'):
aggs = addrs[0:2]
victim = addrs[2]
# Sort aggressor addresses by closeness to victim. We assume
# the closest one is the one that causes the victim's bit flip.
aggs.sort(key=lambda agg: abs(victim - agg))
def FormatAddr(name, val):
fmt = Format(Convert(val))
print '\taddr=0x%09x -> %s (%s)' % (val, fmt, name)
print 'result:'
print '\tdiff=%x' % (victim - aggs[0])
FormatAddr('victim', victim)
FormatAddr('aggressor1', aggs[0])
FormatAddr('aggressor2', aggs[1])
# Test hypotheses.
agg1_dict = dict(Convert(aggs[0]))
agg2_dict = dict(Convert(aggs[1]))
victim_dict = dict(Convert(victim))
row_diff = abs(agg1_dict['row'][0] - victim_dict['row'][0])
fits = (agg1_dict['bank'] == victim_dict['bank'] and
agg2_dict['bank'] == victim_dict['bank'] and
row_diff in (1, -1))
print '\t' + 'fits=%s' % fits
if agg1_dict['row'][0] & 2 != victim_dict['row'][0] & 2:
print '\t' + 'unusual?'
count += 1
if fits:
count_fits += 1
print "\nSummary: of %i results, %i fit and %i don't fit" % (
count, count_fits, count - count_fits)
if __name__ == '__main__':
Main()
|
apache-2.0
|
mmcdermo/helpinghand
|
server/venv/lib/python2.7/site-packages/django/contrib/admin/bin/compress.py
|
193
|
1958
|
#!/usr/bin/env python
import os
import optparse
import subprocess
import sys
js_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'static', 'admin', 'js')
def main():
usage = "usage: %prog [file1..fileN]"
description = """With no file paths given this script will automatically
compress all jQuery-based files of the admin app. Requires the Google Closure
Compiler library and Java version 6 or later."""
parser = optparse.OptionParser(usage, description=description)
parser.add_option("-c", dest="compiler", default="~/bin/compiler.jar",
help="path to Closure Compiler jar file")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose")
(options, args) = parser.parse_args()
compiler = os.path.expanduser(options.compiler)
if not os.path.exists(compiler):
sys.exit("Google Closure compiler jar file %s not found. Please use the -c option to specify the path." % compiler)
if not args:
if options.verbose:
sys.stdout.write("No filenames given; defaulting to admin scripts\n")
args = [os.path.join(js_path, f) for f in [
"actions.js", "collapse.js", "inlines.js", "prepopulate.js"]]
for arg in args:
if not arg.endswith(".js"):
arg = arg + ".js"
to_compress = os.path.expanduser(arg)
if os.path.exists(to_compress):
to_compress_min = "%s.min.js" % "".join(arg.rsplit(".js"))
cmd = "java -jar %s --js %s --js_output_file %s" % (compiler, to_compress, to_compress_min)
if options.verbose:
sys.stdout.write("Running: %s\n" % cmd)
subprocess.call(cmd.split())
else:
sys.stdout.write("File %s not found. Sure it exists?\n" % to_compress)
if __name__ == '__main__':
main()
|
mit
|
Theer108/invenio
|
invenio/utils/remote_debugger/config.py
|
6
|
6075
|
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# Global switch - if 0, remote_debugger is not loaded at all and no remote
# debugging is available. If you run the debugger for the Invenio site, make
# sure your config contains the same directive.
# Remote debugging is enabled via CFG_DEVEL_TOOLS (note: invenio.wsgi will not
# import the remote debugger if it is not set).
CFG_REMOTE_DEBUGGER_ENABLED = 0 # by default, we don't want to enable debugger
CFG_REMOTE_DEBUGGER_TYPE = ''
CFG_REMOTE_DEBUGGER_NAME = ''
try:
from invenio.config import CFG_DEVEL_TOOLS
if 'winpdb-local' in CFG_DEVEL_TOOLS:
CFG_REMOTE_DEBUGGER_TYPE = 'winpdb'
CFG_REMOTE_DEBUGGER_NAME = 'winpdb-local'
CFG_REMOTE_DEBUGGER_ENABLED = 1
elif 'winpdb-remote' in CFG_DEVEL_TOOLS:
CFG_REMOTE_DEBUGGER_TYPE = 'winpdb'
CFG_REMOTE_DEBUGGER_NAME = 'winpdb-remote'
CFG_REMOTE_DEBUGGER_ENABLED = 1
elif 'pydev' in CFG_DEVEL_TOOLS:
CFG_REMOTE_DEBUGGER_TYPE = 'pydev'
CFG_REMOTE_DEBUGGER_NAME = 'pydev'
CFG_REMOTE_DEBUGGER_ENABLED = 1
except ImportError:
pass
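# Illustrative example (an assumption, not from the original file): the checks
# above only use membership tests, so CFG_DEVEL_TOOLS may be a string or a
# list of tool names. For instance, a config that sets
#
#   CFG_DEVEL_TOOLS = ['pydev']
#
# would make the block above select the 'pydev' debugger type and set
# CFG_REMOTE_DEBUGGER_ENABLED = 1; with no matching entry, remote debugging
# stays disabled.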
# Start debugger on WSGI application loading (i.e. loading of invenio.wsgi).
# The default is to start the debugger only on a per-request basis, not on
# application loading.
try:
from invenio.config import CFG_REMOTE_DEBUGGER_WSGI_LOADING
except ImportError:
CFG_REMOTE_DEBUGGER_WSGI_LOADING = False
# Modules that should be imported during initialization
# the structure is: 'debugger name': { 'module_path' : 'name', ... }, ...
#
# So, 'pydev.pydevd': 'pydev' means: "import pydev.pydevd as pydev"
# WARNING! The name of the module is quite important, as the functions
# are calling them without checking if they were imported or not. That is
# not a bug, that is a feature! You shall know what you are doing when
# changing default names.
#
# You can uncomment some lines if you know you are not going to use these
# modules for debugging.
CFG_REMOTE_DEBUGGER_IMPORT = {
'': {}, # no debugger specified
'winpdb': {
'rpdb2': 'rpdb2', # windpb debugging
},
'pydev': {
'pydev.pydevd': 'pydevd', # eclipse/pydev
'pydev.pydevd_file_utils': 'putils', # eclipse/pydev
},
}
# -----------------------------------------------------------------------------
# configuration options for winpdb debugging
try:
from invenio.config import CFG_REMOTE_DEBUGGER_WINPDB_PASSWORD
except ImportError:
CFG_REMOTE_DEBUGGER_WINPDB_PASSWORD = 'Change1Me'
# -----------------------------------------------------------------------------
# configuration options for Eclipse/Pydev
# Remote debugging with Eclipse Pydev - Apache does not need to be configured
# as a single-worker. But you must do the following:
#
# 1. find pydevd.py inside your Eclipse/Pydev installation
# (on my computer it is in: c:\dev\eclipse342\plugins\org.python.pydev.debug_1.5.0.1251989166\pysrc)
# 2. copy the pysrc folder into the remote machine (eg. inside: /usr/lib/python2.5/site-packages
# 3. rename the pysrc into pydev
# 4. put __init__.py inside pydev (if you don't do that, pydev is not recognized as a package)
#
# Then, in your Eclipse, change perspective to the debug mode, start Pydev remote
# debug server, set some breakpoint and reload a webpage with url param debug=3
# eg. http://invenio-vm/?debug=3
#
# I repeat, you must be in the Debug perspective to catch the breakpoints!
#
# This is where your (local) Eclipse is listening for communication, this IP address
# can be anything that Invenio can access (ie. the machine that is running Invenio
# must have access to the IP)
CFG_REMOTE_DEBUGGER_PYDEV_REMOTE_IP = '127.0.0.1' #'192.168.0.1'
CFG_REMOTE_DEBUGGER_PYDEV_REMOTE_PORT = 5678
# When you hit a breakpoint, Eclipse needs to know which file to display. For instance:
# you are developing inside a virtual machine (Linux) where Invenio code lives at
# /opt/cds-invenio/lib/python/invenio. Your environment is Windows (yes, why not? ;-))
# and you have access to the invenio folder through samba as:
#
# \\invenio-vm\root\opt\cds-invenio\lib\python\invenio
#
# (or perhaps you don't have samba but you have a local copy of the codebase somewhere else)
#
# You must set a mapping: local Eclipse path <--> remote Linux path
# in this way:
#
# CFG_REMOTE_DEBUGGER_PYDEV_PATHS = [('\\\\invenio-vm\\root\\opt\\cds-invenio\\lib\\python\\invenio',
# '/opt/cds-invenio/lib/python/invenio')]
#
# what I (rca) do is to map a whole drive through samba:
# CFG_REMOTE_DEBUGGER_PYDEV_PATHS = [('\\\\invenio-vm\\root\\',
# '/')]
# and I also recommend using (back)slashes at the end, ie. /opt/ and not /opt
CFG_REMOTE_DEBUGGER_PYDEV_PATHS = [
#('/opt/cds-invenio/lib/python/invenio/', '/usr/local/lib/python2.5/site-packages/invenio/'),
#('U:\\opt\\', '/opt/'),
#('\\\\Invenio-ubu\\root\\', '/'),
#('U:\\usr\\', '/usr/'),
#('\\\\invenio-ubu\\root\\usr\\', '/usr/'),
#('\\\\invenio-ubu\\root\\opt\\', '/opt/')
]
# Shall we monitor changes and restart daemon threads when you change source code?
# Put here a list of glob patterns (files) to monitor. The paths are relative to the
# Invenio root dir.
CFG_REMOTE_DEBUGGER_WSGI_RELOAD = ['lib/python/invenio/*.py']
# For debugging of a debugger ;)
CFG_PYDEV_DEBUG = False
|
gpl-2.0
|
playm2mboy/edx-platform
|
common/lib/calc/calc/calc.py
|
176
|
13572
|
"""
Parser and evaluator for FormulaResponse and NumericalResponse
Uses pyparsing to parse. Main function as of now is evaluator().
"""
import math
import operator
import numbers
import numpy
import scipy.constants
import functions
from pyparsing import (
Word, Literal, CaselessLiteral, ZeroOrMore, MatchFirst, Optional, Forward,
Group, ParseResults, stringEnd, Suppress, Combine, alphas, nums, alphanums
)
DEFAULT_FUNCTIONS = {
'sin': numpy.sin,
'cos': numpy.cos,
'tan': numpy.tan,
'sec': functions.sec,
'csc': functions.csc,
'cot': functions.cot,
'sqrt': numpy.sqrt,
'log10': numpy.log10,
'log2': numpy.log2,
'ln': numpy.log,
'exp': numpy.exp,
'arccos': numpy.arccos,
'arcsin': numpy.arcsin,
'arctan': numpy.arctan,
'arcsec': functions.arcsec,
'arccsc': functions.arccsc,
'arccot': functions.arccot,
'abs': numpy.abs,
'fact': math.factorial,
'factorial': math.factorial,
'sinh': numpy.sinh,
'cosh': numpy.cosh,
'tanh': numpy.tanh,
'sech': functions.sech,
'csch': functions.csch,
'coth': functions.coth,
'arcsinh': numpy.arcsinh,
'arccosh': numpy.arccosh,
'arctanh': numpy.arctanh,
'arcsech': functions.arcsech,
'arccsch': functions.arccsch,
'arccoth': functions.arccoth
}
DEFAULT_VARIABLES = {
'i': numpy.complex(0, 1),
'j': numpy.complex(0, 1),
'e': numpy.e,
'pi': numpy.pi,
'k': scipy.constants.k, # Boltzmann: 1.3806488e-23 (Joules/Kelvin)
'c': scipy.constants.c, # Light Speed: 2.998e8 (m/s)
'T': 298.15, # Typical room temperature: 298.15 (Kelvin), same as 25C/77F
'q': scipy.constants.e # Fund. Charge: 1.602176565e-19 (Coulombs)
}
# We eliminated the following extreme suffixes:
# P (1e15), E (1e18), Z (1e21), Y (1e24),
# f (1e-15), a (1e-18), z (1e-21), y (1e-24)
# since they're rarely used, and potentially confusing.
# They may also conflict with variables if we ever allow e.g.
# 5R instead of 5*R
SUFFIXES = {
'%': 0.01, 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
'c': 1e-2, 'm': 1e-3, 'u': 1e-6, 'n': 1e-9, 'p': 1e-12
}
class UndefinedVariable(Exception):
"""
Indicate when a student inputs a variable which was not expected.
"""
pass
def lower_dict(input_dict):
"""
Convert all keys in a dictionary to lowercase; keep their original values.
Keep in mind that it is possible (but not useful?) to define different
variables that have the same lowercase representation. It would be hard to
tell which is used in the final dict and which isn't.
"""
return {k.lower(): v for k, v in input_dict.iteritems()}
# The following few functions define evaluation actions, which are run on lists
# of results from each parse component. They convert the strings and (previously
# calculated) numbers into the number that component represents.
def super_float(text):
"""
Like float, but with SI extensions. 1k goes to 1000.
"""
if text[-1] in SUFFIXES:
return float(text[:-1]) * SUFFIXES[text[-1]]
else:
return float(text)
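# A few illustrative values (these examples are not in the original source and
# assume the SUFFIXES table above):
#   super_float("5.5u") -> 5.5e-06
#   super_float("200%") -> 2.0
#   super_float("3.2")  -> 3.2  (no suffix, plain float)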
def eval_number(parse_result):
"""
Create a float out of its string parts.
e.g. [ '7.13', 'e', '3' ] -> 7130
Calls super_float above.
"""
return super_float("".join(parse_result))
def eval_atom(parse_result):
"""
Return the value wrapped by the atom.
In the case of parenthesis, ignore them.
"""
# Find first number in the list
result = next(k for k in parse_result if isinstance(k, numbers.Number))
return result
def eval_power(parse_result):
"""
Take a list of numbers and exponentiate them, right to left.
e.g. [ 2, 3, 2 ] -> 2^3^2 = 2^(3^2) -> 512
(not to be interpreted (2^3)^2 = 64)
"""
# `reduce` will go from left to right; reverse the list.
parse_result = reversed(
[k for k in parse_result
if isinstance(k, numbers.Number)] # Ignore the '^' marks.
)
# Having reversed it, raise `b` to the power of `a`.
power = reduce(lambda a, b: b ** a, parse_result)
return power
def eval_parallel(parse_result):
"""
Compute numbers according to the parallel resistors operator.
BTW it is commutative. Its formula is given by
out = 1 / (1/in1 + 1/in2 + ...)
e.g. [ 1, 2 ] -> 2/3
Return NaN if there is a zero among the inputs.
"""
if len(parse_result) == 1:
return parse_result[0]
if 0 in parse_result:
return float('nan')
reciprocals = [1. / e for e in parse_result
if isinstance(e, numbers.Number)]
return 1. / sum(reciprocals)
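# Illustrative checks (added for this write-up; not part of the original module,
# relying on the `math` import above): two values in parallel, and the NaN guard
# for a zero input.
assert abs(eval_parallel([1, '||', 2]) - 2.0 / 3) < 1e-12
assert math.isnan(eval_parallel([0, '||', 5]))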
def eval_sum(parse_result):
"""
Add the inputs, keeping in mind their sign.
[ 1, '+', 2, '-', 3 ] -> 0
Allow a leading + or -.
"""
total = 0.0
current_op = operator.add
for token in parse_result:
if token == '+':
current_op = operator.add
elif token == '-':
current_op = operator.sub
else:
total = current_op(total, token)
return total
def eval_product(parse_result):
"""
Multiply the inputs.
[ 1, '*', 2, '/', 3 ] -> 0.666...
"""
prod = 1.0
current_op = operator.mul
for token in parse_result:
if token == '*':
current_op = operator.mul
elif token == '/':
current_op = operator.truediv
else:
prod = current_op(prod, token)
return prod
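# Illustrative checks (added for this write-up; not part of the original module):
# eval_sum honours a leading sign, eval_product alternates '*' and '/'.
assert eval_sum(['-', 5, '+', 4, '-', 3]) == -4.0
assert abs(eval_product([1, '*', 2, '/', 3]) - 2.0 / 3) < 1e-12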
def add_defaults(variables, functions, case_sensitive):
"""
Create dictionaries with both the default and user-defined variables.
"""
all_variables = dict(DEFAULT_VARIABLES)
all_functions = dict(DEFAULT_FUNCTIONS)
all_variables.update(variables)
all_functions.update(functions)
if not case_sensitive:
all_variables = lower_dict(all_variables)
all_functions = lower_dict(all_functions)
return (all_variables, all_functions)
def evaluator(variables, functions, math_expr, case_sensitive=False):
"""
Evaluate an expression; that is, take a string of math and return a float.
-Variables are passed as a dictionary from string to value. They must be
python numbers.
-Unary functions are passed as a dictionary from string to function.
"""
# No need to go further.
if math_expr.strip() == "":
return float('nan')
# Parse the tree.
math_interpreter = ParseAugmenter(math_expr, case_sensitive)
math_interpreter.parse_algebra()
# Get our variables together.
all_variables, all_functions = add_defaults(variables, functions, case_sensitive)
# ...and check them
math_interpreter.check_variables(all_variables, all_functions)
# Create a recursion to evaluate the tree.
if case_sensitive:
casify = lambda x: x
else:
casify = lambda x: x.lower() # Lowercase for case insens.
evaluate_actions = {
'number': eval_number,
'variable': lambda x: all_variables[casify(x[0])],
'function': lambda x: all_functions[casify(x[0])](x[1]),
'atom': eval_atom,
'power': eval_power,
'parallel': eval_parallel,
'product': eval_product,
'sum': eval_sum
}
return math_interpreter.reduce_tree(evaluate_actions)
class ParseAugmenter(object):
"""
Holds the data for a particular parse.
Retains the `math_expr` and `case_sensitive` so they needn't be passed
around method to method.
Eventually holds the parse tree and sets of variables as well.
"""
def __init__(self, math_expr, case_sensitive=False):
"""
Create the ParseAugmenter for a given math expression string.
Do the parsing later, when called like `OBJ.parse_algebra()`.
"""
self.case_sensitive = case_sensitive
self.math_expr = math_expr
self.tree = None
self.variables_used = set()
self.functions_used = set()
def vpa(tokens):
"""
When a variable is recognized, store it in `variables_used`.
"""
varname = tokens[0][0]
self.variables_used.add(varname)
def fpa(tokens):
"""
When a function is recognized, store it in `functions_used`.
"""
varname = tokens[0][0]
self.functions_used.add(varname)
self.variable_parse_action = vpa
self.function_parse_action = fpa
def parse_algebra(self):
"""
Parse an algebraic expression into a tree.
Store a `pyparsing.ParseResult` in `self.tree` with proper groupings to
reflect parenthesis and order of operations. Leave all operators in the
tree and do not parse any strings of numbers into their float versions.
Adding the groups and result names makes the `repr()` of the result
really gross. For debugging, use something like
print OBJ.tree.asXML()
"""
# 0.33 or 7 or .34 or 16.
number_part = Word(nums)
inner_number = (number_part + Optional("." + Optional(number_part))) | ("." + number_part)
# pyparsing allows spaces between tokens--`Combine` prevents that.
inner_number = Combine(inner_number)
# SI suffixes and percent.
number_suffix = MatchFirst(Literal(k) for k in SUFFIXES.keys())
# 0.33k or 17
plus_minus = Literal('+') | Literal('-')
number = Group(
Optional(plus_minus) +
inner_number +
Optional(CaselessLiteral("E") + Optional(plus_minus) + number_part) +
Optional(number_suffix)
)
number = number("number")
# Predefine recursive variables.
expr = Forward()
# Handle variables passed in. They must start with letters/underscores
# and may contain numbers afterward.
inner_varname = Word(alphas + "_", alphanums + "_")
varname = Group(inner_varname)("variable")
varname.setParseAction(self.variable_parse_action)
# Same thing for functions.
function = Group(inner_varname + Suppress("(") + expr + Suppress(")"))("function")
function.setParseAction(self.function_parse_action)
atom = number | function | varname | "(" + expr + ")"
atom = Group(atom)("atom")
# Do the following in the correct order to preserve order of operation.
pow_term = atom + ZeroOrMore("^" + atom)
pow_term = Group(pow_term)("power")
par_term = pow_term + ZeroOrMore('||' + pow_term) # 5k || 4k
par_term = Group(par_term)("parallel")
prod_term = par_term + ZeroOrMore((Literal('*') | Literal('/')) + par_term) # 7 * 5 / 4
prod_term = Group(prod_term)("product")
sum_term = Optional(plus_minus) + prod_term + ZeroOrMore(plus_minus + prod_term) # -5 + 4 - 3
sum_term = Group(sum_term)("sum")
# Finish the recursion.
expr << sum_term # pylint: disable=pointless-statement
self.tree = (expr + stringEnd).parseString(self.math_expr)[0]
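# Added note (not part of the original module): because the grammar nests
# power -> parallel -> product -> sum, '||' binds tighter than '*' and '/',
# which in turn bind tighter than '+' and '-'. For example, "5k || 4k + 3"
# groups as ((5k || 4k) + 3): a sum whose first term is the parallel
# combination of 5000 and 4000.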
def reduce_tree(self, handle_actions, terminal_converter=None):
"""
Call `handle_actions` recursively on `self.tree` and return result.
`handle_actions` is a dictionary of node names (e.g. 'product', 'sum',
etc.) to functions. These functions are of the following form:
-input: a list of processed child nodes. If it includes any terminal
nodes in the list, they will be given as their processed forms also.
-output: whatever to be passed to the level higher, and what to
return for the final node.
`terminal_converter` is a function that takes in a token and returns a
processed form. The default of `None` just leaves them as strings.
"""
def handle_node(node):
"""
Return the result representing the node, using recursion.
Call the appropriate `handle_action` for this node. As its inputs,
feed it the output of `handle_node` for each child node.
"""
if not isinstance(node, ParseResults):
# Then treat it as a terminal node.
if terminal_converter is None:
return node
else:
return terminal_converter(node)
node_name = node.getName()
if node_name not in handle_actions: # pragma: no cover
raise Exception(u"Unknown branch name '{}'".format(node_name))
action = handle_actions[node_name]
handled_kids = [handle_node(k) for k in node]
return action(handled_kids)
# Find the value of the entire tree.
return handle_node(self.tree)
def check_variables(self, valid_variables, valid_functions):
"""
Confirm that all the variables used in the tree are valid/defined.
Otherwise, raise an UndefinedVariable containing all bad variables.
"""
if self.case_sensitive:
casify = lambda x: x
else:
casify = lambda x: x.lower() # Lowercase for case insens.
# Test if casify(X) is valid, but return the actual bad input (i.e. X)
bad_vars = set(var for var in self.variables_used
if casify(var) not in valid_variables)
bad_vars.update(func for func in self.functions_used
if casify(func) not in valid_functions)
if bad_vars:
raise UndefinedVariable(' '.join(sorted(bad_vars)))
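# --- Illustrative usage sketch (added for this write-up; not part of the
# --- original module). A minimal exercise of the public `evaluator` entry
# --- point, guarded so that importing the module has no side effects.
if __name__ == '__main__':
    assert evaluator({}, {}, "2^3 + 4") == 12.0
    assert abs(evaluator({}, {}, "5k || 10k") - 10000.0 / 3) < 1e-6
    assert evaluator({'R': 20}, {}, "R * 2", case_sensitive=True) == 40.0
    print "calc sanity checks passed"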
|
agpl-3.0
|
mcanthony/nupic
|
examples/opf/experiments/anomaly/temporal/noisy_saw/description.py
|
32
|
14305
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os

from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step. 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { 'f': { 'clipInput': True,
'fieldname': u'f',
'maxval': 520,
'minval': 0,
'n': 500,
'name': u'f',
'type': 'ScalarEncoder',
'w': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2 potential synapses.
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.0001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'cerebro_dummy',
u'streams': [ { u'columns': [u'*'],
u'info': u'test data',
u'source': u'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":None,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'f', metric='aae', inferenceElement='prediction', params={'window': 1000}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
|
agpl-3.0
|
tedder/ansible
|
lib/ansible/modules/cloud/cloudstack/cs_iso.py
|
14
|
13499
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_iso
short_description: Manages ISO images on Apache CloudStack based clouds.
description:
- Register and remove ISO images.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the ISO.
required: true
display_text:
description:
- Display text of the ISO.
- If not specified, C(name) will be used.
version_added: "2.4"
url:
description:
- URL where the ISO can be downloaded from. Required if C(state) is present.
os_type:
description:
- Name of the OS that best represents the OS of this ISO. If the ISO is bootable, this parameter needs to be passed. Required if C(state) is present.
is_ready:
description:
- This flag is used for searching existing ISOs. If set to C(yes), it will only list ISOs ready for deployment, e.g.
successfully downloaded and installed. Recommended to set it to C(no).
type: bool
default: no
is_public:
description:
- Register the ISO to be publicly available to all users. Only used if C(state) is present.
is_featured:
description:
- Register the ISO to be featured. Only used if C(state) is present.
type: bool
is_dynamically_scalable:
description:
- Register the ISO having XS/VMWare tools installed in order to support dynamic scaling of VM CPU/memory. Only used if C(state) is present.
type: bool
checksum:
description:
- The MD5 checksum value of this ISO. If set, we search by checksum instead of name.
bootable:
description:
- Register the ISO to be bootable. Only used if C(state) is present.
type: bool
domain:
description:
- Domain the ISO is related to.
account:
description:
- Account the ISO is related to.
project:
description:
- Name of the project the ISO to be registered in.
zone:
description:
- Name of the zone you wish the ISO to be registered or deleted from.
- If not specified, first zone found will be used.
cross_zones:
description:
- Whether the ISO should be synced or removed across zones.
- Mutually exclusive with C(zone).
type: bool
default: 'no'
version_added: "2.4"
iso_filter:
description:
- Name of the filter used to search for the ISO.
default: 'self'
choices: [ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]
state:
description:
- State of the ISO.
default: 'present'
choices: [ 'present', 'absent' ]
poll_async:
description:
- Poll async jobs until job has finished.
type: bool
default: 'yes'
version_added: "2.3"
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "To delete all tags, set a empty list e.g. C(tags: [])."
aliases: [ 'tag' ]
version_added: "2.4"
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Register an ISO if ISO name does not already exist.
- local_action:
module: cs_iso
name: Debian 7 64-bit
url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
os_type: Debian GNU/Linux 7(64-bit)
# Register an ISO with given name if ISO md5 checksum does not already exist.
- local_action:
module: cs_iso
name: Debian 7 64-bit
url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
os_type: Debian GNU/Linux 7(64-bit)
checksum: 0b31bccccb048d20b551f70830bb7ad0
# Remove an ISO by name
- local_action:
module: cs_iso
name: Debian 7 64-bit
state: absent
# Remove an ISO by checksum
- local_action:
module: cs_iso
name: Debian 7 64-bit
checksum: 0b31bccccb048d20b551f70830bb7ad0
state: absent
'''
RETURN = '''
---
id:
description: UUID of the ISO.
returned: success
type: str
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of the ISO.
returned: success
type: str
sample: Debian 7 64-bit
display_text:
description: Text to be displayed of the ISO.
returned: success
type: str
sample: Debian 7.7 64-bit minimal 2015-03-19
zone:
description: Name of zone the ISO is registered in.
returned: success
type: str
sample: zuerich
status:
description: Status of the ISO.
returned: success
type: str
sample: Successfully Installed
is_ready:
description: True if the ISO is ready to be deployed from.
returned: success
type: bool
sample: true
is_public:
description: True if the ISO is public.
returned: success
type: bool
sample: true
version_added: "2.4"
bootable:
description: True if the ISO is bootable.
returned: success
type: bool
sample: true
version_added: "2.4"
is_featured:
description: True if the ISO is featured.
returned: success
type: bool
sample: true
version_added: "2.4"
format:
description: Format of the ISO.
returned: success
type: str
sample: ISO
version_added: "2.4"
os_type:
description: Type of the OS.
returned: success
type: str
sample: CentOS 6.5 (64-bit)
version_added: "2.4"
checksum:
description: MD5 checksum of the ISO.
returned: success
type: str
sample: 0b31bccccb048d20b551f70830bb7ad0
created:
description: Date of registering.
returned: success
type: str
sample: 2015-03-29T14:57:06+0200
cross_zones:
description: true if the ISO is managed across all zones, false otherwise.
returned: success
type: bool
sample: false
version_added: "2.4"
domain:
description: Domain the ISO is related to.
returned: success
type: str
sample: example domain
account:
description: Account the ISO is related to.
returned: success
type: str
sample: example account
project:
description: Project the ISO is related to.
returned: success
type: str
sample: example project
tags:
description: List of resource tags associated with the ISO.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
version_added: "2.4"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackIso(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackIso, self).__init__(module)
self.returns = {
'checksum': 'checksum',
'status': 'status',
'isready': 'is_ready',
'crossZones': 'cross_zones',
'format': 'format',
'ostypename': 'os_type',
'isfeatured': 'is_featured',
'bootable': 'bootable',
'ispublic': 'is_public',
}
self.iso = None
def _get_common_args(self):
return {
'name': self.module.params.get('name'),
'displaytext': self.get_or_fallback('display_text', 'name'),
'isdynamicallyscalable': self.module.params.get('is_dynamically_scalable'),
'ostypeid': self.get_os_type('id'),
'bootable': self.module.params.get('bootable'),
}
def register_iso(self):
args = self._get_common_args()
args.update({
'domainid': self.get_domain('id'),
'account': self.get_account('name'),
'projectid': self.get_project('id'),
'checksum': self.module.params.get('checksum'),
'isfeatured': self.module.params.get('is_featured'),
'ispublic': self.module.params.get('is_public'),
})
if not self.module.params.get('cross_zones'):
args['zoneid'] = self.get_zone(key='id')
else:
args['zoneid'] = -1
if args['bootable'] and not args['ostypeid']:
self.module.fail_json(msg="OS type 'os_type' is required if 'bootable=true'.")
args['url'] = self.module.params.get('url')
if not args['url']:
self.module.fail_json(msg="URL is required.")
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('registerIso', **args)
self.iso = res['iso'][0]
return self.iso
def present_iso(self):
iso = self.get_iso()
if not iso:
iso = self.register_iso()
else:
iso = self.update_iso(iso)
if iso:
iso = self.ensure_tags(resource=iso, resource_type='ISO')
self.iso = iso
return iso
def update_iso(self, iso):
args = self._get_common_args()
args.update({
'id': iso['id'],
})
if self.has_changed(args, iso):
self.result['changed'] = True
if not self.module.params.get('cross_zones'):
args['zoneid'] = self.get_zone(key='id')
else:
# Workaround: the API does not return cross_zones=true
self.result['cross_zones'] = True
args['zoneid'] = -1
if not self.module.check_mode:
res = self.query_api('updateIso', **args)
self.iso = res['iso']
return self.iso
def get_iso(self):
if not self.iso:
args = {
'isready': self.module.params.get('is_ready'),
'isofilter': self.module.params.get('iso_filter'),
'domainid': self.get_domain('id'),
'account': self.get_account('name'),
'projectid': self.get_project('id'),
}
if not self.module.params.get('cross_zones'):
args['zoneid'] = self.get_zone(key='id')
# if checksum is set, we only look on that.
checksum = self.module.params.get('checksum')
if not checksum:
args['name'] = self.module.params.get('name')
isos = self.query_api('listIsos', **args)
if isos:
if not checksum:
self.iso = isos['iso'][0]
else:
for i in isos['iso']:
if i['checksum'] == checksum:
self.iso = i
break
return self.iso
def absent_iso(self):
iso = self.get_iso()
if iso:
self.result['changed'] = True
args = {
'id': iso['id'],
'projectid': self.get_project('id'),
}
if not self.module.params.get('cross_zones'):
args['zoneid'] = self.get_zone(key='id')
if not self.module.check_mode:
res = self.query_api('deleteIso', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'iso')
return iso
def get_result(self, iso):
super(AnsibleCloudStackIso, self).get_result(iso)
# Workaround: the API does not return cross_zones=true
if self.module.params.get('cross_zones'):
self.result['cross_zones'] = True
if 'zone' in self.result:
del self.result['zone']
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
display_text=dict(),
url=dict(),
os_type=dict(),
zone=dict(),
cross_zones=dict(type='bool', default=False),
iso_filter=dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
domain=dict(),
account=dict(),
project=dict(),
checksum=dict(),
is_ready=dict(type='bool', default=False),
bootable=dict(type='bool'),
is_featured=dict(type='bool'),
is_dynamically_scalable=dict(type='bool'),
state=dict(choices=['present', 'absent'], default='present'),
poll_async=dict(type='bool', default=True),
tags=dict(type='list', aliases=['tag']),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
mutually_exclusive=(
['zone', 'cross_zones'],
),
supports_check_mode=True
)
acs_iso = AnsibleCloudStackIso(module)
state = module.params.get('state')
if state in ['absent']:
iso = acs_iso.absent_iso()
else:
iso = acs_iso.present_iso()
result = acs_iso.get_result(iso)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
eg-zhang/h2o-2
|
py/testdir_single_jvm/test_NN2_params_rand2.py
|
9
|
4517
|
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i, h2o_nn
def define_params():
paramDict = {
'destination_key' : [None, 'NN2_model'],
'ignored_cols' : [None, 0, 1, '0,1'],
'classification' : [None, 0, 1],
'validation' : [None, 'covtype.20k.hex'],
# 'mode' : [None, 'SingleNode', 'SingleThread', 'MapReduce'],
'activation' : [None, 'Tanh', 'TanhWithDropout', 'Rectifier', 'RectifierWithDropout',
'Maxout', 'MaxoutWithDropout'],
'input_dropout_ratio' : [None, 0, 0.5, .99], # 1 is illegal
'hidden' : [None, 1, '100,50'],
'adaptive_rate' : [None, 0, 1],
'rate' : [None, 0.005, 0.010],
'rate_annealing' : [None, 0, 1e-6, 1e-4],
'momentum_start' : [None, 0, 0.1, 0.5, 0.9999],
'momentum_ramp' : [None, 1, 10000, 1000000],
'momentum_stable' : [None, 0, 0.9, 0.8],
'max_w2' : [None, 5, 10, 'Infinity'],
'l1' : [None, 0, 1e-4],
'l2' : [None, 0, 1e-4, 0.5],
'seed' : [None, 0, 1, 5234234],
'initial_weight_distribution' : [None, 'UniformAdaptive', 'Uniform', 'Normal'],
'initial_weight_scale' : [None, 0, 1],
'rate_decay' : [None, 0, 1],
'epochs' : [0.001, 2],
'score_training_samples' : [None, 0, 1],
'score_validation_samples' : [None, 0, 1],
'score_interval' : [None, 0, 1],
'train_samples_per_iteration' : [None, 0, 1],
'diagnostics' : [None, 0, 1],
'force_load_balance' : [None, 0, 1],
'replicate_training_data' : [None, 0, 1],
'shuffle_training_data' : [None, 0, 1],
'score_duty_cycle' : [None, 0.1, 0.01],
'fast_mode' : [None, 0, 1],
'ignore_const_cols' : [None, 0, 1],
'shuffle_training_data' : [None, 0, 1],
'nesterov_accelerated_gradient': [None, 0, 1],
# 'warmup_samples' : [None, 0, 10],
}
return paramDict
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_NN2_params_rand2(self):
csvPathname = 'covtype/covtype.20k.data'
hex_key = 'covtype.20k.hex'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=hex_key, schema='put')
paramDict = define_params()
for trial in range(3):
# params is mutable. These are the defaults.
params = {'response': 'C55', 'epochs': '1'}
h2o_nn.pickRandDeepLearningParams(paramDict, params)
kwargs = params.copy()
start = time.time()
nn = h2o_cmd.runDeepLearning(timeoutSecs=500, parseResult=parseResult, **kwargs)
print "nn result:", h2o.dump_json(nn)
h2o.check_sandbox_for_errors()
deeplearning_model = nn['deeplearning_model']
errors = deeplearning_model['errors']
# print "errors", h2o.dump_json(errors)
# print "errors, classification", errors['classification']
# assert 1==0
# unstable = nn['model_info']['unstable']
# unstable case caused by :
# normal initial distribution with amplitude 1 and input_dropout_ratio=1.
# blowing up numerically during propagation of all zeroes as input repeatedly.
# arnon added logging to stdout in addition to html in 7899b92ad67.
# Will have to check that first before making predictions.
# print "unstable:", unstable
# FIX! simple check?
print "Deep Learning end on ", csvPathname, 'took', time.time() - start, 'seconds'
print "Trial #", trial, "completed\n"
if __name__ == '__main__':
h2o.unit_main()
|
apache-2.0
|
rolando-contrib/scrapy
|
tests/test_downloadermiddleware_stats.py
|
101
|
1596
|
from unittest import TestCase
from scrapy.downloadermiddlewares.stats import DownloaderStats
from scrapy.http import Request, Response
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
class MyException(Exception):
pass
class TestDownloaderStats(TestCase):
def setUp(self):
self.crawler = get_crawler(Spider)
self.spider = self.crawler._create_spider('scrapytest.org')
self.mw = DownloaderStats(self.crawler.stats)
self.crawler.stats.open_spider(self.spider)
self.req = Request('http://scrapytest.org')
self.res = Response('scrapytest.org', status=400)
def assertStatsEqual(self, key, value):
self.assertEqual(
self.crawler.stats.get_value(key, spider=self.spider),
value,
str(self.crawler.stats.get_stats(self.spider))
)
def test_process_request(self):
self.mw.process_request(self.req, self.spider)
self.assertStatsEqual('downloader/request_count', 1)
def test_process_response(self):
self.mw.process_response(self.req, self.res, self.spider)
self.assertStatsEqual('downloader/response_count', 1)
def test_process_exception(self):
self.mw.process_exception(self.req, MyException(), self.spider)
self.assertStatsEqual('downloader/exception_count', 1)
self.assertStatsEqual(
'downloader/exception_type_count/tests.test_downloadermiddleware_stats.MyException',
1
)
def tearDown(self):
self.crawler.stats.close_spider(self.spider, '')
|
bsd-3-clause
|
ncliam/serverpos
|
openerp/addons/project/res_config.py
|
232
|
4551
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_configuration(osv.osv_memory):
_name = 'project.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_sale_service': fields.boolean('Generate tasks from sale orders',
help='This feature automatically creates project tasks from service products in sale orders. '
'More precisely, tasks are created for procurement lines with product of type \'Service\', '
'procurement method \'Make to Order\', and supply method \'Manufacture\'.\n'
'-This installs the module sale_service.'),
'module_pad': fields.boolean("Use integrated collaborative note pads on task",
help='Lets the company customize which Pad installation should be used to link to new pads '
'(for example: http://ietherpad.com/).\n'
'-This installs the module pad.'),
'module_project_timesheet': fields.boolean("Record timesheet lines per tasks",
help='This allows you to transfer the entries under tasks defined for Project Management to '
'the timesheet line entries for particular date and user, with the effect of creating, '
'editing and deleting either way.\n'
'-This installs the module project_timesheet.'),
'module_project_issue': fields.boolean("Track issues and bugs",
help='Provides management of issues/bugs in projects.\n'
'-This installs the module project_issue.'),
'time_unit': fields.many2one('product.uom', 'Working time unit', required=True,
help='This will set the unit of measure used in projects and tasks.\n'
'Changing the unit will only impact new entries.'),
'module_project_issue_sheet': fields.boolean("Invoice working time on issues",
help='Provides timesheet support for the issues/bugs management in project.\n'
'-This installs the module project_issue_sheet.'),
'group_tasks_work_on_tasks': fields.boolean("Log work activities on tasks",
implied_group='project.group_tasks_work_on_tasks',
help="Allows you to compute work on tasks."),
'group_time_work_estimation_tasks': fields.boolean("Manage time estimation on tasks",
implied_group='project.group_time_work_estimation_tasks',
help="Allows you to compute Time Estimation on tasks."),
'group_manage_delegation_task': fields.boolean("Allow task delegation",
implied_group='project.group_delegate_task',
help="Allows you to delegate tasks to other users."),
}
def get_default_time_unit(self, cr, uid, fields, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return {'time_unit': user.company_id.project_time_mode_id.id}
def set_time_unit(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context)
user = self.pool.get('res.users').browse(cr, uid, uid, context)
user.company_id.write({'project_time_mode_id': config.time_unit.id})
def onchange_time_estimation_project_timesheet(self, cr, uid, ids, group_time_work_estimation_tasks, module_project_timesheet):
if group_time_work_estimation_tasks or module_project_timesheet:
return {'value': {'group_tasks_work_on_tasks': True}}
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
chbfiv/fabric-engine-old
|
Native/ThirdParty/Private/Python/closure_linter/common/position.py
|
285
|
3324
|
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to represent positions within strings."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class Position(object):
"""Object representing a segment of a string.
Attributes:
start: The index in to the string where the segment starts.
length: The length of the string segment.
"""
def __init__(self, start, length):
"""Initialize the position object.
Args:
start: The start index.
length: The number of characters to include.
"""
self.start = start
self.length = length
def Get(self, string):
"""Returns this range of the given string.
Args:
string: The string to slice.
Returns:
The string within the range specified by this object.
"""
return string[self.start:self.start + self.length]
def Set(self, target, source):
"""Sets this range within the target string to the source string.
Args:
target: The target string.
source: The source string.
Returns:
The resulting string
"""
return target[:self.start] + source + target[self.start + self.length:]
def AtEnd(string):
"""Create a Position representing the end of the given string.
Args:
string: The string to represent the end of.
Returns:
The created Position object.
"""
return Position(len(string), 0)
AtEnd = staticmethod(AtEnd)
def IsAtEnd(self, string):
"""Returns whether this position is at the end of the given string.
Args:
string: The string to test for the end of.
Returns:
Whether this position is at the end of the given string.
"""
return self.start == len(string) and self.length == 0
def AtBeginning():
"""Create a Position representing the beginning of any string.
Returns:
The created Position object.
"""
return Position(0, 0)
AtBeginning = staticmethod(AtBeginning)
def IsAtBeginning(self):
"""Returns whether this position is at the beginning of any string.
Returns:
Whether this position is at the beginning of any string.
"""
return self.start == 0 and self.length == 0
def All(string):
"""Create a Position representing the entire string.
Args:
string: The string to represent the entirety of.
Returns:
The created Position object.
"""
return Position(0, len(string))
All = staticmethod(All)
def Index(index):
"""Returns a Position object for the specified index.
Args:
index: The index to select, inclusively.
Returns:
The created Position object.
"""
return Position(index, 1)
Index = staticmethod(Index)
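# --- Illustrative usage sketch (added for this write-up; not part of the
# --- original linter source). Shows how Get/Set slice and splice a string.
if __name__ == '__main__':
  pos = Position(2, 3)
  assert pos.Get('abcdefg') == 'cde'
  assert pos.Set('abcdefg', 'XY') == 'abXYfg'
  assert Position.All('abc').Get('abc') == 'abc'
  assert Position.AtEnd('abc').IsAtEnd('abc')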
|
agpl-3.0
|
mudbungie/NetExplorer
|
env/lib/python3.4/site-packages/networkx/tests/test_convert_pandas.py
|
43
|
2177
|
from nose import SkipTest
from nose.tools import assert_true
import networkx as nx
class TestConvertPandas(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
try:
import pandas as pd
except ImportError:
raise SkipTest('Pandas not available.')
def __init__(self, ):
global pd
import pandas as pd
self.r = pd.np.random.RandomState(seed=5)
ints = self.r.random_integers(1, 10, size=(3,2))
a = ['A', 'B', 'C']
b = ['D', 'A', 'E']
df = pd.DataFrame(ints, columns=['weight', 'cost'])
df[0] = a # Column label 0 (int)
df['b'] = b # Column label 'b' (str)
self.df = df
def assert_equal(self, G1, G2):
assert_true( nx.is_isomorphic(G1, G2, edge_match=lambda x, y: x == y ))
def test_from_dataframe_all_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
('B', 'A', {'cost': 1, 'weight': 7}),
('A', 'D', {'cost': 7, 'weight': 4})])
G=nx.from_pandas_dataframe(self.df, 0, 'b', True)
self.assert_equal(G, Gtrue)
def test_from_dataframe_multi_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
('B', 'A', {'cost': 1, 'weight': 7}),
('A', 'D', {'cost': 7, 'weight': 4})])
G=nx.from_pandas_dataframe(self.df, 0, 'b', ['weight', 'cost'])
self.assert_equal(G, Gtrue)
def test_from_dataframe_one_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {'weight': 10}),
('B', 'A', {'weight': 7}),
('A', 'D', {'weight': 4})])
G=nx.from_pandas_dataframe(self.df, 0, 'b', 'weight')
self.assert_equal(G, Gtrue)
def test_from_dataframe_no_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {}),
('B', 'A', {}),
('A', 'D', {})])
G=nx.from_pandas_dataframe(self.df, 0, 'b',)
self.assert_equal(G, Gtrue)
|
mit
|
foodszhang/kbengine
|
kbe/res/scripts/common/Lib/multiprocessing/resource_sharer.py
|
102
|
5318
|
#
# We use a background thread for sharing fds on Unix, and for sharing sockets on
# Windows.
#
# A client which wants to pickle a resource registers it with the resource
# sharer and gets an identifier in return. The unpickling process will connect
# to the resource sharer, sends the identifier and its pid, and then receives
# the resource.
#
import os
import signal
import socket
import sys
import threading
from . import process
from . import reduction
from . import util
__all__ = ['stop']
if sys.platform == 'win32':
__all__ += ['DupSocket']
class DupSocket(object):
'''Picklable wrapper for a socket.'''
def __init__(self, sock):
new_sock = sock.dup()
def send(conn, pid):
share = new_sock.share(pid)
conn.send_bytes(share)
self._id = _resource_sharer.register(send, new_sock.close)
def detach(self):
'''Get the socket. This should only be called once.'''
with _resource_sharer.get_connection(self._id) as conn:
share = conn.recv_bytes()
return socket.fromshare(share)
else:
__all__ += ['DupFd']
class DupFd(object):
'''Wrapper for fd which can be used at any time.'''
def __init__(self, fd):
new_fd = os.dup(fd)
def send(conn, pid):
reduction.send_handle(conn, new_fd, pid)
def close():
os.close(new_fd)
self._id = _resource_sharer.register(send, close)
def detach(self):
'''Get the fd. This should only be called once.'''
with _resource_sharer.get_connection(self._id) as conn:
return reduction.recv_handle(conn)
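# Added note (not part of the stdlib source): the intended flow on Unix is that
# one process wraps a descriptor in DupFd(fd), the wrapper reaches another
# process by being pickled (for example inside a multiprocessing.Queue item),
# and the receiving process calls detach() exactly once to obtain a duplicate
# of the descriptor from the background sharer thread.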
class _ResourceSharer(object):
'''Manager for resources using a background thread.'''
def __init__(self):
self._key = 0
self._cache = {}
self._old_locks = []
self._lock = threading.Lock()
self._listener = None
self._address = None
self._thread = None
util.register_after_fork(self, _ResourceSharer._afterfork)
def register(self, send, close):
'''Register resource, returning an identifier.'''
with self._lock:
if self._address is None:
self._start()
self._key += 1
self._cache[self._key] = (send, close)
return (self._address, self._key)
@staticmethod
def get_connection(ident):
'''Return connection from which to receive identified resource.'''
from .connection import Client
address, key = ident
c = Client(address, authkey=process.current_process().authkey)
c.send((key, os.getpid()))
return c
def stop(self, timeout=None):
'''Stop the background thread and clear registered resources.'''
from .connection import Client
with self._lock:
if self._address is not None:
c = Client(self._address,
authkey=process.current_process().authkey)
c.send(None)
c.close()
self._thread.join(timeout)
if self._thread.is_alive():
util.sub_warning('_ResourceSharer thread did '
'not stop when asked')
self._listener.close()
self._thread = None
self._address = None
self._listener = None
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
def _afterfork(self):
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
# If self._lock was locked at the time of the fork, it may be broken
# -- see issue 6721. Replace it without letting it be gc'ed.
self._old_locks.append(self._lock)
self._lock = threading.Lock()
if self._listener is not None:
self._listener.close()
self._listener = None
self._address = None
self._thread = None
def _start(self):
from .connection import Listener
assert self._listener is None
util.debug('starting listener and thread for sending handles')
self._listener = Listener(authkey=process.current_process().authkey)
self._address = self._listener.address
t = threading.Thread(target=self._serve)
t.daemon = True
t.start()
self._thread = t
def _serve(self):
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
while 1:
try:
with self._listener.accept() as conn:
msg = conn.recv()
if msg is None:
break
key, destination_pid = msg
send, close = self._cache.pop(key)
try:
send(conn, destination_pid)
finally:
close()
except:
if not util.is_exiting():
sys.excepthook(*sys.exc_info())
_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop
|
lgpl-3.0
|
paulosman/django-openid-auth
|
django_openid_auth/teams.py
|
25
|
14251
|
# Launchpad OpenID Teams Extension support for python-openid
#
# Copyright (C) 2008-2010 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Team membership support for Launchpad.
The primary form of communication between the RP and Launchpad is an
OpenID authentication request. Our solution is to piggyback a team
membership test onto this interaction.
As part of an OpenID authentication request, the RP includes the
following fields:
openid.ns.lp:
An OpenID 2.0 namespace URI for the extension. It is not strictly
required for 1.1 requests, but including it is good for forward
compatibility.
It must be set to: http://ns.launchpad.net/2007/openid-teams
openid.lp.query_membership:
A comma separated list of Launchpad team names that the RP is
interested in.
As part of the positive assertion OpenID response, the following field
will be provided:
openid.ns.lp:
(as above)
openid.lp.is_member:
A comma separated list of teams that the user is actually a member
of. The list may be limited to those teams mentioned in the
request.
This field must be included in the response signature in order to
be considered valid (as the response is bounced through the user's
web browser, an unsigned value could be modified).
@since: 2.1.1
"""
from openid.message import registerNamespaceAlias, \
NamespaceAliasRegistrationError
from openid.extension import Extension
from openid import oidutil
try:
basestring #pylint:disable-msg=W0104
except NameError:
# For Python 2.2
basestring = (str, unicode) #pylint:disable-msg=W0622
__all__ = [
'TeamsRequest',
'TeamsResponse',
'ns_uri',
'supportsTeams',
]
ns_uri = 'http://ns.launchpad.net/2007/openid-teams'
try:
registerNamespaceAlias(ns_uri, 'lp')
except NamespaceAliasRegistrationError, e:
oidutil.log('registerNamespaceAlias(%r, %r) failed: %s' % (ns_uri,
'lp', str(e),))
def supportsTeams(endpoint):
"""Does the given endpoint advertise support for Launchpad Teams?
@param endpoint: The endpoint object as returned by OpenID discovery
@type endpoint: openid.consumer.discover.OpenIDEndpoint
@returns: Whether an lp type was advertised by the endpoint
@rtype: bool
"""
return endpoint.usesExtension(ns_uri)
class TeamsNamespaceError(ValueError):
"""The Launchpad teams namespace was not found and could not
be created using the expected name (there's another extension
using the name 'lp')
This is not I{illegal} for OpenID 2, although it probably
indicates a problem, since it's not expected that other extensions
will re-use the alias that is in use for OpenID 1.
If this is an OpenID 1 request, then there is no recourse. This
should not happen unless some code has modified the namespaces for
the message that is being processed.
"""
def getTeamsNS(message):
"""Extract the Launchpad teams namespace URI from the given
OpenID message.
@param message: The OpenID message from which to parse Launchpad
teams. This may be a request or response message.
@type message: C{L{openid.message.Message}}
@returns: the lp namespace URI for the supplied message. The
message may be modified to define a Launchpad teams
namespace.
@rtype: C{str}
@raise ValueError: when using OpenID 1 if the message defines
the 'lp' alias to be something other than a Launchpad
teams type.
"""
# See if there exists an alias for the Launchpad teams type.
alias = message.namespaces.getAlias(ns_uri)
if alias is None:
# There is no alias, so try to add one. (OpenID version 1)
try:
message.namespaces.addAlias(ns_uri, 'lp')
except KeyError, why:
# An alias for the string 'lp' already exists, but it's
# defined for something other than Launchpad teams
raise TeamsNamespaceError(why[0])
# we know that ns_uri defined, because it's defined in the
# else clause of the loop as well, so disable the warning
return ns_uri #pylint:disable-msg=W0631
class TeamsRequest(Extension):
"""An object to hold the state of a Launchpad teams request.
@ivar query_membership: A comma separated list of Launchpad team
names that the RP is interested in.
@type required: [str]
@group Consumer: requestField, requestTeams, getExtensionArgs, addToOpenIDRequest
@group Server: fromOpenIDRequest, parseExtensionArgs
"""
ns_alias = 'lp'
def __init__(self, query_membership=None, lp_ns_uri=ns_uri):
"""Initialize an empty Launchpad teams request"""
Extension.__init__(self)
self.query_membership = []
self.ns_uri = lp_ns_uri
if query_membership:
self.requestTeams(query_membership)
# Assign getTeamsNS to a static method so that it can be
# overridden for testing.
_getTeamsNS = staticmethod(getTeamsNS)
def fromOpenIDRequest(cls, request):
"""Create a Launchpad teams request that contains the
fields that were requested in the OpenID request with the
given arguments
@param request: The OpenID request
@type request: openid.server.CheckIDRequest
@returns: The newly created Launchpad teams request
@rtype: C{L{TeamsRequest}}
"""
self = cls()
# Since we're going to mess with namespace URI mapping, don't
# mutate the object that was passed in.
message = request.message.copy()
self.ns_uri = self._getTeamsNS(message)
args = message.getArgs(self.ns_uri)
self.parseExtensionArgs(args)
return self
fromOpenIDRequest = classmethod(fromOpenIDRequest)
def parseExtensionArgs(self, args, strict=False):
"""Parse the unqualified Launchpad teams request
parameters and add them to this object.
This method is essentially the inverse of
C{L{getExtensionArgs}}. This method restores the serialized
Launchpad teams request fields.
If you are extracting arguments from a standard OpenID
checkid_* request, you probably want to use C{L{fromOpenIDRequest}},
which will extract the lp namespace and arguments from the
OpenID request. This method is intended for cases where the
OpenID server needs more control over how the arguments are
parsed than that method provides.
>>> args = message.getArgs(ns_uri)
>>> request.parseExtensionArgs(args)
@param args: The unqualified Launchpad teams arguments
@type args: {str:str}
@param strict: Whether requests with fields that are not
defined in the Launchpad teams specification should be
tolerated (and ignored)
@type strict: bool
@returns: None; updates this object
"""
items = args.get('query_membership')
if items:
for team_name in items.split(','):
try:
self.requestTeam(team_name, strict)
except ValueError:
if strict:
raise

    def allRequestedTeams(self):
        """A list of all of the Launchpad teams that were
        requested.

        @rtype: [str]
        """
        return self.query_membership

    def wereTeamsRequested(self):
        """Have any Launchpad teams been requested?

        @rtype: bool
        """
        return bool(self.allRequestedTeams())

    def __contains__(self, team_name):
        """Was this team in the request?"""
        return team_name in self.query_membership

    def requestTeam(self, team_name, strict=False):
        """Request the specified team from the OpenID user

        @param team_name: the unqualified Launchpad team name
        @type team_name: str

        @param strict: whether to raise an exception when a team is
            added to a request more than once

        @raise ValueError: when strict is set and the team was
            requested more than once
        """
        if strict:
            if team_name in self.query_membership:
                raise ValueError('That team has already been requested')
        else:
            if team_name in self.query_membership:
                return

        self.query_membership.append(team_name)

    def requestTeams(self, query_membership, strict=False):
        """Add the given list of teams to the request

        @param query_membership: The Launchpad teams request
        @type query_membership: [str]

        @raise TypeError: when the teams are not passed as a list of
            strings
        @raise ValueError: when strict is set and a team was requested
            more than once
        """
        if isinstance(query_membership, basestring):
            raise TypeError('Teams should be passed as a list of '
                            'strings (not %r)' % (type(query_membership),))

        for team_name in query_membership:
            self.requestTeam(team_name, strict=strict)

    def getExtensionArgs(self):
        """Get a dictionary of unqualified Launchpad teams
        arguments representing this request.

        This method is essentially the inverse of
        C{L{parseExtensionArgs}}. This method serializes the Launchpad
        teams request fields.

        @rtype: {str:str}
        """
        args = {}

        if self.query_membership:
            args['query_membership'] = ','.join(self.query_membership)

        return args
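
# Round-trip sketch (illustrative only): getExtensionArgs() and
# parseExtensionArgs() are inverses of one another, e.g.
#
#   req = TeamsRequest(['team-a', 'team-b'])
#   req.getExtensionArgs()    # -> {'query_membership': 'team-a,team-b'}
#   copy = TeamsRequest()
#   copy.parseExtensionArgs({'query_membership': 'team-a,team-b'})
#   copy.allRequestedTeams()  # -> ['team-a', 'team-b']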

class TeamsResponse(Extension):
    """Represents the data returned in a Launchpad teams response
    inside of an OpenID C{id_res} response. This object will be
    created by the OpenID server, added to the C{id_res} response
    object, and then extracted from the C{id_res} message by the
    Consumer.

    @ivar is_member: The Launchpad teams data, a list of team names.

    @ivar ns_uri: The URI under which the Launchpad teams data was
        stored in the response message.

    @group Server: extractResponse
    @group Consumer: fromSuccessResponse
    @group Read-only dictionary interface: keys, iterkeys, items, iteritems,
        __iter__, get, __getitem__, has_key
    """

    ns_alias = 'lp'

    def __init__(self, is_member=None, lp_ns_uri=ns_uri):
        Extension.__init__(self)
        if is_member is None:
            self.is_member = []
        else:
            self.is_member = is_member
        self.ns_uri = lp_ns_uri

    def addTeam(self, team_name):
        if team_name not in self.is_member:
            self.is_member.append(team_name)

    def extractResponse(cls, request, is_member_str):
        """Take a C{L{TeamsRequest}} and a comma-separated string of
        Launchpad team names and create a C{L{TeamsResponse}}
        object containing that data.

        @param request: The Launchpad teams request object
        @type request: TeamsRequest

        @param is_member_str: The Launchpad teams data for this
            response, as a comma-separated string of team names.
        @type is_member_str: str

        @returns: a Launchpad teams response object
        @rtype: TeamsResponse
        """
        self = cls()
        self.ns_uri = request.ns_uri
        self.is_member = is_member_str.split(',')

        return self

    extractResponse = classmethod(extractResponse)
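
    # Server-side sketch (illustrative only): given the incoming request and
    # the comma-separated memberships verified for the user, e.g.
    #   TeamsResponse.extractResponse(teams_request, 'team-a,team-b')
    # the resulting object can then be added to the OpenID id_res response.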

    # Assign getTeamsNS to a static method so that it can be
    # overridden for testing
    _getTeamsNS = staticmethod(getTeamsNS)

    def fromSuccessResponse(cls, success_response, signed_only=True):
        """Create a C{L{TeamsResponse}} object from a successful OpenID
        library response
        (C{L{openid.consumer.consumer.SuccessResponse}}) response
        message

        @param success_response: A SuccessResponse from consumer.complete()
        @type success_response: C{L{openid.consumer.consumer.SuccessResponse}}

        @param signed_only: Whether to process only data that was
            signed in the id_res message from the server.
        @type signed_only: bool

        @rtype: TeamsResponse
        @returns: A Launchpad teams response containing the data
            that was supplied with the C{id_res} response.
        """
        self = cls()
        self.ns_uri = self._getTeamsNS(success_response.message)

        if signed_only:
            args = success_response.getSignedNS(self.ns_uri)
        else:
            args = success_response.message.getArgs(self.ns_uri)

        if "is_member" in args:
            is_member_str = args["is_member"]
            self.is_member = is_member_str.split(',')
            #self.is_member = args["is_member"]

        return self

    fromSuccessResponse = classmethod(fromSuccessResponse)
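
    # Consumer-side sketch (illustrative only): after consumer.complete()
    # succeeds,
    #   teams = TeamsResponse.fromSuccessResponse(success_response)
    #   teams.is_member   # e.g. ['team-a', 'team-b']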

    def getExtensionArgs(self):
        """Get the fields to put in the Launchpad teams namespace
        when adding them to an id_res message.

        @see: openid.extension
        """
        ns_args = {'is_member': ','.join(self.is_member),}

        return ns_args
|
bsd-2-clause
|
manipopopo/tensorflow
|
tensorflow/contrib/data/python/kernel_tests/serialization/concatenate_dataset_serialization_test.py
|
14
|
2037
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ConcatenateDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test

class ConcatenateDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def _build_concatenate_dataset(self, var_array):
    input_components = (np.tile(np.array([[1], [2], [3], [4]]), 20),
                        np.tile(np.array([[12], [13], [14], [15]]), 4))
    to_concatenate_components = (np.tile(
        np.array([[5], [6], [7], [8], [9]]), 20), var_array)
    return dataset_ops.Dataset.from_tensor_slices(input_components).concatenate(
        dataset_ops.Dataset.from_tensor_slices(to_concatenate_components))

  def testConcatenateCore(self):
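    # The first dataset yields 4 elements (one per row of its 4-row input
    # components) and the concatenated dataset yields 5, so the serialization
    # test expects 9 outputs in total.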
    num_outputs = 9
    array = np.tile(np.array([[16], [17], [18], [19], [20]]), 15)
    diff_array = np.array([[1], [2], [3], [4], [5]])
    self.run_core_tests(lambda: self._build_concatenate_dataset(array),
                        lambda: self._build_concatenate_dataset(diff_array),
                        num_outputs)


if __name__ == "__main__":
  test.main()
|
apache-2.0
|
tareqalayan/ansible
|
test/units/template/test_vars.py
|
84
|
2886
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import MagicMock
from ansible.template.vars import AnsibleJ2Vars

class TestVars(unittest.TestCase):
    def setUp(self):
        self.mock_templar = MagicMock(name='mock_templar')
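
    # Each test below converts AnsibleJ2Vars to a plain dict, once through
    # dict(*args, **kwargs) (_dict_jinja28) and once through dict(mapping)
    # (_dict_jinja29), mirroring how different Jinja2 versions consume
    # template globals.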
    def test(self):
        ajvars = AnsibleJ2Vars(None, None)
        print(ajvars)

    def test_globals_empty_2_8(self):
        ajvars = AnsibleJ2Vars(self.mock_templar, {})
        res28 = self._dict_jinja28(ajvars)
        self.assertIsInstance(res28, dict)

    def test_globals_empty_2_9(self):
        ajvars = AnsibleJ2Vars(self.mock_templar, {})
        res29 = self._dict_jinja29(ajvars)
        self.assertIsInstance(res29, dict)

    def _assert_globals(self, res):
        self.assertIsInstance(res, dict)
        self.assertIn('foo', res)
        self.assertEqual(res['foo'], 'bar')

    def test_globals_2_8(self):
        ajvars = AnsibleJ2Vars(self.mock_templar, {'foo': 'bar', 'blip': [1, 2, 3]})
        res28 = self._dict_jinja28(ajvars)
        self._assert_globals(res28)

    def test_globals_2_9(self):
        ajvars = AnsibleJ2Vars(self.mock_templar, {'foo': 'bar', 'blip': [1, 2, 3]})
        res29 = self._dict_jinja29(ajvars)
        self._assert_globals(res29)

    def _dicts(self, ajvars):
        print(ajvars)
        res28 = self._dict_jinja28(ajvars)
        res29 = self._dict_jinja29(ajvars)

        # res28_other = self._dict_jinja28(ajvars, {'other_key': 'other_value'})
        # other = {'other_key': 'other_value'}
        # res29_other = self._dict_jinja29(ajvars, *other)

        print('res28: %s' % res28)
        print('res29: %s' % res29)
        # print('res28_other: %s' % res28_other)
        # print('res29_other: %s' % res29_other)
        # return (res28, res29, res28_other, res29_other)
        # assert ajvars == res28
        # assert ajvars == res29
        return (res28, res29)

    def _dict_jinja28(self, *args, **kwargs):
        return dict(*args, **kwargs)

    def _dict_jinja29(self, the_vars):
        return dict(the_vars)
|
gpl-3.0
|
aerickson/ansible
|
lib/ansible/utils/module_docs_fragments/eos.py
|
87
|
5493
|
#
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
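
# Note: modules pull this shared documentation in with
# `extends_documentation_fragment: eos` in their DOCUMENTATION block;
# Ansible then merges these connection options into each module's docs.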
class ModuleDocFragment(object):

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  authorize:
    description:
      - Instructs the module to enter privileged mode on the remote device
        before sending any commands. If not specified, the device will
        attempt to execute all commands in non-privileged mode. If the value
        is not specified in the task, the value of environment variable
        C(ANSIBLE_NET_AUTHORIZE) will be used instead.
    default: no
    choices: ['yes', 'no']
  auth_pass:
    description:
      - Specifies the password to use if required to enter privileged mode
        on the remote device. If I(authorize) is false, then this argument
        does nothing. If the value is not specified in the task, the value of
        environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
    default: none
  provider:
    description:
      - A dict object containing connection details.
    default: null
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport. The value of host is used as
            the destination address for the transport.
        required: true
      port:
        description:
          - Specifies the port to use when building the connection to the remote
            device. This value applies to either I(cli) or I(eapi). The port
            value will default to the appropriate transport common port if
            none is provided in the task. (cli=22, http=80, https=443).
        default: 0 (use common port)
      username:
        description:
          - Configures the username to use to authenticate the connection to
            the remote device. This value is used to authenticate
            either the CLI login or the eAPI authentication depending on which
            transport is used. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
      password:
        description:
          - Specifies the password to use to authenticate the connection to
            the remote device. This is a common argument used for either I(cli)
            or I(eapi) transports. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
        default: null
      timeout:
        description:
          - Specifies the timeout in seconds for communicating with the network device
            for either connecting or sending commands. If the timeout is
            exceeded before the operation is completed, the module will error.
        default: 10
      ssh_keyfile:
        description:
          - Specifies the SSH keyfile to use to authenticate the connection to
            the remote device. This argument is only used for I(cli) transports.
            If the value is not specified in the task, the value of environment
            variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
      authorize:
        description:
          - Instructs the module to enter privileged mode on the remote device
            before sending any commands. If not specified, the device will
            attempt to execute all commands in non-privileged mode. If the value
            is not specified in the task, the value of environment variable
            C(ANSIBLE_NET_AUTHORIZE) will be used instead.
        default: no
        choices: ['yes', 'no']
      auth_pass:
        description:
          - Specifies the password to use if required to enter privileged mode
            on the remote device. If I(authorize) is false, then this argument
            does nothing. If the value is not specified in the task, the value of
            environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
        default: none
      transport:
        description:
          - Configures the transport connection to use when connecting to the
            remote device.
        required: true
        choices:
          - eapi
          - cli
        default: cli
      use_ssl:
        description:
          - Configures the I(transport) to use SSL if set to true only when the
            C(transport=eapi). If the transport
            argument is not eapi, this value is ignored.
        default: yes
        choices: ['yes', 'no']
      validate_certs:
        description:
          - If C(no), SSL certificates will not be validated. This should only be used
            on personally controlled sites using self-signed certificates. If the transport
            argument is not eapi, this value is ignored.
        choices: ['yes', 'no']
"""
|
gpl-3.0
|