repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
Szkered/BC_2402 | sydb/stocks/forms.py | 1 | 5392 |
from django import forms
from stocks.models import *
from django.forms.models import modelformset_factory, inlineformset_factory, BaseInlineFormSet
from django.forms.formsets import formset_factory, BaseFormSet
import re
import datetime


class DonorForm(forms.ModelForm):
    class Meta:
        model = Donor


class VendorForm(forms.ModelForm):
    class Meta:
        model = Vendor


class ConfirmForm(forms.Form):
    confirm = forms.BooleanField()


class StockInForm(forms.Form):
    stock_name = forms.CharField()
    unit_price = forms.FloatField()
    unit_measure = forms.CharField(max_length=40)
    category = forms.CharField(required=False)
    quantity = forms.IntegerField()

    def clean(self):
        cleaned_data = super(StockInForm, self).clean()
        quantity = cleaned_data.get('quantity')
        unit_price = cleaned_data.get('unit_price')
        unit_measure = cleaned_data.get('unit_measure')
        category_list = re.split(', | |,', cleaned_data.get('category'))
        if quantity:
            if not quantity > 0:
                raise forms.ValidationError("Quantity must be positive integer!")
        if unit_price:
            if not unit_price > 0:
                raise forms.ValidationError("Unit price must be positive integer!")
        # if unit_measure:
        #     if not re.compile("\d+\w+").search(unit_measure):
        #         raise forms.ValidationError(
        #             "Unit measure must be the combination of number and characters!")
        if category_list:
            if 'Standard' not in category_list and 'Non-Standard' not in category_list:
                raise forms.ValidationError("An item must be either Standard or Non-Standard!")
        return cleaned_data


class DateForm(forms.Form):
    date = forms.DateField()


class StartEndDateForm(forms.Form):
    start_date = forms.DateField(required=False)
    end_date = forms.DateField(required=False)


class FamilyForm(forms.Form):
    TYPE_A = 'A'
    TYPE_B = 'B'
    TYPE_C = 'C'
    TYPE_D = 'D'
    ALL = 'L'
    FAMILY_TYPES = (
        (TYPE_A, 'Type A'),
        (TYPE_B, 'Type B'),
        (TYPE_C, 'Type C'),
        (TYPE_D, 'Type D'),
        (ALL, 'All')
    )
    family_type = forms.ChoiceField(choices=FAMILY_TYPES)


class DistributionForm(forms.Form):
    quantity = forms.IntegerField(initial=0)
    stock_id = forms.CharField(widget=forms.HiddenInput(), required=False)

    def clean(self):
        cleaned_data = super(DistributionForm, self).clean()
        quantity = cleaned_data.get('quantity')
        stock = Stock.objects.get(pk=cleaned_data.get('stock_id'))
        now = datetime.datetime.now()
        if quantity > stock.current_amt(now):
            raise forms.ValidationError("You don't have that much %s!" % stock.name)
        if quantity < 0:
            raise forms.ValidationError("Quantity must be positive integer!")
        return cleaned_data


class DestinationForm(forms.ModelForm):
    class Meta:
        model = Destination


class TransferForm(forms.Form):
    stock_name = forms.CharField()
    unit_measure = forms.CharField()
    quantity = forms.IntegerField()
    remark = forms.CharField(required=False)

    def clean(self):
        now = datetime.datetime.now()
        cleaned_data = super(TransferForm, self).clean()
        quantity = cleaned_data.get('quantity')
        stock = Stock.objects.get(
            name=cleaned_data.get('stock_name'),
            unit_measure=cleaned_data.get('unit_measure')
        )
        if quantity > stock.current_amt(now):
            raise forms.ValidationError("You don't have that much %s!" % stock.name)
        if quantity < 0:
            raise forms.ValidationError("Quantity must be positive integer!")
        return cleaned_data


class StockForm(forms.ModelForm):
    class Meta:
        model = Stock


class MergeForm(forms.Form):
    merge_stock_name = forms.CharField()
    merge_stock_unit_measure = forms.CharField()


class TargetForm(forms.Form):
    target_stock_name = forms.CharField()
    target_stock_unit_measure = forms.CharField()


class MergeCheckForm(forms.Form):
    confirm = forms.BooleanField()


class CategoryForm(forms.Form):
    category = forms.CharField()


class AdjustForm(forms.Form):
    stock_name = forms.CharField()
    unit_measure = forms.CharField()
    current_amount = forms.IntegerField()


# This class is used to require forms in the formset not to be empty
class RequiredFormSet(BaseFormSet):
    def __init__(self, *args, **kwargs):
        super(RequiredFormSet, self).__init__(*args, **kwargs)
        for form in self.forms:
            form.empty_permitted = False  # self.forms[0].empty_permitted = False


StockInFormSet = formset_factory(StockInForm, max_num=30, formset=RequiredFormSet)
OrderFormSet = modelformset_factory(Order, extra=0)
DonationFormSet = modelformset_factory(Donation, extra=0)
DonateFormSet = modelformset_factory(Donate, extra=0)
PurchaseFormSet = modelformset_factory(Purchase, exclude=['order'], extra=0)
DistributeFormSet = modelformset_factory(Distribute, extra=0)
VendorFormSet = modelformset_factory(Vendor, extra=0)
DonorFormSet = modelformset_factory(Donor, extra=0)
StockFormSet = modelformset_factory(Stock, exclude='is_donated', extra=0)
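RequiredFormSet above only flips empty_permitted on each member form; this file does not show how the factory-built formsets are consumed. A minimal sketch of a Django view wiring up StockInFormSet follows; the view name, template path, URL name, and handle_stock_in helper are illustrative assumptions, not part of the source repository.

# Hypothetical usage sketch (not part of sydb/stocks/forms.py): the template
# path, URL name and handle_stock_in helper below are assumptions.
from django.shortcuts import redirect, render

def stock_in_view(request):
    if request.method == 'POST':
        formset = StockInFormSet(request.POST)
        # RequiredFormSet sets empty_permitted = False on every form, so a
        # submitted blank row fails validation instead of being skipped.
        if formset.is_valid():
            for form in formset:
                handle_stock_in(form.cleaned_data)  # hypothetical helper
            return redirect('stock_list')
    else:
        formset = StockInFormSet()
    return render(request, 'stocks/stock_in.html', {'formset': formset})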
| apache-2.0 | 5,637,425,769,789,692,000 | 30.905325 | 95 | 0.648368 | false |
GoogleCloudPlatform/training-data-analyst | quests/dataflow_python/8b_Stream_Testing_Pipeline/solution/taxi_streaming_pipeline_test.py | 1 | 4004 |
import logging
import json
import unittest
import sys

import apache_beam as beam
from taxi_streaming_pipeline import *
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import BeamAssertException
from apache_beam.testing.util import assert_that, equal_to_per_window
from apache_beam.testing.test_stream import TestStream
from apache_beam.transforms.window import TimestampedValue, IntervalWindow
from apache_beam.options.pipeline_options import PipelineOptions, StandardOptions


def main(out=sys.stderr, verbosity=2):
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromModule(sys.modules[__name__])
    unittest.TextTestRunner(out, verbosity=verbosity).run(suite)


class TaxiWindowingTest(unittest.TestCase):

    def test_windowing_behavior(self):
        options = PipelineOptions()
        options.view_as(StandardOptions).streaming = True

        with TestPipeline(options=options) as p:
            base_json_pickup = "{\"ride_id\":\"x\",\"point_idx\":1,\"latitude\":0.0,\"longitude\":0.0," \
                               "\"timestamp\":\"00:00:00\",\"meter_reading\":1.0,\"meter_increment\":0.1," \
                               "\"ride_status\":\"pickup\",\"passenger_count\":1}"
            base_json_enroute = "{\"ride_id\":\"x\",\"point_idx\":1,\"latitude\":0.0,\"longitude\":0.0," \
                                "\"timestamp\":\"00:00:00\",\"meter_reading\":1.0,\"meter_increment\":0.1," \
                                "\"ride_status\":\"pickup\",\"passenger_count\":1}"

            test_stream = TestStream().advance_watermark_to(0).add_elements([
                TimestampedValue(base_json_pickup, 0),
                TimestampedValue(base_json_pickup, 0),
                TimestampedValue(base_json_enroute, 0),
                TimestampedValue(base_json_pickup, 60)
            ]).advance_watermark_to(60).advance_processing_time(60).add_elements([
                TimestampedValue(base_json_pickup, 120)
            ]).advance_watermark_to_infinity()

            taxi_counts = (p | test_stream
                             | TaxiCountTransform()
                           )

            EXPECTED_WINDOW_COUNTS = {IntervalWindow(0, 60): [3],
                                      IntervalWindow(60, 120): [1],
                                      IntervalWindow(120, 180): [1]}

            assert_that(taxi_counts, equal_to_per_window(EXPECTED_WINDOW_COUNTS),
                        reify_windows=True)


class TaxiLateDataTest(unittest.TestCase):

    def test_late_data_behavior(self):
        options = PipelineOptions()
        options.view_as(StandardOptions).streaming = True

        with TestPipeline(options=options) as p:
            base_json_pickup = "{\"ride_id\":\"x\",\"point_idx\":1,\"latitude\":0.0,\"longitude\":0.0," \
                               "\"timestamp\":\"00:00:00\",\"meter_reading\":1.0,\"meter_increment\":0.1," \
                               "\"ride_status\":\"pickup\",\"passenger_count\":1}"

            test_stream = TestStream().advance_watermark_to(0).add_elements([
                TimestampedValue(base_json_pickup, 0),
                TimestampedValue(base_json_pickup, 0),
            ]).advance_watermark_to(60).advance_processing_time(60).add_elements([
                TimestampedValue(base_json_pickup, 0)
            ]).advance_watermark_to(300).advance_processing_time(240).add_elements([
                TimestampedValue(base_json_pickup, 0)
            ])

            EXPECTED_RESULTS = {IntervalWindow(0, 60): [2, 3]}  # On Time and Late Result

            taxi_counts_late = (p | test_stream
                                  | TaxiCountTransform()
                                )

            assert_that(taxi_counts_late, equal_to_per_window(EXPECTED_RESULTS),
                        reify_windows=True)


if __name__ == '__main__':
    with open('testing.out', 'w') as f:
        main(f)
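Both tests above exercise TaxiCountTransform, which is defined in taxi_streaming_pipeline.py and not reproduced in this row. A rough sketch of a composite transform consistent with the expectations in these tests (fixed 60-second event-time windows, accumulating panes, late data still accepted a few minutes past the window) is given below; the exact trigger and lateness settings of the real pipeline are assumptions.

# Illustrative sketch only; the real TaxiCountTransform may differ.
import json

import apache_beam as beam
from apache_beam.transforms import trigger, window

class TaxiCountTransformSketch(beam.PTransform):
    """Counts ride messages per fixed 60-second event-time window."""

    def expand(self, pcoll):
        return (
            pcoll
            | 'ParseJson' >> beam.Map(json.loads)
            | 'WindowInto' >> beam.WindowInto(
                window.FixedWindows(60),
                trigger=trigger.AfterWatermark(late=trigger.AfterCount(1)),
                allowed_lateness=300,
                accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
            | 'CountPerWindow' >> beam.CombineGlobally(
                beam.combiners.CountCombineFn()).without_defaults()
        )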
| apache-2.0 | -12,612,812,322,786,736 | 41.606383 | 109 | 0.564685 | false |
rtucker/recentpostr | recentpostr.py | 1 | 10922 |
#!/usr/bin/python
# Spits out a Javascript embeddable list o' recent RSS stuff.
# Ryan Tucker, August 21 2009, <rtucker@gmail.com>
checkevery = 30*60 # check every ~30 minutes
displaymax = 5
import cgi
import feedparser
import logging
import logging.handlers
import operator
import robotparser
import sqlite3
import sys
import time
import timelimited
import urllib2
try:
import json
except:
import simplejson as json
# Set up logging to syslog
logger = logging.getLogger('')
loghandler = logging.handlers.SysLogHandler('/dev/log',
facility=logging.handlers.SysLogHandler.LOG_DAEMON)
logformatter = logging.Formatter('%(filename)s: %(levelname)s: %(message)s')
loghandler.setFormatter(logformatter)
logger.addHandler(loghandler)
logger.setLevel(logging.DEBUG)
# Set user agent for feedparser
feedparser.USER_AGENT = 'recentpostr/0.1 +http://blog.hoopycat.com/'
cachedout = []
cachedttl = 600
cachedgen = 0
def initDB(filename='/tmp/recentpostr.sqlite3'):
"""Connect to and initialize the cache database.
Optional: Filename of database
Returns: db object
"""
db = sqlite3.connect(filename)
c = db.cursor()
c.execute('pragma table_info(blogcache)')
columns = ' '.join(i[1] for i in c.fetchall()).split()
if columns == []:
# need to create table
c.execute("""create table blogcache
(feedurl text, blogurl text, blogtitle text, lasttitle text,
lastlink text, lasttime integer, lastcheck integer, etag text,
lastmodified integer, robotok boolean, robotcheck integer)""")
db.commit()
return db
def iterFeedList(filename='feedlist.txt'):
fd = open(filename, 'r')
for i in fd.readlines():
if i.startswith("#"):
pass
elif i.strip() == '':
pass
else:
splitted = i.strip().split('|')
if len(splitted) == 1:
yield {splitted[0]: ''}
elif len(splitted) == 2:
yield {splitted[0]: splitted[1]}
elif len(splitted) == 3:
yield {splitted[0]: (splitted[1], splitted[2])}
def checkRobotOK(url):
rp = robotparser.RobotFileParser()
try:
logging.debug('Checking robot OK for %s' % url)
request = urllib2.Request(getURLBase(url) + '/robots.txt',
None, {'User-Agent': feedparser.USER_AGENT})
robotsfd = urllib2.urlopen(request)
if robotsfd.code != 200:
logging.debug('robots.txt not found for %s, assuming OK' % url)
return True
except AttributeError:
pass
except IOError:
logging.debug('Received IO Error opening robots.txt for %s' % url)
return False
rp.parse(robotsfd.readlines())
result = rp.can_fetch(feedparser.USER_AGENT, url)
logging.debug('robots.txt for %s says %s' % (url, str(result)))
return result
def getURLBase(url):
host = urllib2.splithost(urllib2.splittype(url)[1])[0]
method = urllib2.splittype(url)[0]
return method + '://' + host
def updateFeed(feedurl, etag=None, lastmodified=None):
if etag in ['None', '']:
etag = None
if type(lastmodified) is int:
lastmod = time.gmtime(lastmodified)
elif type(lastmodified) in [tuple, time.struct_time]:
lastmod = lastmodified
else:
lastmod = None
logging.debug('Checking %s ...' % feedurl)
d = feedparser.parse(feedurl, etag=etag, modified=lastmod)
if d.status is 304:
# It hasn't been updated.
return None
elif len(d.entries) == 0:
# There's nothing there...?!
return None
else:
# There's something here!
return d
def fetchMostRecent(d):
if 'updated_parsed' in d['entries'][0]:
mostrecent = sorted(d['entries'],
key=operator.itemgetter('updated_parsed'),
reverse=True)[0]
else:
mostrecent = d['entries'][0]
mostrecent['updated_parsed'] = None
return (mostrecent.title, mostrecent.link, mostrecent.updated_parsed)
def updateBlogList(db, blogiter, checkevery=2*60*60):
c = db.cursor()
c.execute("select feedurl from blogcache")
allrows = c.fetchall()
blogdict = {}
flagsdict = {}
for i in blogiter:
key = i.keys()[0]
if type(i[key]) == type(()):
value = i[key][0]
flagsdict[key] = i[key][1].split(',')
else:
value = i[key]
blogdict[key] = value
if (key, ) not in allrows:
logging.debug('New blog found: %s' % key)
c.execute("insert into blogcache values(?,'','','','',1,1,'',1,0,1)", (key,))
lastcheckthreshold = int(time.time()-checkevery)
c.execute("select feedurl,etag,lasttime,robotok,robotcheck from blogcache where lastcheck < ? order by lastcheck", (lastcheckthreshold, ))
rows = c.fetchall()
starttime = time.time()
deadtime = time.time()+3
for results in rows:
if results[0] in flagsdict.keys():
flags = flagsdict[results[0]]
else:
flags = []
if results[0] not in blogdict.keys():
logging.debug('skipping old blog: %s' % (results[0]))
continue
if deadtime-time.time() < 0:
logging.info('updateBlogList timeout reached')
break
updateFeed_timed = timelimited.TimeLimited(updateFeed,
max(deadtime-time.time(), 1))
try:
feed = None
if 'norobot' in flags:
logging.debug('overriding robot check due to norobot flag')
robotok = True
elif results[4] < time.time()-86400:
logging.debug('robot check expired for %s: %i' % (
results[0], time.time()-results[4]))
robotok = checkRobotOK(results[0])
c.execute("update blogcache set robotok=?,robotcheck=?"+
"where feedurl=?", (int(robotok),time.time(),
results[0]))
else:
robotok = bool(results[3])
if robotok:
feed = updateFeed_timed(results[0], results[1], results[2])
else:
logging.info('robots.txt for %s prohibits us' % results[0])
feed = None
except timelimited.TimeLimitExpired:
logging.info('updateFeed timeout reached')
lastcheck = int(time.time())
if feed:
if 'etag' in feed:
etag = str(feed.etag)
else:
etag = ''
if 'modified' in feed:
lastmodified = int(time.mktime(feed.modified))
else:
lastmodified = 1
if 'link' in feed.feed:
blogurl = feed.feed.link
else:
blogurl = feedurl
if 'title' in feed.feed:
blogtitle = feed.feed.title
else:
blogtitle = ''
lasttitle, lastlink, lasttimetuple = fetchMostRecent(feed)
if lasttimetuple:
lasttime = int(time.mktime(lasttimetuple))
else:
lasttime = -1
c.execute("""update blogcache set blogurl=?, blogtitle=?,
lasttitle=?, lastlink=?, lasttime=?, lastcheck=?,
etag=?, lastmodified=? where feedurl=?""",
(blogurl, blogtitle, lasttitle, lastlink, lasttime,
lastcheck, etag, lastmodified, results[0]))
db.commit()
logging.debug("Updated %s" % results[0])
else:
c.execute("""update blogcache set
lastcheck=? where feedurl=?""",
(lastcheck, results[0]))
db.commit()
logging.debug("No new data on feed: %s" % results[0])
return blogdict
def iterCachedBlogRoll(db, blogdict):
c = db.cursor()
c.execute("""select feedurl,blogurl,blogtitle,lasttitle,lastlink,lasttime
from blogcache
order by lasttime desc""")
rows = c.fetchall()
for i in rows:
if i[0] in blogdict:
if blogdict[i[0]]:
blogtitle = blogdict[i[0]]
else:
blogtitle = i[2]
yield {'blogurl': cgi.escape(i[1], quote=True),
'blogtitle': cgi.escape(blogtitle, quote=True),
'posttitle': cgi.escape(i[3], quote=True),
'postlink': cgi.escape(i[4], quote=True),
'postts': i[5]}
def formatOutputRowJavaScript(entry):
entry['isostamp'] = ''
if entry['postts'] > 1:
entry['isostamp'] = time.strftime("%Y-%m-%dT%H:%M:%SZ",
time.gmtime(entry['postts']))
return """
document.write("<li><b><a href='%(blogurl)s'>%(blogtitle)s</a></b><br><a href='%(postlink)s'>%(posttitle)s<br><i><small>");
document.write(jQuery.timeago("%(isostamp)s"));
document.write("</small></i></a></li>");""" % entry
def formatOutputBlobJSON(entryiter,max):
outlist = []
counter = 0
for i in entryiter:
if counter >= max:
break
if i['postts'] > 1:
i['isostamp'] = time.strftime("%Y-%m-%dT%H:%M:%SZ",
time.gmtime(i['postts']))
else:
i['isostamp'] = ''
outlist.append(i)
counter += 1
return json.dumps(outlist)
def processOutput(type='javascript',callback=None):
db = initDB()
blogiter = iterFeedList()
blogdict = updateBlogList(db, blogiter)
element = iterCachedBlogRoll(db, blogdict)
output = ''
if type == 'javascript':
for i in range(0, displaymax):
try:
output += unicode(formatOutputRowJavaScript(element.next()))
except StopIteration:
pass
if type == 'json':
if callback:
output += '%(callback)s(%(json)s)' % ({
'callback': callback,
'json': formatOutputBlobJSON(element, displaymax)})
else:
output += formatOutputBlobJSON(element, displaymax)
return output
def wsgiInterface(environ, start_response):
global cachedout, cachedgen, cachedttl
start_response('200 OK', [('Content-Type', 'application/javascript')])
if cachedout == [] or (cachedgen + cachedttl < time.time()):
logging.debug('Regenerating cache (age: %i)' % (time.time() - cachedgen))
cachedout = processOutput(type='json',
callback='recentpostr').split('\n')
cachedgen = time.time()
else:
logging.debug('Outputting cache (age: %i)' % (time.time() - cachedgen))
return cachedout
def __main__():
print processOutput()
if __name__ == '__main__': __main__()
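For reference, iterFeedList above parses a pipe-delimited feedlist.txt: lines starting with # and blank lines are skipped, and each remaining line carries a feed URL, optionally followed by a display title and a comma-separated flag list (updateBlogList recognizes the norobot flag to skip the robots.txt check). A sample file of that shape, with placeholder URLs, could look like this:

# feedlist.txt: feed URL | optional display title | optional flags
http://example.com/feed.rss
http://example.org/atom.xml|Example Blog
http://example.net/rss.xml|Another Blog|norobot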
| mit | 1,412,064,997,997,759,200 | 33.238245 | 142 | 0.557865 | false |
runt18/mojo | mojo/public/tools/bindings/pylib/mojom/generate/mojom_translator_unittest.py | 1 | 37435 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import module
try:
import mojom_translator
from generated import mojom_files_mojom
from generated import mojom_types_mojom
bindings_imported = True
except ImportError:
bindings_imported = False
@unittest.skipUnless(bindings_imported, 'Could not import python bindings.')
class TranslateFileGraph(unittest.TestCase):
def test_basics(self):
g = mojom_files_mojom.MojomFileGraph()
# File names need to be set so the file can be translated at all.
g.files = {
'a.mojom': mojom_files_mojom.MojomFile(
file_name='a.mojom',
specified_file_name='',
imports=[]),
'b.mojom': mojom_files_mojom.MojomFile(
file_name='b.mojom',
specified_file_name='',
imports=[]),
'root/c.mojom': mojom_files_mojom.MojomFile(
file_name='root/c.mojom',
specified_file_name='',
imports=[]),
}
modules = mojom_translator.TranslateFileGraph(g)
self.assertEquals(len(modules), len(g.files))
@unittest.skipUnless(bindings_imported, 'Could not import python bindings.')
class TestTranslateFile(unittest.TestCase):
def test_basics(self):
graph = mojom_files_mojom.MojomFileGraph(
resolved_types={})
file_name = 'root/f.mojom'
imported_file_name = 'other/a.mojom'
second_level_imported_file_name = 'something/other.mojom'
mojom_file = mojom_files_mojom.MojomFile(
file_name=file_name,
specified_file_name='specified_file_name',
module_namespace='somens',
imports=[imported_file_name])
imported_file = mojom_files_mojom.MojomFile(
file_name=imported_file_name,
specified_file_name='',
module_namespace='somens',
imports=[second_level_imported_file_name])
second_level_imported_file = mojom_files_mojom.MojomFile(
file_name=second_level_imported_file_name,
specified_file_name='',
module_namespace='somens')
graph.files = {
file_name: mojom_file,
imported_file_name: imported_file,
second_level_imported_file_name: second_level_imported_file
}
mojom_interface = mojom_types_mojom.MojomInterface(
methods={},
decl_data=mojom_types_mojom.DeclarationData(
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name)),
interface_name='AnInterface')
graph.resolved_types['interface_key'] = mojom_types_mojom.UserDefinedType(
interface_type=mojom_interface)
mojom_struct = mojom_types_mojom.MojomStruct(
fields=[],
decl_data=mojom_types_mojom.DeclarationData(
short_name='AStruct',
full_identifier='foo.AStruct',
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name)))
graph.resolved_types['struct_key'] = mojom_types_mojom.UserDefinedType(
struct_type=mojom_struct)
mojom_union = mojom_types_mojom.MojomUnion(
fields=[],
decl_data=mojom_types_mojom.DeclarationData(
short_name='AUnion',
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name)))
graph.resolved_types['union_key'] = mojom_types_mojom.UserDefinedType(
union_type=mojom_union)
mojom_enum = mojom_types_mojom.MojomEnum(
values=[],
decl_data=mojom_types_mojom.DeclarationData(
short_name='AnEnum',
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name)))
graph.resolved_types['enum_key'] = mojom_types_mojom.UserDefinedType(
enum_type=mojom_enum)
mojom_const = mojom_types_mojom.DeclaredConstant(
decl_data=mojom_types_mojom.DeclarationData(short_name='AConst'),
type=mojom_types_mojom.Type(
simple_type=mojom_types_mojom.SimpleType.INT64),
value=mojom_types_mojom.Value(
literal_value=mojom_types_mojom.LiteralValue(
int64_value=30)))
user_defined_value = mojom_types_mojom.UserDefinedValue()
user_defined_value.declared_constant = mojom_const
graph.resolved_values = {'value_key': user_defined_value}
mojom_file.declared_mojom_objects = mojom_files_mojom.KeysByType(
interfaces=['interface_key'],
structs=['struct_key'],
unions=['union_key'],
top_level_enums=['enum_key'],
top_level_constants=['value_key']
)
mod = mojom_translator.FileTranslator(graph, file_name).Translate()
self.assertEquals('f.mojom', mod.name)
self.assertEquals(mojom_file.specified_file_name, mod.specified_name)
self.assertEquals(mojom_file.file_name, mod.path)
self.assertEquals(mojom_file.module_namespace, mod.namespace)
self.assertEquals(1, len(mod.imports))
self.assertEquals('a.mojom', mod.imports[0]['module_name'])
self.assertEquals(imported_file.module_namespace,
mod.imports[0]['namespace'])
self.assertEquals(imported_file.file_name, mod.imports[0]['module'].path)
self.assertEquals(2, len(mod.transitive_imports))
transitive_imports_paths = [imp['module'].path
for imp in mod.transitive_imports]
self.assertIn(imported_file_name, transitive_imports_paths)
self.assertIn(second_level_imported_file_name, transitive_imports_paths)
self.assertEquals(mojom_interface.interface_name, mod.interfaces[0].name)
# Interfaces should be assigned their name as their spec.
self.assertEquals('AnInterface', mod.interfaces[0].spec)
self.assertEquals(mojom_struct.decl_data.short_name, mod.structs[0].name)
# The struct was given a full_identifier so its spec should be that.
self.assertEquals(mojom_struct.decl_data.full_identifier,
mod.structs[0].spec)
self.assertEquals(mojom_union.decl_data.short_name, mod.unions[0].name)
# The union was given a short name but not a full_identifier so its spec
# should be the short name.
self.assertEquals(mojom_union.decl_data.short_name,
mod.unions[0].spec)
self.assertEquals(mojom_enum.decl_data.short_name, mod.enums[0].name)
self.assertEquals(mojom_const.decl_data.short_name, mod.constants[0].name)
imported_mod = mojom_translator.FileTranslator(
graph, imported_file_name).Translate()
self.assertFalse(imported_mod.specified_name)
def test_no_imports(self):
graph = mojom_files_mojom.MojomFileGraph(
resolved_types={})
file_name = 'root/f.mojom'
mojom_file = mojom_files_mojom.MojomFile(
file_name=file_name,
specified_file_name='',
module_namespace='somens')
graph.files = { file_name: mojom_file }
# Should not throw exceptions despite imports not being set on the file.
mod = mojom_translator.FileTranslator(graph, file_name).Translate()
self.assertEquals([], mod.imports)
@unittest.skipUnless(bindings_imported, 'Could not import python bindings.')
class TestUserDefinedFromTypeRef(unittest.TestCase):
def do_interface_test(self, nullable, interface_request):
# Build a MojomInterface
file_name = 'a.mojom'
mojom_interface = mojom_types_mojom.MojomInterface(
decl_data=mojom_types_mojom.DeclarationData(
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name)),
interface_name='AnInterface')
mojom_interface.methods={}
# Register the MojomInterface in a MojomFileGraph
graph = mojom_files_mojom.MojomFileGraph()
type_key = 'some_type_key'
graph.resolved_types = {
type_key: mojom_types_mojom.UserDefinedType(
interface_type=mojom_interface)}
# Build a reference to the interface.
type_ref = mojom_types_mojom.Type(
type_reference=mojom_types_mojom.TypeReference(
type_key=type_key,
nullable=nullable,
is_interface_request=interface_request))
# Construct a translator
translator = mojom_translator.FileTranslator(graph, file_name)
# Translate the MojomInterface referenced by type_ref.
interface = translator.UserDefinedFromTypeRef(type_ref)
# Check the translation
if interface_request:
self.assertEquals('AnInterface', interface.kind.name)
else:
self.assertEquals('AnInterface', interface.name)
self.assertEquals(nullable, interface.is_nullable)
self.assertEquals(interface_request, isinstance(interface,
module.InterfaceRequest))
def test_interfaces(self):
self.do_interface_test(False, False)
self.do_interface_test(False, True)
self.do_interface_test(True, False)
self.do_interface_test(True, True)
@unittest.skipUnless(bindings_imported, 'Could not import python bindings.')
class TestUserDefinedTypeFromMojom(unittest.TestCase):
def test_structs(self):
file_name = 'a.mojom'
graph = mojom_files_mojom.MojomFileGraph()
mojom_file = mojom_files_mojom.MojomFile(
file_name='a.mojom',
module_namespace='foo.bar')
graph.files = {mojom_file.file_name: mojom_file}
mojom_struct = mojom_types_mojom.MojomStruct(
decl_data=mojom_types_mojom.DeclarationData(short_name='FirstStruct'))
mojom_struct.fields = [
mojom_types_mojom.StructField(
decl_data=mojom_types_mojom.DeclarationData(
short_name='field01',
declared_ordinal=5),
type=mojom_types_mojom.Type(
simple_type=mojom_types_mojom.SimpleType.BOOL)),
mojom_types_mojom.StructField(
decl_data=mojom_types_mojom.DeclarationData(
short_name='field02'),
type=mojom_types_mojom.Type(
simple_type=mojom_types_mojom.SimpleType.DOUBLE),
default_value=mojom_types_mojom.DefaultFieldValue(
value=mojom_types_mojom.Value(
literal_value=mojom_types_mojom.LiteralValue(double_value=15)))),
]
mojom_struct.decl_data.source_file_info = mojom_types_mojom.SourceFileInfo(
file_name=mojom_file.file_name)
struct = module.Struct()
translator = mojom_translator.FileTranslator(graph, file_name)
translator.StructFromMojom(
struct, mojom_types_mojom.UserDefinedType(struct_type=mojom_struct))
self.assertEquals('FirstStruct', struct.name)
self.assertEquals(translator._module, struct.module)
self.assertEquals(len(mojom_struct.fields), len(struct.fields))
for gold, f in zip(mojom_struct.fields, struct.fields):
self.assertEquals(f.name, gold.decl_data.short_name)
self.assertEquals(module.BOOL, struct.fields[0].kind)
self.assertEquals(5, struct.fields[0].ordinal)
self.assertEquals(module.DOUBLE, struct.fields[1].kind)
self.assertEquals(None, struct.fields[1].ordinal)
self.assertEquals('15.0', struct.fields[1].default)
def test_constant(self):
file_name = 'a.mojom'
graph = mojom_files_mojom.MojomFileGraph()
mojom_const = mojom_types_mojom.DeclaredConstant()
mojom_const.decl_data = mojom_types_mojom.DeclarationData(
short_name='foo', container_type_key='struct_key')
mojom_const.type = mojom_types_mojom.Type(
simple_type=mojom_types_mojom.SimpleType.INT64)
mojom_const.value = mojom_types_mojom.Value()
mojom_const.value.literal_value = mojom_types_mojom.LiteralValue(
int64_value=20)
mojom_struct = mojom_types_mojom.MojomStruct(
fields=[],
decl_data=mojom_types_mojom.DeclarationData(
short_name='AStruct',
source_file_info =mojom_types_mojom.SourceFileInfo(
file_name=file_name)))
graph.resolved_types = {'struct_key': mojom_types_mojom.UserDefinedType(
struct_type=mojom_struct)}
const = module.Constant()
translator = mojom_translator.FileTranslator(graph, file_name)
translator.ConstantFromMojom(const, mojom_const)
self.assertEquals(mojom_const.decl_data.short_name, const.name)
self.assertEquals(module.INT64, const.kind)
self.assertEquals('20', const.value)
self.assertEquals(translator.UserDefinedFromTypeKey('struct_key'),
const.parent_kind)
def test_enum(self):
file_name = 'a.mojom'
mojom_enum = mojom_types_mojom.MojomEnum()
mojom_enum.decl_data = mojom_types_mojom.DeclarationData(
short_name='AnEnum',
source_file_info=mojom_types_mojom.SourceFileInfo(file_name=file_name))
value1 = mojom_types_mojom.EnumValue(
decl_data=mojom_types_mojom.DeclarationData(short_name='val1'),
enum_type_key='AnEnum',
initializer_value=mojom_types_mojom.Value(
literal_value=mojom_types_mojom.LiteralValue(uint64_value=20)),
int_value=20)
value2 = mojom_types_mojom.EnumValue(
decl_data=mojom_types_mojom.DeclarationData(short_name='val2'),
enum_type_key='AnEnum',
int_value=70)
mojom_enum.values = [value1, value2]
graph = mojom_files_mojom.MojomFileGraph()
enum = module.Enum()
translator = mojom_translator.FileTranslator(graph, file_name)
translator.EnumFromMojom(
enum, mojom_types_mojom.UserDefinedType(enum_type=mojom_enum))
self.assertEquals(translator._module, enum.module)
self.assertEquals(mojom_enum.decl_data.short_name, enum.name)
self.assertEquals(len(mojom_enum.values), len(enum.fields))
self.assertEquals(value1.decl_data.short_name, enum.fields[0].name)
self.assertEquals(value2.decl_data.short_name, enum.fields[1].name)
self.assertEquals('20', enum.fields[0].value)
self.assertIsNone(enum.fields[1].value)
self.assertEquals(value1.int_value,
enum.fields[0].numeric_value)
self.assertEquals(value2.int_value,
enum.fields[1].numeric_value)
def test_child_enum(self):
file_name = 'a.mojom'
mojom_enum = mojom_types_mojom.MojomEnum()
mojom_enum.decl_data = mojom_types_mojom.DeclarationData(
short_name='AnEnum',
source_file_info=mojom_types_mojom.SourceFileInfo(file_name=file_name),
container_type_key='struct_key')
mojom_enum.values = []
graph = mojom_files_mojom.MojomFileGraph()
mojom_struct = mojom_types_mojom.MojomStruct(
fields=[],
decl_data=mojom_types_mojom.DeclarationData(
short_name='AStruct',
source_file_info =mojom_types_mojom.SourceFileInfo(
file_name=file_name)))
graph.resolved_types = {'struct_key': mojom_types_mojom.UserDefinedType(
struct_type=mojom_struct)}
enum = module.Enum()
translator = mojom_translator.FileTranslator(graph, file_name)
translator.EnumFromMojom(
enum, mojom_types_mojom.UserDefinedType(enum_type=mojom_enum))
self.assertEquals(mojom_enum.decl_data.short_name, enum.name)
self.assertEquals(len(mojom_enum.values), len(enum.fields))
def test_unions(self):
file_name = 'a.mojom'
mojom_union = mojom_types_mojom.MojomUnion()
mojom_union.decl_data = mojom_types_mojom.DeclarationData(
short_name='AUnion',
source_file_info=mojom_types_mojom.SourceFileInfo(file_name=file_name))
field1 = mojom_types_mojom.UnionField(
decl_data=mojom_types_mojom.DeclarationData(short_name='field1'),
type=mojom_types_mojom.Type(
simple_type=mojom_types_mojom.SimpleType.BOOL))
field2 = mojom_types_mojom.UnionField(
decl_data=mojom_types_mojom.DeclarationData(
short_name='field2', declared_ordinal=5),
type=mojom_types_mojom.Type(
simple_type=mojom_types_mojom.SimpleType.DOUBLE))
mojom_union.fields = [field1, field2]
graph = mojom_files_mojom.MojomFileGraph()
union = module.Union()
translator = mojom_translator.FileTranslator(graph, file_name)
translator.UnionFromMojom(
union, mojom_types_mojom.UserDefinedType(union_type=mojom_union))
self.assertEquals(translator._module, union.module)
self.assertEquals('AUnion', union.name)
self.assertEquals(len(mojom_union.fields), len(union.fields))
for gold, f in zip(mojom_union.fields, union.fields):
self.assertEquals(gold.decl_data.short_name, f.name)
self.assertEquals(module.BOOL, union.fields[0].kind)
self.assertEquals(None, union.fields[0].ordinal)
self.assertEquals(module.DOUBLE, union.fields[1].kind)
self.assertEquals(5, union.fields[1].ordinal)
def literal_value(self, x):
"""Creates a typed literal value containing the value |x|.
Args:
x: A string, int, float or bool value.
Returns:
{mojom_types.LiteralValue} with an appropriately typed value.
"""
if isinstance(x, str):
return mojom_types_mojom.LiteralValue(string_value=x)
elif isinstance(x, int):
return mojom_types_mojom.LiteralValue(int64_value=x)
elif isinstance(x, float):
return mojom_types_mojom.LiteralValue(double_value=x)
elif isinstance(x, bool):
return mojom_types_mojom.LiteralValue(bool_value=x)
raise Exception("unexpected type(x)=%s" % type(x))
def test_attributes(self):
mojom_enum = mojom_types_mojom.MojomEnum()
mojom_enum.decl_data = mojom_types_mojom.DeclarationData()
gold = {
'foo': 'bar',
'other': 'thing',
'hello': 'world',
'min_version': 2,
'pi': 3.14159,
'is_happy': True
}
mojom_enum.decl_data.attributes = []
for key, value in gold.iteritems():
mojom_enum.decl_data.attributes.append(
mojom_types_mojom.Attribute(key=key, value=self.literal_value(value)))
graph = mojom_files_mojom.MojomFileGraph()
attributes = mojom_translator.FileTranslator(
graph, None).AttributesFromMojom(mojom_enum)
self.assertEquals(gold, attributes)
def test_attributes_none(self):
mojom_enum = mojom_types_mojom.MojomEnum()
mojom_enum.decl_data = mojom_types_mojom.DeclarationData()
graph = mojom_files_mojom.MojomFileGraph()
attributes = mojom_translator.FileTranslator(
graph, None).AttributesFromMojom(mojom_enum)
self.assertFalse(attributes)
def test_imported_struct(self):
graph = mojom_files_mojom.MojomFileGraph()
graph.files = {
'a.mojom': mojom_files_mojom.MojomFile(
file_name='a.mojom',
specified_file_name='',
module_namespace='namespace',
imports=['root/c.mojom']),
'root/c.mojom': mojom_files_mojom.MojomFile(
file_name='root/c.mojom',
specified_file_name='',
module_namespace='otherns',
imports=[]),
}
mojom_struct = mojom_types_mojom.MojomStruct()
mojom_struct.decl_data = mojom_types_mojom.DeclarationData(
short_name='AStruct',
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name='root/c.mojom'))
mojom_struct.fields = []
type_key = 'some_type_key'
graph.resolved_types = {
type_key: mojom_types_mojom.UserDefinedType(struct_type=mojom_struct)}
struct = module.Struct()
# Translate should create the imports.
translator = mojom_translator.FileTranslator(graph, 'a.mojom')
translator.Translate()
struct = translator.UserDefinedFromTypeRef(
mojom_types_mojom.Type(
type_reference=mojom_types_mojom.TypeReference(
type_key=type_key)))
self.assertEquals(
translator._transitive_imports['root/c.mojom']['module'], struct.module)
self.assertEquals(
translator._transitive_imports['root/c.mojom'], struct.imported_from)
def test_interface(self):
file_name = 'a.mojom'
mojom_interface = mojom_types_mojom.MojomInterface(
decl_data=mojom_types_mojom.DeclarationData(
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name)),
interface_name='AnInterface')
mojom_method10 = mojom_types_mojom.MojomMethod(
ordinal=10,
decl_data=mojom_types_mojom.DeclarationData(
short_name='AMethod10',
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name)),
parameters=mojom_types_mojom.MojomStruct(fields=[]))
mojom_method0 = mojom_types_mojom.MojomMethod(
ordinal=0,
decl_data=mojom_types_mojom.DeclarationData(
short_name='AMethod0',
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name)),
parameters=mojom_types_mojom.MojomStruct(fields=[]))
mojom_method7 = mojom_types_mojom.MojomMethod(
ordinal=7,
decl_data=mojom_types_mojom.DeclarationData(
short_name='AMethod10',
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name)),
parameters=mojom_types_mojom.MojomStruct(fields=[]))
mojom_interface.methods = {10: mojom_method10, 0: mojom_method0,
7: mojom_method7}
interface = module.Interface()
graph = mojom_files_mojom.MojomFileGraph()
translator = mojom_translator.FileTranslator(graph, file_name)
translator.InterfaceFromMojom(interface, mojom_types_mojom.UserDefinedType(
interface_type=mojom_interface))
self.assertEquals(translator._module, interface.module)
self.assertEquals(mojom_interface.interface_name, interface.name)
self.assertEquals(0, interface.methods[0].ordinal)
self.assertEquals(7, interface.methods[1].ordinal)
self.assertEquals(10, interface.methods[2].ordinal)
# TODO(azani): Add the contained declarations.
def test_method(self):
file_name = 'a.mojom'
mojom_method = mojom_types_mojom.MojomMethod(
ordinal=10,
decl_data=mojom_types_mojom.DeclarationData(
short_name='AMethod',
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name)))
param1 = mojom_types_mojom.StructField(
decl_data=mojom_types_mojom.DeclarationData(short_name='a_param'),
type=mojom_types_mojom.Type(
simple_type=mojom_types_mojom.SimpleType.UINT32))
param2 = mojom_types_mojom.StructField(
decl_data=mojom_types_mojom.DeclarationData(short_name='b_param'),
type=mojom_types_mojom.Type(
simple_type=mojom_types_mojom.SimpleType.UINT64))
mojom_method.parameters = mojom_types_mojom.MojomStruct(
fields=[param1, param2])
interface = module.Interface()
graph = mojom_files_mojom.MojomFileGraph()
translator = mojom_translator.FileTranslator(graph, file_name)
method = translator.MethodFromMojom(mojom_method, interface)
self.assertEquals(mojom_method.decl_data.short_name, method.name)
self.assertEquals(interface, method.interface)
self.assertEquals(mojom_method.ordinal, method.ordinal)
self.assertIsNone(method.response_parameters)
self.assertEquals(
len(mojom_method.parameters.fields), len(method.parameters))
self.assertEquals(param1.decl_data.short_name, method.parameters[0].name)
self.assertEquals(param2.decl_data.short_name, method.parameters[1].name)
# Add empty return params.
mojom_method.response_params = mojom_types_mojom.MojomStruct(fields=[])
method = translator.MethodFromMojom(mojom_method, interface)
self.assertEquals([], method.response_parameters)
# Add non-empty return params.
mojom_method.response_params.fields = [param1]
method = translator.MethodFromMojom(mojom_method, interface)
self.assertEquals(
param1.decl_data.short_name, method.response_parameters[0].name)
def test_parameter(self):
# Parameters are encoded as fields in a struct.
mojom_param = mojom_types_mojom.StructField(
decl_data=mojom_types_mojom.DeclarationData(
short_name='param0',
declared_ordinal=5),
type=mojom_types_mojom.Type(
simple_type=mojom_types_mojom.SimpleType.UINT64),
default_value=mojom_types_mojom.Value(
literal_value=mojom_types_mojom.LiteralValue(uint64_value=20)))
graph = mojom_files_mojom.MojomFileGraph()
translator = mojom_translator.FileTranslator(graph, '')
param = translator.ParamFromMojom(mojom_param)
self.assertEquals(mojom_param.decl_data.short_name, param.name)
self.assertEquals(module.UINT64, param.kind)
self.assertEquals(mojom_param.decl_data.declared_ordinal, param.ordinal)
def test_contained_declarations(self):
graph = mojom_files_mojom.MojomFileGraph()
file_name = 'root/f.mojom'
mojom_enum = mojom_types_mojom.MojomEnum(
values=[],
decl_data=mojom_types_mojom.DeclarationData(
short_name='AnEnum',
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name),
container_type_key='parent_key'))
graph.resolved_types = {
'enum_key': mojom_types_mojom.UserDefinedType(enum_type=mojom_enum)}
mojom_const = mojom_types_mojom.DeclaredConstant(
decl_data=mojom_types_mojom.DeclarationData(
short_name='AConst',
container_type_key='parent_key'),
type=mojom_types_mojom.Type(
simple_type=mojom_types_mojom.SimpleType.INT64),
value=mojom_types_mojom.Value(
literal_value=mojom_types_mojom.LiteralValue(
int64_value=30)))
user_defined_value = mojom_types_mojom.UserDefinedValue()
user_defined_value.declared_constant = mojom_const
graph.resolved_values = {'value_key': user_defined_value}
contained_declarations = mojom_types_mojom.ContainedDeclarations(
enums=['enum_key'], constants=['value_key'])
translator = mojom_translator.FileTranslator(graph, file_name)
struct = module.Struct(name='parent')
translator._type_cache['parent_key'] = struct
translator.PopulateContainedDeclarationsFromMojom(
struct, contained_declarations)
self.assertEquals(
mojom_enum.decl_data.short_name, struct.enums[0].name)
self.assertEquals(struct, struct.enums[0].parent_kind)
self.assertEquals(
mojom_const.decl_data.short_name, struct.constants[0].name)
self.assertEquals(struct, struct.constants[0].parent_kind)
@unittest.skipUnless(bindings_imported, 'Could not import python bindings.')
class TestValueFromMojom(unittest.TestCase):
def test_literal_value(self):
mojom_int64 = mojom_types_mojom.Value()
mojom_int64.literal_value = mojom_types_mojom.LiteralValue(int64_value=20)
mojom_bool = mojom_types_mojom.Value()
mojom_bool.literal_value = mojom_types_mojom.LiteralValue(bool_value=True)
mojom_double = mojom_types_mojom.Value()
mojom_double.literal_value = mojom_types_mojom.LiteralValue(
double_value=1234.012345678901)
graph = mojom_files_mojom.MojomFileGraph()
int64_const = mojom_translator.FileTranslator(graph, None).ValueFromMojom(
mojom_int64)
bool_const = mojom_translator.FileTranslator(graph, None).ValueFromMojom(
mojom_bool)
double_const = mojom_translator.FileTranslator(graph, None).ValueFromMojom(
mojom_double)
self.assertEquals('20', int64_const)
self.assertEquals('true', bool_const)
self.assertEquals('1234.012345678901', double_const)
def test_builtin_const(self):
mojom = mojom_types_mojom.Value()
graph = mojom_files_mojom.MojomFileGraph()
gold = [
(mojom_types_mojom.BuiltinConstantValue.DOUBLE_INFINITY,
'double.INFINITY'),
(mojom_types_mojom.BuiltinConstantValue.DOUBLE_NEGATIVE_INFINITY,
'double.NEGATIVE_INFINITY'),
(mojom_types_mojom.BuiltinConstantValue.DOUBLE_NAN,
'double.NAN'),
(mojom_types_mojom.BuiltinConstantValue.FLOAT_INFINITY,
'float.INFINITY'),
(mojom_types_mojom.BuiltinConstantValue.FLOAT_NEGATIVE_INFINITY,
'float.NEGATIVE_INFINITY'),
(mojom_types_mojom.BuiltinConstantValue.FLOAT_NAN, 'float.NAN'),
]
for mojom_builtin, string in gold:
mojom.builtin_value = mojom_builtin
const = mojom_translator.FileTranslator(graph, None).ValueFromMojom(mojom)
self.assertIsInstance(const, module.BuiltinValue)
self.assertEquals(string, const.value)
def test_enum_value(self):
file_name = 'a.mojom'
mojom_enum = mojom_types_mojom.MojomEnum()
mojom_enum.decl_data = mojom_types_mojom.DeclarationData(
short_name='AnEnum',
source_file_info=mojom_types_mojom.SourceFileInfo(file_name=file_name))
value1 = mojom_types_mojom.EnumValue(
decl_data=mojom_types_mojom.DeclarationData(
short_name='val1',
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name)),
enum_type_key='enum_key',
initializer_value=mojom_types_mojom.Value(
literal_value=mojom_types_mojom.LiteralValue(uint64_value=20)),
int_value=20)
value2 = mojom_types_mojom.EnumValue(
decl_data=mojom_types_mojom.DeclarationData(short_name='val2'),
enum_type_key='enum_key',
int_value=70)
mojom_enum.values = [value1, value2]
graph = mojom_files_mojom.MojomFileGraph()
graph.resolved_types = {
'enum_key': mojom_types_mojom.UserDefinedType(enum_type=mojom_enum)}
graph.resolved_values = {
'enum_value1': mojom_types_mojom.UserDefinedValue(enum_value=value1),
'enum_value2': mojom_types_mojom.UserDefinedValue(enum_value=value2),
}
mojom = mojom_types_mojom.Value(
user_value_reference=mojom_types_mojom.UserValueReference(
identifier='SOMEID',
value_key='enum_value1'))
translator = mojom_translator.FileTranslator(graph, file_name)
enum_value = translator.ValueFromMojom(mojom)
enum = translator.UserDefinedFromTypeKey('enum_key')
self.assertIs(enum, enum_value.enum)
self.assertIs(value1.decl_data.short_name, enum_value.name)
def test_constant_value(self):
file_name = 'a.mojom'
mojom_const = mojom_types_mojom.DeclaredConstant(
decl_data=mojom_types_mojom.DeclarationData(
short_name='AConst',
source_file_info=mojom_types_mojom.SourceFileInfo(
file_name=file_name)),
type=mojom_types_mojom.Type(
simple_type=mojom_types_mojom.SimpleType.INT64),
value=mojom_types_mojom.Value(
literal_value=mojom_types_mojom.LiteralValue(
int64_value=30)))
user_defined_value = mojom_types_mojom.UserDefinedValue()
user_defined_value.declared_constant = mojom_const
graph = mojom_files_mojom.MojomFileGraph()
graph.resolved_values = {'value_key': user_defined_value}
mojom = mojom_types_mojom.Value(
user_value_reference=mojom_types_mojom.UserValueReference(
identifier='SOMEID',
value_key='value_key'))
translator = mojom_translator.FileTranslator(graph, file_name)
const_value = translator.ValueFromMojom(mojom)
self.assertIs(
translator.ConstantFromValueKey('value_key'), const_value.constant)
self.assertIs(mojom_const.decl_data.short_name, const_value.name)
@unittest.skipUnless(bindings_imported, 'Could not import python bindings.')
class TestKindFromMojom(unittest.TestCase):
def test_simple_type(self):
simple_types = [
(mojom_types_mojom.SimpleType.BOOL, module.BOOL),
(mojom_types_mojom.SimpleType.INT8, module.INT8),
(mojom_types_mojom.SimpleType.INT16, module.INT16),
(mojom_types_mojom.SimpleType.INT32, module.INT32),
(mojom_types_mojom.SimpleType.INT64, module.INT64),
(mojom_types_mojom.SimpleType.UINT8, module.UINT8),
(mojom_types_mojom.SimpleType.UINT16, module.UINT16),
(mojom_types_mojom.SimpleType.UINT32, module.UINT32),
(mojom_types_mojom.SimpleType.UINT64, module.UINT64),
(mojom_types_mojom.SimpleType.FLOAT, module.FLOAT),
(mojom_types_mojom.SimpleType.DOUBLE, module.DOUBLE),
]
g = mojom_files_mojom.MojomFileGraph()
t = mojom_translator.FileTranslator(g, None)
for mojom, golden in simple_types:
self.assertEquals(
golden, t.KindFromMojom(mojom_types_mojom.Type(simple_type=mojom)))
def test_handle_type(self):
handle_types = [
(mojom_types_mojom.HandleType.Kind.UNSPECIFIED, False,
module.HANDLE),
(mojom_types_mojom.HandleType.Kind.MESSAGE_PIPE, False,
module.MSGPIPE),
(mojom_types_mojom.HandleType.Kind.DATA_PIPE_CONSUMER, False,
module.DCPIPE),
(mojom_types_mojom.HandleType.Kind.DATA_PIPE_PRODUCER, False,
module.DPPIPE),
(mojom_types_mojom.HandleType.Kind.SHARED_BUFFER, False,
module.SHAREDBUFFER),
(mojom_types_mojom.HandleType.Kind.UNSPECIFIED, True,
module.NULLABLE_HANDLE),
(mojom_types_mojom.HandleType.Kind.MESSAGE_PIPE, True,
module.NULLABLE_MSGPIPE),
(mojom_types_mojom.HandleType.Kind.DATA_PIPE_CONSUMER, True,
module.NULLABLE_DCPIPE),
(mojom_types_mojom.HandleType.Kind.DATA_PIPE_PRODUCER, True,
module.NULLABLE_DPPIPE),
(mojom_types_mojom.HandleType.Kind.SHARED_BUFFER, True,
module.NULLABLE_SHAREDBUFFER),
]
g = mojom_files_mojom.MojomFileGraph()
t = mojom_translator.FileTranslator(g, None)
for mojom, nullable, golden in handle_types:
h = mojom_types_mojom.Type()
h.handle_type = mojom_types_mojom.HandleType(
kind=mojom, nullable=nullable)
self.assertEquals(golden, t.KindFromMojom(h))
def test_string_type(self):
g = mojom_files_mojom.MojomFileGraph()
t = mojom_translator.FileTranslator(g, None)
s = mojom_types_mojom.Type(string_type=mojom_types_mojom.StringType())
self.assertEquals(module.STRING, t.KindFromMojom(s))
s.string_type.nullable = True
self.assertEquals(module.NULLABLE_STRING, t.KindFromMojom(s))
def test_array_type(self):
array_types = [
(False, False, -1),
(False, False, 10),
(True, False, -1),
(True, True, -1),
(False, True, -1),
(False, True, 10),
]
g = mojom_files_mojom.MojomFileGraph()
t = mojom_translator.FileTranslator(g, None)
for array_nullable, element_nullable, size in array_types:
a = mojom_types_mojom.Type()
a.array_type = mojom_types_mojom.ArrayType(
nullable=array_nullable,
fixed_length=size)
a.array_type.element_type = mojom_types_mojom.Type(
string_type=mojom_types_mojom.StringType(nullable=element_nullable))
result = t.KindFromMojom(a)
self.assertTrue(module.IsArrayKind(result))
self.assertTrue(module.IsStringKind(result.kind))
self.assertEquals(array_nullable, module.IsNullableKind(result))
self.assertEquals(element_nullable, module.IsNullableKind(result.kind))
if size < 0:
self.assertIsNone(result.length)
else:
self.assertEquals(size, result.length)
def test_map_type(self):
map_types = [
(False, False),
(True, False),
(False, True),
(True, True),
]
g = mojom_files_mojom.MojomFileGraph()
t = mojom_translator.FileTranslator(g, None)
for map_nullable, value_nullable in map_types:
m = mojom_types_mojom.Type()
m.map_type = mojom_types_mojom.MapType(
nullable=map_nullable)
m.map_type.key_type = mojom_types_mojom.Type(
string_type=mojom_types_mojom.StringType())
m.map_type.value_type = mojom_types_mojom.Type(
handle_type=mojom_types_mojom.HandleType(
kind=mojom_types_mojom.HandleType.Kind.SHARED_BUFFER,
nullable=value_nullable))
result = t.KindFromMojom(m)
self.assertTrue(module.IsMapKind(result))
self.assertTrue(module.IsStringKind(result.key_kind))
self.assertTrue(module.IsSharedBufferKind(result.value_kind))
self.assertEquals(map_nullable, module.IsNullableKind(result))
self.assertEquals(value_nullable,
module.IsNullableKind(result.value_kind))
def test_user_defined_type_type(self):
graph = mojom_files_mojom.MojomFileGraph()
mojom_struct = mojom_types_mojom.MojomStruct(
decl_data=mojom_types_mojom.DeclarationData(short_name='FirstStruct'))
type_key = 'some opaque string'
mojom_struct.fields = [
# Make sure recursive structs are correctly handled.
mojom_types_mojom.StructField(
decl_data=mojom_types_mojom.DeclarationData(short_name='field00'),
type=mojom_types_mojom.Type(
type_reference=mojom_types_mojom.TypeReference(type_key=type_key)))
]
graph.resolved_types = {
type_key: mojom_types_mojom.UserDefinedType(struct_type=mojom_struct)}
mojom_type = mojom_types_mojom.Type()
mojom_type.type_reference = mojom_types_mojom.TypeReference(
type_key=type_key)
t = mojom_translator.FileTranslator(graph, None)
result = t.KindFromMojom(mojom_type)
self.assertTrue(module.IsStructKind(result))
self.assertEquals(mojom_struct.decl_data.short_name, result.name)
self.assertEquals(result, result.fields[0].kind)
# Make sure we create only one module object per type.
result2 = t.KindFromMojom(mojom_type)
self.assertIs(result, result2)
# Nullable type reference
mojom_type.type_reference.nullable = True
nullable_result = t.KindFromMojom(mojom_type)
self.assertTrue(module.IsNullableKind(nullable_result))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -3,401,171,679,738,615,300 | 38.824468 | 80 | 0.682543 | false |
ganeti/ganeti | test/py/ganeti.client.gnt_cluster_unittest.py | 1 | 17028 |
#!/usr/bin/python3
#
# Copyright (C) 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.client.gnt_cluster"""
import unittest
import optparse
import os
import shutil
import tempfile
from ganeti import errors
from ganeti.client import gnt_cluster
from ganeti import utils
from ganeti import compat
from ganeti import constants
from ganeti import ssh
from ganeti import cli
import mock
import testutils
class TestEpoUtilities(unittest.TestCase):
def setUp(self):
self.nodes2ip = dict(("node%s" % i, "192.0.2.%s" % i) for i in range(1, 10))
self.nodes = set(self.nodes2ip.keys())
self.ips2node = dict((v, k) for (k, v) in self.nodes2ip.items())
def _FakeAction(*args):
return True
def _FakePing(ip, port, live_port_needed=False):
self.assertTrue(live_port_needed)
self.assertEqual(port, 0)
return True
def _FakeSleep(secs):
self.assertTrue(secs >= 0 and secs <= 5)
return
def _NoopFeedback(self, text):
return
def testPingFnRemoveHostsUp(self):
seen = set()
def _FakeSeenPing(ip, *args, **kwargs):
node = self.ips2node[ip]
self.assertFalse(node in seen)
seen.add(node)
return True
helper = gnt_cluster._RunWhenNodesReachableHelper(self.nodes,
self._FakeAction,
self.nodes2ip, 0,
self._NoopFeedback,
_ping_fn=_FakeSeenPing,
_sleep_fn=self._FakeSleep)
nodes_len = len(self.nodes)
for (num, _) in enumerate(self.nodes):
helper.Wait(5)
if num < nodes_len - 1:
self.assertRaises(utils.RetryAgain, helper)
else:
helper()
self.assertEqual(seen, self.nodes)
self.assertFalse(helper.down)
self.assertEqual(helper.up, self.nodes)
def testActionReturnFalseSetsHelperFalse(self):
called = False
def _FalseAction(*args):
return called
helper = gnt_cluster._RunWhenNodesReachableHelper(self.nodes, _FalseAction,
self.nodes2ip, 0,
self._NoopFeedback,
_ping_fn=self._FakePing,
_sleep_fn=self._FakeSleep)
for _ in self.nodes:
try:
helper()
except utils.RetryAgain:
called = True
self.assertFalse(helper.success)
def testMaybeInstanceStartup(self):
instances_arg = []
def _FakeInstanceStart(opts, instances, start):
instances_arg.append(set(instances))
return None
inst_map = {
"inst1": set(["node1", "node2"]),
"inst2": set(["node1", "node3"]),
"inst3": set(["node2", "node1"]),
"inst4": set(["node2", "node1", "node3"]),
"inst5": set(["node4"]),
}
fn = _FakeInstanceStart
self.assertTrue(gnt_cluster._MaybeInstanceStartup(None, inst_map, set(),
_instance_start_fn=fn))
self.assertFalse(instances_arg)
result = gnt_cluster._MaybeInstanceStartup(None, inst_map, set(["node1"]),
_instance_start_fn=fn)
self.assertTrue(result)
self.assertFalse(instances_arg)
result = gnt_cluster._MaybeInstanceStartup(None, inst_map,
set(["node1", "node3"]),
_instance_start_fn=fn)
self.assertTrue(result is None)
self.assertEqual(instances_arg.pop(0), set(["inst2"]))
self.assertFalse("inst2" in inst_map)
result = gnt_cluster._MaybeInstanceStartup(None, inst_map,
set(["node1", "node3"]),
_instance_start_fn=fn)
self.assertTrue(result)
self.assertFalse(instances_arg)
result = gnt_cluster._MaybeInstanceStartup(None, inst_map,
set(["node1", "node3", "node2"]),
_instance_start_fn=fn)
self.assertEqual(instances_arg.pop(0), set(["inst1", "inst3", "inst4"]))
self.assertTrue(result is None)
result = gnt_cluster._MaybeInstanceStartup(None, inst_map,
set(["node1", "node3", "node2",
"node4"]),
_instance_start_fn=fn)
self.assertTrue(result is None)
self.assertEqual(instances_arg.pop(0), set(["inst5"]))
self.assertFalse(inst_map)
class _ClientForEpo:
def __init__(self, groups, nodes):
self._groups = groups
self._nodes = nodes
def QueryGroups(self, names, fields, use_locking):
assert not use_locking
assert fields == ["node_list"]
return self._groups
def QueryNodes(self, names, fields, use_locking):
assert not use_locking
assert fields == ["name", "master", "pinst_list", "sinst_list", "powered",
"offline"]
return self._nodes
class TestEpo(unittest.TestCase):
_ON_EXITCODE = 253
_OFF_EXITCODE = 254
def _ConfirmForce(self, *args):
self.fail("Shouldn't need confirmation")
def _Confirm(self, exp_names, result, names, ltype, text):
self.assertEqual(names, exp_names)
self.assertFalse(result is NotImplemented)
return result
def _Off(self, exp_node_list, opts, node_list, inst_map):
self.assertEqual(node_list, exp_node_list)
self.assertFalse(inst_map)
return self._OFF_EXITCODE
def _Test(self, *args, **kwargs):
defaults = dict(qcl=NotImplemented, _on_fn=NotImplemented,
_off_fn=NotImplemented,
_stdout_fn=lambda *args: None,
_stderr_fn=lambda *args: None)
defaults.update(kwargs)
return gnt_cluster.Epo(*args, **defaults)
def testShowAllWithGroups(self):
opts = optparse.Values(dict(groups=True, show_all=True))
result = self._Test(opts, NotImplemented)
self.assertEqual(result, constants.EXIT_FAILURE)
def testShowAllWithArgs(self):
opts = optparse.Values(dict(groups=False, show_all=True))
result = self._Test(opts, ["a", "b", "c"])
self.assertEqual(result, constants.EXIT_FAILURE)
def testNoArgumentsNoParameters(self):
for (force, confirm_result) in [(True, NotImplemented), (False, False),
(False, True)]:
opts = optparse.Values(dict(groups=False, show_all=False, force=force,
on=False))
client = _ClientForEpo(NotImplemented, [
("node1.example.com", False, [], [], True, False),
])
if force:
confirm_fn = self._ConfirmForce
else:
confirm_fn = compat.partial(self._Confirm, ["node1.example.com"],
confirm_result)
off_fn = compat.partial(self._Off, ["node1.example.com"])
result = self._Test(opts, [], qcl=client, _off_fn=off_fn,
_confirm_fn=confirm_fn)
if force or confirm_result:
self.assertEqual(result, self._OFF_EXITCODE)
else:
self.assertEqual(result, constants.EXIT_FAILURE)
def testPowerOn(self):
for master in [False, True]:
opts = optparse.Values(dict(groups=False, show_all=True,
force=True, on=True))
client = _ClientForEpo(NotImplemented, [
("node1.example.com", False, [], [], True, False),
("node2.example.com", False, [], [], False, False),
("node3.example.com", False, [], [], True, True),
("node4.example.com", False, [], [], None, True),
("node5.example.com", master, [], [], False, False),
])
def _On(_, all_nodes, node_list, inst_map):
self.assertEqual(all_nodes,
["node%s.example.com" % i for i in range(1, 6)])
if master:
self.assertEqual(node_list, ["node2.example.com"])
else:
self.assertEqual(node_list, ["node2.example.com",
"node5.example.com"])
self.assertFalse(inst_map)
return self._ON_EXITCODE
result = self._Test(opts, [], qcl=client, _on_fn=_On,
_confirm_fn=self._ConfirmForce)
self.assertEqual(result, self._ON_EXITCODE)
def testMasterWithoutShowAll(self):
opts = optparse.Values(dict(groups=False, show_all=False,
force=True, on=False))
client = _ClientForEpo(NotImplemented, [
("node1.example.com", True, [], [], True, False),
])
result = self._Test(opts, [], qcl=client, _confirm_fn=self._ConfirmForce)
self.assertEqual(result, constants.EXIT_FAILURE)
class DrbdHelperTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.enabled_disk_templates = []
def enableDrbd(self):
self.enabled_disk_templates = [constants.DT_DRBD8]
def disableDrbd(self):
self.enabled_disk_templates = [constants.DT_DISKLESS]
class InitDrbdHelper(DrbdHelperTestCase):
def testNoDrbdNoHelper(self):
opts = mock.Mock()
opts.drbd_helper = None
self.disableDrbd()
helper = gnt_cluster._InitDrbdHelper(opts, self.enabled_disk_templates,
feedback_fn=mock.Mock())
self.assertEqual(None, helper)
def testNoDrbdHelper(self):
opts = mock.Mock()
self.disableDrbd()
opts.drbd_helper = "/bin/true"
helper = gnt_cluster._InitDrbdHelper(opts, self.enabled_disk_templates,
feedback_fn=mock.Mock())
self.assertEqual(opts.drbd_helper, helper)
def testDrbdHelperNone(self):
opts = mock.Mock()
self.enableDrbd()
opts.drbd_helper = None
helper = gnt_cluster._InitDrbdHelper(opts, self.enabled_disk_templates,
feedback_fn=mock.Mock())
self.assertEqual(constants.DEFAULT_DRBD_HELPER, helper)
def testDrbdHelperEmpty(self):
opts = mock.Mock()
self.enableDrbd()
opts.drbd_helper = ''
self.assertRaises(errors.OpPrereqError, gnt_cluster._InitDrbdHelper, opts,
self.enabled_disk_templates, feedback_fn=mock.Mock())
def testDrbdHelper(self):
opts = mock.Mock()
self.enableDrbd()
opts.drbd_helper = "/bin/true"
helper = gnt_cluster._InitDrbdHelper(opts, self.enabled_disk_templates,
feedback_fn=mock.Mock())
self.assertEqual(opts.drbd_helper, helper)
class GetDrbdHelper(DrbdHelperTestCase):
def testNoDrbdNoHelper(self):
opts = mock.Mock()
self.disableDrbd()
opts.drbd_helper = None
helper = gnt_cluster._GetDrbdHelper(opts, self.enabled_disk_templates)
self.assertEqual(None, helper)
def testNoTemplateInfoNoHelper(self):
opts = mock.Mock()
opts.drbd_helper = None
helper = gnt_cluster._GetDrbdHelper(opts, None)
self.assertEqual(None, helper)
def testNoTemplateInfoHelper(self):
opts = mock.Mock()
opts.drbd_helper = "/bin/true"
helper = gnt_cluster._GetDrbdHelper(opts, None)
self.assertEqual(opts.drbd_helper, helper)
def testNoDrbdHelper(self):
opts = mock.Mock()
self.disableDrbd()
opts.drbd_helper = "/bin/true"
helper = gnt_cluster._GetDrbdHelper(opts, None)
self.assertEqual(opts.drbd_helper, helper)
def testDrbdNoHelper(self):
opts = mock.Mock()
self.enableDrbd()
opts.drbd_helper = None
helper = gnt_cluster._GetDrbdHelper(opts, self.enabled_disk_templates)
self.assertEqual(None, helper)
def testDrbdHelper(self):
opts = mock.Mock()
self.enableDrbd()
opts.drbd_helper = "/bin/true"
helper = gnt_cluster._GetDrbdHelper(opts, self.enabled_disk_templates)
self.assertEqual(opts.drbd_helper, helper)
class TestBuildGanetiPubKeys(testutils.GanetiTestCase):
_SOME_KEY_DICT = {"rsa": "key_rsa",
"dsa": "key_dsa"}
_MASTER_NODE_NAME = "master_node"
_MASTER_NODE_UUID = "master_uuid"
_NUM_NODES = 2 # excluding master node
_ONLINE_NODE_NAMES = ["node%s_name" % i for i in range(_NUM_NODES)]
_ONLINE_NODE_UUIDS = ["node%s_uuid" % i for i in range(_NUM_NODES)]
_CLUSTER_NAME = "cluster_name"
_PRIV_KEY = "master_private_key"
_PUB_KEY = "master_public_key"
_MODIFY_SSH_SETUP = True
_AUTH_KEYS = "a\nb\nc"
_SSH_KEY_TYPE = "dsa"
def _setUpFakeKeys(self):
os.makedirs(os.path.join(self.tmpdir, ".ssh"))
for key_type in ["rsa", "dsa"]:
self.priv_filename = os.path.join(self.tmpdir, ".ssh", "id_%s" % key_type)
utils.WriteFile(self.priv_filename, data=self._PRIV_KEY)
self.pub_filename = os.path.join(
self.tmpdir, ".ssh", "id_%s.pub" % key_type)
utils.WriteFile(self.pub_filename, data=self._PUB_KEY)
self.auth_filename = os.path.join(self.tmpdir, ".ssh", "authorized_keys")
utils.WriteFile(self.auth_filename, data=self._AUTH_KEYS)
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
self.pub_key_filename = os.path.join(self.tmpdir, "ganeti_test_pub_keys")
self._setUpFakeKeys()
self._ssh_read_remote_ssh_pub_keys_patcher = testutils \
.patch_object(ssh, "ReadRemoteSshPubKeys")
self._ssh_read_remote_ssh_pub_keys_mock = \
self._ssh_read_remote_ssh_pub_keys_patcher.start()
self._ssh_read_remote_ssh_pub_keys_mock.return_value = self._SOME_KEY_DICT
self.mock_cl = mock.Mock()
self.mock_cl.QueryConfigValues = mock.Mock()
self.mock_cl.QueryConfigValues.return_value = \
(self._CLUSTER_NAME, self._MASTER_NODE_NAME, self._MODIFY_SSH_SETUP,
self._SSH_KEY_TYPE)
self._get_online_nodes_mock = mock.Mock()
self._get_online_nodes_mock.return_value = \
self._ONLINE_NODE_NAMES
self._get_nodes_ssh_ports_mock = mock.Mock()
self._get_nodes_ssh_ports_mock.return_value = \
[22 for i in range(self._NUM_NODES + 1)]
self._get_node_uuids_mock = mock.Mock()
self._get_node_uuids_mock.return_value = \
self._ONLINE_NODE_UUIDS + [self._MASTER_NODE_UUID]
self._options = mock.Mock()
self._options.ssh_key_check = False
def _GetTempHomedir(self, _):
return self.tmpdir
def tearDown(self):
    super(TestBuildGanetiPubKeys, self).tearDown()
shutil.rmtree(self.tmpdir)
self._ssh_read_remote_ssh_pub_keys_patcher.stop()
def testNewPubKeyFile(self):
gnt_cluster._BuildGanetiPubKeys(
self._options,
pub_key_file=self.pub_key_filename,
cl=self.mock_cl,
get_online_nodes_fn=self._get_online_nodes_mock,
get_nodes_ssh_ports_fn=self._get_nodes_ssh_ports_mock,
get_node_uuids_fn=self._get_node_uuids_mock,
homedir_fn=self._GetTempHomedir)
key_file_result = utils.ReadFile(self.pub_key_filename)
for node_uuid in self._ONLINE_NODE_UUIDS + [self._MASTER_NODE_UUID]:
self.assertTrue(node_uuid in key_file_result)
self.assertTrue(self._PUB_KEY in key_file_result)
def testOverridePubKeyFile(self):
fd = open(self.pub_key_filename, "w")
fd.write("Pink Bunny")
fd.close()
gnt_cluster._BuildGanetiPubKeys(
self._options,
pub_key_file=self.pub_key_filename,
cl=self.mock_cl,
get_online_nodes_fn=self._get_online_nodes_mock,
get_nodes_ssh_ports_fn=self._get_nodes_ssh_ports_mock,
get_node_uuids_fn=self._get_node_uuids_mock,
homedir_fn=self._GetTempHomedir)
self.assertFalse("Pink Bunny" in self.pub_key_filename)
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
bsd-2-clause
| -322,381,317,060,913,200 | 34.773109 | 80 | 0.617865 | false |
kklmn/xrt
|
tests/raycing/info_opencl.py
|
1
|
1670
|
import pyopencl as cl # Import the OpenCL GPU computing API
print('\n' + '=' * 60 + '\nOpenCL Platforms and Devices')
for platform in cl.get_platforms(): # Print each platform on this computer
print('=' * 60)
print('Platform - Name: ' + platform.name)
print('Platform - Vendor: ' + platform.vendor)
print('Platform - Version: ' + platform.version)
print('Platform - Extensions: ' + platform.extensions)
print('Platform - Profile: ' + platform.profile)
for device in platform.get_devices(): # Print each device per-platform
print(' ' + '-' * 56)
print(' Device - Name: ' + device.name)
print(' Device - Vendor: ' + device.vendor)
print(' Device - Type: ' +
cl.device_type.to_string(device.type, "%d"))
print(' Device - Max Clock Speed: {0} Mhz'.format(
device.max_clock_frequency))
print(' Device - Compute Units: {0}'.format(
device.max_compute_units))
print(' Device - Local Memory: {0:.0f} KB'.format(
device.local_mem_size/1024))
print(' Device - Constant Memory: {0:.0f} KB'.format(
device.max_constant_buffer_size/1024))
print(' Device - Global Memory: {0:.0f} GB'.format(
device.global_mem_size/1073741824.0))
print(' Device - FP: ' + str(device.double_fp_config))
print('\n')
#ctx = cl.create_some_context()
#test your iPlatform, iDevice here. Read the output. Is it your GPU?
iPlatform, iDevice = 0, 0
platform = cl.get_platforms()[iPlatform]
device = platform.get_devices()[iDevice]
ctx = cl.Context(devices=[device])
print(ctx)
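# --- Added illustration (not part of the original script): once the context is
# created, a minimal sanity check is a buffer round-trip through a command queue.
# This is only a sketch and assumes numpy is installed alongside pyopencl.
import numpy as np
queue = cl.CommandQueue(ctx)
host_data = np.arange(16, dtype=np.float32)
mf = cl.mem_flags
dev_buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=host_data)
result = np.empty_like(host_data)
cl.enqueue_copy(queue, result, dev_buf)  # copy the device buffer back to the host
queue.finish()
print('Buffer round-trip OK:', bool((result == host_data).all()))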
|
mit
| 7,744,002,980,101,567,000 | 42.947368 | 75 | 0.601796 | false |
GoogleCloudPlatform/professional-services
|
tools/ml-auto-eda/ml_eda/job_config_util/job_config.py
|
1
|
7563
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Definition of utility class for holding the configuration of running
analysis"""
from __future__ import absolute_import
from __future__ import print_function
from typing import List
from ml_eda.constants import c
from ml_eda.proto import analysis_entity_pb2
class JobConfig:
"""Uility class for holding the configuration of running analysis"""
# pylint: disable-msg=too-many-instance-attributes
_datasource = analysis_entity_pb2.DataSource()
def __init__(self,
datasource_type: str,
datasource_location: str,
target_column: str,
numerical_attributes: List[str],
categorical_attributes: List[str],
analysis_run_ops,
analysis_run_config):
# pylint: disable-msg=too-many-arguments
if datasource_type == c.datasources.BIGQUERY:
self._datasource.type = analysis_entity_pb2.DataSource.BIGQUERY
elif datasource_type == c.datasources.CSV:
self._datasource.type = analysis_entity_pb2.DataSource.CSV
self._datasource.location = datasource_location
self._datasource.target.name = target_column
if target_column == c.schema.NULL:
self._ml_type = c.ml_type.NULL
elif target_column in numerical_attributes:
self._datasource.target.type = analysis_entity_pb2.Attribute.NUMERICAL
self._ml_type = c.ml_type.REGRESSION
elif target_column in categorical_attributes:
self._datasource.target.type = analysis_entity_pb2.Attribute.CATEGORICAL
self._ml_type = c.ml_type.CLASSIFICATION
else:
      raise ValueError('The specified target column {} does not belong to '
'Categorical or Numerical features in the '
'job_config.ini'.format(target_column))
self._numerical_attributes = self._create_numerical_attributes(
numerical_attributes)
self._datasource.features.extend(self._numerical_attributes)
self._categorical_attributes = self._create_categorical_attributes(
categorical_attributes)
self._datasource.features.extend(self._categorical_attributes)
# This is for tracking categorical attributes with limited cardinality
self._categorical_low_card_attributes = self._categorical_attributes
# Running configuration
self._contingency_table_run = analysis_run_ops.getboolean(
c.analysis_run.CONTINGENCY_TABLE_RUN)
self._table_descriptive_run = analysis_run_ops.getboolean(
c.analysis_run.TABLE_DESCRIPTIVE_RUN)
self._pearson_corr_run = analysis_run_ops.getboolean(
c.analysis_run.PEARSON_CORRELATION_RUN)
self._information_gain_run = analysis_run_ops.getboolean(
c.analysis_run.INFORMATION_GAIN_RUN)
self._chi_square_run = analysis_run_ops.getboolean(
c.analysis_run.CHI_SQUARE_RUN)
self._anova_run = analysis_run_ops.getboolean(
c.analysis_run.ANOVA_RUN)
# Analysis configuration
self._histogram_bin = analysis_run_config.getint(
c.analysis_config.HISTOGRAM_BIN)
self._value_counts_limit = analysis_run_config.getint(
c.analysis_config.VALUE_COUNTS_LIMIT)
self._general_cardinality_limit = analysis_run_config.getint(
c.analysis_config.GENERAL_CARDINALITY_LIMIT)
@staticmethod
def _create_attributes(attribute_names: List[str],
attribute_type: int
) -> List[analysis_entity_pb2.Attribute]:
"""Construct analysis_entity_pb2.Attribute instance for attributes
Args:
attribute_names: (List[string]), name list of the attribute
attribute_type: (int), type of the attribute defined in the proto
Returns:
List[analysis_entity_pb2.Attribute]
"""
return [
analysis_entity_pb2.Attribute(name=name, type=attribute_type)
for name in attribute_names
]
def _create_numerical_attributes(self, attribute_names: List[str]
) -> List[analysis_entity_pb2.Attribute]:
"""Consturct analysis_entity_pb2.Attribute instance for numerical attributes
Args:
attribute_names: (List[string]), name list of the attributes
Returns:
List[analysis_entity_pb2.Attribute]
"""
return self._create_attributes(attribute_names,
analysis_entity_pb2.Attribute.NUMERICAL)
def _create_categorical_attributes(self, attribute_names: List[str]
) -> List[analysis_entity_pb2.Attribute]:
"""Construct analysis_entity_pb2.Attribute instance for cat attributes.
Args:
attribute_names: (List[string]), name list of the attributes
Returns:
List[analysis_entity_pb2.Attribute]
"""
return self._create_attributes(attribute_names,
analysis_entity_pb2.Attribute.CATEGORICAL)
def update_low_card_categorical(self, features):
"""Update low cardinality attributes"""
self._categorical_low_card_attributes = features
@property
def datasource(self):
# pylint: disable-msg=missing-docstring
return self._datasource
@property
def target_column(self):
# pylint: disable-msg=missing-docstring
return self._datasource.target
@property
def ml_type(self):
# pylint: disable-msg=missing-docstring
return self._ml_type
@property
def numerical_attributes(self):
# pylint: disable-msg=missing-docstring
return self._numerical_attributes
@property
def categorical_attributes(self):
# pylint: disable-msg=missing-docstring
return self._categorical_attributes
@property
def low_card_categorical_attributes(self):
# pylint: disable-msg=missing-docstring
return self._categorical_low_card_attributes
# Analysis Running Configuration
@property
def contingency_table_run(self):
# pylint: disable-msg=missing-docstring
return self._contingency_table_run
@property
def table_descriptive_run(self):
# pylint: disable-msg=missing-docstring
return self._table_descriptive_run
@property
def pearson_corr_run(self):
# pylint: disable-msg=missing-docstring
return self._pearson_corr_run
@property
def information_gain_run(self):
# pylint: disable-msg=missing-docstring
return self._information_gain_run
@property
def chi_square_run(self):
# pylint: disable-msg=missing-docstring
return self._chi_square_run
@property
def anova_run(self):
# pylint: disable-msg=missing-docstring
return self._anova_run
@property
def histogram_bin(self):
# pylint: disable-msg=missing-docstring
return self._histogram_bin
@property
def value_counts_limit(self):
# pylint: disable-msg=missing-docstring
return self._value_counts_limit
@property
def general_cardinality_limit(self):
# pylint: disable-msg=missing-docstring
return self._general_cardinality_limit
|
apache-2.0
| 5,269,896,557,410,128,000 | 33.852535 | 80 | 0.685707 | false |
root-mirror/root
|
bindings/jupyroot/python/JupyROOT/helpers/cppcompleter.py
|
19
|
6732
|
# -*- coding:utf-8 -*-
#-----------------------------------------------------------------------------
# Author: Danilo Piparo <Danilo.Piparo@cern.ch> CERN
# Author: Enric Tejedor <enric.tejedor.saavedra@cern.ch> CERN
#-----------------------------------------------------------------------------
################################################################################
# Copyright (C) 1995-2020, Rene Brun and Fons Rademakers. #
# All rights reserved. #
# #
# For the licensing terms see $ROOTSYS/LICENSE. #
# For the list of contributors see $ROOTSYS/README/CREDITS. #
################################################################################
from JupyROOT.helpers import utils
import ROOT
# Jit a wrapper for the ttabcom
_TTabComHookCode = """
std::vector<std::string> _TTabComHook(const char* pattern){
static auto ttc = new TTabCom;
const size_t lineBufSize = 2*1024; // must be equal to/larger than BUF_SIZE in TTabCom.cxx
std::unique_ptr<char[]> completed(new char[lineBufSize]);
strncpy(completed.get(), pattern, lineBufSize);
completed[lineBufSize-1] = '\\0';
int pLoc = strlen(completed.get());
std::ostringstream oss;
Int_t firstChange = ttc->Hook(completed.get(), &pLoc, oss);
if (firstChange == -2) { // got some completions in oss
auto completions = oss.str();
vector<string> completions_v;
istringstream f(completions);
string s;
while (getline(f, s, '\\n')) {
completions_v.push_back(s);
}
return completions_v;
}
if (firstChange == -1) { // found no completions
return vector<string>();
}
// found exactly one completion
return vector<string>(1, completed.get());
}
"""
class CppCompleter(object):
'''
Completer which interfaces to the TTabCom of ROOT. It is activated
    (deactivated) upon the load (unload) of the extension.
>>> comp = CppCompleter()
>>> comp.activate()
>>> for suggestion in comp._completeImpl("TTreeF"):
... print(suggestion)
TTreeFormula
TTreeFormulaManager
TTreeFriendLeafIter
>>> garbage = ROOT.gInterpreter.ProcessLine("TH1F* h")
>>> for suggestion in comp._completeImpl("h->GetA"):
... print(suggestion)
h->GetArray
h->GetAsymmetry
h->GetAt
h->GetAxisColor
>>> garbage = ROOT.gInterpreter.ProcessLine("TH1F aa")
>>> for suggestion in comp._completeImpl("aa.Add("):
... print(suggestion.replace("\\t"," "))
<BLANKLINE>
Bool_t Add(TF1* h1, Double_t c1 = 1, Option_t* option = "")
Bool_t Add(const TH1* h, const TH1* h2, Double_t c1 = 1, Double_t c2 = 1) // *MENU*
Bool_t Add(const TH1* h1, Double_t c1 = 1)
>>> for suggestion in comp._completeImpl("TROOT::Is"):
... print(suggestion)
TROOT::IsA
TROOT::IsBatch
TROOT::IsEqual
TROOT::IsEscaped
TROOT::IsExecutingMacro
TROOT::IsFolder
TROOT::IsInterrupted
TROOT::IsLineProcessing
TROOT::IsModified
TROOT::IsOnHeap
TROOT::IsProofServ
TROOT::IsRootFile
TROOT::IsSortable
TROOT::IsWebDisplay
TROOT::IsWebDisplayBatch
TROOT::IsWritable
TROOT::IsZombie
>>> comp.deactivate()
>>> for suggestion in comp._completeImpl("TG"):
... print(suggestion)
'''
def __init__(self):
self.hook = None
self.active = True
self.firstActivation = True
self.accessors = [".", "->", "::"]
def activate(self):
self.active = True
if self.firstActivation:
utils.declareCppCode('#include "dlfcn.h"')
dlOpenRint = 'dlopen("libRint.so",RTLD_NOW);'
utils.processCppCode(dlOpenRint)
utils.declareCppCode(_TTabComHookCode)
self.hook = ROOT._TTabComHook
self.firstActivation = False
def deactivate(self):
self.active = False
def _getSuggestions(self,line):
if self.active:
return self.hook(line)
return []
def _getLastAccessorPos(self,line):
accessorPos = -1
for accessor in self.accessors:
tmpAccessorPos = line.rfind(accessor)
if accessorPos < tmpAccessorPos:
accessorPos = tmpAccessorPos+len(accessor)
return accessorPos
def _completeImpl(self, line):
line=line.split()[-1]
suggestions = [ str(s) for s in self._getSuggestions(line) ]
suggestions = filter(lambda s: len(s.strip()) != 0, suggestions)
suggestions = sorted(suggestions)
if not suggestions: return []
# Remove combinations of opening and closing brackets and just opening
# brackets at the end of a line. Jupyter seems to expect functions
        # without these brackets to work properly. The brackets of 'operator()'
# must not be removed
suggestions = [sugg[:-2] if sugg[-2:] == '()' and sugg != 'operator()' else sugg for sugg in suggestions]
suggestions = [sugg[:-1] if sugg[-1:] == '(' else sugg for sugg in suggestions]
# If a function signature is encountered, add an empty item to the
# suggestions. Try to guess a function signature by an opening bracket
# ignoring 'operator()'.
are_signatures = "(" in "".join(filter(lambda s: s != 'operator()', suggestions))
accessorPos = self._getLastAccessorPos(line)
if are_signatures:
suggestions = [" "] + suggestions
elif accessorPos > 0:
# Prepend variable name to suggestions. Do not prepend if the
# suggestion already contains the variable name, this can happen if
# e.g. there is only one valid completion
if len(suggestions) > 1 or line[:accessorPos] != suggestions[0][:accessorPos]:
suggestions = [line[:accessorPos]+sugg for sugg in suggestions]
return suggestions
def complete(self, ip, event) :
'''
Autocomplete interfacing to TTabCom. If an accessor of a scope is
present in the line, the suggestions are prepended with the line.
That's how completers work. For example:
myGraph.Set<tab> will return "myGraph.Set+suggestion in the list of
suggestions.
'''
return self._completeImpl(event.line)
_cppCompleter = CppCompleter()
def load_ipython_extension(ipython):
_cppCompleter.activate()
ipython.set_hook('complete_command', _cppCompleter.complete, re_key=r"[(.*)[\.,::,\->](.*)]|(.*)")
def unload_ipython_extension(ipython):
_cppCompleter.deactivate()
|
lgpl-2.1
| 678,121,358,243,730,700 | 37.25 | 113 | 0.581402 | false |
PythonProgramming/PyOpenGL-series
|
VideoPyOpenGL7.py
|
1
|
3973
|
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
import random
vertices = (
(1, -1, -1),
(1, 1, -1),
(-1, 1, -1),
(-1, -1, -1),
(1, -1, 1),
(1, 1, 1),
(-1, -1, 1),
(-1, 1, 1)
)
edges = (
(0,1),
(0,3),
(0,4),
(2,1),
(2,3),
(2,7),
(6,3),
(6,4),
(6,7),
(5,1),
(5,4),
(5,7)
)
surfaces = (
(0,1,2,3),
(3,2,7,6),
(6,7,5,4),
(4,5,1,0),
(1,5,7,2),
(4,0,3,6)
)
colors = (
(1,0,0),
(0,1,0),
(0,0,1),
(0,1,0),
(1,1,1),
(0,1,1),
(1,0,0),
(0,1,0),
(0,0,1),
(1,0,0),
(1,1,1),
(0,1,1),
)
##ground_vertices = (
## (-10, -1.1, 20),
## (10, -1.1, 20),
## (-10, -1.1, -300),
## (10, -1.1, -300),
## )
##
##
##def ground():
## glBegin(GL_QUADS)
## for vertex in ground_vertices:
## glColor3fv((0,0.5,0.5))
## glVertex3fv(vertex)
##
## glEnd()
def set_vertices(max_distance):
x_value_change = random.randrange(-10,10)
y_value_change = random.randrange(-10,10)
z_value_change = random.randrange(-1*max_distance,-20)
new_vertices = []
for vert in vertices:
new_vert = []
new_x = vert[0] + x_value_change
new_y = vert[1] + y_value_change
new_z = vert[2] + z_value_change
new_vert.append(new_x)
new_vert.append(new_y)
new_vert.append(new_z)
new_vertices.append(new_vert)
return new_vertices
def Cube(vertices):
glBegin(GL_QUADS)
for surface in surfaces:
x = 0
for vertex in surface:
x+=1
glColor3fv(colors[x])
glVertex3fv(vertices[vertex])
glEnd()
glBegin(GL_LINES)
for edge in edges:
for vertex in edge:
glVertex3fv(vertices[vertex])
glEnd()
def main():
pygame.init()
display = (800,600)
pygame.display.set_mode(display, DOUBLEBUF|OPENGL)
gluPerspective(45, (display[0]/display[1]), 0.1, 50.0)
glTranslatef(random.randrange(-5,5),random.randrange(-5,5), -40)
#object_passed = False
x_move = 0
y_move = 0
max_distance = 100
cube_dict = {}
for x in range(20):
cube_dict[x] =set_vertices(max_distance)
#glRotatef(25, 2, 1, 0)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x_move = 0.3
if event.key == pygame.K_RIGHT:
x_move = -0.3
if event.key == pygame.K_UP:
y_move = -0.3
if event.key == pygame.K_DOWN:
y_move = 0.3
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
x_move = 0
if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
y_move = 0
## if event.type == pygame.MOUSEBUTTONDOWN:
## if event.button == 4:
## glTranslatef(0,0,1.0)
##
## if event.button == 5:
## glTranslatef(0,0,-1.0)
#glRotatef(1, 3, 1, 1)
x = glGetDoublev(GL_MODELVIEW_MATRIX)
#print(x)
camera_x = x[3][0]
camera_y = x[3][1]
camera_z = x[3][2]
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
glTranslatef(x_move,y_move,.50)
#ground()
for each_cube in cube_dict:
Cube(cube_dict[each_cube])
pygame.display.flip()
pygame.time.wait(10)
main()
pygame.quit()
quit()
|
mit
| 5,736,841,369,609,479,000 | 15.834746 | 77 | 0.445507 | false |
skirpichev/omg
|
diofant/integrals/deltafunctions.py
|
1
|
5793
|
from ..core import Mul
from ..core.compatibility import default_sort_key
from ..functions import DiracDelta, Heaviside
def change_mul(node, x):
"""change_mul(node, x)
Rearranges the operands of a product, bringing to front any simple
DiracDelta expression.
If no simple DiracDelta expression was found, then all the DiracDelta
expressions are simplified (using DiracDelta.simplify).
Return: (dirac, new node)
Where:
o dirac is either a simple DiracDelta expression or None (if no simple
expression was found);
     o new node is either a simplified DiracDelta expression or None (if it
could not be simplified).
Examples
========
>>> change_mul(x*y*DiracDelta(x)*cos(x), x)
(DiracDelta(x), x*y*cos(x))
>>> change_mul(x*y*DiracDelta(x**2 - 1)*cos(x), x)
(None, x*y*cos(x)*DiracDelta(x - 1)/2 + x*y*cos(x)*DiracDelta(x + 1)/2)
>>> change_mul(x*y*DiracDelta(cos(x))*cos(x), x)
(None, None)
See Also
========
diofant.functions.special.delta_functions.DiracDelta
deltaintegrate
"""
if not (node.is_Mul or node.is_Pow):
return node
new_args = []
dirac = None
# Sorting is needed so that we consistently collapse the same delta;
# However, we must preserve the ordering of non-commutative terms
c, nc = node.args_cnc()
sorted_args = sorted(c, key=default_sort_key)
sorted_args.extend(nc)
for arg in sorted_args:
if arg.is_Pow and isinstance(arg.base, DiracDelta):
new_args.append(arg.func(arg.base, arg.exp - 1))
arg = arg.base
if dirac is None and (isinstance(arg, DiracDelta) and arg.is_simple(x)
and (len(arg.args) <= 1 or arg.args[1] == 0)):
dirac = arg
else:
new_args.append(arg)
if not dirac: # there was no simple dirac
new_args = []
for arg in sorted_args:
if isinstance(arg, DiracDelta):
new_args.append(arg.simplify(x))
elif arg.is_Pow and isinstance(arg.base, DiracDelta):
new_args.append(arg.func(arg.base.simplify(x), arg.exp))
else:
new_args.append(change_mul(arg, x))
if new_args != sorted_args:
nnode = Mul(*new_args).expand()
else: # if the node didn't change there is nothing to do
nnode = None
return None, nnode
return dirac, Mul(*new_args)
def deltaintegrate(f, x):
"""
deltaintegrate(f, x)
The idea for integration is the following:
- If we are dealing with a DiracDelta expression, i.e. DiracDelta(g(x)),
we try to simplify it.
If we could simplify it, then we integrate the resulting expression.
We already know we can integrate a simplified expression, because only
simple DiracDelta expressions are involved.
If we couldn't simplify it, there are two cases:
1) The expression is a simple expression: we return the integral,
taking care if we are dealing with a Derivative or with a proper
DiracDelta.
2) The expression is not simple (i.e. DiracDelta(cos(x))): we can do
nothing at all.
- If the node is a multiplication node having a DiracDelta term:
First we expand it.
If the expansion did work, then we try to integrate the expansion.
If not, we try to extract a simple DiracDelta term, then we have two
cases:
1) We have a simple DiracDelta term, so we return the integral.
2) We didn't have a simple term, but we do have an expression with
simplified DiracDelta terms, so we integrate this expression.
Examples
========
>>> deltaintegrate(x*sin(x)*cos(x)*DiracDelta(x - 1), x)
sin(1)*cos(1)*Heaviside(x - 1)
>>> deltaintegrate(y**2*DiracDelta(x - z)*DiracDelta(y - z), y)
z**2*DiracDelta(x - z)*Heaviside(y - z)
See Also
========
diofant.functions.special.delta_functions.DiracDelta
diofant.integrals.integrals.Integral
"""
if not f.has(DiracDelta):
return
from .integrals import Integral, integrate
from ..solvers import solve
# g(x) = DiracDelta(h(x))
if f.func == DiracDelta:
h = f.simplify(x)
if h == f: # can't simplify the expression
            # FIXME: the second term tells whether it is DiracDelta or Derivative
# For integrating derivatives of DiracDelta we need the chain rule
if f.is_simple(x):
if (len(f.args) <= 1 or f.args[1] == 0):
return Heaviside(f.args[0])
else:
return (DiracDelta(f.args[0], f.args[1] - 1) /
f.args[0].as_poly().LC())
else: # let's try to integrate the simplified expression
fh = integrate(h, x)
return fh
elif f.is_Mul or f.is_Pow: # g(x) = a*b*c*f(DiracDelta(h(x)))*d*e
g = f.expand()
if f != g: # the expansion worked
fh = integrate(g, x)
if fh is not None and not isinstance(fh, Integral):
return fh
else:
# no expansion performed, try to extract a simple DiracDelta term
dg, rest_mult = change_mul(f, x)
if not dg:
if rest_mult:
fh = integrate(rest_mult, x)
return fh
else:
dg = dg.simplify(x)
if dg.is_Mul: # Take out any extracted factors
dg, rest_mult_2 = change_mul(dg, x)
rest_mult = rest_mult*rest_mult_2
point = solve(dg.args[0], x)[0][x]
return (rest_mult.subs({x: point})*Heaviside(x - point))
|
bsd-3-clause
| -5,038,044,230,527,641,000 | 33.278107 | 79 | 0.583463 | false |
cotonne/arduino
|
lcd/Adafruit_CharLCD.py
|
1
|
20507
|
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.I2C as I2C
import Adafruit_GPIO.MCP230xx as MCP
import Adafruit_GPIO.PWM as PWM
# Commands
LCD_CLEARDISPLAY = 0x01
LCD_RETURNHOME = 0x02
LCD_ENTRYMODESET = 0x04
LCD_DISPLAYCONTROL = 0x08
LCD_CURSORSHIFT = 0x10
LCD_FUNCTIONSET = 0x20
LCD_SETCGRAMADDR = 0x40
LCD_SETDDRAMADDR = 0x80
# Entry flags
LCD_ENTRYRIGHT = 0x00
LCD_ENTRYLEFT = 0x02
LCD_ENTRYSHIFTINCREMENT = 0x01
LCD_ENTRYSHIFTDECREMENT = 0x00
# Control flags
LCD_DISPLAYON = 0x04
LCD_DISPLAYOFF = 0x00
LCD_CURSORON = 0x02
LCD_CURSOROFF = 0x00
LCD_BLINKON = 0x01
LCD_BLINKOFF = 0x00
# Move flags
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
LCD_MOVERIGHT = 0x04
LCD_MOVELEFT = 0x00
# Function set flags
LCD_8BITMODE = 0x10
LCD_4BITMODE = 0x00
LCD_2LINE = 0x08
LCD_1LINE = 0x00
LCD_5x10DOTS = 0x04
LCD_5x8DOTS = 0x00
# Offset for up to 4 rows.
LCD_ROW_OFFSETS = (0x00, 0x40, 0x14, 0x54)
# Char LCD plate GPIO numbers.
LCD_PLATE_RS = 15
LCD_PLATE_RW = 14
LCD_PLATE_EN = 13
LCD_PLATE_D4 = 12
LCD_PLATE_D5 = 11
LCD_PLATE_D6 = 10
LCD_PLATE_D7 = 9
LCD_PLATE_RED = 6
LCD_PLATE_GREEN = 7
LCD_PLATE_BLUE = 8
# Char LCD plate button names.
SELECT = 0
RIGHT = 1
DOWN = 2
UP = 3
LEFT = 4
class Adafruit_CharLCD(object):
"""Class to represent and interact with an HD44780 character LCD display."""
def __init__(self, rs, en, d4, d5, d6, d7, cols, lines, onOff, backlight=None,
invert_polarity=True,
enable_pwm=False,
gpio=GPIO.get_platform_gpio(),
pwm=PWM.get_platform_pwm(),
initial_backlight=1.0):
"""Initialize the LCD. RS, EN, and D4...D7 parameters should be the pins
connected to the LCD RS, clock enable, and data line 4 through 7 connections.
The LCD will be used in its 4-bit mode so these 6 lines are the only ones
required to use the LCD. You must also pass in the number of columns and
lines on the LCD.
If you would like to control the backlight, pass in the pin connected to
the backlight with the backlight parameter. The invert_polarity boolean
controls if the backlight is one with a LOW signal or HIGH signal. The
default invert_polarity value is True, i.e. the backlight is on with a
LOW signal.
You can enable PWM of the backlight pin to have finer control on the
brightness. To enable PWM make sure your hardware supports PWM on the
provided backlight pin and set enable_pwm to True (the default is False).
The appropriate PWM library will be used depending on the platform, but
you can provide an explicit one with the pwm parameter.
The initial state of the backlight is ON, but you can set it to an
explicit initial state with the initial_backlight parameter (0 is off,
1 is on/full bright).
You can optionally pass in an explicit GPIO class,
for example if you want to use an MCP230xx GPIO extender. If you don't
pass in an GPIO instance, the default GPIO for the running platform will
be used.
"""
# Save column and line state.
self._cols = cols
self._lines = lines
# Save GPIO state and pin numbers.
self._gpio = gpio
self._rs = rs
self._en = en
self._d4 = d4
self._d5 = d5
self._d6 = d6
self._d7 = d7
self._onOff = onOff
# Save backlight state.
self._backlight = backlight
self._pwm_enabled = enable_pwm
self._pwm = pwm
self._blpol = not invert_polarity
self._isInitialized = False
self._initial_backlight = initial_backlight
self._initialize()
# Internal function used to initialize the LCD
def _initialize(self):
# Setup all pins as outputs.
for pin in (self._rs, self._en, self._d4, self._d5, self._d6, self._d7, self._onOff):
self._gpio.setup(pin, GPIO.OUT)
# Turn on the transistor
self._gpio.output(self._onOff, GPIO.HIGH)
# Let's give some time to the LCD to wake up
self._delay_microseconds(3000)
# Setup backlight.
if self._backlight is not None:
if self._pwm_enabled:
self._pwm.start(self._backlight, self._pwm_duty_cycle(self._initial_backlight))
else:
self._gpio.setup(self._backlight, GPIO.OUT)
self._gpio.output(self._backlight, self._blpol if self._initial_backlight else not self._blpol)
# Initialize the display.
self.write8(0x33)
self.write8(0x32)
# Initialize display control, function, and mode registers.
self.displaycontrol = LCD_DISPLAYON | LCD_CURSOROFF | LCD_BLINKOFF
self.displayfunction = LCD_4BITMODE | LCD_1LINE | LCD_2LINE | LCD_5x8DOTS
self.displaymode = LCD_ENTRYLEFT | LCD_ENTRYSHIFTDECREMENT
# Write registers.
self.write8(LCD_DISPLAYCONTROL | self.displaycontrol)
self.write8(LCD_FUNCTIONSET | self.displayfunction)
self.write8(LCD_ENTRYMODESET | self.displaymode) # set the entry mode
self.clear()
        self._isInitialized = True
def turnOn(self):
"""Turn on the LCD"""
if not self._isInitialized:
self._initialize()
def turnOff(self):
"""Turn off the LCD"""
self._isInitialized = False
self._gpio.output(self._onOff, GPIO.LOW)
self._gpio.cleanup()
def home(self):
"""Move the cursor back to its home (first line and first column)."""
self.write8(LCD_RETURNHOME) # set cursor position to zero
self._delay_microseconds(3000) # this command takes a long time!
def clear(self):
"""Clear the LCD."""
self.write8(LCD_CLEARDISPLAY) # command to clear display
self._delay_microseconds(3000) # 3000 microsecond sleep, clearing the display takes a long time
def set_cursor(self, col, row):
"""Move the cursor to an explicit column and row position."""
# Clamp row to the last row of the display.
if row > self._lines:
row = self._lines - 1
# Set location.
self.write8(LCD_SETDDRAMADDR | (col + LCD_ROW_OFFSETS[row]))
def enable_display(self, enable):
"""Enable or disable the display. Set enable to True to enable."""
if enable:
self.displaycontrol |= LCD_DISPLAYON
else:
self.displaycontrol &= ~LCD_DISPLAYON
self.write8(LCD_DISPLAYCONTROL | self.displaycontrol)
def show_cursor(self, show):
"""Show or hide the cursor. Cursor is shown if show is True."""
if show:
self.displaycontrol |= LCD_CURSORON
else:
self.displaycontrol &= ~LCD_CURSORON
self.write8(LCD_DISPLAYCONTROL | self.displaycontrol)
def blink(self, blink):
"""Turn on or off cursor blinking. Set blink to True to enable blinking."""
if blink:
self.displaycontrol |= LCD_BLINKON
else:
self.displaycontrol &= ~LCD_BLINKON
self.write8(LCD_DISPLAYCONTROL | self.displaycontrol)
def move_left(self):
"""Move display left one position."""
self.write8(LCD_CURSORSHIFT | LCD_DISPLAYMOVE | LCD_MOVELEFT)
def move_right(self):
"""Move display right one position."""
self.write8(LCD_CURSORSHIFT | LCD_DISPLAYMOVE | LCD_MOVERIGHT)
def set_left_to_right(self):
"""Set text direction left to right."""
self.displaymode |= LCD_ENTRYLEFT
self.write8(LCD_ENTRYMODESET | self.displaymode)
def set_right_to_left(self):
"""Set text direction right to left."""
self.displaymode &= ~LCD_ENTRYLEFT
self.write8(LCD_ENTRYMODESET | self.displaymode)
def autoscroll(self, autoscroll):
"""Autoscroll will 'right justify' text from the cursor if set True,
otherwise it will 'left justify' the text.
"""
if autoscroll:
self.displaymode |= LCD_ENTRYSHIFTINCREMENT
else:
self.displaymode &= ~LCD_ENTRYSHIFTINCREMENT
self.write8(LCD_ENTRYMODESET | self.displaymode)
def message(self, text):
"""Write text to display. Note that text can include newlines."""
line = 0
# Iterate through each character.
for char in text:
# Advance to next line if character is a new line.
if char == '\n':
line += 1
# Move to left or right side depending on text direction.
col = 0 if self.displaymode & LCD_ENTRYLEFT > 0 else self._cols-1
self.set_cursor(col, line)
# Write the character to the display.
else:
self.write8(ord(char), True)
def set_backlight(self, backlight):
"""Enable or disable the backlight. If PWM is not enabled (default), a
non-zero backlight value will turn on the backlight and a zero value will
turn it off. If PWM is enabled, backlight can be any value from 0.0 to
1.0, with 1.0 being full intensity backlight.
"""
if self._backlight is not None:
if self._pwm_enabled:
self._pwm.set_duty_cycle(self._backlight, self._pwm_duty_cycle(backlight))
else:
self._gpio.output(self._backlight, self._blpol if backlight else not self._blpol)
def write8(self, value, char_mode=False):
"""Write 8-bit value in character or data mode. Value should be an int
value from 0-255, and char_mode is True if character data or False if
non-character data (default).
"""
# One millisecond delay to prevent writing too quickly.
self._delay_microseconds(1000)
# Set character / data bit.
self._gpio.output(self._rs, char_mode)
# Write upper 4 bits.
self._gpio.output_pins({ self._d4: ((value >> 4) & 1) > 0,
self._d5: ((value >> 5) & 1) > 0,
self._d6: ((value >> 6) & 1) > 0,
self._d7: ((value >> 7) & 1) > 0 })
self._pulse_enable()
# Write lower 4 bits.
self._gpio.output_pins({ self._d4: (value & 1) > 0,
self._d5: ((value >> 1) & 1) > 0,
self._d6: ((value >> 2) & 1) > 0,
self._d7: ((value >> 3) & 1) > 0 })
self._pulse_enable()
def create_char(self, location, pattern):
"""Fill one of the first 8 CGRAM locations with custom characters.
The location parameter should be between 0 and 7 and pattern should
        provide an array of 8 bytes containing the pattern. E.g. you can easily
design your custom character at http://www.quinapalus.com/hd44780udg.html
To show your custom character use eg. lcd.message('\x01')
"""
# only position 0..7 are allowed
location &= 0x7
self.write8(LCD_SETCGRAMADDR | (location << 3))
for i in range(8):
self.write8(pattern[i], char_mode=True)
def _delay_microseconds(self, microseconds):
# Busy wait in loop because delays are generally very short (few microseconds).
end = time.time() + (microseconds/1000000.0)
while time.time() < end:
pass
def _pulse_enable(self):
# Pulse the clock enable line off, on, off to send command.
self._gpio.output(self._en, False)
self._delay_microseconds(1) # 1 microsecond pause - enable pulse must be > 450ns
self._gpio.output(self._en, True)
self._delay_microseconds(1) # 1 microsecond pause - enable pulse must be > 450ns
self._gpio.output(self._en, False)
self._delay_microseconds(1) # commands need > 37us to settle
def _pwm_duty_cycle(self, intensity):
# Convert intensity value of 0.0 to 1.0 to a duty cycle of 0.0 to 100.0
intensity = 100.0*intensity
# Invert polarity if required.
if not self._blpol:
intensity = 100.0-intensity
return intensity
class Adafruit_RGBCharLCD(Adafruit_CharLCD):
"""Class to represent and interact with an HD44780 character LCD display with
an RGB backlight."""
def __init__(self, rs, en, d4, d5, d6, d7, cols, lines, red, green, blue,
gpio=GPIO.get_platform_gpio(),
invert_polarity=True,
enable_pwm=False,
pwm=PWM.get_platform_pwm(),
initial_color=(1.0, 1.0, 1.0)):
"""Initialize the LCD with RGB backlight. RS, EN, and D4...D7 parameters
should be the pins connected to the LCD RS, clock enable, and data line
4 through 7 connections. The LCD will be used in its 4-bit mode so these
6 lines are the only ones required to use the LCD. You must also pass in
the number of columns and lines on the LCD.
The red, green, and blue parameters define the pins which are connected
to the appropriate backlight LEDs. The invert_polarity parameter is a
boolean that controls if the LEDs are on with a LOW or HIGH signal. By
default invert_polarity is True, i.e. the backlight LEDs are on with a
low signal. If you want to enable PWM on the backlight LEDs (for finer
control of colors) and the hardware supports PWM on the provided pins,
set enable_pwm to True. Finally you can set an explicit initial backlight
color with the initial_color parameter. The default initial color is
white (all LEDs lit).
You can optionally pass in an explicit GPIO class,
for example if you want to use an MCP230xx GPIO extender. If you don't
pass in an GPIO instance, the default GPIO for the running platform will
be used.
"""
super(Adafruit_RGBCharLCD, self).__init__(rs, en, d4, d5, d6, d7,
cols,
lines,
enable_pwm=enable_pwm,
backlight=None,
invert_polarity=invert_polarity,
gpio=gpio,
pwm=pwm)
self._red = red
self._green = green
self._blue = blue
# Setup backlight pins.
if enable_pwm:
# Determine initial backlight duty cycles.
rdc, gdc, bdc = self._rgb_to_duty_cycle(initial_color)
pwm.start(red, rdc)
pwm.start(green, gdc)
pwm.start(blue, bdc)
else:
gpio.setup(red, GPIO.OUT)
gpio.setup(green, GPIO.OUT)
gpio.setup(blue, GPIO.OUT)
self._gpio.output_pins(self._rgb_to_pins(initial_color))
def _rgb_to_duty_cycle(self, rgb):
# Convert tuple of RGB 0-1 values to tuple of duty cycles (0-100).
red, green, blue = rgb
# Clamp colors between 0.0 and 1.0
red = max(0.0, min(1.0, red))
green = max(0.0, min(1.0, green))
blue = max(0.0, min(1.0, blue))
return (self._pwm_duty_cycle(red),
self._pwm_duty_cycle(green),
self._pwm_duty_cycle(blue))
def _rgb_to_pins(self, rgb):
# Convert tuple of RGB 0-1 values to dict of pin values.
red, green, blue = rgb
return { self._red: self._blpol if red else not self._blpol,
self._green: self._blpol if green else not self._blpol,
self._blue: self._blpol if blue else not self._blpol }
def set_color(self, red, green, blue):
"""Set backlight color to provided red, green, and blue values. If PWM
is enabled then color components can be values from 0.0 to 1.0, otherwise
components should be zero for off and non-zero for on.
"""
if self._pwm_enabled:
# Set duty cycle of PWM pins.
rdc, gdc, bdc = self._rgb_to_duty_cycle((red, green, blue))
self._pwm.set_duty_cycle(self._red, rdc)
self._pwm.set_duty_cycle(self._green, gdc)
self._pwm.set_duty_cycle(self._blue, bdc)
else:
# Set appropriate backlight pins based on polarity and enabled colors.
self._gpio.output_pins({self._red: self._blpol if red else not self._blpol,
self._green: self._blpol if green else not self._blpol,
self._blue: self._blpol if blue else not self._blpol })
def set_backlight(self, backlight):
"""Enable or disable the backlight. If PWM is not enabled (default), a
non-zero backlight value will turn on the backlight and a zero value will
turn it off. If PWM is enabled, backlight can be any value from 0.0 to
1.0, with 1.0 being full intensity backlight. On an RGB display this
function will set the backlight to all white.
"""
self.set_color(backlight, backlight, backlight)
class Adafruit_CharLCDPlate(Adafruit_RGBCharLCD):
"""Class to represent and interact with an Adafruit Raspberry Pi character
LCD plate."""
def __init__(self, address=0x20, busnum=I2C.get_default_bus(), cols=16, lines=2):
"""Initialize the character LCD plate. Can optionally specify a separate
I2C address or bus number, but the defaults should suffice for most needs.
Can also optionally specify the number of columns and lines on the LCD
(default is 16x2).
"""
# Configure MCP23017 device.
self._mcp = MCP.MCP23017(address=address, busnum=busnum)
# Set LCD R/W pin to low for writing only.
self._mcp.setup(LCD_PLATE_RW, GPIO.OUT)
self._mcp.output(LCD_PLATE_RW, GPIO.LOW)
# Set buttons as inputs with pull-ups enabled.
for button in (SELECT, RIGHT, DOWN, UP, LEFT):
self._mcp.setup(button, GPIO.IN)
self._mcp.pullup(button, True)
# Initialize LCD (with no PWM support).
super(Adafruit_CharLCDPlate, self).__init__(LCD_PLATE_RS, LCD_PLATE_EN,
LCD_PLATE_D4, LCD_PLATE_D5, LCD_PLATE_D6, LCD_PLATE_D7, cols, lines,
LCD_PLATE_RED, LCD_PLATE_GREEN, LCD_PLATE_BLUE, enable_pwm=False,
gpio=self._mcp)
def is_pressed(self, button):
"""Return True if the provided button is pressed, False otherwise."""
if button not in set((SELECT, RIGHT, DOWN, UP, LEFT)):
raise ValueError('Unknown button, must be SELECT, RIGHT, DOWN, UP, or LEFT.')
return self._mcp.input(button) == GPIO.LOW
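# --- Added usage sketch (not part of the original module): the BCM pin numbers
# below are assumptions for one hypothetical wiring, including the extra onOff
# pin this variant adds for a power transistor; adjust them to the real hardware.
if __name__ == '__main__':
    lcd = Adafruit_CharLCD(rs=25, en=24, d4=23, d5=17, d6=18, d7=22,
                           cols=16, lines=2, onOff=4)
    lcd.clear()
    lcd.message('Hello\nfrom Python!')
    time.sleep(3.0)
    lcd.turnOff()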
|
lgpl-3.0
| -2,129,930,900,372,531,500 | 42.081933 | 111 | 0.596674 | false |
proycon/foliatools
|
foliatools/foliasetdefinition.py
|
1
|
4178
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#---------------------------------------------------------------
# FoLiA Set Definition tool
# by Maarten van Gompel
# Centre for Language Studies
# Radboud University Nijmegen
# proycon AT anaproy DOT nl
#
# Licensed under GPLv3
#----------------------------------------------------------------
"""A tool to read FoLiA Set Definitions and perform some operations on them. By default it will print all sets and classes. This tool can also convert from legacy XML to SKOS/RDF."""
from __future__ import print_function, unicode_literals, division, absolute_import
import sys
import glob
import gzip
import os
import io
import json
import argparse
from folia import foliaset
import folia.main as folia
from folia.helpers import u, isstring
def printclass(classinfo, args, indent):
if args.outputuri:
printuri = " <" + classinfo['uri'] + ">"
else:
printuri = ""
assert isinstance(classinfo, dict)
print(indent + " -> CLASS " + classinfo['id'] + printuri + ": " + classinfo['label'])
if 'subclasses' in classinfo:
for subclassinfo in classinfo['subclasses'].values():
printclass(subclassinfo, args, indent + " ")
def main():
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--basenamespace', type=str,help="Base RDF namespace to use when converting from legacy XML to RDF", action='store',default="",required=False)
parser.add_argument('--rdfttl', help="Output RDF in Turtle", action='store_true',required=False)
parser.add_argument('--rdfxml',help="Output RDF in XML", action='store_true',required=False)
parser.add_argument('--json', help="Output set definition in JSON", action='store_true',required=False)
parser.add_argument('--outputuri',help="Output full RDF URIs in text output", action='store_true',required=False)
parser.add_argument('--class', type=str,help="Test for the specified class, by ID", action='store',required=False)
parser.add_argument('--subset', type=str,help="Test for the specified subset (--class will be interpreted relative to subset then)", action='store',required=False)
parser.add_argument('--shell', help="Start an interactive Python shell for debugging (with PDB)", action='store_true',required=False)
parser.add_argument('url', nargs=1, help='URL or filename to a FoLiA Set Definition')
args = parser.parse_args()
url = args.url[0]
if url[0] not in ('.','/') and not url.startswith('http'):
url = './' + url
setdefinition = foliaset.SetDefinition(url, basens=args.basenamespace)
if args.rdfttl:
print(str(setdefinition.graph.serialize(None, 'turtle',base=setdefinition.basens),'utf-8') )
elif args.rdfxml:
print(str(setdefinition.graph.serialize(None, 'xml',base=setdefinition.basens),'utf-8') )
elif args.json:
print(json.dumps(setdefinition.json()))
elif args.shell:
print("Set Definition is loaded in variable: setdefinition; RDF graph in setdefinition.graph",file=sys.stderr)
import pdb; pdb.set_trace()
else:
#default visualization
setinfo = setdefinition.mainset()
if args.outputuri:
printuri = " <" + setinfo['uri'] + ">"
else:
printuri = ""
print("SET " + setinfo['id'] + printuri + ": " + setinfo['label'])
for classinfo in setdefinition.orderedclasses(setinfo['uri'], nestedhierarchy=True):
printclass(classinfo, args, " ")
print()
for subsetinfo in sorted(setdefinition.subsets(), key=lambda subsetinfo: subsetinfo['label'] if 'label' in subsetinfo else subsetinfo['id']):
if args.outputuri:
printuri = " <" + subsetinfo['uri'] + ">"
else:
printuri = ""
print("SUBSET " + subsetinfo['id'] + printuri + ": " + subsetinfo['label'])
for classinfo in setdefinition.orderedclasses(subsetinfo['uri'], nestedhierarchy=True):
printclass(classinfo, args, " ")
print()
if __name__ == "__main__":
main()
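# --- Added usage note (illustration only; the set definition URLs below are
# hypothetical). The flags correspond to the argparse options defined in main():
#   foliasetdefinition.py https://example.org/sets/pos.foliaset.xml
#       prints the main set, its classes and all subsets (default view)
#   foliasetdefinition.py --rdfttl https://example.org/sets/pos.foliaset.xml
#       converts a (possibly legacy XML) set definition to SKOS/RDF in Turtle
#   foliasetdefinition.py --json mylocalset.xml
#       dumps the set definition as JSON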
|
gpl-3.0
| -3,355,404,610,841,344,000 | 44.413043 | 182 | 0.641216 | false |
recap/pumpkin
|
examples/tweeter/filters-cat3/filterhasa.py
|
1
|
1595
|
__author__ = 'reggie'
###START-CONF
##{
##"object_name": "filterhasa",
##"object_poi": "qpwo-2345",
##"parameters": [
## {
## "name": "tweet",
## "description": "english tweets",
## "required": true,
## "type": "TweetString",
## "format": "",
## "state" : "ENGLISH"
## }
## ],
##"return": [
## {
## "name": "tweet",
## "description": "has a relation tweet",
## "required": true,
## "type": "TweetString",
## "format": "",
## "state" : "HASA|NO_HASA"
## }
##
## ] }
##END-CONF
import re
from pumpkin import PmkSeed
class filterhasa(PmkSeed.Seed):
def __init__(self, context, poi=None):
PmkSeed.Seed.__init__(self, context,poi)
pass
def on_load(self):
print "Loading: " + self.__class__.__name__
pass
    def run(self, pkt, tweet):
        m = re.search('W(\s+)(.*)(\n)', tweet, re.S)
        if m:
            tw = m.group(2)
            relation = self.has_a(tw)
            if relation:
                self.dispatch(pkt, relation, "HASA")
        pass
def has_a(self,text):
m = re.search('([A-Z]+[A-Za-z]+\s*[A-Za-z]*\s(has an|has a)\s[A-Z]+[A-Za-z]+\s[A-Z]*[A-Za-z]*)', text, re.S)
if m:
tw = m.group(0)
return tw
return False
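# --- Added illustration (not part of the filter): a standalone check of the
# same "has a" pattern used by has_a() above, showing what it does and does not match.
#   >>> import re
#   >>> pat = '([A-Z]+[A-Za-z]+\s*[A-Za-z]*\s(has an|has a)\s[A-Z]+[A-Za-z]+\s[A-Z]*[A-Za-z]*)'
#   >>> bool(re.search(pat, 'Alice has a Tesla Model', re.S))
#   True
#   >>> bool(re.search(pat, 'Bob has a dog', re.S))  # object is not capitalised
#   False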
|
mit
| -1,975,949,327,162,892,500 | 21.8 | 116 | 0.37931 | false |
s390guy/SATK
|
xforth/forth_words.py
|
1
|
12643
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
this_module="forth_words.py"
definitions="""\
\ vim:ft=forth
\
\ Forth core language definitions.
\
\ Thanks to Richard W.M. Jones <rich@annexia.org> http://annexia.org/forth
\ Most parts of this file are based on his jonesforth, which is licensed as
\ public domain.
: 2DROP ( n n -- ) DROP DROP ;
: 2DUP ( y x -- y x y x ) OVER OVER ;
\ The primitive word /MOD [DIVMOD] leaves both the quotient and the remainder
\ on the stack. Now we can define the / and MOD in terms of /MOD and a few
\ other primitives.
: / ( n -- n ) /MOD SWAP DROP ;
: MOD ( n -- n ) /MOD DROP ;
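\ Illustrative sketch (added; not part of the original word set): given the
\ definitions of / and MOD above, /MOD leaves the remainder below the quotient,
\ so, assuming . prints and pops the top of the stack:
\   14 5 /MOD . .    prints 2 then 4
\   14 5 / .         prints 2
\   14 5 MOD .       prints 4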
( simple math )
: 1+ ( n -- n ) 1 + ; ( increment by one )
: 1- ( n -- n ) 1 - ; ( decrement by one )
: 4+ ( n -- n ) 4 + ; ( increment by four )
: 4- ( n -- n ) 4 - ; ( decrement by four )
( Define some character constants )
( > ASCII code for line feed. )
: LF 10 ;
( > BL [BLank] is a standard FORTH word for space. )
: BL 32 ;
( > CR prints a carriage return. )
: CR 13 EMIT ;
( > SPACE prints a space. )
: SPACE BL EMIT ;
( > NEGATE leaves the negative of a number on the stack. )
: NEGATE 0 SWAP - ;
( Standard words for booleans. )
-1 CONSTANT TRUE
0 CONSTANT FALSE
: NOT 0= ;
( LITERAL takes whatever is on the stack and compiles LIT <foo> )
: LITERAL IMMEDIATE
' $LIT , \ compile LIT
, \ compile the literal itself (from the stack)
;
\ Now we can use [ and ] to insert literals which are calculated at compile
\ time. (Recall that [ and ] are the FORTH words which switch into and out of
\ immediate mode.) Within definitions, use [ ... ] LITERAL anywhere that '...' is
\ a constant expression which you would rather only compute once (at compile
\ time, rather than calculating it each time your word runs).
: ':'
[ \ go into immediate mode (temporarily)
CHAR : \ push the number 58 (ASCII code of colon) on the parameter stack
] \ go back to compile mode
LITERAL \ compile LIT 58 as the definition of ':' word
;
( A few more character constants defined the same way as above. )
: ';' [ CHAR ; ] LITERAL ;
: '(' [ CHAR 40 ] LITERAL ;
: ')' [ CHAR ) ] LITERAL ;
: '"' [ CHAR 34 ] LITERAL ; ( " vim syntax hack )
: 'A' [ CHAR A ] LITERAL ;
: '0' [ CHAR 48 ] LITERAL ;
: '-' [ CHAR - ] LITERAL ;
: '.' [ CHAR . ] LITERAL ;
\ CONTROL STRUCTURES ----------------------------------------------------------------------
\
\ So far we have defined only very simple definitions. Before we can go
\ further, we really need to make some control structures, like IF ... THEN and
\ loops. Luckily we can define arbitrary control structures directly in FORTH.
\
\ Please note that the control structures as I have defined them here will only
\ work inside compiled words. If you try to type in expressions using IF, etc.
\ in immediate mode, then they won't work. Making these work in immediate mode
\ is left as an exercise for the reader.
\
( > Examples:
( >
( > - ``condition IF true-part THEN rest`` )
\ -- compiles to: --> condition 0BRANCH OFFSET true-part rest
\ where OFFSET is the offset of 'rest'
( > - ``condition IF true-part ELSE false-part THEN`` )
\ -- compiles to: --> condition 0BRANCH OFFSET true-part BRANCH OFFSET2 false-part rest
\ where OFFSET if the offset of false-part and OFFSET2 is the offset of rest
\
\ alternate form with mor common wording
( > - ``condition IF true-part ELSE false-part ENDIF`` )
\ IF is an IMMEDIATE word which compiles 0BRANCH followed by a dummy offset, and places
\ the address of the 0BRANCH on the stack. Later when we see THEN, we pop that address
\ off the stack, calculate the offset, and back-fill the offset.
: IF IMMEDIATE
' $BRANCH0 , \ compile 0BRANCH
HERE \ save location of the offset on the stack
0 , \ compile a dummy offset
;
( > See IF_. )
: THEN IMMEDIATE
DUP
HERE SWAP - \ calculate the offset from the address saved on the stack
SWAP ! \ store the offset in the back-filled location
;
( > Alias for THEN_, See IF_. )
: ENDIF IMMEDIATE
DUP
HERE SWAP - \ calculate the offset from the address saved on the stack
SWAP ! \ store the offset in the back-filled location
;
( > See IF_. )
: ELSE IMMEDIATE
' $BRANCH , \ definite branch to just over the false-part
HERE \ save location of the offset on the stack
0 , \ compile a dummy offset
SWAP \ now back-fill the original (IF) offset
DUP \ same as for THEN word above
HERE SWAP -
SWAP !
;
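\ Illustrative sketch (added; not part of the original definitions): a compiled
\ word using IF ... ELSE ... THEN, built only from words defined above.
: .FLAG ( flag -- ) \ emit 'A' for a true flag, '-' otherwise
    IF 'A' EMIT ELSE '-' EMIT THEN
;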
( > Example: ``BEGIN loop-part condition UNTIL`` )
\ -- compiles to: --> loop-part condition 0BRANCH OFFSET
\ where OFFSET points back to the loop-part
\ This is like do { loop-part } while (condition) in the C language
: BEGIN IMMEDIATE
HERE \ save location on the stack
;
( > See BEGIN_. )
: UNTIL IMMEDIATE
' $BRANCH0 , \ compile 0BRANCH
HERE - \ calculate the offset from the address saved on the stack
, \ compile the offset here
;
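\ Illustrative sketch (added; not part of the original definitions): a countdown
\ loop built with BEGIN ... UNTIL, using only words defined above.
: COUNTDOWN ( n -- )
    BEGIN
        DUP .    \ print the current value
        1-       \ decrement it
        DUP 0=   \ flag: reached zero?
    UNTIL
    DROP         \ drop the final zero
;
\ e.g. 5 COUNTDOWN would print 5 4 3 2 1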
( > BEGIN loop-part AGAIN )
\ -- compiles to: --> loop-part BRANCH OFFSET
\ where OFFSET points back to the loop-part
\ In other words, an infinite loop which can only be returned from with EXIT
: AGAIN IMMEDIATE
' $BRANCH , \ compile BRANCH
HERE - \ calculate the offset back
, \ compile the offset here
;
( > Example: ``BEGIN condition WHILE loop-part REPEAT`` )
\ -- compiles to: --> condition 0BRANCH OFFSET2 loop-part BRANCH OFFSET
\ where OFFSET points back to condition (the beginning) and OFFSET2
\ points to after the whole piece of code
\ So this is like a while (condition) { loop-part } loop in the C language
: WHILE IMMEDIATE
' $BRANCH0 , \ compile 0BRANCH
HERE \ save location of the offset2 on the stack
0 , \ compile a dummy offset2
;
( > See WHILE_. )
: REPEAT IMMEDIATE
    ' $BRANCH , \ compile BRANCH
SWAP \ get the original offset (from BEGIN)
HERE - , \ and compile it after BRANCH
DUP
HERE SWAP - \ calculate the offset2
SWAP ! \ and back-fill it in the original location
;
( > UNLESS is the same as IF_ but the test is reversed. )
\ Note the use of [COMPILE]: Since IF is IMMEDIATE we don't want it to be
\ executed while UNLESS is compiling, but while UNLESS is running (which happens
\ to be when whatever word using UNLESS is being compiled -- whew!). So we use
\ [COMPILE] to reverse the effect of marking IF as immediate. This trick is
\ generally used when we want to write our own control words without having to
\ implement them all in terms of the primitives 0BRANCH and BRANCH, but instead
\ reusing simpler control words like (in this instance) IF.
: UNLESS IMMEDIATE
' NOT , \ compile NOT (to reverse the test)
[COMPILE] IF \ continue by calling the normal IF
;
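\ An illustrative use (not part of the original listing; assumes ." is
\ available to print a string):
\     : WARN-IF-ZERO ( n -- ) UNLESS ." value was zero " THEN ;
\ The message is printed only when n is zero, because UNLESS compiles NOT
\ before the IF test.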
( Some more complicated stack examples, showing the stack notation. )
: NIP ( x y -- y ) SWAP DROP ;
: TUCK ( x y -- y x y ) SWAP OVER ;
( With the looping constructs, we can now write SPACES, which writes n spaces to stdout. )
( > Write given number of spaces. Example:: ``20 SPACES``.)
: SPACES ( n -- )
BEGIN
DUP 0> \ while n > 0
WHILE
SPACE \ print a space
1- \ until we count down to 0
REPEAT
DROP
;
( > ? Fetches the integer at an address and prints it. )
: ? ( addr -- ) @ . ;
( > ``c a b WITHIN`` returns true if a <= c and c < b )
( > or define without ifs: ``OVER - >R - R> U<`` )
: WITHIN
-ROT ( b c a )
OVER ( b c a c )
<= IF
> IF ( b c -- )
TRUE
ELSE
FALSE
THEN
ELSE
2DROP ( b c -- )
FALSE
THEN
;
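\ For example (not part of the original listing):
\      5 1 10 WITHIN    \ leaves TRUE  (1 <= 5 and 5 < 10)
\     12 1 10 WITHIN    \ leaves FALSE (12 is not < 10)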
\ CASE ----------------------------------------------------------------------
\
\ CASE...ENDCASE is how we do switch statements in FORTH. There is no generally
\ agreed syntax for this, so I've gone for the syntax mandated by the ISO standard
\ FORTH (ANS-FORTH).
( > ::
( >
( > (some value on the stack)
( > CASE
( > test1 OF ... ENDOF
( > test2 OF ... ENDOF
( > testn OF ... ENDOF
( > ... (default case)
( > ENDCASE
)
\ The CASE statement tests the value on the stack by comparing it for equality with
\ test1, test2, ..., testn and executes the matching piece of code within OF ... ENDOF.
\ If none of the test values match then the default case is executed. Inside the ... of
\ the default case, the value is still at the top of stack (it is implicitly DROP-ed
\ by ENDCASE). When ENDOF is executed it jumps after ENDCASE (ie. there is no "fall-through"
\ and no need for a break statement like in C).
\
\ The default case may be omitted. In fact the tests may also be omitted so that you
\ just have a default case, although this is probably not very useful.
\
\ An example (assuming that 'q', etc. are words which push the ASCII value of the letter
\ on the stack):
\
\ 0 VALUE QUIT
\ 0 VALUE SLEEP
\ KEY CASE
\ 'q' OF 1 TO QUIT ENDOF
\ 's' OF 1 TO SLEEP ENDOF
\ (default case:)
\ ." Sorry, I didn't understand key <" DUP EMIT ." >, try again." CR
\ ENDCASE
\
\ (In some versions of FORTH, more advanced tests are supported, such as ranges, etc.
\ Other versions of FORTH need you to write OTHERWISE to indicate the default case.
\ As I said above, this FORTH tries to follow the ANS FORTH standard).
\
\ The implementation of CASE...ENDCASE is somewhat non-trivial. I'm following the
\ implementations from here:
\ http://www.uni-giessen.de/faq/archiv/forthfaq.case_endcase/msg00000.html
\
\ The general plan is to compile the code as a series of IF statements:
\
\ CASE (push 0 on the immediate-mode parameter stack)
\ test1 OF ... ENDOF test1 OVER = IF DROP ... ELSE
\ test2 OF ... ENDOF test2 OVER = IF DROP ... ELSE
\ testn OF ... ENDOF testn OVER = IF DROP ... ELSE
\ ... (default case) ...
\ ENDCASE DROP THEN [THEN [THEN ...]]
\
\ The CASE statement pushes 0 on the immediate-mode parameter stack, and that number
\ is used to count how many THEN statements we need when we get to ENDCASE so that each
\ IF has a matching THEN. The counting is done implicitly. If you recall from the
\ implementation above of IF, each IF pushes a code address on the immediate-mode stack,
\ and these addresses are non-zero, so by the time we get to ENDCASE the stack contains
\ some number of non-zeroes, followed by a zero. The number of non-zeroes is how many
\ times IF has been called, so how many times we need to match it with THEN.
\
\ This code uses [COMPILE] so that we compile calls to IF, ELSE, THEN instead of
\ actually calling them while we're compiling the words below.
\
\ As is the case with all of our control structures, they only work within word
\ definitions, not in immediate mode.
: CASE IMMEDIATE
0 \ push 0 to mark the bottom of the stack
;
( > See CASE_. )
: OF IMMEDIATE
' OVER , \ compile OVER
' = , \ compile =
[COMPILE] IF \ compile IF
' DROP , \ compile DROP
;
( > See CASE_. )
: ENDOF IMMEDIATE
[COMPILE] ELSE \ ENDOF is the same as ELSE
;
( > See CASE_. )
: ENDCASE IMMEDIATE
' DROP , \ compile DROP
( keep compiling THEN until we get to our zero marker )
BEGIN
?DUP
WHILE
[COMPILE] THEN \ aka ENDIF
REPEAT
;
( > Compile LIT_. )
: ['] IMMEDIATE
' $LIT , \ compile LIT
;
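\ An illustrative use (not part of the original listing): inside a definition,
\ ['] compiles LIT followed by the codeword address of the next word, so that
\ address is pushed (rather than executed) at run time:
\     : XT-OF-DUP ( -- addr ) ['] DUP ;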
"""
if __name__ == '__main__':
raise NotImplementedError("module %s only intended for import use" % this_module)
|
gpl-3.0
| 5,702,980,280,763,613,000 | 35.862974 | 99 | 0.58396 | false |
ValvePython/vpk
|
setup.py
|
1
|
1446
|
#!/usr/bin/env python
from setuptools import setup
from codecs import open
from os import path
import vpk
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='vpk',
version=vpk.__version__,
description='Library for working with Valve Pak files',
long_description=long_description,
url='https://github.com/ValvePython/vpk',
author='Rossen Georgiev',
author_email='hello@rgp.io',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
keywords='valve pak vpk tf2 dota2 csgo dota',
packages=['vpk'],
zip_safe=True,
entry_points={
'console_scripts': [
'vpk = vpk.cli:main',
],
},
)
|
mit
| -4,161,693,860,461,095,000 | 31.133333 | 71 | 0.60166 | false |
rbarrois/mpdlcd
|
mpdlcd/utils.py
|
1
|
3005
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2013 Raphaël Barrois
import functools
import logging
import socket
import time
class AutoRetryConfig(object):
"""Hold the auto-retry configuration.
Attributes:
retry_attempts (int): Maximum number of connection retries
retry_wait (int): The initial time to wait between retries
retry_backoff (int): Amount by which wait time should be multiplied
after each failure
"""
def __init__(self, retry_attempts, retry_wait, retry_backoff):
if retry_backoff <= 1:
raise ValueError('retry_backoff should be greater than 1.')
self.retry_backoff = retry_backoff
if retry_wait <= 0:
raise ValueError('retry_wait should be positive.')
self.retry_wait = retry_wait
if retry_attempts < 0:
raise ValueError('retry_attempts should be positive or zero')
self.retry_attempts = retry_attempts
class AutoRetryCandidate(object):
"""Base class for objects wishing to use the @auto_retry decorator.
Attributes:
_retry_config (AutoRetryConfig): auto-retry configuration
_retry_logger (logging.Logger): where to log connection failures
"""
def __init__(self, retry_config, logger=None, *args, **kwargs):
self._retry_config = retry_config
if not logger:
logger = logging.getLogger(self.__class__.__module__)
self._retry_logger = logger
super(AutoRetryCandidate, self).__init__(*args, **kwargs)
def auto_retry(fun):
"""Decorator for retrying method calls, based on instance parameters."""
@functools.wraps(fun)
def decorated(instance, *args, **kwargs):
"""Wrapper around a decorated function."""
cfg = instance._retry_config
remaining_tries = cfg.retry_attempts
current_wait = cfg.retry_wait
retry_backoff = cfg.retry_backoff
last_error = None
while remaining_tries >= 0:
try:
return fun(instance, *args, **kwargs)
except socket.error as e:
last_error = e
instance._retry_logger.warning('Connection failed: %s', e)
remaining_tries -= 1
if remaining_tries == 0:
# Last attempt
break
# Wait a bit
time.sleep(current_wait)
current_wait *= retry_backoff
# All attempts failed, let's raise the last error.
raise last_error
return decorated
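# Illustrative usage sketch (not part of the original module; the Client class
# and its connect() body are hypothetical):
#
#     class Client(AutoRetryCandidate):
#         @auto_retry
#         def connect(self, host, port):
#             return self._sock.connect((host, port))
#
#     client = Client(retry_config=AutoRetryConfig(
#         retry_attempts=3, retry_wait=1, retry_backoff=2))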
def extract_pattern(fmt):
"""Extracts used strings from a %(foo)s pattern."""
class FakeDict(object):
def __init__(self):
self.seen_keys = set()
def __getitem__(self, key):
self.seen_keys.add(key)
return ''
def keys(self):
return self.seen_keys
fake = FakeDict()
try:
fmt % fake
except TypeError:
# Formatting error
pass
return set(fake.keys())
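# For example (illustrative, not part of the original module):
#     extract_pattern('%(artist)s - %(title)s')  ==> {'artist', 'title'}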
|
mit
| 1,361,603,677,624,080,100 | 28.165049 | 76 | 0.596538 | false |
googleapis/python-apigee-connect
|
google/cloud/apigeeconnect_v1/types/tether.py
|
1
|
7720
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.apigeeconnect.v1",
manifest={
"Action",
"TetherEndpoint",
"Scheme",
"EgressRequest",
"Payload",
"StreamInfo",
"EgressResponse",
"HttpRequest",
"Url",
"Header",
"HttpResponse",
},
)
class Action(proto.Enum):
r"""The action taken by agent."""
ACTION_UNSPECIFIED = 0
OPEN_NEW_STREAM = 1
class TetherEndpoint(proto.Enum):
r"""Endpoint indicates where the messages will be delivered."""
TETHER_ENDPOINT_UNSPECIFIED = 0
APIGEE_MART = 1
APIGEE_RUNTIME = 2
APIGEE_MINT_RATING = 3
class Scheme(proto.Enum):
r"""HTTP Scheme."""
SCHEME_UNSPECIFIED = 0
HTTPS = 1
class EgressRequest(proto.Message):
r"""gRPC request payload for tether.
Attributes:
id (str):
Unique identifier for the request.
payload (google.cloud.apigeeconnect_v1.types.Payload):
Actual payload to send to agent.
endpoint (google.cloud.apigeeconnect_v1.types.TetherEndpoint):
Tether Endpoint.
project (str):
GCP Project. Format: ``projects/{project_number}``.
trace_id (str):
Unique identifier for clients to trace their
request/response.
timeout (google.protobuf.duration_pb2.Duration):
Timeout for the HTTP request.
"""
id = proto.Field(proto.STRING, number=1,)
payload = proto.Field(proto.MESSAGE, number=2, message="Payload",)
endpoint = proto.Field(proto.ENUM, number=3, enum="TetherEndpoint",)
project = proto.Field(proto.STRING, number=4,)
trace_id = proto.Field(proto.STRING, number=5,)
timeout = proto.Field(proto.MESSAGE, number=6, message=duration_pb2.Duration,)
class Payload(proto.Message):
r"""Payload for EgressRequest.
Attributes:
http_request (google.cloud.apigeeconnect_v1.types.HttpRequest):
The HttpRequest proto.
stream_info (google.cloud.apigeeconnect_v1.types.StreamInfo):
The information of stream.
action (google.cloud.apigeeconnect_v1.types.Action):
The action taken by agent.
"""
http_request = proto.Field(
proto.MESSAGE, number=1, oneof="kind", message="HttpRequest",
)
stream_info = proto.Field(
proto.MESSAGE, number=2, oneof="kind", message="StreamInfo",
)
action = proto.Field(proto.ENUM, number=3, oneof="kind", enum="Action",)
class StreamInfo(proto.Message):
r"""The Information of bi-directional stream.
Attributes:
id (str):
Unique identifier for the stream.
"""
id = proto.Field(proto.STRING, number=1,)
class EgressResponse(proto.Message):
r"""gRPC response payload for tether.
Attributes:
id (str):
Unique identifier for the response. Matches
the EgressRequest's id.
http_response (google.cloud.apigeeconnect_v1.types.HttpResponse):
HttpResponse.
status (google.rpc.status_pb2.Status):
Errors from application when handling the
http request.
project (str):
GCP Project. Format: ``projects/{project_number}``.
trace_id (str):
Unique identifier for clients to trace their
request/response. Matches the EgressRequest's
trace id
endpoint (google.cloud.apigeeconnect_v1.types.TetherEndpoint):
Tether Endpoint.
name (str):
Name is the full resource path of endpoint. Format:
``projects/{project_number or project_id}/endpoints/{endpoint}``
"""
id = proto.Field(proto.STRING, number=1,)
http_response = proto.Field(proto.MESSAGE, number=2, message="HttpResponse",)
status = proto.Field(proto.MESSAGE, number=3, message=status_pb2.Status,)
project = proto.Field(proto.STRING, number=4,)
trace_id = proto.Field(proto.STRING, number=5,)
endpoint = proto.Field(proto.ENUM, number=6, enum="TetherEndpoint",)
name = proto.Field(proto.STRING, number=7,)
class HttpRequest(proto.Message):
r"""The proto definition of http request.
Attributes:
id (str):
A unique identifier for the request.
method (str):
The HTTP request method.
Valid methods: "GET", "HEAD", "POST", "PUT",
"PATCH","DELETE".
url (google.cloud.apigeeconnect_v1.types.Url):
The HTTP request URL.
headers (Sequence[google.cloud.apigeeconnect_v1.types.Header]):
The HTTP request headers.
body (bytes):
HTTP request body.
"""
id = proto.Field(proto.STRING, number=1,)
method = proto.Field(proto.STRING, number=2,)
url = proto.Field(proto.MESSAGE, number=3, message="Url",)
headers = proto.RepeatedField(proto.MESSAGE, number=4, message="Header",)
body = proto.Field(proto.BYTES, number=5,)
class Url(proto.Message):
r"""The proto definition of url. A url represents a URL and the general
form represented is:
``[scheme://][google.cloud.apigeeconnect.v1.Url.host][path]``
Attributes:
scheme (google.cloud.apigeeconnect_v1.types.Scheme):
Scheme.
host (str):
Host or Host:Port.
path (str):
Path starts with ``/``.
"""
scheme = proto.Field(proto.ENUM, number=1, enum="Scheme",)
host = proto.Field(proto.STRING, number=2,)
path = proto.Field(proto.STRING, number=3,)
class Header(proto.Message):
r"""The http headers.
Attributes:
key (str):
values (Sequence[str]):
"""
key = proto.Field(proto.STRING, number=1,)
values = proto.RepeatedField(proto.STRING, number=2,)
class HttpResponse(proto.Message):
r"""The proto definition of http response.
Attributes:
id (str):
A unique identifier that matches the request
ID.
status (str):
Status of http response, e.g. "200 OK".
status_code (int):
Status code of http response, e.g. 200.
body (bytes):
The HTTP 1.1 response body.
headers (Sequence[google.cloud.apigeeconnect_v1.types.Header]):
The HTTP response headers.
content_length (int):
Content length records the length of the
associated content. The value -1 indicates that
the length is unknown. Unless http method is
"HEAD", values >= 0 indicate that the given
number of bytes may be read from Body.
"""
id = proto.Field(proto.STRING, number=1,)
status = proto.Field(proto.STRING, number=2,)
status_code = proto.Field(proto.INT32, number=3,)
body = proto.Field(proto.BYTES, number=4,)
headers = proto.RepeatedField(proto.MESSAGE, number=5, message="Header",)
content_length = proto.Field(proto.INT64, number=6,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
apache-2.0
| -2,795,002,658,291,813,400 | 31.711864 | 82 | 0.631995 | false |
AGMMGA/EM_scripts
|
EM_scripts/junk/e2rawdata_silence_errors.py
|
1
|
9718
|
#!/Xsoftware64/EM/EMAN2/extlib/bin/python
#
# Author: John Flanagan (jfflanag@bcm.edu)
# Copyright (c) 2000-2011 Baylor College of Medicine
# This software is issued under a joint BSD/GNU license. You may use the
# source code in this file under either license. However, note that the
# complete EMAN2 and SPARX software packages have some GPL dependencies,
# so you are responsible for compliance with the licenses of these packages
# if you opt to use BSD licensing. The warranty disclaimer below holds
# in either instance.
#
# This complete copyright notice must be included in any revised version of the
# source code. Additional authorship citations may be added, but existing
# author citations must be preserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 2111-1307 USA
#
#
import re, os, traceback
from EMAN2 import *
def main():
progname = os.path.basename(sys.argv[0])
usage = """prog [options] <micrograph1, micrograph2....>
Use this program to import and filter raw micrographs. If you choose to filter and/or convert format, this program will process each micrograph
and dump them into the directory './micrographs.', otherwise the micrographs will simply be moved into './micrographs'. If you select the option
--moverawdata AND you filter or change format, your original micrographs will be moved into the directory './raw_micrographs' and your
filtered micrographs will be in './micrographs as usual. BDB files are not moved, but they can be processed."""
parser = EMArgumentParser(usage=usage,version=EMANVERSION)
parser.add_pos_argument(name="micrographs",help="List the micrographs to filter here.", default="", guitype='filebox', browser="EMRawDataTable(withmodal=True,multiselect=True)", row=0, col=0,rowspan=1, colspan=2, mode='filter')
parser.add_pos_argument(name="import_files",help="List the files to import/filter here.", default="", guitype='filebox', browser="EMBrowserWidget(withmodal=True,multiselect=True)", row=0, col=0,rowspan=1, colspan=2, mode='import')
parser.add_header(name="filterheader", help='Options below this label are specific to filtering', title="### filtering options ###", row=1, col=0, rowspan=1, colspan=2, mode='import,filter')
parser.add_argument("--invert",action="store_true",help="Invert contrast",default=False, guitype='boolbox', row=2, col=0, rowspan=1, colspan=1, mode='filter[True]')
parser.add_argument("--edgenorm",action="store_true",help="Edge normalize",default=False, guitype='boolbox', row=2, col=1, rowspan=1, colspan=1, mode='filter[True]')
parser.add_argument("--usefoldername",action="store_true",help="If you have the same image filename in multiple folders, and need to import into the same project, this will prepend the folder name on each image name",default=False,guitype='boolbox',row=2, col=2, rowspan=1, colspan=1, mode="import[False]")
parser.add_argument("--xraypixel",action="store_true",help="Filter X-ray pixels",default=False, guitype='boolbox', row=3, col=0, rowspan=1, colspan=1, mode='filter[True]')
parser.add_argument("--ctfest",action="store_true",help="Estimate defocus from whole micrograph",default=False, guitype='boolbox', row=3, col=1, rowspan=1, colspan=1, mode='filter[True]')
parser.add_argument("--astigmatism",action="store_true",help="Includes astigmatism in automatic fitting",default=False, guitype='boolbox', row=3, col=2, rowspan=1, colspan=1, mode='filter[False]')
parser.add_argument("--moverawdata",action="store_true",help="Move raw data to directory ./raw_micrographs after filtration",default=False)
parser.add_argument("--apix",type=float,help="Angstroms per pixel for all images",default=None, guitype='floatbox', row=5, col=0, rowspan=1, colspan=1, mode="filter['self.pm().getAPIX()']")
parser.add_argument("--voltage",type=float,help="Microscope voltage in KV",default=None, guitype='floatbox', row=5, col=1, rowspan=1, colspan=1, mode="filter['self.pm().getVoltage()']")
parser.add_argument("--cs",type=float,help="Microscope Cs (spherical aberation)",default=None, guitype='floatbox', row=6, col=0, rowspan=1, colspan=1, mode="filter['self.pm().getCS()']")
parser.add_argument("--ac",type=float,help="Amplitude contrast (percentage, default=10)",default=10, guitype='floatbox', row=6, col=1, rowspan=1, colspan=1, mode="filter")
parser.add_argument("--defocusmin",type=float,help="Minimum autofit defocus",default=0.6, guitype='floatbox', row=8, col=0, rowspan=1, colspan=1, mode="filter[0.6]")
parser.add_argument("--defocusmax",type=float,help="Maximum autofit defocus",default=4, guitype='floatbox', row=8, col=1, rowspan=1, colspan=1, mode='filter[4.0]')
parser.add_argument("--ppid", type=int, help="Set the PID of the parent process, used for cross platform PPID",default=-1)
(options, args) = parser.parse_args()
microdir = os.path.join(".","micrographs")
if not os.access(microdir, os.R_OK):
os.mkdir("micrographs")
logid=E2init(sys.argv,options.ppid)
# After filtration we move micrographs to a directory 'raw_micrographs', if desired
if options.moverawdata:
originalsdir = os.path.join(".","raw_micrographs")
if not os.access(originalsdir, os.R_OK):
os.mkdir("raw_micrographs")
for i,arg in enumerate(args):
try:
base = base_name(arg,nodir=not options.usefoldername)
output = os.path.join(os.path.join(".","micrographs"),base+".hdf")
cmd = "e2proc2d.py %s %s --inplace"%(arg,output)
if options.invert: cmd += " --mult=-1"
if options.edgenorm: cmd += " --process=normalize.edgemean"
if options.xraypixel: cmd += " --process=threshold.clampminmax.nsigma:nsigma=4"
launch_childprocess(cmd)
if options.moverawdata:
os.rename(arg,os.path.join(originalsdir,os.path.basename(arg)))
# We estimate the defocus and B-factor (no astigmatism) from the micrograph and store it in info and the header
if options.ctfest :
d=EMData(output,0)
if d["nx"]<1200 or d["ny"]<1200 :
print "CTF estimation will only work with images at least 1200x1200 in size"
sys.exit(1)
import e2ctf
ds=1.0/(options.apix*512)
ffta=None
nbx=0
for x in range(100,d["nx"]-512,512):
for y in range(100,d["ny"]-512,512):
clip=d.get_clip(Region(x,y,512,512))
clip.process_inplace("normalize.edgemean")
fft=clip.do_fft()
fft.ri2inten()
if ffta==None: ffta=fft
else: ffta+=fft
nbx+=1
ffta.mult(1.0/(nbx*512**2))
ffta.process_inplace("math.sqrt")
ffta["is_intensity"]=0 # These 2 steps are done so the 2-D display of the FFT looks better. Things would still work properly in 1-D without it
fftbg=ffta.process("math.nonconvex")
fft1d=ffta.calc_radial_dist(ffta.get_ysize()/2,0.0,1.0,1) # note that this handles the ri2inten averages properly
# Compute 1-D curve and background
bg_1d=e2ctf.low_bg_curve(fft1d,ds)
#initial fit, background adjustment, refine fit, final background adjustment
ctf=e2ctf.ctf_fit(fft1d,bg_1d,bg_1d,ffta,fftbg,options.voltage,options.cs,options.ac,options.apix,1,dfhint=(options.defocusmin,options.defocusmax))
bgAdj(ctf,fft1d)
ctf=e2ctf.ctf_fit(fft1d,ctf.background,ctf.background,ffta,fftbg,options.voltage,options.cs,options.ac,options.apix,1,dfhint=(options.defocusmin,options.defocusmax))
bgAdj(ctf,fft1d)
if options.astigmatism : e2ctf.ctf_fit_stig(ffta,fftbg,ctf)
#ctf.background=bg_1d
#ctf.dsbg=ds
db=js_open_dict(info_name(arg,nodir=not options.usefoldername))
db["ctf_frame"]=[512,ctf,(256,256),set(),5,1]
print info_name(arg,nodir=not options.usefoldername),ctf
E2progress(logid,(float(i)/float(len(args))))
except:
with open('errorlog.txt','w') as e:
e.write('error in image: {0}:\n'.format(str(base)))
traceback.print_exc(None, e)
e.write('')
pass
E2end(logid)
def bgAdj(ctf,fg_1d):
"""Smooths the background based on the values of the foreground near the CTF zeroes and puts the
smoothed background into the CTF object"""
ds=ctf.dsbg
ctf=ctf
bg_1d=list(ctf.background)
xyd=XYData()
# Find the minimum value near the origin, which we'll use as a zero (though it likely should not be)
mv=(fg_1d[1],1)
fz=int(ctf.zero(0)/(ds*2))
for lz in xrange(1,fz):
mv=min(mv,(fg_1d[lz],lz))
xyd.insort(mv[1],mv[0])
# now we add all of the zero locations to our XYData object
for i in xrange(100):
z=int(ctf.zero(i)/ds)
if z>=len(bg_1d)-1: break
if fg_1d[z-1]<fg_1d[z] and fg_1d[z-1]<fg_1d[z+1]: mv=(z-1,fg_1d[z-1])
elif fg_1d[z]<fg_1d[z+1] : mv=(z,fg_1d[z])
else : mv=(z+1,fg_1d[z+1])
xyd.insort(mv[0],mv[1])
# new background is interpolated XYData
ctf.background=[xyd.get_yatx_smooth(i,1) for i in xrange(len(bg_1d))]
# if our first point (between the origin and the first 0) is too high, we readjust it once
bs=[fg_1d[i]-ctf.background[i] for i in xrange(fz)]
if min(bs)<0 :
mv=(bs[0],fg_1d[0],0)
for i in xrange(1,fz): mv=min(mv,(bs[i],fg_1d[i],i))
xyd.set_x(0,mv[2])
xyd.set_y(0,mv[1])
ctf.background=[xyd.get_yatx_smooth(i,1) for i in xrange(len(bg_1d))]
if __name__ == "__main__":
main()
|
gpl-2.0
| -7,468,594,814,304,820,000 | 50.417989 | 307 | 0.71311 | false |
google/skia-buildbot
|
ct/py/csv-django-settings.py
|
1
|
1281
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'html-templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Set an unused dummy value here to bypass django error that shows up when
# this field is missing.
SECRET_KEY = '*lk^6@0l0(iulgar$j)fbvfy&^(^u+qk3j73d18@&+ur^xuTxY'
|
bsd-3-clause
| 8,169,559,527,647,430,000 | 34.583333 | 74 | 0.593286 | false |
micgro42/ReceiptEval
|
receipteval/tests/test_collection_collecting.py
|
1
|
4092
|
# encoding: utf-8
'''
@author: Michael Große <mic.grosse@posteo.de>
@repository: https://github.com/micgro42/ReceiptEval
'''
import pytest
from receipteval.receipt_collection import ReceiptCollection
from receipteval.purchase import Purchase
@pytest.fixture() # Registering this function as a fixture.
def purchase_list_fixture(request):
purchase_list = []
test_purchase1 = Purchase('2015-11-29', 'Bio Company')
test_purchase1.add_item(name='Blanc de Pomm',
price=1.69,
count=1,
category='Zubrot')
test_purchase1.add_item('Seidentofu', 5.18, 2)
purchase_list.append(test_purchase1)
test_purchase2 = Purchase('2014-11-01', 'Bio Company')
test_purchase2.add_item('Risotto', 4.38, 2, category='Kochzutaten')
test_purchase2.add_item('Sesam', 3.69, 1)
test_purchase2.add_item('Cashewbruch', 10.99, 1)
test_purchase2.add_item('Bananen', 1.22, 1, category='Obst')
test_purchase2.add_item('Roggenmehl', 3.98, 2, category='Mehl')
test_purchase2.add_item('Walnusskerne', 3.49, 1)
test_purchase2.add_item('Datteln', 5.29, 1)
test_purchase2.add_item('Safranfäden', 6.29, 1, category='Gewürze')
test_purchase2.add_item('Vanillepulver', 2.49, 1, category='Gewürze')
test_purchase2.add_item('Kakaopulver', 2.19, 1)
test_purchase2.add_item('Basilikum, frisch', 1.99, 1, category='Gewürze')
test_purchase2.add_item('Item without Price', '', 1, category='Mehl')
test_purchase2.add_item('Roggenmehl', 3.98, 2, category='Obst')
purchase_list.append(test_purchase2)
test_purchase3 = Purchase('2015-11-17',
'Übertrag',
payment_method='Giro',
flags='L')
test_purchase3.add_item('Abhebung', 100, 1, category='Aktiva:Portmonaie')
purchase_list.append(test_purchase3)
return purchase_list
def test_collect_unsane_items(purchase_list_fixture):
rc = ReceiptCollection(purchase_list_fixture)
rc.collect_items()
assert rc.unsane_items == ['Item without Price', 'Roggenmehl']
def test_categories(purchase_list_fixture):
receipt_collection = ReceiptCollection(purchase_list_fixture)
receipt_collection.collect_items()
assert sorted(['Zubrot',
'',
'Mehl',
'Kochzutaten',
'Obst',
'Gewürze'
]) == sorted(receipt_collection.categories.keys())
def test_items_in_categories(purchase_list_fixture):
rc = ReceiptCollection(purchase_list_fixture)
rc.collect_items()
assert sorted(rc.categories[''][1]) == sorted(['Kakaopulver',
'Seidentofu',
'Sesam',
'Cashewbruch',
'Datteln',
'Walnusskerne'])
assert sorted(rc.categories['Zubrot'][1]) == sorted(['Blanc de Pomm'])
assert sorted(rc.categories['Gewürze'][1]) == sorted(['Safranfäden',
'Vanillepulver',
'Basilikum, frisch'])
assert sorted(rc.categories['Mehl'][1]) == sorted(['Roggenmehl'])
assert sorted(rc.categories['Kochzutaten'][1]) == sorted(['Risotto'])
assert sorted(rc.categories['Obst'][1]) == sorted(['Bananen',
'Roggenmehl'])
def test_category_prices(purchase_list_fixture):
rc = ReceiptCollection(purchase_list_fixture)
rc.collect_items()
assert round(rc.categories[''][0], 2) == 30.83
assert round(rc.categories['Zubrot'][0], 2) == 1.69
assert round(rc.categories['Mehl'][0], 2) == 3.98
assert round(rc.categories['Kochzutaten'][0], 2) == 4.38
assert round(rc.categories['Obst'][0], 2) == 5.20
assert round(rc.categories['Gewürze'][0], 2) == 10.77
|
gpl-3.0
| 8,633,513,128,408,846,000 | 42.425532 | 79 | 0.570064 | false |
z0rr0/licdb
|
main/key/migrations/0002_auto__add_field_key_net.py
|
1
|
4773
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Key.net'
db.add_column('key_key', 'net', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'Key.net'
db.delete_column('key_key', 'net')
models = {
'key.client': {
'Meta': {'ordering': "['student', 'name', 'key__program__name']", 'object_name': 'Client'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['key.Key']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'student': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'key.key': {
'Meta': {'ordering': "['use', 'program__name', 'key']", 'object_name': 'Key'},
'attach': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'manyuse': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'net': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'program': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['key.Program']"}),
'use': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'key.license': {
'Meta': {'ordering': "['name']", 'object_name': 'License'},
'attach': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'free': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '127'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'key.program': {
'Meta': {'ordering': "['name']", 'object_name': 'Program'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['key.License']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'use_student': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['key']
|
gpl-3.0
| 2,877,677,137,838,317,000 | 65.291667 | 137 | 0.538655 | false |
paktor132/phoenix_pipeline
|
pipeline.py
|
1
|
6171
|
from __future__ import print_function
from __future__ import unicode_literals
import sys
import logging
import datetime
import dateutil
import uploader
import utilities
import formatter
import postprocess
import oneaday_filter
import result_formatter
import scraper_connection
import to_mongo
from petrarch import petrarch
def main(file_details, server_details, logger_file=None, run_filter=None,
run_date=''):
"""
Main function to run all the things.
Parameters
----------
file_details: Named tuple.
All the other config information not in ``server_details``.
server_details: Named tuple.
Config information specifically related to the remote
server for FTP uploading.
logger_file: String.
Path to a log file. Defaults to ``None`` and opens a
``PHOX_pipeline.log`` file in the current working
directory.
run_filter: String.
Whether to run the ``oneaday_formatter``. Takes True or False
(strings) as values.
run_date: String.
Date of the format YYYYMMDD. The pipeline will run using this
date. If not specified the pipeline will run with
``current_date`` minus one day.
"""
if logger_file:
utilities.init_logger(logger_file)
else:
utilities.init_logger('PHOX_pipeline.log')
# get a local copy for the pipeline
logger = logging.getLogger('pipeline_log')
print('\nPHOX.pipeline run:', datetime.datetime.utcnow())
if run_date:
process_date = dateutil.parser.parse(run_date)
date_string = '{:02d}{:02d}{:02d}'.format(process_date.year,
process_date.month,
process_date.day)
logger.info('Date string: {}'.format(date_string))
print('Date string:', date_string)
else:
process_date = datetime.datetime.utcnow() - datetime.timedelta(days=0)
date_string = '{:02d}{:02d}{:02d}'.format(process_date.year,
process_date.month,
process_date.day)
logger.info('Date string: {}'.format(date_string))
print('Date string:', date_string)
results, scraperfilename, story_collection = scraper_connection.main(process_date,
file_details)
if scraperfilename:
logger.info("Scraper file name: " + scraperfilename)
print("Scraper file name:", scraperfilename)
logger.info("Running Mongo.formatter.py")
print("Running Mongo.formatter.py")
formatted = formatter.main(results, file_details,
process_date, date_string)
logger.info("Running PETRARCH")
file_details.fullfile_stem + date_string
if run_filter == 'False':
print('Running PETRARCH and writing to a file. No one-a-day.')
logger.info('Running PETRARCH and writing to a file. No one-a-day.')
#Command to write output to a file directly from PETR
# petrarch.run_pipeline(formatted,
# '{}{}.txt'.format(file_details.fullfile_stem,
# date_string), parsed=True)
petr_results = petrarch.run_pipeline(formatted, write_output=False,
parsed=True)
elif run_filter == 'True':
print('Running PETRARCH and returning output.')
logger.info('Running PETRARCH and returning output.')
petr_results = petrarch.run_pipeline(formatted, write_output=False,
parsed=True)
else:
print("""Can't run with the options you've specified. You need to fix
something.""")
logger.warning("Can't run with the options you've specified. Exiting.")
sys.exit()
if run_filter == 'True':
logger.info("Running oneaday_formatter.py")
print("Running oneaday_formatter.py")
formatted_results = oneaday_filter.main(petr_results)
else:
logger.info("Running result_formatter.py")
print("Running result_formatter.py")
formatted_results = result_formatter.main(petr_results)
logger.info("Running postprocess.py")
print("Running postprocess.py")
postprocess.main(formatted_results, date_string,
file_details, server_details)
logger.info("Running phox_uploader.py")
print("Running phox_uploader.py")
try:
uploader.main(date_string, server_details, file_details)
except Exception as e:
logger.warning("Error on the upload portion. {}".format(e))
print("""Error on the uploader. This step isn't absolutely necessary.
Valid events should still be generated.""")
logger.info("Updating story collection to set phoenix = 1")
print("Updating story collection to set phoenix = 1")
#here's a stab at the update part; needs to be tested
for i, story in enumerate(list(results)):
story_collection.update({"_id": story['_id']},{"$set": {"phoenix": 1}})
logger.info('PHOX.pipeline end')
print('PHOX.pipeline end:', datetime.datetime.utcnow())
if __name__ == '__main__':
# initialize the various utilities globals
server_details, file_details = utilities.parse_config('PHOX_config.ini')
while 1:
main(file_details, server_details, file_details.log_file, run_filter=file_details.oneaday_filter)
to_mongo.main()
yesterday_date = datetime.datetime.utcnow() - datetime.timedelta(days=1)
yesterday_date_string = '{:02d}{:02d}{:02d}'.format(yesterday_date.year, yesterday_date.month, yesterday_date.day)
main(file_details, server_details, file_details.log_file, run_filter=file_details.oneaday_filter, run_date = yesterday_date_string)
to_mongo.main()
#may want to re-run main on one day previous in case events that show up in the database just before midnight are not caught
|
mit
| -7,422,046,759,884,744,000 | 40.416107 | 140 | 0.609626 | false |
przybys/poker-planning
|
poker/models.py
|
1
|
9099
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
import json
import datetime
from jinja2.utils import urlize
from google.appengine.ext import db
from poker.firebase import send_firebase_message
__all__ = [
'Game',
'Participant',
'Story',
'Round',
'Estimate',
]
class Game(db.Model):
DECK_CHOICES = (
(1 , ('1', '2', '3', '5', '8', '13', '21', '100', '?', 'Coffee')),
(2 , ('0', '1/2' , '1', '2', '3', '5', '8', '13', '20', '40', '60', '100', '?', 'oo')),
(3 , ('0', '1', '2', '3', '5', '8', '13', '21', '44', '?', 'oo')),
)
name = db.StringProperty(required = True)
deck = db.IntegerProperty(required = True, choices = [deck[0] for deck in DECK_CHOICES])
completed = db.BooleanProperty(default = False)
user = db.UserProperty(required = True)
current_story_id = db.IntegerProperty()
created = db.DateTimeProperty(auto_now_add = True)
def get_deck(self):
for deck in self.DECK_CHOICES:
if self.deck == deck[0]:
return deck[1]
return ()
def get_participants(self):
return Participant.all().ancestor(self).order("created")
def get_stories(self):
return Story.all().ancestor(self).order("created")
def get_url(self):
game_url = '/game/' + str(self.key().id())
return game_url
def get_current_story(self):
if not self.current_story_id:
return None
return Story.get_by_id(self.current_story_id, self)
def get_participant_messages(self):
messages = []
for participant in self.get_participants():
message = participant.get_message()
messages.append(message)
return messages
def get_story_messages(self):
messages = []
for story in self.get_stories():
message = story.get_message()
messages.append(message)
return messages
def get_current_story_message(self):
current_story = self.get_current_story()
if not current_story:
return None
return current_story.get_message()
def get_message(self):
message = {
'id': self.key().id(),
'name': self.name,
'deck': self.get_deck(),
'completed': self.completed,
'user': self.user.user_id(),
'current_story': self.get_current_story_message(),
'url': self.get_url(),
'participants': self.get_participant_messages(),
'stories': self.get_story_messages(),
}
return message
def send_update(self, force = True, user = None):
message = self.get_message()
message = json.dumps(message)
participants = self.get_participants()
for participant in participants:
if user and participant.user == user:
force = True
participant.send_update(message, force)
def get_user_estimates(self, user):
estimates = {}
if not user:
return estimates
for story in self.get_stories():
for round in story.get_rounds():
estimate = round.get_estimate(user)
round_id = round.key().id()
if estimate:
card = estimate.card
estimates[round_id] = card
return estimates
def delete(self, **kwargs):
participants = self.get_participants()
for participant in participants:
participant.send_update(None, True)
db.delete(Participant.all(keys_only = True).ancestor(self))
stories = self.get_stories()
for story in stories:
story.delete()
super(Game, self).delete(**kwargs)
class Participant(db.Model):
user = db.UserProperty(required = True)
name = db.StringProperty()
photo = db.StringProperty()
created = db.DateTimeProperty(auto_now_add = True)
observer = db.BooleanProperty(required = True, default = False)
last_update = db.DateTimeProperty(auto_now_add = True)
def get_url(self):
game_url = self.parent().get_url()
participant_url = game_url + '/participant/' + self.key().name()
return participant_url
def get_name(self):
if self.name:
return self.name
else:
return self.user.nickname()
def get_message(self):
message = {
'user': self.user.user_id(),
'name': self.get_name(),
'photo': self.photo,
'observer': self.observer,
'url': self.get_url()
}
return message
def send_update(self, message, force):
if force or self.need_update():
self.last_update = datetime.datetime.now()
self.put()
send_firebase_message(self.key().name(), message)
def need_update(self):
return datetime.datetime.now() - self.last_update > datetime.timedelta(seconds = 1)
class Story(db.Model):
SKIPPED = -1
name = db.StringProperty(required = True)
estimate = db.IntegerProperty()
created = db.DateTimeProperty(auto_now_add = True)
def get_rounds(self):
return Round.all().ancestor(self).order("created")
def get_estimate(self):
game = self.parent()
deck = game.get_deck()
card = self.estimate
if card == self.SKIPPED:
return card
if card is None:
return None
try:
estimate = deck[card]
except IndexError:
return None
return estimate
def get_name_display(self):
return urlize(self.name, 80)
def get_url(self):
game_url = self.parent().get_url()
story_url = game_url + '/story/' + str(self.key().id())
return story_url
def is_current(self):
game = self.parent()
is_current = game.current_story_id == self.key().id()
return is_current
def new_round(self):
rounds = self.get_rounds()
for round in rounds:
round.completed = True
round.put()
round = Round(
parent = self
)
round.put()
self.estimate = None
self.put()
return round
def get_round_messages(self):
messages = []
if not self.is_current():
return messages
for round in self.get_rounds():
message = round.get_message()
messages.append(message)
return messages
def get_message(self):
message = {
'id': self.key().id(),
'name': self.get_name_display(),
'estimate': self.get_estimate(),
'url': self.get_url(),
'is_current': self.is_current(),
'rounds': self.get_round_messages(),
}
return message
def delete(self, **kwargs):
rounds = self.get_rounds()
for round in rounds:
round.delete()
super(Story, self).delete(**kwargs)
class Round(db.Model):
completed = db.BooleanProperty(default = False)
created = db.DateTimeProperty(auto_now_add = True)
def get_estimates(self):
return Estimate.all().ancestor(self).order("created")
def get_url(self):
story_url = self.parent().get_url()
round_url = story_url + '/round/' + str(self.key().id())
return round_url
def get_estimate(self, user):
if not user:
return None
estimate_key = str(self.key().id()) + str(user.user_id())
estimate = Estimate.get_by_key_name(estimate_key, self)
return estimate
def get_estimate_messages(self):
messages = []
for estimate in self.get_estimates():
message = estimate.get_message()
messages.append(message)
return messages
def get_message(self):
message = {
'id': self.key().id(),
'completed': self.completed,
'url': self.get_url(),
'estimates': self.get_estimate_messages(),
}
return message
def delete(self, **kwargs):
db.delete(Estimate.all(keys_only = True).ancestor(self))
super(Round, self).delete(**kwargs)
class Estimate(db.Model):
user = db.UserProperty(required = True)
card = db.IntegerProperty(required = True)
created = db.DateTimeProperty(auto_now_add = True)
def get_message(self):
message = {
'user': self.user.user_id(),
'name': self.user.nickname(),
'card': self.get_card(),
}
return message
def get_card(self):
round = self.parent()
if not round.completed:
return None
story = round.parent()
game = story.parent()
deck = game.get_deck()
card = self.card
try:
estimate = deck[card]
except IndexError:
return None
return estimate
|
mit
| 1,969,652,898,089,457,400 | 29.533557 | 95 | 0.544455 | false |
chris48s/UK-Polling-Stations
|
polling_stations/apps/uk_geo_utils/management/commands/import_onspd.py
|
1
|
2103
|
import os
import glob
from django.db import connection
from django.core.management.base import BaseCommand
from uk_geo_utils.helpers import get_onspd_model
class Command(BaseCommand):
"""
To import ONSPD, grab the latest release:
https://ons.maps.arcgis.com/home/search.html?t=content&q=ONS%20Postcode%20Directory
and run
python manage.py import_onspd /path/to/data
"""
def add_arguments(self, parser):
parser.add_argument(
'path',
help='Path to the directory containing the ONSPD CSVs'
)
def handle(self, *args, **kwargs):
self.table_name = get_onspd_model()._meta.db_table
cursor = connection.cursor()
self.stdout.write("clearing existing data..")
cursor.execute("TRUNCATE TABLE %s;" % (self.table_name))
glob_str = os.path.join(kwargs['path'], "*.csv")
self.stdout.write("importing from files..")
for f in glob.glob(glob_str):
self.stdout.write(f)
fp = open(f, 'r')
cursor.copy_expert("""
COPY %s (
pcd, pcd2, pcds, dointr, doterm, oscty, oslaua, osward,
usertype, oseast1m, osnrth1m, osgrdind, oshlthau, hro, ctry,
gor, streg, pcon, eer, teclec, ttwa, pct, nuts, psed, cened,
edind, oshaprev, lea, oldha, wardc91, wardo91, ward98,
statsward, oa01, casward, park, lsoa01, msoa01, ur01ind, oac01,
oldpct, oa11, lsoa11, msoa11, parish, wz11, ccg, bua11,
buasd11, ru11ind, oac11, lat, long, lep1, lep2, pfa, imd
) FROM STDIN (FORMAT CSV, DELIMITER ',', quote '"', HEADER);
""" % (self.table_name), fp)
# turn text lng/lat into a Point() field
cursor.execute("""
UPDATE %s SET location=CASE
WHEN ("long"='0.000000' AND lat='99.999999')
THEN NULL
ELSE ST_GeomFromText('POINT(' || "long" || ' ' || lat || ')',4326)
END
""" % (self.table_name))
self.stdout.write("...done")
|
bsd-3-clause
| 7,411,854,068,189,791,000 | 36.553571 | 87 | 0.566809 | false |
karstenw/nodebox-pyobjc
|
examples/Extended Application/matplotlib/examples/subplots_axes_and_figures/custom_figure_class.py
|
1
|
1517
|
"""
===================
Custom Figure Class
===================
You can pass a custom Figure constructor to figure if you want to derive from
the default Figure. This simple example creates a figure with a figure title.
"""
import matplotlib.pyplot as plt #import figure, show
from matplotlib.figure import Figure
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
class MyFigure(Figure):
def __init__(self, *args, **kwargs):
"""
custom kwarg figtitle is a figure title
"""
figtitle = kwargs.pop('figtitle', 'hi mom')
Figure.__init__(self, *args, **kwargs)
self.text(0.5, 0.95, figtitle, ha='center')
fig = plt.figure(FigureClass=MyFigure, figtitle='my title')
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
pltshow(plt)
|
mit
| 8,111,102,881,483,984,000 | 24.711864 | 82 | 0.58998 | false |
hanteng/babel
|
scripts/geoname_cldr.py
|
1
|
2479
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#歧視無邊,回頭是岸。鍵起鍵落,情真情幻。 (Discrimination is boundless; turning back is the shore. Keystrokes rise and fall; feelings true and illusory.)
# url_target="https://raw.githubusercontent.com/datasets/country-codes/master/data/country-codes.csv"
import csv
import pandas as pd
import codecs
def export_to_csv(df, ex_filename, sep=','):
if sep==',':
df.to_csv(ex_filename, sep=sep, quoting=csv.QUOTE_ALL, na_rep='{na}', encoding='utf-8') #+'.csv'
if sep=='\t':
df.to_csv(ex_filename, sep=sep, quoting=csv.QUOTE_NONE, na_rep='{na}', encoding='utf-8') #+'.tsv' , escapechar="'", quotechar=""
def import_from_babel_cldr():
from babel import Locale
#staring from the en-US to retrieve keys
locale = Locale('en', 'US')
completelist_territories = locale.territories.keys()
completelist_languages = locale.languages.keys()
#intiate the output dataframe from this
df_cldr=pd.DataFrame.from_dict(locale.territories, orient="index")
df_cldr.index.name='geocode'
df_cldr.columns = ['name_en']
df_cldr.sort_index(inplace=True)
for i_lang in completelist_languages:
#print(i_lang)
try:
locale = Locale.parse(i_lang)
df=pd.DataFrame.from_dict(locale.territories, orient="index")
df.columns = ['name_{0}'.format(i_lang)]
df.sort_index(inplace=True)
df_cldr=df_cldr.join(df)
except:
pass
return df_cldr
###################### MAIN ########################
import os
path_script=os.path.dirname(os.path.abspath(__file__))
#print path_script
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Fetch and generate the country and territory names in languages that are supported by the Unicode CLDR 25.""")
parser.add_argument("-o", "--output", dest="outputpath", default="geoname_CLDR25_babel.csv",
help="write data to a csv file or a tsv file", metavar="OUTPUTPATH")
args = parser.parse_args()
fn = args.outputpath
#print fn
df_cldr=import_from_babel_cldr()
if fn[-3:]=='csv':
print ("Outputing to {}".format(fn))
export_to_csv(df_cldr, ex_filename=os.path.join(path_script, fn), sep=',')
elif fn[-3:]=='tsv':
print ("Outputing to {}".format(fn))
export_to_csv(df_cldr, ex_filename=os.path.join(path_script, fn), sep='\t')
else:
print ("Only csv and tsv formats can be generated. Sorry.")
|
bsd-3-clause
| 7,863,930,118,899,022,000 | 34.347826 | 162 | 0.613366 | false |
kaushik94/sympy
|
sympy/benchmarks/bench_symbench.py
|
1
|
2832
|
#!/usr/bin/env python
from __future__ import print_function, division
from sympy.core.compatibility import range
from random import random
from sympy import factor, I, Integer, pi, simplify, sin, sqrt, Symbol, sympify
from sympy.abc import x, y, z
from timeit import default_timer as clock
def bench_R1():
"real(f(f(f(f(f(f(f(f(f(f(i/2)))))))))))"
def f(z):
return sqrt(Integer(1)/3)*z**2 + I/3
f(f(f(f(f(f(f(f(f(f(I/2)))))))))).as_real_imag()[0]
def bench_R2():
"Hermite polynomial hermite(15, y)"
def hermite(n, y):
if n == 1:
return 2*y
if n == 0:
return 1
return (2*y*hermite(n - 1, y) - 2*(n - 1)*hermite(n - 2, y)).expand()
hermite(15, y)
def bench_R3():
"a = [bool(f==f) for _ in range(10)]"
f = x + y + z
[bool(f == f) for _ in range(10)]
def bench_R4():
# we don't have Tuples
pass
def bench_R5():
"blowup(L, 8); L=uniq(L)"
def blowup(L, n):
for i in range(n):
L.append( (L[i] + L[i + 1]) * L[i + 2] )
def uniq(x):
v = set(x)
return v
L = [x, y, z]
blowup(L, 8)
L = uniq(L)
def bench_R6():
"sum(simplify((x+sin(i))/x+(x-sin(i))/x) for i in range(100))"
sum(simplify((x + sin(i))/x + (x - sin(i))/x) for i in range(100))
def bench_R7():
"[f.subs(x, random()) for _ in range(10**4)]"
f = x**24 + 34*x**12 + 45*x**3 + 9*x**18 + 34*x**10 + 32*x**21
[f.subs(x, random()) for _ in range(10**4)]
def bench_R8():
"right(x^2,0,5,10^4)"
def right(f, a, b, n):
a = sympify(a)
b = sympify(b)
n = sympify(n)
x = f.atoms(Symbol).pop()
Deltax = (b - a)/n
c = a
est = 0
for i in range(n):
c += Deltax
est += f.subs(x, c)
return est*Deltax
right(x**2, 0, 5, 10**4)
def _bench_R9():
"factor(x^20 - pi^5*y^20)"
factor(x**20 - pi**5*y**20)
def bench_R10():
"v = [-pi,-pi+1/10..,pi]"
def srange(min, max, step):
v = [min]
while (max - v[-1]).evalf() > 0:
v.append(v[-1] + step)
return v[:-1]
srange(-pi, pi, sympify(1)/10)
def bench_R11():
"a = [random() + random()*I for w in [0..1000]]"
[random() + random()*I for w in range(1000)]
def bench_S1():
"e=(x+y+z+1)**7;f=e*(e+1);f.expand()"
e = (x + y + z + 1)**7
f = e*(e + 1)
f.expand()
if __name__ == '__main__':
benchmarks = [
bench_R1,
bench_R2,
bench_R3,
bench_R5,
bench_R6,
bench_R7,
bench_R8,
#_bench_R9,
bench_R10,
bench_R11,
#bench_S1,
]
report = []
for b in benchmarks:
t = clock()
b()
t = clock() - t
print("%s%65s: %f" % (b.__name__, b.__doc__, t))
|
bsd-3-clause
| -4,173,381,737,773,035,500 | 20.618321 | 78 | 0.46822 | false |
HewlettPackard/oneview-ansible
|
library/oneview_appliance_device_read_community_facts.py
|
1
|
2333
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: oneview_appliance_device_read_community_facts
short_description: Retrieve the facts about the OneView appliance device read community.
description:
- Retrieve the facts about the OneView appliance device read community.
version_added: "2.5"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 4.8.0"
author:
"Gianluca Zecchi (@gzecchi)"
extends_documentation_fragment:
- oneview
'''
EXAMPLES = '''
- name: Gather facts about the Appliance snmp configuration
oneview_appliance_device_read_community_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
- debug:
var: appliance_device_read_community
'''
RETURN = '''
appliance_device_read_community:
description: Has all the OneView facts about the OneView appliance device read community.
returned: Always.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class ApplianceDeviceReadCommunityFactsModule(OneViewModuleBase):
def __init__(self):
super(ApplianceDeviceReadCommunityFactsModule, self).__init__(additional_arg_spec=dict())
def execute_module(self):
appliance_device_read_community = self.oneview_client.appliance_device_read_community.get()
return dict(changed=False,
ansible_facts=dict(appliance_device_read_community=appliance_device_read_community))
def main():
ApplianceDeviceReadCommunityFactsModule().run()
if __name__ == '__main__':
main()
|
apache-2.0
| 3,321,506,982,458,706,400 | 29.697368 | 104 | 0.712816 | false |
gonadarian/kagen
|
kagen/youtube.py
|
1
|
2932
|
import os
import csv
import json
import pymongo
from kagen import utils
from kagen.utils import config
from datetime import datetime
logger = utils.get_logger("youtube")
def work():
dtf = "%Y-%m-%dT%H:%M:%S.%fZ"
yt_key = config["keys"]["youtube_key"]
yt_user = config["run"]["youtube_user"]
youtube = utils.get_conn_youtube()
db = utils.get_conn_mongo()
query_base = "/youtube/v3/channels?part=id&forUsername={}&maxResults=50&key={}"
query = query_base.format(yt_user, yt_key)
doc = utils.get_response_json(youtube, query)
chid = doc["items"][0]["id"]
logger.info("Channel ID: {}".format(chid))
playlists = []
query_base = "/youtube/v3/playlists?part=snippet&channelId={}&maxResults=50&key={}"
query = query_base.format(chid, yt_key)
doc = utils.get_response_json(youtube, query)
playlists.extend(doc["items"])
logger.info("Playlist count: {}".format(len(playlists)))
query_base = "/youtube/v3/playlistItems?part=contentDetails&playlistId={}&maxResults=50&key={}"
for playlist in playlists:
plid = playlist["id"]
query = query_base.format(plid, yt_key)
doc = utils.get_response_json(youtube, query)
playlist["items"] = doc["items"]
for playlist in playlists:
playlist["_id"] = playlist["id"]
playlist["etag"] = playlist["etag"].strip("\"")
playlist.update(playlist["snippet"])
del(playlist["snippet"])
playlist["publishedAt"] = datetime.strptime(playlist["publishedAt"], dtf)
for item in playlist["items"]:
item["ytid"] = item["contentDetails"]["videoId"]
del(item["contentDetails"])
db.youtube_playlists.drop()
db.youtube_playlists.insert(playlists)
videos = []
ytids = []
for playlist in playlists:
message = "\tPlaylist '{}' count: {}"
logger.info(message.format(playlist["_id"], len(playlist["items"])))
for item in playlist["items"]:
ytid = item["ytid"]
query = "/youtube/v3/videos?part=snippet&id={}&maxResults=50&key={}"
query = query.format(ytid, yt_key)
doc = utils.get_response_json(youtube, query)
for video in doc["items"]:
if ytid not in ytids:
videos.append(video)
ytids.append(ytid)
else:
logger.warn("\t\tDuplicate video ID: {}".format(ytid))
for video in videos:
video["_id"] = video["id"]
video["etag"] = video["etag"].strip("\"")
video.update(video["snippet"])
del(video["snippet"])
video["publishedAt"] = datetime.strptime(video["publishedAt"], dtf)
video["categoryId"] = int(video["categoryId"])
db.youtube_videos.drop()
db.youtube_videos.insert(videos)
@utils.entry_point
def main():
logger.info("START youtube")
work()
logger.info("DONE youtube")
|
mit
| 4,931,035,741,735,050,000 | 33.093023 | 99 | 0.601978 | false |
lebrice/SimpleParsing
|
test/nesting/example_use_cases.py
|
1
|
4329
|
from dataclasses import dataclass, field
from typing import *
from . import TestSetup
__all__ = [
"HParams",
"RunConfig",
"TrainConfig",
"TaskHyperParameters",
"HyperParameters",
]
@dataclass
class HParams(TestSetup):
"""
Model Hyper-parameters
"""
# Number of examples per batch
batch_size: int = 32
# fixed learning rate passed to the optimizer.
learning_rate: float = 0.005
# name of the optimizer class to use
optimizer: str = "ADAM"
default_num_layers: ClassVar[int] = 10
# number of layers.
num_layers: int = default_num_layers
# the number of neurons at each layer
neurons_per_layer: List[int] = field(default_factory=lambda: [128] * HParams.default_num_layers)
@dataclass
class RunConfig(TestSetup):
"""
Group of settings used during a training or validation run.
"""
# the set of hyperparameters for this run.
hparams: HParams = HParams()
log_dir: str = "logs" # The logging directory where
checkpoint_dir: str = field(init=False)
def __post_init__(self):
"""Post-Init to set the fields that shouldn't be constructor arguments."""
import os
self.checkpoint_dir = os.path.join(self.log_dir, "checkpoints")
@dataclass
class TrainConfig(TestSetup):
"""
Top-level settings for multiple runs.
"""
# run config to be used during training
train: RunConfig = RunConfig(log_dir="train")
# run config to be used during validation.
valid: RunConfig = RunConfig(log_dir="valid")
@dataclass
class TaskHyperParameters(TestSetup):
"""
HyperParameters for a task-specific model
"""
# name of the task
name: str
# number of dense layers
num_layers: int = 1
# units per layer
num_units: int = 8
# activation function
activation: str = "tanh"
    # whether or not to use batch normalization after each dense layer
    use_batchnorm: bool = False
    # whether or not to use dropout after each dense layer
    use_dropout: bool = True
    # the dropout rate
    dropout_rate: float = 0.1
    # whether or not image features should be used as input
    use_image_features: bool = True
    # whether or not 'likes' features should be used as input
    use_likes: bool = True
# L1 regularization coefficient
l1_reg: float = 0.005
# L2 regularization coefficient
l2_reg: float = 0.005
    # Whether or not a task-specific Embedding layer should be used on the 'likes' features.
    # When set to 'True', it is expected that no shared embedding is used.
embed_likes: bool = False
@dataclass
class HyperParameters(TestSetup):
"""Hyperparameters of our model."""
# the batch size
batch_size: int = 128
# Which optimizer to use during training.
optimizer: str = "sgd"
# Learning Rate
learning_rate: float = 0.001
# number of individual 'pages' that were kept during preprocessing of the 'likes'.
# This corresponds to the number of entries in the multi-hot like vector.
num_like_pages: int = 10_000
gender_loss_weight: float = 1.0
age_loss_weight: float = 1.0
num_text_features: ClassVar[int] = 91
num_image_features: ClassVar[int] = 65
max_number_of_likes: int = 2000
embedding_dim: int = 8
shared_likes_embedding: bool = True
    # Whether or not to use Rémi's better kept like pages
use_custom_likes: bool = True
# Gender model settings:
gender: TaskHyperParameters = TaskHyperParameters(
"gender",
num_layers=1,
num_units=32,
use_batchnorm=False,
use_dropout=True,
dropout_rate=0.1,
use_image_features=True,
use_likes=True,
)
# Age Group Model settings:
age_group: TaskHyperParameters = TaskHyperParameters(
"age_group",
num_layers=2,
num_units=64,
use_batchnorm=False,
use_dropout=True,
dropout_rate=0.1,
use_image_features=True,
use_likes=True,
)
# Personality Model(s) settings:
personality: TaskHyperParameters = TaskHyperParameters(
"personality",
num_layers=1,
num_units=8,
use_batchnorm=False,
use_dropout=True,
dropout_rate=0.1,
use_image_features=False,
use_likes=False,
)
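# --- Illustrative usage sketch (added; not part of the original test module) ---
# Assumes the `simple_parsing` package is installed and that HParams can be
# parsed with its ArgumentParser; the command-line values below are arbitrary.
if __name__ == "__main__":
    from simple_parsing import ArgumentParser
    parser = ArgumentParser()
    parser.add_arguments(HParams, dest="hparams")
    args = parser.parse_args(["--batch_size", "64", "--learning_rate", "0.01"])
    print(args.hparams.batch_size, args.hparams.learning_rate)  # 64 0.01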
|
mit
| -6,797,513,807,726,035,000 | 27.287582 | 100 | 0.644871 | false |
tombstone/models
|
research/object_detection/core/freezable_batch_norm.py
|
1
|
2992
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A freezable batch norm layer that uses Keras batch normalization."""
import tensorflow.compat.v1 as tf
class FreezableBatchNorm(tf.keras.layers.BatchNormalization):
"""Batch normalization layer (Ioffe and Szegedy, 2014).
This is a `freezable` batch norm layer that supports setting the `training`
parameter in the __init__ method rather than having to set it either via
the Keras learning phase or via the `call` method parameter. This layer will
forward all other parameters to the default Keras `BatchNormalization`
  layer.
  This class is necessary because Object Detection model training sometimes
  requires batch normalization layers to be `frozen` and used as if it were
  evaluation time, despite still training (and potentially using dropout layers).
Like the default Keras BatchNormalization layer, this will normalize the
activations of the previous layer at each batch,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.
Arguments:
training: If False, the layer will normalize using the moving average and
std. dev, without updating the learned avg and std. dev.
If None or True, the layer will follow the keras BatchNormalization layer
strategy of checking the Keras learning phase at `call` time to decide
what to do.
**kwargs: The keyword arguments to forward to the keras BatchNormalization
layer constructor.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
References:
- [Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
"""
def __init__(self, training=None, **kwargs):
super(FreezableBatchNorm, self).__init__(**kwargs)
self._training = training
def call(self, inputs, training=None):
# Override the call arg only if the batchnorm is frozen. (Ignore None)
if self._training is False: # pylint: disable=g-bool-id-comparison
training = self._training
return super(FreezableBatchNorm, self).call(inputs, training=training)
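# --- Illustrative usage sketch (added; not part of the original module) ---
# Shows the frozen behaviour described in the docstring: with training=False
# the layer always normalizes with its moving statistics, even inside a
# training loop. The input size below is an arbitrary assumption.
if __name__ == '__main__':
  frozen_bn = FreezableBatchNorm(training=False)
  inputs = tf.keras.Input(shape=(8,))
  outputs = frozen_bn(inputs)  # moving mean/variance are used even in training
  model = tf.keras.Model(inputs, outputs)
  model.summary()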
|
apache-2.0
| 968,853,710,624,408,300 | 43 | 80 | 0.726604 | false |
racemidev/RegAdminForLinux
|
python/rregadmin/hive/cell_wrapper.py
|
1
|
11246
|
# generated by 'xml2py'
# flags '-c -d -v -k defst -lrregadmin -m rregadmin.util.glib_wrapper -m rregadmin.util.icu_wrapper -m rregadmin.util.path_wrapper -m rregadmin.util.icu_wrapper -m rregadmin.util.path_info_wrapper -m rregadmin.util.ustring_wrapper -m rregadmin.util.offset_wrapper -m rregadmin.util.value_wrapper -m rregadmin.util.ustring_list_wrapper -m rregadmin.hive.types_wrapper -r ^cell_.* -r ^CellType$ -r ^CellMark$ -r ^CellId$ -o cell_wrapper.py cell_wrapper.xml'
from ctypes import *
from rregadmin.hive.types_wrapper import Cell
from rregadmin.hive.types_wrapper import Hive
from rregadmin.util.offset_wrapper import offset
_libraries = {}
_libraries['librregadmin.so.1'] = CDLL('librregadmin.so.1')
from rregadmin.util.ustring_list_wrapper import gboolean
from rregadmin.hive.types_wrapper import Bin
from rregadmin.hive.types_wrapper import guint32
from rregadmin.util.value_wrapper import guint8
STRING = c_char_p
from rregadmin.util.ustring_list_wrapper import ustring
# values for enumeration 'CellType'
CELL_TYPE_KEY = 1
CELL_TYPE_VALUE_KEY = 2
CELL_TYPE_KEY_LIST = 3
CELL_TYPE_VALUE_LIST = 4
CELL_TYPE_SECURITY = 5
CELL_TYPE_UNKNOWN = 20
CELL_TYPE_EMPTY = 21
CellType = c_int # enum
# ../../../rregadmin/hive/cell.h 94
cell_alloc = _libraries['librregadmin.so.1'].cell_alloc
cell_alloc.restype = POINTER(Cell)
# cell_alloc(hdesc, ofs, size)
cell_alloc.argtypes = [POINTER(Hive), offset, c_int]
cell_alloc.__doc__ = \
"""Cell * cell_alloc(Hive * hdesc, offset ofs, int size)
../../../rregadmin/hive/cell.h:94"""
# ../../../rregadmin/hive/cell.h 107
cell_unalloc = _libraries['librregadmin.so.1'].cell_unalloc
cell_unalloc.restype = gboolean
# cell_unalloc(in_cell)
cell_unalloc.argtypes = [POINTER(Cell)]
cell_unalloc.__doc__ = \
"""gboolean cell_unalloc(Cell * in_cell)
../../../rregadmin/hive/cell.h:107"""
# ../../../rregadmin/hive/cell.h 114
cell_init = _libraries['librregadmin.so.1'].cell_init
cell_init.restype = gboolean
# cell_init(hdesc, bin, data, data_len)
cell_init.argtypes = [POINTER(Hive), POINTER(Bin), offset, guint32]
cell_init.__doc__ = \
"""gboolean cell_init(Hive * hdesc, Bin * bin, offset data, guint32 data_len)
../../../rregadmin/hive/cell.h:114"""
# ../../../rregadmin/hive/cell.h 120
cell_get = _libraries['librregadmin.so.1'].cell_get
cell_get.restype = POINTER(Cell)
# cell_get(hdesc, in_bin, ptr)
cell_get.argtypes = [POINTER(Hive), POINTER(Bin), offset]
cell_get.__doc__ = \
"""Cell * cell_get(Hive * hdesc, Bin * in_bin, offset ptr)
../../../rregadmin/hive/cell.h:120"""
# ../../../rregadmin/hive/cell.h 128
cell_delete = _libraries['librregadmin.so.1'].cell_delete
cell_delete.restype = None
# cell_delete(in_cell)
cell_delete.argtypes = [POINTER(Cell)]
cell_delete.__doc__ = \
"""void cell_delete(Cell * in_cell)
../../../rregadmin/hive/cell.h:128"""
# ../../../rregadmin/hive/cell.h 134
cell_get_data_length = _libraries['librregadmin.so.1'].cell_get_data_length
cell_get_data_length.restype = guint32
# cell_get_data_length(in_cell)
cell_get_data_length.argtypes = [POINTER(Cell)]
cell_get_data_length.__doc__ = \
"""guint32 cell_get_data_length(Cell * in_cell)
../../../rregadmin/hive/cell.h:134"""
# ../../../rregadmin/hive/cell.h 140
cell_get_data = _libraries['librregadmin.so.1'].cell_get_data
cell_get_data.restype = POINTER(guint8)
# cell_get_data(in_cell)
cell_get_data.argtypes = [POINTER(Cell)]
cell_get_data.__doc__ = \
"""guint8 * cell_get_data(Cell * in_cell)
../../../rregadmin/hive/cell.h:140"""
# ../../../rregadmin/hive/cell.h 149
cell_clear = _libraries['librregadmin.so.1'].cell_clear
cell_clear.restype = gboolean
# cell_clear(in_cell)
cell_clear.argtypes = [POINTER(Cell)]
cell_clear.__doc__ = \
"""gboolean cell_clear(Cell * in_cell)
../../../rregadmin/hive/cell.h:149"""
# ../../../rregadmin/hive/cell.h 155
cell_get_hive = _libraries['librregadmin.so.1'].cell_get_hive
cell_get_hive.restype = POINTER(Hive)
# cell_get_hive(in_cell)
cell_get_hive.argtypes = [POINTER(Cell)]
cell_get_hive.__doc__ = \
"""Hive * cell_get_hive(Cell * in_cell)
../../../rregadmin/hive/cell.h:155"""
# ../../../rregadmin/hive/cell.h 161
cell_get_offset = _libraries['librregadmin.so.1'].cell_get_offset
cell_get_offset.restype = offset
# cell_get_offset(in_cell)
cell_get_offset.argtypes = [POINTER(Cell)]
cell_get_offset.__doc__ = \
"""offset cell_get_offset(Cell * in_cell)
../../../rregadmin/hive/cell.h:161"""
# ../../../rregadmin/hive/cell.h 167
cell_get_bin = _libraries['librregadmin.so.1'].cell_get_bin
cell_get_bin.restype = POINTER(Bin)
# cell_get_bin(in_cell)
cell_get_bin.argtypes = [POINTER(Cell)]
cell_get_bin.__doc__ = \
"""Bin * cell_get_bin(Cell * in_cell)
../../../rregadmin/hive/cell.h:167"""
# ../../../rregadmin/hive/cell.h 175
cell_equal = _libraries['librregadmin.so.1'].cell_equal
cell_equal.restype = gboolean
# cell_equal(in_cella, in_cellb)
cell_equal.argtypes = [POINTER(Cell), POINTER(Cell)]
cell_equal.__doc__ = \
"""gboolean cell_equal(unknown * in_cella, unknown * in_cellb)
../../../rregadmin/hive/cell.h:175"""
# ../../../rregadmin/hive/cell.h 181
cell_is_valid = _libraries['librregadmin.so.1'].cell_is_valid
cell_is_valid.restype = gboolean
# cell_is_valid(in_cell)
cell_is_valid.argtypes = [POINTER(Cell)]
cell_is_valid.__doc__ = \
"""gboolean cell_is_valid(Cell * in_cell)
../../../rregadmin/hive/cell.h:181"""
# values for enumeration 'CellID'
ID_SK_KEY = 27507
ID_LF_KEY = 26220
ID_LH_KEY = 26732
ID_LI_KEY = 26988
ID_RI_KEY = 26994
ID_VK_KEY = 27510
ID_NK_KEY = 27502
CellID = c_int # enum
# ../../../rregadmin/hive/cell.h 189
cell_get_id = _libraries['librregadmin.so.1'].cell_get_id
cell_get_id.restype = CellID
# cell_get_id(in_cell)
cell_get_id.argtypes = [POINTER(Cell)]
cell_get_id.__doc__ = \
"""CellID cell_get_id(Cell * in_cell)
../../../rregadmin/hive/cell.h:189"""
# ../../../rregadmin/hive/cell.h 197
cell_get_type = _libraries['librregadmin.so.1'].cell_get_type
cell_get_type.restype = CellType
# cell_get_type(in_cell)
cell_get_type.argtypes = [POINTER(Cell)]
cell_get_type.__doc__ = \
"""CellType cell_get_type(Cell * in_cell)
../../../rregadmin/hive/cell.h:197"""
# ../../../rregadmin/hive/cell.h 203
cell_get_id_str = _libraries['librregadmin.so.1'].cell_get_id_str
cell_get_id_str.restype = STRING
# cell_get_id_str(in_cell)
cell_get_id_str.argtypes = [POINTER(Cell)]
cell_get_id_str.__doc__ = \
"""unknown * cell_get_id_str(Cell * in_cell)
../../../rregadmin/hive/cell.h:203"""
# ../../../rregadmin/hive/cell.h 211
cell_size = _libraries['librregadmin.so.1'].cell_size
cell_size.restype = guint32
# cell_size(in_cell)
cell_size.argtypes = [POINTER(Cell)]
cell_size.__doc__ = \
"""guint32 cell_size(Cell * in_cell)
../../../rregadmin/hive/cell.h:211"""
# ../../../rregadmin/hive/cell.h 217
cell_is_allocd = _libraries['librregadmin.so.1'].cell_is_allocd
cell_is_allocd.restype = gboolean
# cell_is_allocd(in_cell)
cell_is_allocd.argtypes = [POINTER(Cell)]
cell_is_allocd.__doc__ = \
"""gboolean cell_is_allocd(Cell * in_cell)
../../../rregadmin/hive/cell.h:217"""
# ../../../rregadmin/hive/cell.h 225
cell_get_next = _libraries['librregadmin.so.1'].cell_get_next
cell_get_next.restype = POINTER(Cell)
# cell_get_next(in_cell)
cell_get_next.argtypes = [POINTER(Cell)]
cell_get_next.__doc__ = \
"""Cell * cell_get_next(Cell * in_cell)
../../../rregadmin/hive/cell.h:225"""
# ../../../rregadmin/hive/cell.h 233
cell_get_prev = _libraries['librregadmin.so.1'].cell_get_prev
cell_get_prev.restype = POINTER(Cell)
# cell_get_prev(in_cell)
cell_get_prev.argtypes = [POINTER(Cell)]
cell_get_prev.__doc__ = \
"""Cell * cell_get_prev(Cell * in_cell)
../../../rregadmin/hive/cell.h:233"""
# ../../../rregadmin/hive/cell.h 241
cell_debug_print = _libraries['librregadmin.so.1'].cell_debug_print
cell_debug_print.restype = None
# cell_debug_print(in_cell)
cell_debug_print.argtypes = [POINTER(Cell)]
cell_debug_print.__doc__ = \
"""void cell_debug_print(Cell * in_cell)
../../../rregadmin/hive/cell.h:241"""
# ../../../rregadmin/hive/cell.h 249
cell_debug_print_long = _libraries['librregadmin.so.1'].cell_debug_print_long
cell_debug_print_long.restype = None
# cell_debug_print_long(in_cell)
cell_debug_print_long.argtypes = [POINTER(Cell)]
cell_debug_print_long.__doc__ = \
"""void cell_debug_print_long(Cell * in_cell)
../../../rregadmin/hive/cell.h:249"""
# ../../../rregadmin/hive/cell.h 258
cell_get_xml_output = _libraries['librregadmin.so.1'].cell_get_xml_output
cell_get_xml_output.restype = gboolean
# cell_get_xml_output(in_bin, in_output, in_verbose)
cell_get_xml_output.argtypes = [POINTER(Cell), POINTER(ustring), gboolean]
cell_get_xml_output.__doc__ = \
"""gboolean cell_get_xml_output(unknown * in_bin, ustring * in_output, gboolean in_verbose)
../../../rregadmin/hive/cell.h:258"""
# ../../../rregadmin/hive/cell.h 266
cell_vdump = _libraries['librregadmin.so.1'].cell_vdump
cell_vdump.restype = None
# cell_vdump(in_ustr, in_cell)
cell_vdump.argtypes = [POINTER(ustring), POINTER(Cell)]
cell_vdump.__doc__ = \
"""void cell_vdump(ustring * in_ustr, Cell * in_cell)
../../../rregadmin/hive/cell.h:266"""
# values for enumeration 'CellMark'
CELL_FLAG_VALID = 1
CELL_FLAG_SUBTYPE_VALID = 2
CELL_FLAG_LINKED = 65536
CellMark = c_int # enum
# ../../../rregadmin/hive/cell.h 288
cell_set_mark = _libraries['librregadmin.so.1'].cell_set_mark
cell_set_mark.restype = None
# cell_set_mark(in_cell, in_mark)
cell_set_mark.argtypes = [POINTER(Cell), CellMark]
cell_set_mark.__doc__ = \
"""void cell_set_mark(Cell * in_cell, CellMark in_mark)
../../../rregadmin/hive/cell.h:288"""
# ../../../rregadmin/hive/cell.h 296
cell_check_mark = _libraries['librregadmin.so.1'].cell_check_mark
cell_check_mark.restype = gboolean
# cell_check_mark(in_cell, in_mark)
cell_check_mark.argtypes = [POINTER(Cell), CellMark]
cell_check_mark.__doc__ = \
"""gboolean cell_check_mark(Cell * in_cell, CellMark in_mark)
../../../rregadmin/hive/cell.h:296"""
# ../../../rregadmin/hive/cell.h 304
cell_clear_mark = _libraries['librregadmin.so.1'].cell_clear_mark
cell_clear_mark.restype = None
# cell_clear_mark(in_cell, in_mark)
cell_clear_mark.argtypes = [POINTER(Cell), CellMark]
cell_clear_mark.__doc__ = \
"""void cell_clear_mark(Cell * in_cell, CellMark in_mark)
../../../rregadmin/hive/cell.h:304"""
cell_data_deleter = CFUNCTYPE(None, c_void_p)
__all__ = ['CELL_TYPE_VALUE_LIST', 'ID_VK_KEY', 'CELL_TYPE_UNKNOWN',
'CELL_FLAG_SUBTYPE_VALID', 'cell_clear_mark', 'ID_LI_KEY',
'CellMark', 'cell_is_valid', 'CELL_TYPE_VALUE_KEY',
'cell_check_mark', 'cell_get_bin', 'ID_LF_KEY',
'cell_delete', 'cell_get_next', 'cell_set_mark',
'cell_unalloc', 'cell_clear', 'cell_get',
'cell_get_offset', 'cell_init', 'cell_is_allocd',
'cell_alloc', 'cell_vdump', 'CELL_TYPE_KEY_LIST',
'cell_size', 'cell_get_data', 'CELL_TYPE_EMPTY',
'CELL_FLAG_LINKED', 'cell_get_hive', 'cell_get_xml_output',
'cell_get_data_length', 'ID_SK_KEY', 'ID_NK_KEY', 'CellID',
'cell_data_deleter', 'cell_get_id_str', 'ID_LH_KEY',
'cell_debug_print', 'CELL_FLAG_VALID', 'cell_get_prev',
'cell_get_id', 'cell_get_type', 'CellType',
'CELL_TYPE_KEY', 'cell_debug_print_long', 'cell_equal',
'ID_RI_KEY', 'CELL_TYPE_SECURITY']
|
gpl-2.0
| 1,746,740,988,433,074,000 | 39.746377 | 455 | 0.674551 | false |
bshp/xtrabackup
|
python/setup.py
|
1
|
1595
|
from setuptools import setup, find_packages
import os
package_root = os.path.dirname(__file__)
about = {}
with open("xtrabackup/__about__.py") as fp:
exec(fp.read(), about)
# Error-handling here is to allow package to be built w/o README.rst included
try:
long_description = open(os.path.join(package_root, 'README.rst')).read()
except IOError:
long_description = ''
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__summary__"],
long_description=long_description,
url=about["__uri__"],
author=about["__author__"],
author_email=about["__email__"],
license=about["__license__"],
classifiers=[
'Intended Audience :: System Administrators',
'Topic :: System :: Archiving :: Backup',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
keywords='mysql mariadb database backup percona xtrabackup',
packages=find_packages(exclude=['contrib', 'docs', 'tests*', 'sql']),
install_requires=['docopt', 'requests'],
# extras_require = {
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
entry_points={
'console_scripts': [
'pyxtrabackup=xtrabackup.full_backup:main',
'pyxtrabackup-inc=xtrabackup.incremental_backup:main',
'pyxtrabackup-restore=xtrabackup.restoration:main'
],
},
)
|
gpl-3.0
| -8,797,729,788,662,614,000 | 25.583333 | 77 | 0.603135 | false |
GeoffreyFrogeye/syncthing-gtk
|
syncthing_gtk/foldereditor.py
|
1
|
8413
|
#!/usr/bin/env python2
"""
Syncthing-GTK - FolderEditorDialog
Universal dialog handler for all Syncthing settings and editing
"""
from __future__ import unicode_literals
from gi.repository import Gtk, Gdk
from syncthing_gtk.tools import check_device_id
from syncthing_gtk.editordialog import EditorDialog, strip_v
from syncthing_gtk import EditorDialog, HAS_INOTIFY
import os, sys, re, logging
_ = lambda (a) : a
log = logging.getLogger("FolderEditor")
COLOR_NEW = "#A0A0A0"
# Regexp to check if folder id is valid
RE_FOLDER_ID = re.compile("^([a-zA-Z0-9\-\._]{1,64})$")
# Regexp to generate folder id from filename
RE_GEN_ID = re.compile("([a-zA-Z0-9\-\._]{1,64}).*")
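# Added illustrative checks (not part of the original module): RE_FOLDER_ID
# accepts short ids built only from letters, digits, '.', '-' and '_', and
# rejects anything else (spaces, slashes, or ids longer than 64 characters).
assert RE_FOLDER_ID.match("music-2014.backup_1")
assert RE_FOLDER_ID.match("invalid folder id") is None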
VALUES = [ "vid", "vpath", "vreadOnly", "vignorePerms", "vdevices",
"vversioning", "vkeepVersions", "vrescanIntervalS", "vmaxAge",
"vversionsPath", "vinotify"
]
class FolderEditorDialog(EditorDialog):
MESSAGES = {
# Displayed when folder id is invalid
"vid" : _("The Folder ID must be a short, unique identifier"
" (64 characters or less) consisting of letters, numbers "
"and the the dot (.), dash (-) and underscode (_) "
"characters only"),
}
def __init__(self, app, is_new, id=None, path=None):
EditorDialog.__init__(self, app,
"folder-edit.glade",
"New Shared Folder" if is_new else "Edit Shared Folder"
)
self.id = id
self.path = path
self.is_new = is_new
def on_btBrowse_clicked(self, *a):
"""
Display folder browser dialog to browse for folder... folder.
Oh god, this new terminology sucks...
"""
if not self.is_new: return
# Prepare dialog
d = Gtk.FileChooserDialog(
_("Select Folder for new Folder"), # fuck me...
self["editor"],
Gtk.FileChooserAction.SELECT_FOLDER,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
# Set default path to home directory
d.set_current_folder(os.path.expanduser("~"))
# Get response
if d.run() == Gtk.ResponseType.OK:
self["vpath"].set_text(d.get_filename())
if len(self["vid"].get_text().strip()) == 0:
# ID is empty, fill it with last path element
try:
lpl = os.path.split(d.get_filename())[-1]
id = RE_GEN_ID.search(lpl).group(0).lower()
self["vid"].set_text(id)
except AttributeError:
# Can't regexp anything
pass
d.destroy()
#@Overrides
def get_value(self, key):
if key == "keepVersions":
return self.get_burried_value("versioning/params/keep", self.values, 0, int)
elif key == "maxAge":
return self.get_burried_value("versioning/params/maxAge", self.values, 0, int) / 86400 # seconds to days
elif key == "versionsPath":
return self.get_burried_value("versioning/params/versionsPath", self.values, "")
elif key == "versioning":
return self.get_burried_value("versioning/type", self.values, "")
elif key == "inotify":
return self.id in self.app.config["use_inotify"]
else:
return EditorDialog.get_value(self, key)
#@Overrides
def set_value(self, key, value):
if key == "versioning":
# Create structure if needed
self.create_dicts(self.values, ("versioning", "type"))
self.values["versioning"]["type"] = value
elif key == "keepVersions":
# Create structure if needed
self.create_dicts(self.values, ("versioning", "params", "keep"))
self.values["versioning"]["params"]["keep"] = str(int(value))
elif key == "maxAge":
# Create structure if needed
self.create_dicts(self.values, ("versioning", "params", "maxAge"))
self.values["versioning"]["params"]["maxAge"] = str(int(value) * 86400) # days to seconds
elif key == "versionsPath":
# Create structure if needed
self.create_dicts(self.values, ("versioning", "params", "versionsPath"))
self.values["versioning"]["params"]["versionsPath"] = value
elif key == "inotify":
l = self.app.config["use_inotify"]
if value:
if not self.id in l:
l.append(self.id)
else:
while self.id in l:
l.remove(self.id)
self.app.config["use_inotify"] = l
else:
EditorDialog.set_value(self, key, value)
#@Overrides
def on_data_loaded(self):
try:
if self.is_new:
self.values = { strip_v(x) : "" for x in VALUES }
self.checks = {
"vid" : self.check_folder_id,
"vpath" : self.check_path
}
if self.id != None:
try:
v = [ x for x in self.config["folders"] if x["id"] == self.id ][0]
self.values = v
self.is_new = False
except IndexError:
pass
if not self.path is None:
self.set_value("path", self.path)
self["vpath"].set_sensitive(False)
self.set_value("versioning", "simple")
self.set_value("rescanIntervalS", 30)
self.set_value("keepVersions", 10)
else:
self.values = [ x for x in self.config["folders"] if x["id"] == self.id ][0]
self.checks = {}
self["vpath"].set_sensitive(False)
self["btBrowse"].set_sensitive(False)
except KeyError, e:
            # ID not found in configuration. This is practically impossible,
# so it's handled only by self-closing dialog.
log.exception(e)
self.close()
return False
if not HAS_INOTIFY:
self["vinotify"].set_sensitive(False)
self["lblinotify"].set_sensitive(False)
self["vinotify"].set_tooltip_text(_("Please, install pyinotify package to use this feature"))
self["lblinotify"].set_tooltip_text(_("Please, install pyinotify package to use this feature"))
return self.display_values(VALUES)
#@Overrides
def display_value(self, key, w):
if key == "vdevices":
# Very special case
nids = [ n["deviceID"] for n in self.get_value("devices") ]
for device in self.app.devices.values():
if device["id"] != self.app.daemon.get_my_id():
b = Gtk.CheckButton(device.get_title(), False)
b.set_tooltip_text(device["id"])
self["vdevices"].pack_end(b, False, False, 0)
b.set_active(device["id"] in nids)
self["vdevices"].show_all()
else:
EditorDialog.display_value(self, key, w)
#@Overrides
def update_special_widgets(self, *a):
self["vid"].set_sensitive(self.id is None)
v = self.get_value("versioning")
if v == "":
if self["rvversioning"].get_reveal_child():
self["rvversioning"].set_reveal_child(False)
else:
self["bxVersioningSimple"].set_visible(self.get_value("versioning") == "simple")
self["bxVersioningStaggered"].set_visible(self.get_value("versioning") == "staggered")
if not self["rvversioning"].get_reveal_child():
self["rvversioning"].set_reveal_child(True)
#@Overrides
def on_save_reuqested(self):
self.store_values(VALUES)
print self.values
if self.is_new:
# Add new dict to configuration (edited dict is already there)
self.config["folders"].append(self.values)
# Post configuration back to daemon
self.post_config()
#@Overrides
def store_value(self, key, w):
if key == "vdevices": # Still very special case
devices = [ {
"deviceID" : b.get_tooltip_text(),
} for b in self["vdevices"].get_children()
if b.get_active()
]
self.set_value("devices", devices)
else:
EditorDialog.store_value(self, key, w)
#@Overrides
def on_saved(self):
self.close()
        # If new folder/device was added, show dummy item UI, so the user will
        # see that something happened even before the daemon gets restarted
if self.is_new:
box = self.app.show_folder(
self.get_value("id"), self.get_value("path"), self.get_value("path"),
self.get_value("readOnly"), self.get_value("ignorePerms"),
self.get_value("rescanIntervalS"),
sorted(
[ self.app.devices[n["deviceID"]] for n in self.get_value("devices") ],
key=lambda x : x.get_title().lower()
))
box.set_color_hex(COLOR_NEW)
def check_folder_id(self, value):
if value in self.app.folders:
# Duplicate folder id
return False
if RE_FOLDER_ID.match(value) is None:
# Invalid string
return False
return True
def check_path(self, value):
# Any non-empty path is OK
return True
def fill_folder_id(self, rid):
""" Pre-fills folder Id for new-folder dialog """
self["vid"].set_text(rid)
self.id = rid
self.update_special_widgets()
def mark_device(self, nid):
""" Marks (checks) checkbox for specified device """
if "vdevices" in self: # ... only if there are checkboxes here
for child in self["vdevices"].get_children():
if child.get_tooltip_text() == nid:
l = child.get_children()[0] # Label in checkbox
l.set_markup("<b>%s</b>" % (l.get_label()))
child.set_active(True)
|
gpl-2.0
| 3,870,237,024,728,849,400 | 32.517928 | 107 | 0.658624 | false |
Zaneh-/bearded-tribble-back
|
tests/integration/test_stats.py
|
1
|
4190
|
import pytest
from django.conf import settings
from django.core.urlresolvers import reverse
from .. import factories as f
from tests.utils import disconnect_signals, reconnect_signals
pytestmark = pytest.mark.django_db
def setup_module(module):
disconnect_signals()
def teardown_module(module):
reconnect_signals()
@pytest.fixture
def data():
m = type("Models", (object,), {})
m.user = f.UserFactory.create()
m.project = f.ProjectFactory(is_private=False, owner=m.user)
m.role1 = f.RoleFactory(project=m.project)
m.role2 = f.RoleFactory(project=m.project)
m.null_points = f.PointsFactory(project=m.project, value=None)
m.points1 = f.PointsFactory(project=m.project, value=1)
m.points2 = f.PointsFactory(project=m.project, value=2)
m.points3 = f.PointsFactory(project=m.project, value=4)
m.points4 = f.PointsFactory(project=m.project, value=8)
m.open_status = f.UserStoryStatusFactory(is_closed=False)
m.closed_status = f.UserStoryStatusFactory(is_closed=True)
m.role_points1 = f.RolePointsFactory(role=m.role1,
points=m.points1,
user_story__project=m.project,
user_story__status=m.open_status)
m.role_points2 = f.RolePointsFactory(role=m.role1,
points=m.points2,
user_story__project=m.project,
user_story__status=m.open_status)
m.role_points3 = f.RolePointsFactory(role=m.role1,
points=m.points3,
user_story__project=m.project,
user_story__status=m.open_status)
m.role_points4 = f.RolePointsFactory(role=m.project.roles.all()[0],
points=m.points4,
user_story__project=m.project,
user_story__status=m.open_status)
m.user_story1 = m.role_points1.user_story
m.user_story2 = m.role_points2.user_story
m.user_story3 = m.role_points3.user_story
m.user_story4 = m.role_points4.user_story
m.milestone = f.MilestoneFactory(project=m.project)
return m
def test_project_defined_points(client, data):
assert data.project.defined_points == {data.role1.pk: 15}
data.role_points1.role = data.role2
data.role_points1.save()
assert data.project.defined_points == {data.role1.pk: 14, data.role2.pk: 1}
def test_project_closed_points(client, data):
assert data.project.closed_points == {}
data.role_points1.role = data.role2
data.role_points1.save()
assert data.project.closed_points == {}
data.user_story1.is_closed = True
data.user_story1.save()
assert data.project.closed_points == {data.role2.pk: 1}
data.user_story2.is_closed = True
data.user_story2.save()
assert data.project.closed_points == {data.role1.pk: 2, data.role2.pk: 1}
data.user_story3.is_closed = True
data.user_story3.save()
assert data.project.closed_points == {data.role1.pk: 6, data.role2.pk: 1}
data.user_story4.is_closed = True
data.user_story4.save()
assert data.project.closed_points == {data.role1.pk: 14, data.role2.pk: 1}
def test_project_assigned_points(client, data):
assert data.project.assigned_points == {}
data.role_points1.role = data.role2
data.role_points1.save()
assert data.project.assigned_points == {}
data.user_story1.milestone = data.milestone
data.user_story1.save()
assert data.project.assigned_points == {data.role2.pk: 1}
data.user_story2.milestone = data.milestone
data.user_story2.save()
assert data.project.assigned_points == {data.role1.pk: 2, data.role2.pk: 1}
data.user_story3.milestone = data.milestone
data.user_story3.save()
assert data.project.assigned_points == {data.role1.pk: 6, data.role2.pk: 1}
data.user_story4.milestone = data.milestone
data.user_story4.save()
assert data.project.assigned_points == {data.role1.pk: 14, data.role2.pk: 1}
|
agpl-3.0
| 7,031,326,740,203,970,000 | 37.440367 | 80 | 0.626014 | false |
votervoice/openstates
|
openstates/ny/events.py
|
1
|
4556
|
import re
import datetime as dt
import pytz
from pupa.scrape import Scraper, Event
from openstates.utils import LXMLMixin
url = "http://assembly.state.ny.us/leg/?sh=hear"
class NYEventScraper(Scraper, LXMLMixin):
_tz = pytz.timezone('US/Eastern')
def lower_parse_page(self, url):
page = self.lxmlize(url)
tables = page.xpath("//table[@class='pubhrgtbl']")
date = None
for table in tables:
metainf = {}
rows = table.xpath(".//tr")
for row in rows:
tds = row.xpath("./*")
if len(tds) < 2:
continue
key, value = tds
if key.tag == 'th' and key.get("class") == 'hrgdate':
date = key.text_content()
date = re.sub(r"\s+", " ", date)
date = re.sub(".*POSTPONED NEW DATE", "", date).strip()
# Due to the html structure this shouldn't be an elif
# It needs to fire twice in the same loop iteration
if value.tag == 'th' and value.get("class") == 'commtitle':
coms = value.xpath('.//div[contains(@class,"comm-txt")]/text()')
elif key.tag == 'td':
key = key.text_content().strip()
value = value.text_content().strip()
value = value.replace(u'\x96', '-')
value = re.sub(r"\s+", " ", value)
metainf[key] = value
time = metainf['Time:']
repl = {
"A.M.": "AM",
"P.M.": "PM",
}
drepl = {
"Sept": "Sep"
}
for r in repl:
time = time.replace(r, repl[r])
for r in drepl:
date = date.replace(r, drepl[r])
time = re.sub("-.*", "", time)
time = time.strip()
year = dt.datetime.now().year
date = "%s %s %s" % (
date,
year,
time
)
if "tbd" in date.lower():
continue
date = date.replace(' PLEASE NOTE NEW TIME', '')
# Check if the event has been postponed.
postponed = 'POSTPONED' in date
if postponed:
date = date.replace(' POSTPONED', '')
date_formats = ["%B %d %Y %I:%M %p", "%b. %d %Y %I:%M %p"]
datetime = None
for fmt in date_formats:
try:
datetime = dt.datetime.strptime(date, fmt)
except ValueError:
pass
# If the datetime can't be parsed, bail.
if datetime is None:
return
title_key = set(metainf) & set([
'Public Hearing:', 'Summit:', 'Roundtable:',
'Public Roundtable:', 'Public Meeting:', 'Public Forum:',
'Meeting:'])
assert len(title_key) == 1, "Couldn't determine event title."
title_key = list(title_key).pop()
title = metainf[title_key]
title = re.sub(
r"\*\*Click here to view public hearing notice\*\*",
"",
title
)
# If event was postponed, add a warning to the title.
if postponed:
title = 'POSTPONED: %s' % title
event = Event(
name=title,
start_date=self._tz.localize(datetime),
location_name=metainf['Place:'],
)
event.extras = {'contact': metainf['Contact:']}
if 'Media Contact:' in metainf:
event.extras.update(media_contact=metainf['Media Contact:'])
event.add_source(url)
for com in coms:
event.add_participant(
com.strip(),
type='committee',
note='host',
)
participant = event.participants[-1]
participant['extras'] = {'chamber': self.classify_committee(com)},
yield event
def scrape(self):
yield from self.lower_parse_page(url)
def classify_committee(self, name):
chamber = 'other'
if "senate" in name.lower():
chamber = 'upper'
if "assembly" in name.lower():
chamber = 'lower'
if "joint" in name.lower():
chamber = 'joint'
return chamber
|
gpl-3.0
| 7,908,856,798,909,407,000 | 31.084507 | 84 | 0.449737 | false |
proger/offlineimap
|
offlineimap/folder/Maildir.py
|
1
|
12082
|
# Maildir folder support
# Copyright (C) 2002 - 2007 John Goerzen
# <jgoerzen@complete.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import socket
import time
import re
import os
from Base import BaseFolder
from threading import Lock
try:
from hashlib import md5
except ImportError:
from md5 import md5
from offlineimap import OfflineImapError
uidmatchre = re.compile(',U=(\d+)')
flagmatchre = re.compile(':.*2,([A-Z]+)')
timestampmatchre = re.compile('(\d+)');
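# Added illustrative example (not part of the original module): a typical
# maildir filename encodes the UID and the flags that the regexps above extract.
_example_name = '1257894000_0.1234.host,U=42,FMD5=abcdef:2,FS'
assert uidmatchre.search(_example_name).group(1) == '42'
assert flagmatchre.search(_example_name).group(1) == 'FS'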
timeseq = 0
lasttime = long(0)
timelock = Lock()
def gettimeseq():
global lasttime, timeseq, timelock
timelock.acquire()
try:
thistime = long(time.time())
if thistime == lasttime:
timeseq += 1
return (thistime, timeseq)
else:
lasttime = thistime
timeseq = 0
return (thistime, timeseq)
finally:
timelock.release()
class MaildirFolder(BaseFolder):
def __init__(self, root, name, sep, repository, accountname, config):
self.name = name
self.config = config
self.dofsync = config.getdefaultboolean("general", "fsync", True)
self.root = root
self.sep = sep
self.messagelist = None
self.repository = repository
self.accountname = accountname
BaseFolder.__init__(self)
#self.ui is set in BaseFolder.init()
# Cache the full folder path, as we use getfullname() very often
self._fullname = os.path.join(self.getroot(), self.getname())
def getaccountname(self):
return self.accountname
def getfullname(self):
"""Return the absolute file path to the Maildir folder (sans cur|new)"""
return self._fullname
def getuidvalidity(self):
"""Maildirs have no notion of uidvalidity, so we just return a magic
token."""
return 42
#Checks to see if the given message is within the maximum age according
#to the maildir name which should begin with a timestamp
def _iswithinmaxage(self, messagename, maxage):
#In order to have the same behaviour as SINCE in an IMAP search
#we must convert this to the oldest time and then strip off hrs/mins
#from that day
oldest_time_utc = time.time() - (60*60*24*maxage)
oldest_time_struct = time.gmtime(oldest_time_utc)
oldest_time_today_seconds = ((oldest_time_struct[3] * 3600) \
+ (oldest_time_struct[4] * 60) \
+ oldest_time_struct[5])
oldest_time_utc -= oldest_time_today_seconds
timestampmatch = timestampmatchre.search(messagename)
timestampstr = timestampmatch.group()
timestamplong = long(timestampstr)
if(timestamplong < oldest_time_utc):
return False
else:
return True
def _scanfolder(self):
"""Cache the message list. Maildir flags are:
R (replied)
S (seen)
T (trashed)
D (draft)
F (flagged)
and must occur in ASCII order."""
retval = {}
files = []
nouidcounter = -1 # Messages without UIDs get
# negative UID numbers.
foldermd5 = md5(self.getvisiblename()).hexdigest()
folderstr = ',FMD5=' + foldermd5
for dirannex in ['new', 'cur']:
fulldirname = os.path.join(self.getfullname(), dirannex)
files.extend(os.path.join(fulldirname, filename) for
filename in os.listdir(fulldirname))
for file in files:
messagename = os.path.basename(file)
#check if there is a parameter for maxage / maxsize - then see if this
#message should be considered or not
maxage = self.config.getdefaultint("Account " + self.accountname, "maxage", -1)
maxsize = self.config.getdefaultint("Account " + self.accountname, "maxsize", -1)
if(maxage != -1):
isnewenough = self._iswithinmaxage(messagename, maxage)
if(isnewenough != True):
#this message is older than we should consider....
continue
#Check and see if the message is too big if the maxsize for this account is set
if(maxsize != -1):
filesize = os.path.getsize(file)
if(filesize > maxsize):
continue
foldermatch = messagename.find(folderstr) != -1
if not foldermatch:
# If there is no folder MD5 specified, or if it mismatches,
# assume it is a foreign (new) message and generate a
# negative uid for it
uid = nouidcounter
nouidcounter -= 1
else: # It comes from our folder.
uidmatch = uidmatchre.search(messagename)
uid = None
if not uidmatch:
uid = nouidcounter
nouidcounter -= 1
else:
uid = long(uidmatch.group(1))
flagmatch = flagmatchre.search(messagename)
flags = []
if flagmatch:
flags = [x for x in flagmatch.group(1)]
flags.sort()
retval[uid] = {'uid': uid,
'flags': flags,
'filename': file}
return retval
def quickchanged(self, statusfolder):
"""Returns True if the Maildir has changed"""
self.cachemessagelist()
# Folder has different uids than statusfolder => TRUE
if sorted(self.getmessageuidlist()) != \
sorted(statusfolder.getmessageuidlist()):
return True
# Also check for flag changes, it's quick on a Maildir
for (uid, message) in self.getmessagelist().iteritems():
if message['flags'] != statusfolder.getmessageflags(uid):
return True
return False #Nope, nothing changed
def cachemessagelist(self):
if self.messagelist is None:
self.messagelist = self._scanfolder()
def getmessagelist(self):
return self.messagelist
def getmessage(self, uid):
"""Return the content of the message"""
filename = self.messagelist[uid]['filename']
filepath = os.path.join(self.getfullname(), filename)
file = open(filepath, 'rt')
retval = file.read()
file.close()
#TODO: WHY are we replacing \r\n with \n here? And why do we
# read it as text?
return retval.replace("\r\n", "\n")
def getmessagetime( self, uid ):
filename = self.messagelist[uid]['filename']
filepath = os.path.join(self.getfullname(), filename)
st = os.stat(filepath)
return st.st_mtime
def savemessage(self, uid, content, flags, rtime):
# This function only ever saves to tmp/,
# but it calls savemessageflags() to actually save to cur/ or new/.
self.ui.debug('maildir', 'savemessage: called to write with flags %s '
'and content %s' % (repr(flags), repr(content)))
if uid < 0:
# We cannot assign a new uid.
return uid
if uid in self.messagelist:
# We already have it, just update flags.
self.savemessageflags(uid, flags)
return uid
# Otherwise, save the message in tmp/ and then call savemessageflags()
# to give it a permanent home.
tmpdir = os.path.join(self.getfullname(), 'tmp')
timeval, timeseq = gettimeseq()
messagename = '%d_%d.%d.%s,U=%d,FMD5=%s' % \
(timeval,
timeseq,
os.getpid(),
socket.gethostname(),
uid,
md5(self.getvisiblename()).hexdigest())
# open file and write it out
try:
fd = os.open(os.path.join(tmpdir, messagename),
os.O_EXCL|os.O_CREAT|os.O_WRONLY)
except OSError, e:
if e.errno == 17:
#FILE EXISTS ALREADY
severity = OfflineImapError.ERROR.MESSAGE
raise OfflineImapError("Unique filename %s already existing." %\
messagename, severity)
else:
raise
file = os.fdopen(fd, 'wt')
file.write(content)
# Make sure the data hits the disk
file.flush()
if self.dofsync:
os.fsync(fd)
file.close()
if rtime != None:
os.utime(os.path.join(tmpdir, messagename), (rtime, rtime))
self.messagelist[uid] = {'uid': uid, 'flags': [],
'filename': os.path.join('tmp', messagename)}
# savemessageflags moves msg to 'cur' or 'new' as appropriate
self.savemessageflags(uid, flags)
self.ui.debug('maildir', 'savemessage: returning uid %d' % uid)
return uid
def getmessageflags(self, uid):
return self.messagelist[uid]['flags']
def savemessageflags(self, uid, flags):
oldfilename = self.messagelist[uid]['filename']
dir_prefix, newname = os.path.split(oldfilename)
tmpdir = os.path.join(self.getfullname(), 'tmp')
if 'S' in flags:
# If a message has been seen, it goes into the cur
# directory. CR debian#152482
dir_prefix = 'cur'
else:
dir_prefix = 'new'
infostr = ':'
infomatch = re.search('(:.*)$', newname)
if infomatch: # If the info string is present..
infostr = infomatch.group(1)
newname = newname.split(':')[0] # Strip off the info string.
infostr = re.sub('2,[A-Z]*', '', infostr)
flags.sort()
infostr += '2,' + ''.join(flags)
newname += infostr
newfilename = os.path.join(dir_prefix, newname)
if (newfilename != oldfilename):
os.rename(os.path.join(self.getfullname(), oldfilename),
os.path.join(self.getfullname(), newfilename))
self.messagelist[uid]['flags'] = flags
self.messagelist[uid]['filename'] = newfilename
# By now, the message had better not be in tmp/ land!
final_dir, final_name = os.path.split(self.messagelist[uid]['filename'])
assert final_dir != 'tmp'
def deletemessage(self, uid):
"""Unlinks a message file from the Maildir.
:param uid: UID of a mail message
:type uid: String
        :return: Nothing; raises an exception if the UID exists but no
            corresponding file is found.
"""
if not self.uidexists(uid):
return
filename = self.messagelist[uid]['filename']
filepath = os.path.join(self.getfullname(), filename)
try:
os.unlink(filepath)
except OSError:
# Can't find the file -- maybe already deleted?
newmsglist = self._scanfolder()
if uid in newmsglist: # Nope, try new filename.
filename = newmsglist[uid]['filename']
filepath = os.path.join(self.getfullname(), filename)
os.unlink(filepath)
# Yep -- return.
del(self.messagelist[uid])
|
gpl-2.0
| -8,432,343,763,043,916,000 | 36.874608 | 93 | 0.569442 | false |
flgiordano/netcash
|
+/google-cloud-sdk/lib/surface/init.py
|
1
|
17437
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workflow to set up gcloud environment."""
import argparse
import os
import sys
import types
from googlecloudsdk.api_lib.projects import projects_api
from googlecloudsdk.api_lib.source import source
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exc
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.util import files
class Init(base.Command):
"""Initialize or reinitialize gcloud."""
detailed_help = {
'DESCRIPTION': """\
{description}
{command} launches an interactive Getting Started workflow for gcloud.
It replaces `gcloud auth login` as the recommended command to execute
after you install the Cloud SDK. {command} performs the following
setup steps:
- Authorizes gcloud and other SDK tools to access Google Cloud
Platform using your user account credentials, or lets you select
from accounts whose credentials are already available. {command}
uses the same browser-based authorization flow as
`gcloud auth login`.
- Sets properties in a gcloud configuration, including the current
project and the default Google Compute Engine region and zone.
- Clones a Cloud source repository (optional).
Most users run {command} to get started with gcloud. You can use
subsequent {command} invocations to create new gcloud configurations
or to reinitialize existing configurations. See `gcloud topic
configurations` for additional information.
Properties set by `gcloud init` are local and persistent. They are
not affected by remote changes to your project. For instance, the
default Compute Engine zone in your configuration remains stable,
even if you or another user changes the project-level default zone in
the Cloud Platform Console. You can resync your configuration at any
time by rerunning `gcloud init`.
(Available since version 0.9.79. Run $ gcloud --version to see which
version you are running.)
""",
}
@staticmethod
def Args(parser):
parser.add_argument(
'obsolete_project_arg',
nargs='?',
help=argparse.SUPPRESS)
parser.add_argument(
'--console-only',
action='store_true',
help=('Prevent the command from launching a browser for '
'authorization.'))
def Run(self, args):
"""Allows user to select configuration, and initialize it."""
if args.obsolete_project_arg:
raise c_exc.InvalidArgumentException(
args.obsolete_project_arg,
'`gcloud init` has changed and no longer takes a PROJECT argument. '
'Please use `gcloud source repos clone` to clone this '
'project\'s source repositories.')
log.status.write('Welcome! This command will take you through '
'the configuration of gcloud.\n\n')
if properties.VALUES.core.disable_prompts.GetBool():
raise c_exc.InvalidArgumentException(
'disable_prompts/--quiet',
'gcloud init command cannot run with disabled prompts.')
configuration_name = None
try:
configuration_name = self._PickConfiguration()
if not configuration_name:
return
log.status.write('Your current configuration has been set to: [{0}]\n\n'
.format(configuration_name))
if not self._PickAccount(args.console_only):
return
project_id = self._PickProject()
if not project_id:
return
self._PickDefaultRegionAndZone()
self._PickRepo(project_id)
log.status.write('\ngcloud has now been configured!\n')
finally:
log.status.write('You can use [gcloud config] to '
'change more gcloud settings.\n\n')
log.status.flush()
# Not using self._RunCmd to get command actual output.
self.cli.Execute(['config', 'list'])
def _PickAccount(self, console_only):
"""Checks if current credentials are valid, if not runs auth login.
Args:
console_only: bool, True if the auth flow shouldn't use the browser
Returns:
bool, True if valid credentials are setup.
"""
auth_info = self._RunCmd(['auth', 'list'])
if auth_info and auth_info.accounts:
idx = console_io.PromptChoice(
auth_info.accounts + ['Log in with new credentials'],
message='Pick credentials to use:',
prompt_string=None)
if idx is None:
return None
new_credentials = idx == len(auth_info.accounts)
else:
answer = console_io.PromptContinue(
prompt_string='To continue, you must log in. Would you like to log '
'in')
if not answer:
return False
new_credentials = True
if new_credentials:
# gcloud auth login may have user interaction, do not suppress it.
browser_args = ['--no-launch-browser'] if console_only else []
if not self._RunCmd(['auth', 'login'],
['--force', '--brief'] + browser_args,
disable_user_output=False):
return None
else:
account = auth_info.accounts[idx]
self._RunCmd(['config', 'set'], ['account', account])
log.status.write('You are now logged in as: [{0}]\n\n'
.format(properties.VALUES.core.account.Get()))
return True
def _PickConfiguration(self):
"""Allows user to re-initialize, create or pick new configuration.
Returns:
Configuration name or None.
"""
configs = self._RunCmd(['config', 'configurations', 'list'])
if not configs:
new_config_name = 'default'
if self._RunCmd(['config', 'configurations', 'create'],
[new_config_name]):
self._RunCmd(['config', 'configurations', 'activate'],
[new_config_name])
properties.PropertiesFile.Invalidate()
return new_config_name
config_names = [cfg.name for cfg in configs]
active_configs = [cfg.name for cfg in configs
if getattr(cfg, 'is_active', False)]
if not active_configs:
return None
choices = []
active_config = active_configs[0]
log.status.write('Settings from your current configuration [{0}] are:\n'
.format(active_config))
log.status.flush()
# Not using self._RunCmd to get command actual output.
self.cli.Execute(['config', 'list'])
log.out.flush()
log.status.write('\n')
log.status.flush()
choices.append(
'Re-initialize this configuration [{0}] with new settings '.format(
active_config))
choices.append('Create a new configuration')
config_choices = [name for name in config_names if name != active_config]
choices.extend('Switch to and re-initialize '
'existing configuration: [{0}]'.format(name)
for name in config_choices)
idx = console_io.PromptChoice(choices, message='Pick configuration to use:')
if idx is None:
return None
if idx == 0: # If reinitialize was selected.
self._CleanCurrentConfiguration()
return active_config
if idx == 1: # Second option is to create new configuration.
return self._CreateConfiguration()
config_name = config_choices[idx - 2]
self._RunCmd(['config', 'configurations', 'activate'], [config_name])
return config_name
def _PickProject(self):
"""Allows user to select a project.
Returns:
str, project_id or None if was not selected.
"""
try:
projects = list(projects_api.List(http=self.Http()))
except Exception: # pylint: disable=broad-except
log.debug('Failed to execute projects list: %s, %s, %s', *sys.exc_info())
projects = None
if projects is None: # Failed to get the list.
project_id = console_io.PromptResponse(
'Enter project id you would like to use: ')
if not project_id:
return None
else:
projects = sorted(projects, key=lambda prj: prj.projectId)
choices = ['[{0}]'.format(project.projectId) for project in projects]
if not choices:
log.status.write('\nThis account has no projects. Please create one in '
'developers console '
'(https://console.developers.google.com/project) '
'before running this command.\n')
return None
if len(choices) == 1:
project_id = projects[0].projectId
else:
idx = console_io.PromptChoice(
choices,
message='Pick cloud project to use: ',
prompt_string=None)
if idx is None:
return
project_id = projects[idx].projectId
self._RunCmd(['config', 'set'], ['project', project_id])
log.status.write('Your current project has been set to: [{0}].\n\n'
.format(project_id))
return project_id
def _PickDefaultRegionAndZone(self):
"""Pulls metadata properties for region and zone and sets them in gcloud."""
try:
project_info = self._RunCmd(['compute', 'project-info', 'describe'])
except c_exc.FailedSubCommand:
log.status.write("""\
Not setting default zone/region (this feature makes it easier to use
[gcloud compute] by setting an appropriate default value for the
--zone and --region flag).
See https://cloud.google.com/compute/docs/gcloud-compute section on how to set
default compute region and zone manually. If you would like [gcloud init] to be
able to do this for you the next time you run it, make sure the
Compute Engine API is enabled for your project on the
https://console.developers.google.com/apis page.
""")
return None
default_zone = None
default_region = None
if project_info is not None:
metadata = project_info.get('commonInstanceMetadata', {})
for item in metadata.get('items', []):
if item['key'] == 'google-compute-default-zone':
default_zone = item['value']
elif item['key'] == 'google-compute-default-region':
default_region = item['value']
# Same logic applies to region and zone properties.
def SetProperty(name, default_value, list_command):
"""Set named compute property to default_value or get via list command."""
if not default_value:
values = self._RunCmd(list_command)
if values is None:
return
values = list(values)
idx = console_io.PromptChoice(
['[{0}]'.format(value['name']) for value in values]
+ ['Do not set default {0}'.format(name)],
message=('Which compute {0} would you like '
'to use as project default?'.format(name)),
prompt_string=None)
if idx is None or idx == len(values):
return
default_value = values[idx]
self._RunCmd(['config', 'set'],
['compute/{0}'.format(name), default_value['name']])
log.status.write('Your project default compute {0} has been set to '
'[{1}].\nYou can change it by running '
'[gcloud config set compute/{0} NAME].\n\n'
.format(name, default_value['name']))
return default_value
if default_zone:
default_zone = self._RunCmd(['compute', 'zones', 'describe'],
[default_zone])
zone = SetProperty('zone', default_zone, ['compute', 'zones', 'list'])
if zone and not default_region:
default_region = zone['region']
if default_region:
default_region = self._RunCmd(['compute', 'regions', 'describe'],
[default_region])
SetProperty('region', default_region, ['compute', 'regions', 'list'])
def _PickRepo(self, project_id):
"""Allows user to clone one of the projects repositories."""
answer = console_io.PromptContinue(
prompt_string='Do you want to use Google\'s source hosting (see '
'"https://cloud.google.com/source-repositories/docs/")')
if not answer:
return
try:
source.Source.SetApiEndpoint(self.Http())
project = source.Project(project_id)
repos = project.ListRepos()
except Exception: # pylint: disable=broad-except
# This command is experimental right now; its failures shouldn't affect
# operation.
repos = None
if repos:
repos = sorted(repo.name or 'default' for repo in repos)
log.status.write(
'This project has one or more associated Git repositories.\n')
idx = console_io.PromptChoice(
['[{0}]'.format(repo) for repo in repos] + ['Do not clone'],
message='Pick Git repository to clone to your local machine:',
prompt_string=None)
if idx >= 0 and idx < len(repos):
repo_name = repos[idx]
else:
return
elif repos is None:
answer = console_io.PromptContinue(
prompt_string='Generally projects have a Git repository named '
'[default]. Would you like to try clone it')
if not answer:
return
repo_name = 'default'
else:
return
self._CloneRepo(repo_name)
def _CloneRepo(self, repo_name):
"""Queries user for output path and clones selected repo to it."""
default_clone_path = os.path.join(os.getcwd(), repo_name)
while True:
clone_path = console_io.PromptResponse(
'Where would you like to clone [{0}] repository to [{1}]:'
.format(repo_name, default_clone_path))
if not clone_path:
clone_path = default_clone_path
if os.path.exists(clone_path):
log.status.write('Directory [{0}] already exists\n'.format(clone_path))
continue
clone_path = os.path.abspath(clone_path)
parent_dir = os.path.dirname(clone_path)
if not os.path.isdir(parent_dir):
log.status.write('No such directory [{0}]\n'.format(parent_dir))
answer = console_io.PromptContinue(
prompt_string='Would you like to create it')
if answer:
files.MakeDir(parent_dir)
break
else:
break
# Show output from this command in case there are errors.
try:
self._RunCmd(['source', 'repos', 'clone'], [repo_name, clone_path],
disable_user_output=False)
except c_exc.FailedSubCommand:
log.warning(
'Was not able to run\n '
'[gcloud source repos clone {0} {1}]\n'
'at this time. You can try running this command any time later.\n'
.format(repo_name, clone_path))
def _CreateConfiguration(self):
configuration_name = console_io.PromptResponse(
'Enter configuration name: ')
new_config_name = self._RunCmd(['config', 'configurations', 'create'],
[configuration_name])
if new_config_name:
self._RunCmd(['config', 'configurations', 'activate'],
[configuration_name])
properties.PropertiesFile.Invalidate()
return new_config_name
def _CleanCurrentConfiguration(self):
self._RunCmd(['config', 'unset'], ['account'])
self._RunCmd(['config', 'unset'], ['project'])
self._RunCmd(['config', 'unset'], ['compute/zone'])
self._RunCmd(['config', 'unset'], ['compute/region'])
def _RunCmd(self, cmd, params=None, disable_user_output=True):
if not self.cli.IsValidCommand(cmd):
log.info('Command %s does not exist.', cmd)
return None
if params is None:
params = []
args = cmd + params
log.info('Executing: [gcloud %s]', ' '.join(args))
try:
# Disable output from individual commands, so that we get
# command run results, and don't clutter output of init.
if disable_user_output:
args.append('--no-user-output-enabled')
if (properties.VALUES.core.verbosity.Get() is None and
disable_user_output):
# Unless user explicitly set verbosity, suppress from subcommands.
args.append('--verbosity=none')
result = self.cli.Execute(args)
# Best effort to force result of Execute eagerly. Don't just check
# that result is iterable to avoid category errors (e.g., accidentally
# converting a string or dict to a list).
if isinstance(result, types.GeneratorType):
return list(result)
return result
except SystemExit as exc:
log.info('[%s] has failed\n', ' '.join(cmd + params))
raise c_exc.FailedSubCommand(cmd + params, exc.code)
except BaseException:
log.info('Failed to run [%s]\n', ' '.join(cmd + params))
raise
|
bsd-3-clause
| 2,195,455,491,288,916,000 | 37.577434 | 80 | 0.625681 | false |
atantet/ergoPack
|
example/numericalFP/numericalFP_Hopf.py
|
1
|
5115
|
import numpy as np
import pylibconfig2
from scipy import sparse
from scipy.sparse import linalg
import matplotlib.pyplot as plt
from matplotlib import cm
from ergoNumAna import ChangCooper
readEigVal = False
#readEigVal = True
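# Drift of the (supercritical) Hopf normal form in Cartesian coordinates:
# dx/dt = x*(mu - r^2) - omega*y, dy/dt = y*(mu - r^2) + omega*x, with r^2 = x^2 + y^2;
# for mu > 0 the deterministic system has a stable limit cycle of radius sqrt(mu).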
def hopf(x, mu, omega):
f = np.empty((2,))
f[0] = x[0] * (mu - (x[0]**2 + x[1]**2)) - omega*x[1]
f[1] = x[1] * (mu - (x[0]**2 + x[1]**2)) + omega*x[0]
return f
# Get model
omega = 1.
#q = 0.5
#q = 0.75
#q = 1.
#q = 1.25
#q = 1.5
#q = 1.75
#q = 2.
#q = 2.25
#q = 2.5
#q = 2.75
#q = 3.
#q = 3.25
#q = 3.5
#q = 3.75
q = 4.
muRng = np.arange(-10, 15., 0.1)
k0 = 0
#muRng = np.arange(6.6, 15., 0.1)
#k0 = 166
#muRng = np.arange(-4, 2, 0.1)
#k0 = 60
#muRng = np.arange(2, 8, 0.1)
#k0 = 120
#muRng = np.arange(8, 15, 0.1)
#k0 = 180
#muRng = np.arange(5., 10., 0.1)
#k0 = 150
#muRng = np.array([8.])
#k0 = 180
# Grid definition
dim = 2
nx0 = 100
#nx0 = 200
# give limits for the size of the periodic orbit
# at maximum value of control parameter (when noise
# effects transversally are small)
xlim = np.ones((dim,)) * np.sqrt(15) * 2
# Number of eigenvalues
nev = 100
tol = 1.e-6
B = np.eye(dim) * q
# Noise matrix B holds the standard deviations; Q = B B^T is the diffusion matrix
Q = np.dot(B, B.T)
# Get grid points and steps
x = []
dx = np.empty((dim,))
nx = np.ones((dim,), dtype=int) * nx0
for d in np.arange(dim):
x.append(np.linspace(-xlim[d], xlim[d], nx[d]))
dx[d] = x[d][1] - x[d][0]
N = np.prod(nx)
idx = np.indices(nx).reshape(dim, -1)
X = np.meshgrid(*x, indexing='ij')
points = np.empty((dim, N))
for d in np.arange(dim):
points[d] = X[d].flatten()
alpha = 0.0
levels = 20
fs_default = 'x-large'
fs_latex = 'xx-large'
fs_xlabel = fs_default
fs_ylabel = fs_default
fs_xticklabels = fs_default
fs_yticklabels = fs_default
fs_legend_title = fs_default
fs_legend_labels = fs_default
fs_cbar_label = fs_default
#figFormat = 'png'
figFormat = 'eps'
dpi = 300
msize = 32
bbox_inches = 'tight'
plt.rc('font',**{'family':'serif'})
print 'For q = ', q
for k in np.arange(muRng.shape[0]):
mu = muRng[k]
print 'For mu = ', mu
if mu < 0:
signMu = 'm'
else:
signMu = 'p'
postfix = '_nx%d_k%03d_mu%s%02d_q%03d' \
% (nx0, k0 + k, signMu, int(round(np.abs(mu) * 10)), int(round(q * 100)))
if not readEigVal:
# Define drift
def drift(x):
return hopf(x, mu, omega)
# Get discretized Fokker-Planck operator
print 'Discretizing Fokker-Planck operator'
FPO = ChangCooper(points, nx, dx, drift, Q)
print 'Solving eigenvalue problem'
(w, v) = linalg.eigs(FPO, k=nev, which='LR', tol=tol)
isort = np.argsort(-w.real)
w = w[isort]
v = v[:, isort]
rho0 = v[:, 0].real
rho0 /= rho0.sum()
rho0_tile = np.tile(rho0, (dim, 1))
meanPoints = (points * rho0_tile).sum(1)
stdPoints = np.sqrt(((points - np.tile(meanPoints, (N, 1)).T)**2 * rho0_tile).sum(1))
print 'Mean points = ', meanPoints
print 'Std points = ', stdPoints
print 'Saving eigenvalues'
np.savetxt('../results/numericalFP/w_hopf%s.txt' % postfix, w)
np.savetxt('../results/numericalFP/statDist_hopf%s.txt' % postfix, rho0)
else:
print 'Reading eigenvalues'
srcFile = '../results/numericalFP/w_hopf%s.txt' % postfix
fp = open(srcFile, 'r')
w = np.empty((nev,), dtype=complex)
for ev in np.arange(nev):
line = fp.readline()
line = line.replace('+-', '-')
w[ev] = complex(line)
rho0 = np.loadtxt('../results/numericalFP/statDist_hopf%s.txt' % postfix)
print 'Plotting'
fig = plt.figure()
#fig.set_visible(False)
ax = fig.add_subplot(111)
ax.scatter(w.real, w.imag, edgecolors='face')
ax.set_xlim(-30, 0.1)
ax.set_ylim(-10, 10)
ax.text(-29, -9, r'$\mu = %.1f$' % mu, fontsize='xx-large')
fig.savefig('../results/plot/numericalFP/numFP_hopf%s.%s' \
% (postfix, figFormat), bbox_inches='tight', dpi=300)
fig = plt.figure()
ax = fig.add_subplot(111)
vect = rho0.copy()
vecAlpha = vect[vect != 0]
if alpha > 0:
vmax = np.sort(vecAlpha)[int((1. - alpha) \
* vecAlpha.shape[0])]
vect[vect > vmax] = vmax
else:
vmax = np.max(vect)
h = ax.contourf(X[0].T, X[1].T, vect.reshape(nx), levels,
cmap=cm.hot_r, vmin=0., vmax=vmax)
ax.set_xlim(X[0][:, 0].min(), X[0][:, 0].max())
ax.set_ylim(X[1][0].min(), X[1][0].max())
#cbar = plt.colorbar(h)
ax.set_xlabel(r'$x$', fontsize=fs_latex)
ax.set_ylabel(r'$y$', fontsize=fs_latex)
# plt.setp(cbar.ax.get_yticklabels(), fontsize=fs_yticklabels)
plt.setp(ax.get_xticklabels(), fontsize=fs_xticklabels)
plt.setp(ax.get_yticklabels(), fontsize=fs_yticklabels)
ax.text(-7, -7, r'$\mu = %.1f$' % mu, fontsize='xx-large')
fig.savefig('../results/plot/numericalFP/statDist_hopf%s.%s' \
% (postfix, figFormat), bbox_inches='tight', dpi=300)
plt.close()
|
gpl-3.0
| -2,602,404,210,002,750,500 | 26.5 | 93 | 0.564223 | false |
Yukarumya/Yukarum-Redfoxes
|
testing/mozbase/mozdevice/tests/sut_movetree.py
|
1
|
2795
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import mozdevice
import logging
import unittest
import mozunit
from sut import MockAgent
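# MockAgent (from the local "sut" test helper) replays the scripted (command, response)
# pairs below, so the DroidSUT client can be exercised without a real device attached.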
class MoveTreeTest(unittest.TestCase):
def test_moveFile(self):
commands = [('mv /mnt/sdcard/tests/test.txt /mnt/sdcard/tests/test1.txt', ''),
('isdir /mnt/sdcard/tests', 'TRUE'),
('cd /mnt/sdcard/tests', ''),
('ls', 'test1.txt'),
('isdir /mnt/sdcard/tests', 'TRUE'),
('cd /mnt/sdcard/tests', ''),
('ls', 'test1.txt')]
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port, logLevel=logging.DEBUG)
self.assertEqual(None, d.moveTree('/mnt/sdcard/tests/test.txt',
'/mnt/sdcard/tests/test1.txt'))
self.assertFalse(d.fileExists('/mnt/sdcard/tests/test.txt'))
self.assertTrue(d.fileExists('/mnt/sdcard/tests/test1.txt'))
def test_moveDir(self):
commands = [("mv /mnt/sdcard/tests/foo /mnt/sdcard/tests/bar", ""),
('isdir /mnt/sdcard/tests', 'TRUE'),
('cd /mnt/sdcard/tests', ''),
('ls', 'bar')]
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port, logLevel=logging.DEBUG)
self.assertEqual(None, d.moveTree('/mnt/sdcard/tests/foo',
'/mnt/sdcard/tests/bar'))
self.assertTrue(d.fileExists('/mnt/sdcard/tests/bar'))
def test_moveNonEmptyDir(self):
commands = [('isdir /mnt/sdcard/tests/foo/bar', 'TRUE'),
('mv /mnt/sdcard/tests/foo /mnt/sdcard/tests/foo2', ''),
('isdir /mnt/sdcard/tests', 'TRUE'),
('cd /mnt/sdcard/tests', ''),
('ls', 'foo2'),
('isdir /mnt/sdcard/tests/foo2', 'TRUE'),
('cd /mnt/sdcard/tests/foo2', ''),
('ls', 'bar')]
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port,
logLevel=logging.DEBUG)
self.assertTrue(d.dirExists('/mnt/sdcard/tests/foo/bar'))
self.assertEqual(None, d.moveTree('/mnt/sdcard/tests/foo',
'/mnt/sdcard/tests/foo2'))
self.assertTrue(d.fileExists('/mnt/sdcard/tests/foo2'))
self.assertTrue(d.fileExists('/mnt/sdcard/tests/foo2/bar'))
if __name__ == "__main__":
mozunit.main()
|
mpl-2.0
| 8,198,250,175,775,828,000 | 40.102941 | 86 | 0.532379 | false |
ardi69/pyload-0.4.10
|
pyload/plugin/addon/WindowsPhoneNotify.py
|
1
|
4278
|
# -*- coding: utf-8 -*-
import httplib
import time
from pyload.plugin.Addon import Addon, Expose
class WindowsPhoneNotify(Addon):
__name = "WindowsPhoneNotify"
__type = "addon"
__version = "0.10"
__config = [("push-id" , "str" , "Push ID" , "" ),
("push-url" , "str" , "Push url" , "" ),
("notifycaptcha" , "bool", "Notify captcha request" , True ),
("notifypackage" , "bool", "Notify package finished" , True ),
("notifyprocessed", "bool", "Notify packages processed" , True ),
("notifyupdate" , "bool", "Notify plugin updates" , True ),
("notifyexit" , "bool", "Notify pyLoad shutdown" , True ),
("sendtimewait" , "int" , "Timewait in seconds between notifications", 5 ),
("sendpermin" , "int" , "Max notifications per minute" , 12 ),
("ignoreclient" , "bool", "Send notifications if client is connected", False)]
__description = """Send push notifications to Windows Phone"""
__license = "GPLv3"
__authors = [("Andy Voigt" , "phone-support@hotmail.de"),
("Walter Purcaro", "vuolter@gmail.com" )]
def setup(self):
self.event_list = ["allDownloadsProcessed", "plugin_updated"]
self.last_notify = 0
self.notifications = 0
def plugin_updated(self, type_plugins):
if not self.getConfig('notifyupdate'):
return
self.notify(_("Plugins updated"), str(type_plugins))
def coreReady(self):
self.key = (self.getConfig('push-id'), self.getConfig('push-url'))
def exit(self):
if not self.getConfig('notifyexit'):
return
if self.core.do_restart:
self.notify(_("Restarting pyLoad"))
else:
self.notify(_("Exiting pyLoad"))
def newCaptchaTask(self, task):
if not self.getConfig('notifycaptcha'):
return
self.notify(_("Captcha"), _("New request waiting user input"))
def packageFinished(self, pypack):
if self.getConfig('notifypackage'):
self.notify(_("Package finished"), pypack.name)
def allDownloadsProcessed(self):
if not self.getConfig('notifyprocessed'):
return
if any(True for pdata in self.core.api.getQueue() if pdata.linksdone < pdata.linkstotal):
self.notify(_("Package failed"), _("One or more packages was not completed successfully"))
else:
self.notify(_("All packages finished"))
def getXmlData(self, msg):
return ("<?xml version='1.0' encoding='utf-8'?> <wp:Notification xmlns:wp='WPNotification'> "
"<wp:Toast> <wp:Text1>pyLoad</wp:Text1> <wp:Text2>%s</wp:Text2> "
"</wp:Toast> </wp:Notification>" % msg)
@Expose
def notify(self,
event,
msg="",
key=(None, None)):
id, url = key or self.key
if not id or not url:
return
if self.core.isClientConnected() and not self.getConfig('ignoreclient'):
return
elapsed_time = time.time() - self.last_notify
if elapsed_time < self.getConfig("sendtimewait"):
return
if elapsed_time > 60:
self.notifications = 0
elif self.notifications >= self.getConfig("sendpermin"):
return
request = self.getXmlData("%s: %s" % (event, msg) if msg else event)
webservice = httplib.HTTP(url)
webservice.putrequest("POST", id)
webservice.putheader("Host", url)
webservice.putheader("Content-type", "text/xml")
webservice.putheader("X-NotificationClass", "2")
webservice.putheader("X-WindowsPhone-Target", "toast")
webservice.putheader("Content-length", "%d" % len(request))
webservice.endheaders()
webservice.send(request)
webservice.close()
self.last_notify = time.time()
self.notifications += 1
return True
|
gpl-3.0
| -8,406,304,048,525,787,000 | 32.162791 | 102 | 0.538569 | false |
xhochy/arrow
|
python/pyarrow/tests/test_hdfs.py
|
1
|
13325
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pickle
import pytest
import random
import unittest
from io import BytesIO
from os.path import join as pjoin
import numpy as np
import pyarrow as pa
import pyarrow.tests.test_parquet as test_parquet
from pyarrow.pandas_compat import _pandas_api
from pyarrow.tests import util
from pyarrow.util import guid
# ----------------------------------------------------------------------
# HDFS tests
def hdfs_test_client():
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')
user = os.environ.get('ARROW_HDFS_TEST_USER', None)
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
with pytest.warns(DeprecationWarning):
return pa.hdfs.connect(host, port, user)
@pytest.mark.hdfs
class HdfsTestCases:
def _make_test_file(self, hdfs, test_name, test_path, test_data):
base_path = pjoin(self.tmp_path, test_name)
hdfs.mkdir(base_path)
full_path = pjoin(base_path, test_path)
with hdfs.open(full_path, 'wb') as f:
f.write(test_data)
return full_path
@classmethod
def setUpClass(cls):
cls.check_driver()
cls.hdfs = hdfs_test_client()
cls.tmp_path = '/tmp/pyarrow-test-{}'.format(random.randint(0, 1000))
cls.hdfs.mkdir(cls.tmp_path)
@classmethod
def tearDownClass(cls):
cls.hdfs.delete(cls.tmp_path, recursive=True)
cls.hdfs.close()
def test_pickle(self):
s = pickle.dumps(self.hdfs)
h2 = pickle.loads(s)
assert h2.is_open
assert h2.host == self.hdfs.host
assert h2.port == self.hdfs.port
assert h2.user == self.hdfs.user
assert h2.kerb_ticket == self.hdfs.kerb_ticket
# smoketest unpickled client works
h2.ls(self.tmp_path)
def test_cat(self):
path = pjoin(self.tmp_path, 'cat-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
contents = self.hdfs.cat(path)
assert contents == data
def test_capacity_space(self):
capacity = self.hdfs.get_capacity()
space_used = self.hdfs.get_space_used()
disk_free = self.hdfs.df()
assert capacity > 0
assert capacity > space_used
assert disk_free == (capacity - space_used)
def test_close(self):
client = hdfs_test_client()
assert client.is_open
client.close()
assert not client.is_open
with pytest.raises(Exception):
client.ls('/')
def test_mkdir(self):
path = pjoin(self.tmp_path, 'test-dir/test-dir')
parent_path = pjoin(self.tmp_path, 'test-dir')
self.hdfs.mkdir(path)
assert self.hdfs.exists(path)
self.hdfs.delete(parent_path, recursive=True)
assert not self.hdfs.exists(path)
def test_mv_rename(self):
path = pjoin(self.tmp_path, 'mv-test')
new_path = pjoin(self.tmp_path, 'mv-new-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
assert self.hdfs.exists(path)
self.hdfs.mv(path, new_path)
assert not self.hdfs.exists(path)
assert self.hdfs.exists(new_path)
assert self.hdfs.cat(new_path) == data
self.hdfs.rename(new_path, path)
assert self.hdfs.cat(path) == data
def test_info(self):
path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(path, 'ex')
self.hdfs.mkdir(path)
data = b'foobarbaz'
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
path_info = self.hdfs.info(path)
file_path_info = self.hdfs.info(file_path)
assert path_info['kind'] == 'directory'
assert file_path_info['kind'] == 'file'
assert file_path_info['size'] == len(data)
def test_exists_isdir_isfile(self):
dir_path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(dir_path, 'ex')
missing_path = pjoin(dir_path, 'this-path-is-missing')
self.hdfs.mkdir(dir_path)
with self.hdfs.open(file_path, 'wb') as f:
f.write(b'foobarbaz')
assert self.hdfs.exists(dir_path)
assert self.hdfs.exists(file_path)
assert not self.hdfs.exists(missing_path)
assert self.hdfs.isdir(dir_path)
assert not self.hdfs.isdir(file_path)
assert not self.hdfs.isdir(missing_path)
assert not self.hdfs.isfile(dir_path)
assert self.hdfs.isfile(file_path)
assert not self.hdfs.isfile(missing_path)
def test_disk_usage(self):
path = pjoin(self.tmp_path, 'disk-usage-base')
p1 = pjoin(path, 'p1')
p2 = pjoin(path, 'p2')
subdir = pjoin(path, 'subdir')
p3 = pjoin(subdir, 'p3')
if self.hdfs.exists(path):
self.hdfs.delete(path, True)
self.hdfs.mkdir(path)
self.hdfs.mkdir(subdir)
data = b'foobarbaz'
for file_path in [p1, p2, p3]:
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
assert self.hdfs.disk_usage(path) == len(data) * 3
def test_ls(self):
base_path = pjoin(self.tmp_path, 'ls-test')
self.hdfs.mkdir(base_path)
dir_path = pjoin(base_path, 'a-dir')
f1_path = pjoin(base_path, 'a-file-1')
self.hdfs.mkdir(dir_path)
f = self.hdfs.open(f1_path, 'wb')
f.write(b'a' * 10)
contents = sorted(self.hdfs.ls(base_path, False))
assert contents == [dir_path, f1_path]
def test_chmod_chown(self):
path = pjoin(self.tmp_path, 'chmod-test')
with self.hdfs.open(path, 'wb') as f:
f.write(b'a' * 10)
def test_download_upload(self):
base_path = pjoin(self.tmp_path, 'upload-test')
data = b'foobarbaz'
buf = BytesIO(data)
buf.seek(0)
self.hdfs.upload(base_path, buf)
out_buf = BytesIO()
self.hdfs.download(base_path, out_buf)
out_buf.seek(0)
assert out_buf.getvalue() == data
def test_file_context_manager(self):
path = pjoin(self.tmp_path, 'ctx-manager')
data = b'foo'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
assert f.size() == 3
result = f.read(10)
assert result == data
def test_open_not_exist_error_message(self):
# ARROW-226
path = pjoin(self.tmp_path, 'does-not-exist-123')
try:
self.hdfs.open(path)
except Exception as e:
assert 'file does not exist' in e.args[0].lower()
def test_read_whole_file(self):
path = pjoin(self.tmp_path, 'read-whole-file')
data = b'foo' * 1000
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
result = f.read()
assert result == data
def _write_multiple_hdfs_pq_files(self, tmpdir):
import pyarrow.parquet as pq
nfiles = 10
size = 5
test_data = []
for i in range(nfiles):
df = test_parquet._test_dataframe(size, seed=i)
df['index'] = np.arange(i * size, (i + 1) * size)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = pjoin(tmpdir, '{}.parquet'.format(i))
table = pa.Table.from_pandas(df, preserve_index=False)
with self.hdfs.open(path, 'wb') as f:
pq.write_table(table, f)
test_data.append(table)
expected = pa.concat_tables(test_data)
return expected
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files(self):
tmpdir = pjoin(self.tmp_path, 'multi-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
result = self.hdfs.read_parquet(tmpdir)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'multi-parquet-uri-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
path = _get_hdfs_uri(tmpdir)
# TODO for URI it should not be needed to pass this argument
result = pq.read_table(path, use_legacy_dataset=True)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_write_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'uri-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
path = _get_hdfs_uri(pjoin(tmpdir, 'test.parquet'))
size = 5
df = test_parquet._test_dataframe(size, seed=0)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
table = pa.Table.from_pandas(df, preserve_index=False)
pq.write_table(table, path, filesystem=self.hdfs)
result = pq.read_table(
path, filesystem=self.hdfs, use_legacy_dataset=True
).to_pandas()
_pandas_api.assert_frame_equal(result, df)
@pytest.mark.parquet
@pytest.mark.pandas
def test_read_common_metadata_files(self):
tmpdir = pjoin(self.tmp_path, 'common-metadata-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_read_common_metadata_files(self.hdfs, tmpdir)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_to_dataset_with_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_with_partitions(
tmpdir, filesystem=self.hdfs)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_to_dataset_no_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-no_partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_no_partitions(
tmpdir, filesystem=self.hdfs)
class TestLibHdfs(HdfsTestCases, unittest.TestCase):
@classmethod
def check_driver(cls):
if not pa.have_libhdfs():
message = 'No libhdfs available on system'
if os.environ.get('PYARROW_HDFS_TEST_LIBHDFS_REQUIRE'):
pytest.fail(message)
else:
pytest.skip(message)
def test_orphaned_file(self):
hdfs = hdfs_test_client()
file_path = self._make_test_file(hdfs, 'orphaned_file_test', 'fname',
b'foobarbaz')
f = hdfs.open(file_path)
hdfs = None
f = None # noqa
def _get_hdfs_uri(path):
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'localhost')
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
uri = "hdfs://{}:{}{}".format(host, port, path)
return uri
@pytest.mark.hdfs
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.fastparquet
def test_fastparquet_read_with_hdfs():
from pandas.testing import assert_frame_equal
try:
import snappy # noqa
except ImportError:
pytest.skip('fastparquet test requires snappy')
import pyarrow.parquet as pq
fastparquet = pytest.importorskip('fastparquet')
fs = hdfs_test_client()
df = util.make_dataframe()
table = pa.Table.from_pandas(df)
path = '/tmp/testing.parquet'
with fs.open(path, 'wb') as f:
pq.write_table(table, f)
parquet_file = fastparquet.ParquetFile(path, open_with=fs.open)
result = parquet_file.to_pandas()
assert_frame_equal(result, df)
|
apache-2.0
| -1,749,521,619,231,526,100 | 29.284091 | 77 | 0.59227 | false |
moonlet/fuli
|
src/fuli_spiders/fuli_spiders/spiders/youdianying.py
|
1
|
1705
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from scrapy import Request
from scrapy.selector import Selector
from datetime import datetime
from base import BaseSpider
import db
class YouDianYin(BaseSpider):
name = 'youdianying'
ch_name = u'有点硬'
start_urls = ['https://youdian.in/']
def parse(self, response):
selector = Selector(response=response)
articles = selector.xpath('//*[@id="main"]/*/div[@class="post-box"]')
timeline = db.get_collection('timeline')
for item in articles:
try:
title = item.xpath('div[@class="post-header"]/p/a/text()').extract()[0]
# link URL
url = item.xpath('div[@class="post-header"]/p/a/@href').extract()[0]
description = item.xpath('*/div[@class="post-expert"]/text()').extract()[0]
description = self._join_text(description)
# image URL
img = item.xpath('*/div[@class="post-info"]/a/img/@data-original').extract()[0]
# YYYY-MM-DD
#date = item.xpath('*/div[@class="post-date"]/text()').extract()[0].strip()
date = item.xpath('div[@class="post-content"]/div[@class="post-footer"]/div[@class="post-date"]/text()').extract()[0]
date = datetime.strptime(date, '%Y-%m-%d')
self.save(title=title, url=url, description=description,
img=img, date=date)
except IndexError:
continue
next_page = selector.xpath(u'//*/div[@class="page-navigator"]/li/a[text()="下一页 »"]/@href').extract()[0]
yield Request(response.urljoin(next_page), self.parse)
|
mit
| 5,278,915,593,901,573,000 | 43.526316 | 133 | 0.562057 | false |
copperwire/SIMS
|
file_handling/file_handler.py
|
1
|
5910
|
import sys
import os
import numpy as np
class file_handler:
"""
Public methods:
__init__
file_iteration
data_conversion
runtime
Class to get data from the SIMS output files.
The data is collected in groups by the delimiters in the data file, e.g. **** DATA START*** contains all numerical data.
Operations are then performed to convert the data to human and machine readable formats.
The class can easily be changed to take an arbitrary data file with a known delimiter between types of parameters.
If there are several data sets, submit a list with strings denoting the start of the dataset and the corresponding
attribute of the class will be a dictionary with the denoted rows as keys with substance as the first element
and the x and y axis as the second and third element.
The class is initialized with the plotting module, if you wish to only use the file decoding aspect please view the sample
running in the docstring of the runtime method.
"""
def __init__(self, filename):
"""Gets the filename from which the data is collected. """
self.filename = filename
self.has_iterated = False
def file_iteration(self, delim = "***"):
"""
Walks through the supplied data file and stores all lines in string format. The data is saved
by assigning each set of rows to the corresponding delimiter. The delimiting value (by default \" *** \")
determines where the method separates the objects. Each part of the file can be accessed through
getattr(ClassInstance, File Partition)
where File Partition is the string directly following the delimiter.
Method will be expanded to be more robust in taking delimiters with spaces adjacent to the string following the
delimiter.
"""
self.has_iterated = True
num_lines = sum(1 for line in open(self.filename))
with open(self.filename) as self.file_object:
line_indices = np.arange(0, num_lines, 1, dtype = int)
data_type_indices = []
lines = []
for line, i in zip(self.file_object, line_indices):
decoded = " ".join(line.split())
lines.append(decoded)
if line[:3] == delim:
data_type_indices.append(i)
self.attribute_names = []
for index_of_data_type, i in zip(data_type_indices, np.arange(0, len(data_type_indices), 1, dtype = int)):
attribute_name = str(lines[index_of_data_type][4:-4])
self.attribute_names.append(attribute_name)
try:
setattr(self, attribute_name, lines[index_of_data_type + 1: data_type_indices[i + 1 ] - 1])
except IndexError:
setattr(self, attribute_name, lines[index_of_data_type + 1: ])
def data_conversion(self, data_name = "DATA START", key_row= [2, 3], nonposy = True):
"""
Strictly needs to be run after the file_iteration method.
Formats the strings contained in the data_name attribute of the class to float.
To accomodate log-plots the default is to make a correction to the y axis such that
non-positive and small numbers are set to 1.
Keyword arguments:
data_name (string) = string following delimiter before the data set.
key_row (list) = list with the rows in the data set which contains information about the data
returns list of datasets where each dictionary has the keys:
data = 2 x n nested list where data[0] contains all data points corresponding to the key \" x_unit \"
x_unit = string with physical unit of data[0]
y_unit = string with physical unit of data[1]
sample info = nested list containing sample id, original filename, sample code etc.
sample name = name of the element measured by the SIMS process
"""
try:
data_set = getattr(self, data_name)
except AttributeError:
print("class has no attribute named %s. Trying to fix") %data_name
if self.has_iterated:
new_name = data_name.strip()
try:
data_set = getattr(self, new_name)
except AttributeError:
sys.exit("Unfixable:(")
else:
self.file_iteration()
try:
data_set = getattr(self, data_name)
print("fix'd. Don't thank me or anything. I'm just a program")
except AttributeError:
sys.exit("Unfixable:(")
self.substances = data_set[key_row[0]].split(" ")
units = data_set[key_row[1]].split(" ")
x = []
for line in data_set[key_row[1] + 1:] :
dat = line.split(" ")
a = [float(c) for c in dat]
y = []
"""
Making the arrays log-friendly by adding 1 to all intensity values smaller than one.
"""
y.append(a[0])
for i in range(1, len(a)):
if i % 2 == 1:
if a[i] < 1:
a[i] = a[i] + 1
else:
a[i] = a[i]
else:
a[i] = a[i]
x.append(a)
reshaping = np.shape(x)
u = np.reshape(x, (reshaping[0], reshaping[1]))
variables_per_substance = len(x[0])/len(self.substances)
self.list_of_datasets = []
for name, number in zip(self.substances, np.arange(0, len(x[0]), 2, dtype = int)):
y = {}
if u[0][number] < 1e-1:
units.pop(number)
else:
units.pop(number+1)
y["data"] = {"x": np.array(u[:,number]), "y": np.array(u[:,number + 1]), "element": [name for i in range(len(np.array(u[:,number + 1])))]}
y["x_unit"] = units[number]
y["y_unit"] = units[number + 1]
for attribute in self.attribute_names:
"""
Should implement conversion to one string with float argument appended as dictionary, maybe?
"""
value = getattr(self, attribute)
value = [line.split(" ") for line in value]
y[attribute] = value
y["sample element"] = name
setattr(self, name, y)
self.list_of_datasets.append(getattr(self, name))
return self.list_of_datasets
def runtime(self, delim = "***", data_name = "DATA START", key_row= [2, 3]):
"""
Runs the file iterator and data conversion and returns a tuple of the names of the analyzed elements
and dictionaries containing all data
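Example (the file name is hypothetical):
    handler = file_handler("sims_output.dat")
    substances, datasets = handler.runtime()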
"""
self.file_iteration(delim)
x = self.data_conversion(data_name, key_row)
return (self.substances, x)
|
cc0-1.0
| -7,246,006,517,227,118,000 | 30.945946 | 141 | 0.674619 | false |
walter-weinmann/wwe-euler
|
problems/solved/029_Solution.py
|
1
|
1344
|
'''
Created on 19.08.2013
@author: Walter
Consider all integer combinations of a^b for 2 ≤ a ≤ 5 and 2 ≤ b ≤ 5:
2^2=4, 2^3=8, 2^4=16, 2^5=32
3^2=9, 3^3=27, 3^4=81, 3^5=243
4^2=16, 4^3=64, 4^4=256, 4^5=1024
5^2=25, 5^3=125, 5^4=625, 5^5=3125
If they are then placed in numerical order, with any repeats removed, we get
the following sequence of 15 distinct terms:
4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
How many distinct terms are in the sequence generated by a^b for 2 ≤ a ≤ 100
and 2 ≤ b ≤ 100?
'''
import unittest
def distinct_powers(pA, pB):
lList = []
for i in range(2, pA + 1):
for j in range(2, pB + 1):
lInt = i ** j
if not lInt in lList:
lList.append(lInt)
return len(lList)
# =============================================================================
# Unit tests
# -----------------------------------------------------------------------------
class Test(unittest.TestCase):
def test_distinct_powers_10(self):
self.assertEqual(distinct_powers(5, 5), 15)
# =============================================================================
# Solution of the Euler task
# -----------------------------------------------------------------------------
print(distinct_powers(100, 100))
|
apache-2.0
| 3,219,445,063,219,351,600 | 24.538462 | 79 | 0.454819 | false |
ASU-CodeDevils/DemonHacks2017
|
gameFiles/Python/helpers.py
|
1
|
7120
|
from __future__ import print_function
from __future__ import division
import numpy as np
from datetime import datetime
from scipy.stats import norm
from scipy.optimize import minimize
def acq_max(ac, gp, y_max, bounds, random_state):
"""
A function to find the maximum of the acquisition function
It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
optimization method. First by sampling 1e5 points at random, and then
running L-BFGS-B from 250 random starting points.
Parameters
----------
:param ac:
The acquisition function object that return its point-wise value.
:param gp:
A gaussian process fitted to the relevant data.
:param y_max:
The current maximum known value of the target function.
:param bounds:
The variables bounds to limit the search of the acq max.
Returns
-------
:return: x_max, The arg max of the acquisition function.
"""
# Warm up with random points
x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
size=(100000, bounds.shape[0]))
ys = ac(x_tries, gp=gp, y_max=y_max)
x_max = x_tries[ys.argmax()]
max_acq = ys.max()
# Explore the parameter space more throughly
x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
size=(250, bounds.shape[0]))
for x_try in x_seeds:
# Find the minimum of minus the acquisition function
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_try.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
# Store it if better than previous minimum(maximum).
if max_acq is None or -res.fun[0] >= max_acq:
x_max = res.x
max_acq = -res.fun[0]
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
class UtilityFunction(object):
"""
An object to compute the acquisition functions.
"""
def __init__(self, kind, kappa, xi):
"""
If UCB is to be used, a constant kappa is needed.
"""
self.kappa = kappa
self.xi = xi
if kind not in ['ucb', 'ei', 'poi']:
err = "The utility function " \
"{} has not been implemented, " \
"please choose one of ucb, ei, or poi.".format(kind)
raise NotImplementedError(err)
else:
self.kind = kind
def utility(self, x, gp, y_max):
if self.kind == 'ucb':
return self._ucb(x, gp, self.kappa)
if self.kind == 'ei':
return self._ei(x, gp, y_max, self.xi)
if self.kind == 'poi':
return self._poi(x, gp, y_max, self.xi)
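# GP-UCB acquisition: mu(x) + kappa * sigma(x); kappa trades exploration off against exploitation.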
@staticmethod
def _ucb(x, gp, kappa):
mean, std = gp.predict(x, return_std=True)
return mean + kappa * std
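# Expected Improvement: EI(x) = (mu - y_max - xi) * Phi(z) + sigma * phi(z),
# with z = (mu - y_max - xi) / sigma and Phi/phi the standard normal CDF/PDF.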
@staticmethod
def _ei(x, gp, y_max, xi):
mean, std = gp.predict(x, return_std=True)
z = (mean - y_max - xi)/std
return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)
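# Probability of Improvement: POI(x) = Phi((mu - y_max - xi) / sigma).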
@staticmethod
def _poi(x, gp, y_max, xi):
mean, std = gp.predict(x, return_std=True)
z = (mean - y_max - xi)/std
return norm.cdf(z)
def unique_rows(a):
"""
A function to trim repeated rows that may appear when optimizing.
This is necessary to avoid the sklearn GP object from breaking
:param a: array to trim repeated rows from
:return: mask of unique rows
"""
# Sort array and keep track of where things should go back to
order = np.lexsort(a.T)
reorder = np.argsort(order)
a = a[order]
diff = np.diff(a, axis=0)
ui = np.ones(len(a), 'bool')
ui[1:] = (diff != 0).any(axis=1)
return ui[reorder]
class BColours(object):
BLUE = '\033[94m'
CYAN = '\033[36m'
GREEN = '\033[32m'
MAGENTA = '\033[35m'
RED = '\033[31m'
ENDC = '\033[0m'
class PrintLog(object):
def __init__(self, params):
self.ymax = None
self.xmax = None
self.params = params
self.ite = 1
self.start_time = datetime.now()
self.last_round = datetime.now()
# sizes of parameters name and all
self.sizes = [max(len(ps), 7) for ps in params]
# Sorted indexes to access parameters
self.sorti = sorted(range(len(self.params)),
key=self.params.__getitem__)
def reset_timer(self):
self.start_time = datetime.now()
self.last_round = datetime.now()
def print_header(self, initialization=True):
if initialization:
print("{}Initialization{}".format(BColours.RED,
BColours.ENDC))
else:
print("{}Bayesian Optimization{}".format(BColours.RED,
BColours.ENDC))
print(BColours.BLUE + "-" * (29 + sum([s + 5 for s in self.sizes])) +
BColours.ENDC)
print("{0:>{1}}".format("Step", 5), end=" | ")
print("{0:>{1}}".format("Time", 6), end=" | ")
print("{0:>{1}}".format("Value", 10), end=" | ")
for index in self.sorti:
print("{0:>{1}}".format(self.params[index],
self.sizes[index] + 2),
end=" | ")
print('')
def print_step(self, x, y, warning=False):
print("{:>5d}".format(self.ite), end=" | ")
m, s = divmod((datetime.now() - self.last_round).total_seconds(), 60)
print("{:>02d}m{:>02d}s".format(int(m), int(s)), end=" | ")
if self.ymax is None or self.ymax < y:
self.ymax = y
self.xmax = x
print("{0}{2: >10.5f}{1}".format(BColours.MAGENTA,
BColours.ENDC,
y),
end=" | ")
for index in self.sorti:
print("{0}{2: >{3}.{4}f}{1}".format(
BColours.GREEN, BColours.ENDC,
x[index],
self.sizes[index] + 2,
min(self.sizes[index] - 3, 6 - 2)
),
end=" | ")
else:
print("{: >10.5f}".format(y), end=" | ")
for index in self.sorti:
print("{0: >{1}.{2}f}".format(x[index],
self.sizes[index] + 2,
min(self.sizes[index] - 3, 6 - 2)),
end=" | ")
if warning:
print("{}Warning: Test point chose at "
"random due to repeated sample.{}".format(BColours.RED,
BColours.ENDC))
print()
self.last_round = datetime.now()
self.ite += 1
def print_summary(self):
pass
|
mit
| 4,082,479,898,961,499,600 | 30.091703 | 81 | 0.505618 | false |
Tan0/ironic
|
ironic/tests/api/test_base.py
|
1
|
3304
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from webob import exc
from ironic.api.controllers import base as cbase
from ironic.tests.api import base
class TestBase(base.FunctionalTest):
def test_api_setup(self):
pass
def test_bad_uri(self):
response = self.get_json('/bad/path',
expect_errors=True,
headers={"Accept": "application/json"})
self.assertEqual(404, response.status_int)
self.assertEqual("application/json", response.content_type)
self.assertTrue(response.json['error_message'])
class TestVersion(base.FunctionalTest):
@mock.patch('ironic.api.controllers.base.Version.parse_headers')
def test_init(self, mock_parse):
a = mock.Mock()
b = mock.Mock()
mock_parse.return_value = (a, b)
v = cbase.Version('test', 'foo', 'bar')
mock_parse.assert_called_with('test', 'foo', 'bar')
self.assertEqual(a, v.major)
self.assertEqual(b, v.minor)
@mock.patch('ironic.api.controllers.base.Version.parse_headers')
def test_repr(self, mock_parse):
mock_parse.return_value = (123, 456)
v = cbase.Version('test', mock.ANY, mock.ANY)
result = "%s" % v
self.assertEqual('123.456', result)
@mock.patch('ironic.api.controllers.base.Version.parse_headers')
def test_repr_with_strings(self, mock_parse):
mock_parse.return_value = ('abc', 'def')
v = cbase.Version('test', mock.ANY, mock.ANY)
result = "%s" % v
self.assertEqual('abc.def', result)
def test_parse_headers_ok(self):
version = cbase.Version.parse_headers(
{cbase.Version.string: '123.456'}, mock.ANY, mock.ANY)
self.assertEqual((123, 456), version)
def test_parse_headers_latest(self):
for s in ['latest', 'LATEST']:
version = cbase.Version.parse_headers(
{cbase.Version.string: s}, mock.ANY, '1.9')
self.assertEqual((1, 9), version)
def test_parse_headers_bad_length(self):
self.assertRaises(
exc.HTTPNotAcceptable,
cbase.Version.parse_headers,
{cbase.Version.string: '1'},
mock.ANY,
mock.ANY)
self.assertRaises(
exc.HTTPNotAcceptable,
cbase.Version.parse_headers,
{cbase.Version.string: '1.2.3'},
mock.ANY,
mock.ANY)
def test_parse_no_header(self):
# this asserts that the minimum version string of "1.1" is applied
version = cbase.Version.parse_headers({}, '1.1', '1.5')
self.assertEqual((1, 1), version)
|
apache-2.0
| -2,916,717,131,872,390,000 | 34.913043 | 78 | 0.618947 | false |
stephanpieterse/pyblend-animidi
|
blenderfunc/blender_funcIncludes.py
|
1
|
16560
|
# we need this to scale actions and keyframing right
def get_relative_action_scale(obj_s, obj_e, note_s, note_e):
action_frames = obj_e - obj_s # 10 - 5 = 5
note_frames = note_e - note_s # 1050 - 1025 = 25
if action_frames <= 0:
action_frames = 1
relative_scale = note_frames / float(action_frames) # 25 / 5.0 = 5
return relative_scale
# this is needed so we don't super overextend the end frames needed position
def get_frame_shift(relative_scale, obj_s, obj_e):
action_len = obj_e - obj_s # 10 - 5 = 5
new_len = action_len * relative_scale # 5 * 5 = 25
new_end = obj_s + new_len # 5 + 25 = 30
return new_end
def buildContinueAction(curAction, newobj, noteStart, noteEnd):
for i in curAction.fcurves:
rna_index_exists = False
for j in newobj.animation_data.action.fcurves:
if (j.data_path == i.data_path) and (j.array_index == i.array_index) and (j.group.name == i.group.name):
rna_index_exists = True
if rna_index_exists is False:
newfc = newobj.animation_data.action.fcurves.new(i.data_path, index=i.array_index, action_group=i.group.name)
else:
for j in newobj.animation_data.action.fcurves:
if (j.data_path == i.data_path) and (j.array_index == i.array_index) and (j.group.name == i.group.name):
newfc = j
# we need to figure out where the action officially starts to scale everything right
left_most_point = float('inf')
right_most_point = float('-inf')
for y in i.keyframe_points:
left_most_point = min(y.co.x, left_most_point)
right_most_point = max(y.co.x, right_most_point)
#if y.co.x <= left_most_point:
# left_most_point = y.co.x
#if y.co.x >= right_most_point:
# right_most_point = y.co.x
actionRelScale = get_relative_action_scale(left_most_point,right_most_point,noteStart,noteEnd)
fckp = len(newfc.keyframe_points) - 1
for x in i.keyframe_points:
if x.co.x == left_most_point:
curX = bpy.context.scene.frame_current
new_co = [curX + x.co.x, x.co.y]
newfc.keyframe_points.add(1)
newfc.keyframe_points[fckp].co = new_co
newfc.keyframe_points[fckp].handle_left = [curX + x.handle_left.x, x.handle_left.y]
newfc.keyframe_points[fckp].handle_left_type = x.handle_left_type
newfc.keyframe_points[fckp].handle_right = [curX + x.handle_right.x, x.handle_right.y]
newfc.keyframe_points[fckp].handle_right_type = x.handle_right_type
newfc.keyframe_points[fckp].interpolation = x.interpolation
else:
curX = bpy.context.scene.frame_current
new_co = [curX + get_frame_shift(actionRelScale,left_most_point,x.co.x),x.co.y]
newfc.keyframe_points.add(1)
newfc.keyframe_points[fckp].co = new_co
newfc.keyframe_points[fckp].handle_left = [curX + get_frame_shift(actionRelScale, left_most_point, x.handle_left.x), x.handle_left.y]
newfc.keyframe_points[fckp].handle_left_type = x.handle_left_type
newfc.keyframe_points[fckp].handle_right = [curX + get_frame_shift(actionRelScale, left_most_point, x.handle_right.x), x.handle_right.y]
newfc.keyframe_points[fckp].handle_right_type = x.handle_right_type
newfc.keyframe_points[fckp].interpolation = x.interpolation
fckp += 1
# our func to create objects that need to pop into the scene
def duplicateObject(scene, name, copy_obj):
# Create new mesh
mesh = bpy.data.meshes.new(name)
# Create new object associated with the mesh
ob_new = bpy.data.objects.new(name, mesh)
copy_obj = bpy.data.objects[copy_obj]
# Copy data block from the old object into the new object
ob_new.data = copy_obj.data.copy()
ob_new.scale = copy_obj.scale
ob_new.location = copy_obj.location
ob_new.rotation_euler = copy_obj.rotation_euler
# uncomment this line if scene becomes bloaty and slow i guess
# ob_new.hide = True
ob_new.hide_render = False
# Link new object to the given scene and select it
scene.objects.link(ob_new)
ob_new.select = True
return ob_new.name
# this is just a wrapper so we don't have to specify nla values for fcurves
def populateActionFromListFCurve(action_list, action_object, calc_frame, start_frame, end_frame):
return populateActionFromList(action_list, action_object, calc_frame, start_frame, end_frame, 'HOLD', 'REPLACE', False, 'FCURVE')
# mode can be either 'NLA' or 'FCURVE'
def populateActionFromList(action_list, action_object, calc_frame, start_frame, end_frame, nla_extrap, nla_blend, nla_autoblend, mode = 'NLA'):
# take the start and end frames for each note, and space the actions specified into that space.
# the total length should include: attack, note, (vibrato)
# the prenote is not relative, and ENDS at the start of the attack frame action
# the attack starts on the start_frame, and is not relative
# the note immediately follows, and is relative (scaled)
# if there is a vibrato, we should get the vibrato delay, then if the note holds long enough,
# shorten it and squeeze the vibrato into the rest of the space
# the release follows immediately at the end frame, not relative.
# i'm not really sure what to do with wait. release should end at the right spot?
# IMPORTANT : even though some actions are not relative to the note duration,
# they ARE relative to the soundOptions in conf!
# a note on blender: the nla strips support setting the start to a float number, but not inserting as.
# that's why we have a magic number one to just shift it for the insertion, and set it to where it should
# be right after.
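# Resulting layout per note: the prenote ends exactly at calc_frame, the attack starts
# there (scaled to its own time), the note fills the scaled start->end span (optionally
# split into note + vibrato), and the release is appended after the note ends.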
if action_object.animation_data is None:
action_object.animation_data_create()
NLATrack = action_object.animation_data.nla_tracks.new()
if 'prenote' in action_list.keys():
curAction = bpy.data.actions[action_list['prenote']]
action_length = curAction.frame_range.y - curAction.frame_range.x
pre_start_frame = calc_frame - action_length
if mode == 'NLA':
CurStrip = NLATrack.strips.new("prenoteStrip", pre_start_frame+1, curAction)
CurStrip.frame_start = pre_start_frame
CurStrip.frame_end = pre_start_frame + action_length
CurStrip.extrapolation = nla_extrap
CurStrip.blend_type = nla_blend
CurStrip.use_auto_blend = nla_autoblend
elif mode == 'FCURVE':
buildContinueActionV2(curAction, action_object, pre_start_frame, start_frame, end_frame, True)
if 'attack' in action_list.keys():
curAction = bpy.data.actions[action_list['attack']['action']]
curActionTime = action_list['attack']['time']
action_length = curAction.frame_range.y - curAction.frame_range.x
if mode == 'NLA':
CurStrip = NLATrack.strips.new("attackStrip", calc_frame+1, curAction)
CurStrip.frame_start = calc_frame
CurStrip.frame_end = calc_frame + action_length
CurActionStart = CurStrip.frame_start
CurActionEnd = CurStrip.frame_end
actionRelScale = get_relative_action_scale(CurActionStart, CurActionEnd, 0, curActionTime)
CurStrip.frame_end = get_frame_shift(actionRelScale,CurActionStart,CurActionEnd)
AttackActionEnd = CurStrip.frame_end
CurStrip.extrapolation = nla_extrap
CurStrip.blend_type = nla_blend
CurStrip.use_auto_blend = nla_autoblend
elif mode == 'FCURVE':
buildContinueActionV2(curAction, action_object, calc_frame, 0, curActionTime)
calc_end = calc_frame + action_length
actionRelScale = get_relative_action_scale(calc_frame, calc_end, 0, curActionTime)
AttackActionEnd = get_frame_shift(actionRelScale, calc_frame, calc_end)
else:
AttackActionEnd = calc_frame
if 'vibrato' in action_list.keys():
noteAction = bpy.data.actions[action_list['note']]
note_action_length = noteAction.frame_range.y - noteAction.frame_range.x
vibratoAction = bpy.data.actions[action_list['vibrato']['action']]
vibrato_action_length = vibratoAction.frame_range.y - vibratoAction.frame_range.x
vibratoActionTime = action_list['vibrato']['time']
NoteStrip = NLATrack.strips.new("noteStrip", AttackActionEnd+1, noteAction)
actionRelScale = get_relative_action_scale(noteAction.frame_range.x, noteAction.frame_range.y, start_frame, end_frame)
fullNoteEnd = get_frame_shift(actionRelScale, noteAction.frame_range.x, noteAction.frame_range.y)
if note_action_length > vibratoActionTime:
NoteStrip.frame_start = AttackActionEnd
NoteStrip.frame_end = NoteStrip.frame_start + vibratoActionTime
prevNoteStripEnd = NoteStrip.frame_end
if mode == 'NLA':
VibratoStrip = NLATrack.strips.new("vibratoStrip",NoteStrip.frame_end+1,vibratoAction)
VibratoStrip.frame_start = NoteStrip.frame_end
VibratoStrip.frame_end = fullNoteEnd
VibratoStrip.extrapolation = nla_extrap
VibratoStrip.blend_type = nla_blend
VibratoStrip.use_auto_blend = nla_autoblend
elif mode == 'FCURVE':
buildContinueActionV2(vibratoAction, action_object, NoteStrip.frame_end, 0, vibratoActionTime)
release_start = fullNoteEnd
last_frame = fullNoteEnd
else:
if mode == 'NLA':
NoteStrip.frame_start = AttackActionEnd
NoteStrip.frame_end = AttackActionEnd + note_action_length
NoteActionStart = NoteStrip.frame_start
NoteActionEnd = NoteStrip.frame_end
actionRelScale = get_relative_action_scale(NoteActionStart, NoteActionEnd, start_frame, end_frame)
NoteStrip.frame_end = get_frame_shift(actionRelScale,NoteActionStart,NoteActionEnd)
release_start = NoteStrip.frame_end
last_frame = NoteStrip.frame_end
elif mode == 'FCURVE':
buildContinueActionV2(noteAction, action_object, AttackActionEnd, start_frame, end_frame)
action_end = AttackActionEnd + action_length
actionRelScale = get_relative_action_scale(AttackActionEnd, action_end, start_frame, end_frame)
release_start = get_frame_shift(actionRelScale, AttackActionEnd, action_end)
last_frame = release_start
NoteStrip.extrapolation = nla_extrap
NoteStrip.blend_type = nla_blend
NoteStrip.use_auto_blend = nla_autoblend
else:
curAction = bpy.data.actions[action_list['note']]
action_length = curAction.frame_range.y - curAction.frame_range.x
CurStrip = NLATrack.strips.new("noteStrip", AttackActionEnd+1, curAction)
if mode == 'NLA':
CurStrip.frame_start = AttackActionEnd
CurStrip.frame_end = AttackActionEnd + action_length
CurActionStart = CurStrip.frame_start
CurActionEnd = CurStrip.frame_end
actionRelScale = get_relative_action_scale(CurActionStart, CurActionEnd, start_frame, end_frame)
CurStrip.frame_end = get_frame_shift(actionRelScale, CurActionStart, CurActionEnd)
release_start = CurStrip.frame_end
last_frame = CurStrip.frame_end
CurStrip.extrapolation = nla_extrap
CurStrip.blend_type = nla_blend
CurStrip.use_auto_blend = nla_autoblend
elif mode == 'FCURVE':
buildContinueActionV2(curAction, action_object, AttackActionEnd, start_frame, end_frame)
action_end = AttackActionEnd + action_length
actionRelScale = get_relative_action_scale(AttackActionEnd, action_end, start_frame, end_frame)
release_start = get_frame_shift(actionRelScale, AttackActionEnd, action_end)
last_frame = release_start
if 'release' in action_list.keys():
curAction = bpy.data.actions[action_list['release']['action']]
curActionTime = action_list['release']['time']
action_length = curAction.frame_range.y - curAction.frame_range.x
if mode == 'NLA':
CurStrip = NLATrack.strips.new("releaseStrip", release_start+1, curAction)
CurStrip.frame_start = release_start
CurStrip.frame_end = release_start + action_length
CurActionStart = CurStrip.frame_start
CurActionEnd = CurStrip.frame_end
actionRelScale = get_relative_action_scale(CurActionStart, CurActionEnd, 0, curActionTime)
CurStrip.frame_end = get_frame_shift(actionRelScale,CurActionStart,CurActionEnd)
last_frame = CurStrip.frame_end
CurStrip.extrapolation = nla_extrap
CurStrip.blend_type = nla_blend
CurStrip.use_auto_blend = nla_autoblend
elif mode == 'FCURVE':
buildContinueActionV2(curAction, action_object, release_start, 0, curActionTime)
actionRelScale = get_relative_action_scale(curAction.frame_range.x, curAction.frame_range.y, 0, curActionTime)
last_frame = get_frame_shift(actionRelScale,curAction.frame_range.x,curAction.frame_range.y)
# return the last frame so we can do the kill action
return last_frame
def buildContinueActionV2(curAction, newobj, startFrame, noteStart, noteEnd, noRescale = False):
for i in curAction.fcurves:
rna_index_exists = False
for j in newobj.animation_data.action.fcurves:
if (j.data_path == i.data_path) and (j.array_index == i.array_index) and (j.group.name == i.group.name):
rna_index_exists = True
if rna_index_exists is False:
newfc = newobj.animation_data.action.fcurves.new(i.data_path, index=i.array_index, action_group=i.group.name)
else:
for j in newobj.animation_data.action.fcurves:
if (j.data_path == i.data_path) and (j.array_index == i.array_index) and (j.group.name == i.group.name):
newfc = j
# we need to figure out where the action officially starts to scale everything right
left_most_point = 100000.0
right_most_point = 0.0
for y in i.keyframe_points:
if y.co.x <= left_most_point:
left_most_point = y.co.x
if y.co.x >= right_most_point:
right_most_point = y.co.x
if noRescale:
actionRelScale = 1.0
else:
actionRelScale = get_relative_action_scale(left_most_point,right_most_point,noteStart,noteEnd)
fckp = len(newfc.keyframe_points) - 1
for x in i.keyframe_points:
if x.co.x == left_most_point:
curX = startFrame
new_co = [curX + x.co.x, x.co.y]
newfc.keyframe_points.add(1)
newfc.keyframe_points[fckp].co = new_co
newfc.keyframe_points[fckp].handle_left = [curX + x.handle_left.x, x.handle_left.y]
newfc.keyframe_points[fckp].handle_left_type = x.handle_left_type
newfc.keyframe_points[fckp].handle_right = [curX + x.handle_right.x, x.handle_right.y]
newfc.keyframe_points[fckp].handle_right_type = x.handle_right_type
newfc.keyframe_points[fckp].interpolation = x.interpolation
else:
curX = startFrame
new_co = [curX + get_frame_shift(actionRelScale,left_most_point,x.co.x),x.co.y]
newfc.keyframe_points.add(1)
newfc.keyframe_points[fckp].co = new_co
newfc.keyframe_points[fckp].handle_left = [curX + get_frame_shift(actionRelScale, left_most_point, x.handle_left.x), x.handle_left.y]
newfc.keyframe_points[fckp].handle_left_type = x.handle_left_type
newfc.keyframe_points[fckp].handle_right = [curX + get_frame_shift(actionRelScale, left_most_point, x.handle_right.x), x.handle_right.y]
newfc.keyframe_points[fckp].handle_right_type = x.handle_right_type
newfc.keyframe_points[fckp].interpolation = x.interpolation
fckp += 1
# END DEFS
|
gpl-3.0
| 3,051,098,367,651,859,000 | 52.941368 | 152 | 0.640519 | false |
muLAn-project/muLAn
|
muLAn/instruments.py
|
1
|
5376
|
# -*-coding:Utf-8 -*
# ====================================================================
# Packages
# ====================================================================
import configparser as cp
import copy
import glob
import muLAn
import muLAn.packages.general_tools as gtools
import muLAn.packages.algebra as algebra
import numpy as np
import os
import pandas as pd
import sys
import tables
class Instrument:
"""Class to store properties of each instrument
Args:
properties (list): list with shape (1, 2), where properties[0] is
the ID of the observatory (str), and properties[1] is a string
describing the properties of the observatory, as follows,
"Label, HTML color, Passband[=Gamma], Type, Location[, list of int".
`Label` is the name of the instrument; the color should be written
without the `#`, Gamma is the linear limb darkening coefficient
(default: 0.0), `Type` is `Magnitude` or `Flux` depending on the
input data files used, `Location` is a name referring to the
position of the instrument (a file `Location.dat` with JPL ephemeris
is required), and the optional list of int corresponds to the data IDs
to remove before the fit and the plots.
Examples for properties[1]:
OGLE-I, 000000, I=0.64, Magnitude, Earth
OGLE-V, 000000, V=0.5, Magnitude, Earth, 11, 60, 68, 73, 78, 121, 125, 128, 135
OGLE Passband I, 000000, I, Magnitude, Earth, 11, 60-68
In the third example, the data points from 60 to 68 (included) will
be removed. A file Earth.dat should be in the Data/ directory.
Attributes:
id (str): instrument ID.
label (str): instrument label.
color (str): HTML color with #.
passband (str): passband name.
gamma (float): linear limb-darkening coefficient Gamma.
type (str): wether the input data are in flux or magnitude units.
location (str): key word corresponding to the ephemeris file.
reject (:obj:`numpy.array`): array of the data ID to remove.
"""
def __init__(self, properties):
self._extract_properties(properties)
def _extract_properties(self, prop):
properties = dict()
properties['id'] = prop[0]
props = prop[1].split(',')
n_opts = len(props)
        if n_opts > 4:
            keywd = 'label color band type location'.split(' ')
            for i in range(5):
                properties.update({keywd[i]: props[i].strip()})
            if n_opts > 5:
                properties.update({'reject': props[5:]})
                self.reject = self._rejection_list(properties['reject'])
            else:
                # Keep the attribute defined even when no rejection list is given.
                self.reject = np.array([], dtype=int)
        else:
            txt = 'Syntax error or not enough properties provided for an instrument.'
            sys.exit(txt)
self.id = properties['id']
self.label = properties['label']
self.color = '#{:s}'.format(properties['color'])
band = self._extract_band(properties['band'])
self.passband = band[0]
self.gamma = band[1]
self.type = properties['type']
self.location = properties['location']
def _rejection_list(self, string):
string = [a.strip() for a in string]
nb = len(string)
        to_reject = np.array([], dtype=int)
for i in range(nb):
substring = string[i].split('-')
if len(substring) == 1:
to_reject = np.append(to_reject, int(substring[0]))
elif len(substring) == 2:
a = int(substring[0].strip())
b = int(substring[1].strip())
n = b - a + 1
                ids = np.linspace(a, b, n, dtype=int)
to_reject = np.append(to_reject, ids)
return to_reject
def _extract_band(self, string):
string = string.split('=')
string = [a.strip() for a in string]
if len(string) == 2:
return string[0].strip(), float(string[1])
else:
return string[0].strip(), 0.0
class InstrumentsList(Instrument):
"""Class to store a list of instruments (observatories).
Args:
input (str): file that defines instrument properties.
Attributes:
to_dict(dict): dictionary with keys corresponding to each instrument
ID, and values corresponding to a :obj:`muLAn.instruments.Instrument`
object.
"""
def __init__(self, input):
if isinstance(input, str):
self.file = input
self._load_from_file(input)
self._create_instruments_list()
def _load_from_file(self, fname):
cfgobs = cp.ConfigParser()
cfgobs.read(fname)
self.parser = cfgobs
def _create_instruments_list(self):
item = self.parser.items('ObservatoriesDetails')
n = len(item)
instruments_list = dict()
for i in range(n):
tmp = Instrument(list(item[i]))
instruments_list.update({tmp.id: tmp})
setattr(self, tmp.id, tmp)
self._instruments_list = instruments_list
self.len = n
def to_dict(self):
return self._instruments_list
def prop(self, val):
return self._instruments_list[val[0]]
if (__name__ == "__main__"):
pass
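    # Usage sketch (the file name is an assumed example): the configuration
    # file must contain an [ObservatoriesDetails] section with one
    # "id: Label, color, Passband[=Gamma], Type, Location" entry per line, in
    # the format described in the Instrument docstring above.
    instruments = InstrumentsList('observatories.ini')
    for obs_id, obs in instruments.to_dict().items():
        print(obs_id, obs.label, obs.passband, obs.gamma)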
|
mit
| -1,610,295,176,745,053,400 | 31 | 91 | 0.564546 | false |
dokterbob/python-postnl-checkout
|
tests/test_django.py
|
1
|
13320
|
import datetime
import decimal
from django.test import TestCase
from django.core.cache import cache
from httmock import HTTMock
from django_dynamic_fixture import G, N
from postnl_checkout.contrib.django_postnl_checkout.models import Order
from .base import PostNLTestMixin
class OrderTests(PostNLTestMixin, TestCase):
""" Tests for Order model. """
maxDiff = None
def setUp(self):
super(OrderTests, self).setUp()
self.order_datum = datetime.datetime(
year=2011, month=7, day=21,
hour=20, minute=11, second=0
)
self.verzend_datum = datetime.datetime(
year=2011, month=7, day=22,
hour=20, minute=11, second=0
)
def test_save(self):
""" Test saving an Order model. """
instance = N(Order)
instance.clean()
instance.save()
def test_prepare_order(self):
""" Test prepare_order class method. """
# Setup mock response
def response(url, request):
self.assertXMLEqual(
request.body, self.read_file('prepare_order_request.xml')
)
return self.read_file('prepare_order_response.xml')
kwargs = {
'AangebodenBetaalMethoden': {
'PrepareOrderBetaalMethode': {
'Code': 'IDEAL',
'Prijs': '5.00'
}
},
'AangebodenCommunicatieOpties': {
'PrepareOrderCommunicatieOptie': {
'Code': 'NEWS'
}
},
# FIXME: the following is not submitted by SUDS
# Most probably because it is not properly defined in the WSDL
# Contact PostNL about this.
# 'AangebodenOpties': {
# 'PrepareOrderOptie': {
# 'Code': 'WRAP',
# 'Prijs': '2.50'
# }
# },
# 'AfleverOpties': {
# 'AfleverOptie': {
# 'Code': 'PG',
# 'Kosten': '0.00',
# 'Toegestaan': True
# }
# },
'Consument': {
'ExtRef': 'test@e-id.nl'
},
'Contact': {
'Url': 'http://www.kadowereld.nl/url/contact'
},
'Order': {
'ExtRef': '1105_900',
'OrderDatum': self.order_datum,
'Subtotaal': '125.00',
'VerzendDatum': self.verzend_datum,
'VerzendKosten': '12.50'
},
'Retour': {
'BeschrijvingUrl': 'http://www.kadowereld.nl/url/beschrijving',
'PolicyUrl': 'http://www.kadowereld.nl/url/policy',
'RetourTermijn': 28,
'StartProcesUrl': 'http://www.kadowereld.nl/url/startproces'
},
'Service': {
'Url': 'http://www.kadowereld.nl/url/service'
}
}
# Execute API call
with HTTMock(response):
instance = Order.prepare_order(**kwargs)
# Assert model field values
self.assertTrue(instance.pk)
self.assertEquals(
instance.order_token, '0cfb4be2-47cf-4eac-865c-d66657953d5c'
)
self.assertEquals(
instance.order_ext_ref, '1105_900'
)
self.assertEquals(
instance.order_date, self.order_datum
)
# Assert JSON values
self.assertEquals(instance.prepare_order_request, kwargs)
self.assertEquals(instance.prepare_order_response, {
'Checkout': {
'OrderToken': '0cfb4be2-47cf-4eac-865c-d66657953d5c',
'Url': (
'http://tpppm-test.e-id.nl/Orders/OrderCheckout'
'?token=0cfb4be2-47cf-4eac-865c-d66657953d5c'
)
},
'Webshop': {
'IntRef': 'a0713e4083a049a996c302f48bb3f535'
}
})
def test_read_order(self):
""" Test read_order method. """
# Setup mock response
def response(url, request):
self.assertXMLEqual(
request.body, self.read_file('read_order_request.xml')
)
return self.read_file('read_order_response.xml')
instance = G(
Order,
order_token='0cfb4be2-47cf-4eac-865c-d66657953d5c'
)
# Read order data
with HTTMock(response):
new_instance = instance.read_order()
response_data = new_instance.read_order_response
self.assertTrue(response_data)
self.assertEquals(response_data, {
'Voorkeuren': {
'Bezorging': {
'Tijdvak': {
'Start': u'10:30',
'Eind': u'08:30'
},
'Datum': datetime.datetime(2012, 4, 26, 0, 0)
}
},
'Consument': {
'GeboorteDatum': datetime.datetime(1977, 6, 15, 0, 0),
'ExtRef': u'jjansen',
'TelefoonNummer': u'06-12345678',
'Email': u'j.jansen@e-id.nl'
},
'Facturatie': {
'Adres': {
'Huisnummer': u'1',
'Initialen': u'J',
'Geslacht': u'Meneer',
'Deurcode': None,
'Gebruik': u'P',
'Gebouw': None,
'Verdieping': None,
'Achternaam': u'Jansen',
'Afdeling': None,
'Regio': None,
'Land': u'NL',
'Wijk': None,
'Postcode': u'4131LV',
'Straat': 'Lage Biezenweg',
'Bedrijf': None,
'Plaats': u'Vianen',
'Tussenvoegsel': None,
'Voornaam': u'Jan',
'HuisnummerExt': None
}
},
'Webshop': {
'IntRef': u'a0713e4083a049a996c302f48bb3f535'
},
'CommunicatieOpties': {
'ReadOrderResponseCommunicatieOptie': [
{
'Text': u'Do not deliver to neighbours',
'Code': u'REMARK'
}
]
},
'Bezorging': {
'ServicePunt': {
'Huisnummer': None,
'Initialen': None,
'Geslacht': None,
'Deurcode': None,
'Gebruik': None,
'Gebouw': None,
'Verdieping': None,
'Achternaam': None,
'Afdeling': None,
'Regio': None,
'Land': None,
'Wijk': None,
'Postcode': None,
'Straat': None,
'Bedrijf': None,
'Plaats': None,
'Tussenvoegsel': None,
'Voornaam': None,
'HuisnummerExt': None
},
'Geadresseerde': {
'Huisnummer': u'1',
'Initialen': u'J',
'Geslacht': u'Meneer',
'Deurcode': None,
'Gebruik': u'Z',
'Gebouw': None,
'Verdieping': None,
'Achternaam': u'Janssen',
'Afdeling': None,
'Regio': None,
'Land': u'NL',
'Wijk': None,
'Postcode': u'4131LV',
'Straat': u'Lage Biezenweg ',
'Bedrijf': u'E-ID',
'Plaats': u'Vianen',
'Tussenvoegsel': None,
'Voornaam': u'Jan',
'HuisnummerExt': None
}
},
'Opties': {
'ReadOrderResponseOpties': [
{
'Text': u'Congratulat ions with your new foobar!',
'Code': u'CARD',
'Prijs': decimal.Decimal('2.00')
}
]
},
'Order': {
'ExtRef': u'15200_001'
},
'BetaalMethode': {
'Optie': u'0021',
'Code': u'IDEAL',
'Prijs': decimal.Decimal('0.00')
}
})
def test_confirm_order(self):
""" Test confirm_order """
def response(url, request):
self.assertXMLEqual(
request.body, self.read_file('confirm_order_request.xml')
)
return self.read_file('confirm_order_response.xml')
kwargs = {
'Order': {
'PaymentTotal': decimal.Decimal('183.25')
}
}
instance = G(
Order,
order_token='0cfb4be2-47cf-4eac-865c-d66657953d5c',
order_ext_ref='1105_900'
)
# Execute API call
with HTTMock(response):
instance.confirm_order(**kwargs)
def test_update_order(self):
""" Test update_order """
def response_success(url, request):
self.assertXMLEqual(
request.body, self.read_file('update_order_request.xml')
)
return self.read_file('update_order_response_success.xml')
def response_fail(url, request):
self.assertXMLEqual(
request.body, self.read_file('update_order_request.xml')
)
return self.read_file('update_order_response_fail.xml')
kwargs = {
'Order': {
'ExtRef': 'FDK004',
'Zending': {
'UpdateOrderOrderZending': {
'Busstuk': {
'UpdateOrderOrderZendingBusstuk': {
'Verzonden': '23-08-2011 12:00:00'
}
},
'ExtRef': '642be996-6ab3-4a4c-b7d6-2417a4cee0df',
'Pakket': {
'UpdateOrderOrderZendingPakket': {
'Barcode': '3s123456789',
'Postcode': '4131LV'
}
}
}
}
}
}
instance = G(
Order,
order_token='0cfb4be2-47cf-4eac-865c-d66657953d5c',
order_ext_ref='1105_900'
)
# Make call fail
with HTTMock(response_fail):
self.assertRaises(
Exception, lambda: instance.update_order(**kwargs)
)
# Make call pass
with HTTMock(response_success):
response = instance.update_order(**kwargs)
self.assertTrue(response)
# Make sure the requested stuff is saved
self.assertEquals(
instance.update_order_request, {
'Checkout': {
'OrderToken': '0cfb4be2-47cf-4eac-865c-d66657953d5c'
},
'Order': {
'ExtRef': 'FDK004',
'Zending': {
'UpdateOrderOrderZending': {
'Busstuk': {
'UpdateOrderOrderZendingBusstuk': {
'Verzonden': '23-08-2011 12:00:00'
}
},
'ExtRef': '642be996-6ab3-4a4c-b7d6-2417a4cee0df',
'Pakket': {
'UpdateOrderOrderZendingPakket': {
'Barcode': '3s123456789',
'Postcode': '4131LV'
}
}
}
}
}
}
)
def test_ping_status(self):
""" Test ping_status """
instance = G(Order)
self.response_called = 0
def ok_response(url, request):
# Assert
self.assertXMLEqual(
request.body,
self.read_file('ping_status_request.xml')
)
self.response_called += 1
return self.read_file('ping_status_response_ok.xml')
def nok_response(url, request):
return self.read_file('ping_status_response_nok.xml')
with HTTMock(ok_response):
self.assertEquals(instance.ping_status(), True)
self.assertEquals(self.response_called, 1)
# Repeated call should not cause the response to be called
with HTTMock(ok_response):
self.assertEquals(instance.ping_status(), True)
self.assertEquals(self.response_called, 1)
# Clear cache
cache.clear()
with HTTMock(nok_response):
self.assertEquals(instance.ping_status(), False)
|
agpl-3.0
| -5,406,795,193,126,462,000 | 30.866029 | 79 | 0.426652 | false |
ccook5/open-av-controller
|
testing/cli.py
|
1
|
4678
|
import argparse
from ConfigParser import (
NoSectionError,
SafeConfigParser as ConfigParser
)
from getpass import getpass
from os import path
from socket import socket
import sys
import appdirs
from pjlink import Projector
from pjlink import projector
from pjlink.cliutils import make_command
def cmd_power(p, state=None):
if state is None:
print p.get_power()
else:
p.set_power(state)
def cmd_input(p, source, number):
if source is None:
source, number = p.get_input()
print source, number
else:
p.set_input(source, number)
def cmd_inputs(p):
for source, number in p.get_inputs():
print '%s-%s' % (source, number)
def cmd_mute_state(p):
video, audio = p.get_mute()
print 'video:', 'muted' if video else 'unmuted'
print 'audio:', 'muted' if audio else 'unmuted'
def cmd_mute(p, what):
if what is None:
return cmd_mute_state(p)
what = {
'video': projector.MUTE_VIDEO,
'audio': projector.MUTE_AUDIO,
'all': projector.MUTE_VIDEO | projector.MUTE_AUDIO,
}[what]
p.set_mute(what, True)
def cmd_unmute(p, what):
if what is None:
return cmd_mute_state(p)
what = {
'video': projector.MUTE_VIDEO,
'audio': projector.MUTE_AUDIO,
'all': projector.MUTE_VIDEO | projector.MUTE_AUDIO,
}[what]
p.set_mute(what, False)
def cmd_info(p):
info = [
('Name', p.get_name().encode('utf-8')),
('Manufacturer', p.get_manufacturer()),
('Product Name', p.get_product_name()),
('Other Info', p.get_other_info())
]
for key, value in info:
print '%s: %s' % (key, value)
def cmd_lamps(p):
for i, (time, state) in enumerate(p.get_lamps(), 1):
print 'Lamp %d: %s (%d hours)' % (
i,
'on' if state else 'off',
time,
)
def cmd_errors(p):
for what, state in p.get_errors().items():
print '%s: %s' % (what, state)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--projector')
sub = parser.add_subparsers(title='command')
power = make_command(sub, 'power', cmd_power)
power.add_argument('state', nargs='?', choices=('on', 'off'))
inpt = make_command(sub, 'input', cmd_input)
inpt.add_argument('source', nargs='?', choices=projector.SOURCE_TYPES)
inpt.add_argument('number', nargs='?', choices='123456789', default='1')
make_command(sub, 'inputs', cmd_inputs)
mute = make_command(sub, 'mute', cmd_mute)
mute.add_argument('what', nargs='?', choices=('video', 'audio', 'all'))
unmute = make_command(sub, 'unmute', cmd_unmute)
unmute.add_argument('what', nargs='?', choices=('video', 'audio', 'all'))
make_command(sub, 'info', cmd_info)
make_command(sub, 'lamps', cmd_lamps)
make_command(sub, 'errors', cmd_errors)
return parser
def resolve_projector(projector):
password = None
# host:port
if projector is not None and ':' in projector:
host, port = projector.rsplit(':', 1)
port = int(port)
# maybe defined in config
else:
appdir = appdirs.user_data_dir('pjlink')
conf_file = path.join(appdir, 'pjlink.conf')
try:
config = ConfigParser({'port': '4352', 'password': ''})
with open(conf_file, 'r') as f:
config.readfp(f)
section = projector
if projector is None:
section = 'default'
host = config.get(section, 'host')
port = config.getint(section, 'port')
password = config.get(section, 'password') or None
except (NoSectionError, IOError):
if projector is None:
raise KeyError('No default projector defined in %s' % conf_file)
# no config file, or no projector defined for this host
# thus, treat the projector as a hostname w/o port
host = projector
port = 4352
return host, port, password
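# Illustrative pjlink.conf layout consumed by resolve_projector() above; the
# values are made-up examples, and the file lives under
# appdirs.user_data_dir('pjlink'). Each section names a projector alias, and
# [default] is used when no projector is given on the command line.
#
#   [default]
#   host = 192.0.2.10
#   port = 4352
#   password = secret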
def main():
parser = make_parser()
args = parser.parse_args()
kwargs = dict(args._get_kwargs())
func = kwargs.pop('__func__')
projector = kwargs.pop('projector')
host, port, password = resolve_projector(projector)
sock = socket()
sock.connect((host, port))
f = sock.makefile()
if password:
get_password = lambda: password
else:
get_password = getpass
proj = Projector(f)
rv = proj.authenticate(get_password)
if rv is False:
print>>sys.stderr, 'Incorrect password.'
return
func(proj, **kwargs)
if __name__ == '__main__':
main()
|
gpl-2.0
| -4,972,172,547,642,636,000 | 26.040462 | 80 | 0.588072 | false |
FeiSun/BERT4Rec
|
util.py
|
1
|
1031
|
from __future__ import print_function
import sys
import copy
import random
import numpy as np
from collections import defaultdict
def data_partition(fname):
usernum = 0
itemnum = 0
User = defaultdict(list)
user_train = {}
user_valid = {}
user_test = {}
# assume user/item index starting from 1
f = open(fname, 'r')
for line in f:
u, i = line.rstrip().split(' ')
u = int(u)
i = int(i)
usernum = max(u, usernum)
itemnum = max(i, itemnum)
User[u].append(i)
for user in User:
nfeedback = len(User[user])
if nfeedback < 3:
user_train[user] = User[user]
user_valid[user] = []
user_test[user] = []
else:
user_train[user] = User[user][:-2]
user_valid[user] = []
user_valid[user].append(User[user][-2])
user_test[user] = []
user_test[user].append(User[user][-1])
return [user_train, user_valid, user_test, usernum, itemnum]
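if __name__ == '__main__':
    # Minimal usage sketch; "ratings.txt" is an assumed example path holding
    # one "user item" pair per line (both 1-indexed integers), ordered by time
    # so that the last two items per user become the validation and test items.
    user_train, user_valid, user_test, usernum, itemnum = data_partition('ratings.txt')
    print('users:', usernum, 'items:', itemnum, 'train users:', len(user_train))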
|
apache-2.0
| 9,132,715,667,523,385,000 | 25.435897 | 64 | 0.543162 | false |
AutorestCI/azure-sdk-for-python
|
azure-batch/azure/batch/models/job_get_options.py
|
1
|
3278
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobGetOptions(Model):
"""Additional parameters for get operation.
:param select: An OData $select clause.
:type select: str
:param expand: An OData $expand clause.
:type expand: str
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
def __init__(self, select=None, expand=None, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
self.select = select
self.expand = expand
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
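# Typical construction (illustrative values only): narrow the returned fields
# with an OData $select clause and raise the server-side timeout before handing
# the options object to the corresponding job.get client operation.
#
#   options = JobGetOptions(select='id,state', timeout=60)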
|
mit
| 4,880,764,553,219,390,000 | 47.925373 | 217 | 0.687614 | false |
lowdev/alfred
|
robot/stt/watson/stt_watson/SttWatson.py
|
1
|
1865
|
import os
import signal
import threading
from ..config.Config import Config
from ..recording.Record import Record
from ..stt_watson.SttWatsonAbstractListener import SttWatsonAbstractListener
from ..utils.SignalHandler import SignalHandler
from ..watson_client.Client import Client
class SttWatson:
default_config = {
'audio-chunk': 8000,
'audio-rate': 44100,
'channels': 1,
'watson-stt': {
'user': None,
'password': None,
'model': 'en-US_BroadbandModel',
'tokenauth': None,
}
}
def __init__(self, config):
config['audio-chunk'] = 8000
config['audio-rate'] = 44100
config['channels'] = 1
self.listeners = []
self.stopper = threading.Event()
self.record = Record(config, self.stopper)
self.workers = [self.record]
self.watsonClient = Client(config)
self.handler = SignalHandler(self.stopper, self.workers)
signal.signal(signal.SIGINT, self.handler)
def addListener(self, listener):
if not isinstance(listener, SttWatsonAbstractListener):
raise Exception("Listener added is not a derived class from SttWatsonAbstractListener")
self.listeners.append(listener)
def pauseRecord(self):
self.record.pauseRecord()
def continuRecord(self):
self.record.continuRecord()
def setListeners(self, listeners):
        if not isinstance(listeners, list):
listeners = [listeners]
for listener in listeners:
self.addListener(listener)
def getListeners(self):
return self.listeners
def run(self):
audioFd, writer = os.pipe()
self.record.setWriter(writer)
self.record.start()
self.watsonClient.setListeners(self.listeners)
self.watsonClient.startStt(audioFd)
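# Usage sketch -- the credentials and the listener class are placeholders, not
# real values shipped with this package:
#
#   config = {'watson-stt': {'user': 'USERNAME', 'password': 'PASSWORD',
#                            'model': 'en-US_BroadbandModel', 'tokenauth': None}}
#   watson = SttWatson(config)
#   watson.addListener(MyListener())  # MyListener derives from SttWatsonAbstractListener
#   watson.run()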
|
gpl-3.0
| -7,645,826,309,077,861,000 | 29.080645 | 99 | 0.636461 | false |
Williams224/davinci-scripts
|
lbpketa/DNTupleMaker.py
|
1
|
15414
|
from Gaudi.Configuration import *
from Configurables import DaVinci
from Configurables import GaudiSequencer
simulation=False
stream='Bhadron'
line='StrippingB2XEtaLb2pKeta3piLine'
from Configurables import DecayTreeTuple
from DecayTreeTuple.Configuration import *
tuple=DecayTreeTuple()
tuple.Decay="[Lambda_b0 -> ^p+ ^K- ^(eta -> ^pi+ ^pi- ^(pi0 -> ^gamma ^gamma))]CC"
tuple.Branches={"Lambda_b0":"[Lambda_b0 -> p+ K- (eta -> pi+ pi- (pi0 -> gamma gamma))]CC"}
tuple.Inputs=['Phys/{0}/Particles'.format(line)]
tuple.ToolList += [
"TupleToolGeometry"
, "TupleToolDira"
, "TupleToolAngles"
, "TupleToolPid"
, "TupleToolKinematic"
, "TupleToolPropertime"
, "TupleToolPrimaries"
, "TupleToolEventInfo"
, "TupleToolTrackInfo"
, "TupleToolVtxIsoln"
, "TupleToolPhotonInfo"
#, "TupleToolMCTruth"
#, "TupleToolMCBackgroundInfo"
, "TupleToolCaloHypo"
, "TupleToolRecoStats"
#, "TupleToolTrackIsolation"
]
tuple.addTool(TupleToolDecay,name="Lambda_b0")
from Configurables import TupleToolDecayTreeFitter
#==============================REFIT WITH ETA, PI0 AND PV CONTRAINED==============================
tuple.Lambda_b0.addTupleTool('TupleToolDecayTreeFitter/PVFitpf')
tuple.Lambda_b0.PVFitpf.Verbose=True
tuple.Lambda_b0.PVFitpf.constrainToOriginVertex=True
tuple.Lambda_b0.PVFitpf.daughtersToConstrain = ["eta","p+","K-","pi0"]
#==============================REFIT WITH ONLY ETA AND PV CONSTRAINED==========================
tuple.Lambda_b0.addTupleTool('TupleToolDecayTreeFitter/PVFit')
tuple.Lambda_b0.PVFit.Verbose=True
tuple.Lambda_b0.PVFit.constrainToOriginVertex=True
tuple.Lambda_b0.PVFit.daughtersToConstrain = ["eta","p+","K-"]
#==============================REFIT WITH ONLY PV CONTRAINED==============================
tuple.Lambda_b0.addTupleTool('TupleToolDecayTreeFitter/PVOnly')
tuple.Lambda_b0.PVOnly.Verbose=True
tuple.Lambda_b0.PVOnly.constrainToOriginVertex=True
#========================================REFIT WITH JUST DAUGHTERS CONSTRAINED================================
tuple.Lambda_b0.addTupleTool('TupleToolDecayTreeFitter/NoPVFit')
tuple.Lambda_b0.NoPVFit.Verbose=True
tuple.Lambda_b0.NoPVFit.constrainToOriginVertex=False
tuple.Lambda_b0.NoPVFit.daughtersToConstrain = ["eta","p+","K-"]
#==============================REFIT WITH ETA AND PV K for piCONTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitKforpi')
tuple.B0.PVFitKforpi.Verbose=True
tuple.B0.PVFitKforpi.constrainToOriginVertex=True
tuple.B0.PVFitKforpi.daughtersToConstrain = ["eta"]
tuple.B0.PVFitKforpi.Substitutions={
"B0 -> (K*(892)0 -> ^K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "pi+" ,
"B~0 -> (K*(892)~0 -> ^K- pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "pi-" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piminus ->K swap ==============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminusforK')
tuple.B0.PVFitpiminusforK.Verbose=True
tuple.B0.PVFitpiminusforK.constrainToOriginVertex=True
tuple.B0.PVFitpiminusforK.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiminusforK.Substitutions={
"B0 -> (K*(892)0 -> K+ ^pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "K-" ,
"B~0 -> (K*(892)~0 -> K- ^pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "K+" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piminus0 -> Kminus swap =============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminus0forK')
tuple.B0.PVFitpiminus0forK.Verbose=True
tuple.B0.PVFitpiminus0forK.constrainToOriginVertex=True
tuple.B0.PVFitpiminus0forK.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiminus0forK.Substitutions={
"B0 -> (K*(892)0 -> K+ pi-) (eta -> ^pi- pi+ (pi0 -> gamma gamma))" : "K-" ,
"B~0 -> (K*(892)~0 -> K- pi+) (eta -> ^pi+ pi- (pi0 -> gamma gamma))" : "K+" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piplus -> Kminus swap ============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiplusforK')
tuple.B0.PVFitpiplusforK.Verbose=True
tuple.B0.PVFitpiplusforK.constrainToOriginVertex=True
tuple.B0.PVFitpiplusforK.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiplusforK.Substitutions={
"B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- ^pi+ (pi0 -> gamma gamma))" : "K+" ,
"B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ ^pi- (pi0 -> gamma gamma))" : "K-" ,
}
#proton swaps
#==============================REFIT WITH ETA AND PV K for proton CONTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitKforproton')
tuple.B0.PVFitKforproton.Verbose=True
tuple.B0.PVFitKforproton.constrainToOriginVertex=True
tuple.B0.PVFitKforproton.daughtersToConstrain = ["eta"]
tuple.B0.PVFitKforproton.Substitutions={
"B0 -> (K*(892)0 -> ^K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "p+" ,
"B~0 -> (K*(892)~0 -> ^K- pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "p~-" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piminus ->K swap ==============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminusforproton')
tuple.B0.PVFitpiminusforproton.Verbose=True
tuple.B0.PVFitpiminusforproton.constrainToOriginVertex=True
tuple.B0.PVFitpiminusforproton.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiminusforproton.Substitutions={
"B0 -> (K*(892)0 -> K+ ^pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "p~-" ,
"B~0 -> (K*(892)~0 -> K- ^pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "p+" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piminus0 -> Kminus swap =============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminus0forproton')
tuple.B0.PVFitpiminus0forproton.Verbose=True
tuple.B0.PVFitpiminus0forproton.constrainToOriginVertex=True
tuple.B0.PVFitpiminus0forproton.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiminus0forproton.Substitutions={
"B0 -> (K*(892)0 -> K+ pi-) (eta -> ^pi- pi+ (pi0 -> gamma gamma))" : "p~-" ,
"B~0 -> (K*(892)~0 -> K- pi+) (eta -> ^pi+ pi- (pi0 -> gamma gamma))" : "p+" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piplus -> Kminus swap ============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiplusforproton')
tuple.B0.PVFitpiplusforproton.Verbose=True
tuple.B0.PVFitpiplusforproton.constrainToOriginVertex=True
tuple.B0.PVFitpiplusforproton.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiplusforproton.Substitutions={
"B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- ^pi+ (pi0 -> gamma gamma))" : "p+" ,
"B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ ^pi- (pi0 -> gamma gamma))" : "p~-" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piplus -> Kminus swap ============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitgammaforpi0')
tuple.B0.PVFitgammaforpi0.Verbose=True
tuple.B0.PVFitgammaforpi0.constrainToOriginVertex=True
tuple.B0.PVFitgammaforpi0.daughtersToConstrain = ["eta"]
tuple.B0.PVFitgammaforpi0.Substitutions={
"B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> ^gamma gamma))" : "pi0" ,
"B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ pi- (pi0 -> ^gamma gamma))" : "pi0" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piplus -> Kminus swap ============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitgamma0forpi0')
tuple.B0.PVFitgamma0forpi0.Verbose=True
tuple.B0.PVFitgamma0forpi0.constrainToOriginVertex=True
tuple.B0.PVFitgamma0forpi0.daughtersToConstrain = ["eta"]
tuple.B0.PVFitgamma0forpi0.Substitutions={
"B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> gamma ^gamma))" : "pi0" ,
"B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ pi- (pi0 -> gamma ^gamma))" : "pi0" ,
}
#==============================REFIT WITH ONLY K* CONSTRAINED===================================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/KStarOnly')
tuple.B0.KStarOnly.Verbose=True
tuple.B0.KStarOnly.constrainToOriginVertex=True
tuple.B0.KStarOnly.daughtersToConstrain = ["K*(892)0"]
#==============================REFIT WITH ONLY PV CONTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVOnly')
tuple.B0.PVOnly.Verbose=True
tuple.B0.PVOnly.constrainToOriginVertex=True
#========================================REFIT WITH JUST DAUGHTERS CONSTRAINED================================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/Conskstar_eta')
tuple.B0.Conskstar_eta.Verbose=True
tuple.B0.Conskstar_eta.constrainToOriginVertex=False
tuple.B0.Conskstar_eta.daughtersToConstrain = ["K*(892)0","eta"]
#========================================REFIT WITH NOTHING CONSTRAINED========================================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/Consnothing')
tuple.B0.Consnothing.Verbose=True
tuple.B0.Consnothing.constrainToOriginVertex=False
########################################=LOKI FUNCOR VARIABLES===============================================
tuple.addBranches({'Kstar' : '[B0 -> ^(K*(892)0 -> K+ pi-) (eta-> pi- pi+ (pi0 -> gamma gamma))]CC',
'eta' : '[B0 -> (K*(892)0 -> K+ pi-) ^(eta-> pi- pi+ (pi0 -> gamma gamma))]CC',
'Kplus' : '[B0 -> (K*(892)0 -> ^K+ pi-) (eta-> pi- pi+ (pi0 -> gamma gamma))]CC',
'piminus' : '[B0 -> (K*(892)0 -> K+ ^pi-) (eta-> pi- pi+ (pi0 -> gamma gamma))]CC',
'piplus' : '[B0 -> (K*(892)0 -> K+ pi-) (eta-> pi- ^pi+ (pi0 -> gamma gamma))]CC',
'piminus0' : '[B0 -> (K*(892)0 -> K+ pi-) (eta-> ^pi- pi+ (pi0 -> gamma gamma))]CC',
'gamma' : '[B0 -> (K*(892)0 -> K+ pi-) (eta-> pi- pi+ (pi0 -> ^gamma gamma))]CC',
'gamma0' : '[B0 -> (K*(892)0 -> K+ pi-) (eta-> pi- pi+ (pi0 -> gamma ^gamma))]CC',
'pi0' : '[B0 -> (K*(892)0 -> K+ pi-) (eta-> pi- pi+ ^(pi0 -> gamma gamma))]CC'})
from LoKiPhys.decorators import MAXTREE,MINTREE,ISBASIC,HASTRACK,SUMTREE,PT,ABSID,NINTREE,ETA,TRPCHI2
B0_hybrid=tuple.B0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_B0')
Kstar_hybrid=tuple.Kstar.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_Kstar')
eta_hybrid=tuple.eta.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_eta')
Kplus_hybrid=tuple.Kplus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_Kplus')
piminus_hybrid=tuple.piminus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piminus')
piplus_hybrid=tuple.piplus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piplus')
piminus0_hybrid=tuple.piminus0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piminus0')
gamma_hybrid=tuple.gamma.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_gamma')
gamma0_hybrid=tuple.gamma0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_gamma0')
pi0_hybrid=tuple.pi0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_pi0')
preamble=[
'TRACK_MAX_PT= MAXTREE(PT, ISBASIC & HASTRACK, -666)',
'TRACK_MIN_PT= MINTREE(PT, ISBASIC & HASTRACK)',
'SUMTRACK_PT= SUMTREE((211 == ABSID)|(-211 == ABSID)|(321 == ABSID)|(-321 == ABSID),PT)',
'SUM_PCHI2= SUMTREE((211 == ABSID)|(-211 == ABSID)|(321 == ABSID)|(-321 == ABSID),TRPCHI2)'
]
B0_hybrid.Preambulo=preamble
B0_hybrid.Variables = {
'max_pt_track' : 'TRACK_MAX_PT',
'min_pt_track' : 'TRACK_MIN_PT',
'sum_track_pt' : 'SUMTRACK_PT',
'sum_pchi2' : 'SUM_PCHI2',
'n_highpt_tracks' : 'NINTREE(ISBASIC & HASTRACK & (PT>250.0*MeV))',
'eta':'ETA'
}
Kstar_hybrid.Variables ={
'branch_mass':'MM',
'eta': 'ETA'
}
eta_hybrid.Variables ={
'branch_mass':'MM',
'eta': 'ETA'
}
Kplus_hybrid.Variables ={
'eta': 'ETA'
}
piminus_hybrid.Variables ={
'eta': 'ETA'
}
piplus_hybrid.Variables ={
'eta': 'ETA'
}
piminus0_hybrid.Variables ={
'eta': 'ETA'
}
gamma_hybrid.Variables = {
'eta':'ETA'
}
gamma0_hybrid.Variables= {
'eta':'ETA'
}
pi0_hybrid.Variables={
'eta':'ETA'
}
#==============================MassSubs=====================================
from Configurables import TupleToolSubMass
tuple.B0.addTool(TupleToolSubMass)
tuple.B0.ToolList += ["TupleToolSubMass"]
tuple.B0.TupleToolSubMass.Substitution += ["pi- => K-"]
tuple.B0.TupleToolSubMass.Substitution += ["K+ => pi+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi+ => K+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi+ => p+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi- => p~-"]
tuple.B0.TupleToolSubMass.Substitution += ["K+ => p+"]
tuple.B0.TupleToolSubMass.Substitution += ["gamma => pi0"]
tuple.B0.TupleToolSubMass.Substitution += ["gamma => e-"]
tuple.B0.TupleToolSubMass.Substitution += ["gamma => e+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi- => mu-"]
tuple.B0.TupleToolSubMass.Substitution += ["pi+ => mu+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi0 => eta"]
tuple.B0.TupleToolSubMass.DoubleSubstitution += ["K+/pi- => pi+/K-"]
tuple.B0.TupleToolSubMass.DoubleSubstitution += ["pi+/pi- => pi-/pi+"]
tuple.B0.TupleToolSubMass.DoubleSubstitution += ["pi+/pi- => mu+/mu-"]
#==============================TRIGGER DECISIONS==============================
from Configurables import TupleToolTISTOS
tistos = tuple.B0.addTupleTool(TupleToolTISTOS, name="TupleToolTISTOS")
tistos.VerboseL0=True
tistos.VerboseHlt1=True
tistos.VerboseHlt2=True
tistos.TriggerList=["L0PhotonDecision",
"L0ElectronDecision",
"Hlt1TrackPhotonDecision",
"Hlt1TrackAllL0Decision",
"Hlt1TrackMuonDecision",
"Hlt1TrackForwardPassThroughDecision",
"Hlt1TrackForwardPassThroughLooseDecision",
"Hlt1SingleElectronNoIPDecision",
"L0HadronDecision",
"L0LocalPi0Decision",
"L0GlobalPi0Decision",
"L0MuonDecision",
"Hlt2Topo2BodyBBDTDecision",
"Hlt2Topo3BodyBBDTDecision",
"Hlt2Topo4BodyBBDTDecision",
"Hlt2RadiativeTopoTrackTOSDecision",
"Hlt2RadiativeTopoPhotonL0Decision",
"Hlt2TopoRad2BodyBBDTDecision",
"Hlt2TopoRad2plus1BodyBBDTDecision",
"Hlt2Topo2BodySimpleDecision",
"Hlt2Topo3BodySimpleDecision",
"Hlt2Topo4BodySimpleDecision"]
from Configurables import TupleToolL0Calo
tuple.Kplus.addTool(TupleToolL0Calo,name="KplusL0Calo")
tuple.Kplus.ToolList += ["TupleToolL0Calo/KplusL0Calo"]
tuple.Kplus.KplusL0Calo.WhichCalo="HCAL"
tuple.piplus.addTool(TupleToolL0Calo,name="piplusL0Calo")
tuple.piplus.ToolList += ["TupleToolL0Calo/piplusL0Calo"]
tuple.piplus.piplusL0Calo.WhichCalo="HCAL"
tuple.piminus.addTool(TupleToolL0Calo,name="piminusL0Calo")
tuple.piminus.ToolList += ["TupleToolL0Calo/piminusL0Calo"]
tuple.piminus.piminusL0Calo.WhichCalo="HCAL"
tuple.piminus0.addTool(TupleToolL0Calo,name="piminus0L0Calo")
tuple.piminus0.ToolList += ["TupleToolL0Calo/piminus0L0Calo"]
tuple.piminus0.piminus0L0Calo.WhichCalo="HCAL"
DaVinci().InputType='MDST'
DaVinci().RootInTES='/Event/{0}'.format(stream)
DaVinci().UserAlgorithms+=[tuple]
DaVinci().TupleFile="Output.root"
DaVinci().HistogramFile="histos.root"
DaVinci().DataType='2012'
DaVinci().EvtMax=-1
DaVinci().Lumi=True
DaVinci().PrintFreq=1000
DaVinci().MoniSequence=[tuple]
DaVinci().Simulation=False
#from GaudiConf import IOHelper
# Use the local input data
#IOHelper().inputFiles([
# './00041836_00000057_1.bhadron.mdst'
# ], clear=True)
|
mit
| 3,344,568,689,875,149,000 | 42.789773 | 111 | 0.627222 | false |
torchbox/colourlens
|
colourlens/views.py
|
1
|
5200
|
import urllib
from django.db.models import Avg, Sum, Count
from django import forms
from django.forms.widgets import Input
from django.shortcuts import render
from colourlens.models import Artwork, Colour
PROM_ATTRS = {'min': '0', 'max': '100', 'step': '5'}
DIST_ATTRS = {'min': '0', 'max': '50', 'step': '1'}
class RangeInput(Input):
input_type = "range"
class ColourForm(forms.Form):
def __init__(self, *args, **kwargs):
"""
Add classes to denote type of input
"""
super(ColourForm, self).__init__(*args, **kwargs)
for k, v in self.fields.iteritems():
if v.widget.__class__ == forms.CheckboxInput:
v.widget.attrs['class'] = 'colourbox'
elif v.widget.__class__ == RangeInput:
v.widget.attrs['class'] = 'slider'
black = forms.BooleanField(required=False)
grey = forms.BooleanField(required=False)
silver = forms.BooleanField(required=False)
white = forms.BooleanField(required=False)
red = forms.BooleanField(required=False)
maroon = forms.BooleanField(required=False)
brown = forms.BooleanField(required=False)
orange = forms.BooleanField(required=False)
yellow = forms.BooleanField(required=False)
lime = forms.BooleanField(required=False)
green = forms.BooleanField(required=False)
olive = forms.BooleanField(required=False)
cyan = forms.BooleanField(required=False)
teal = forms.BooleanField(required=False)
blue = forms.BooleanField(required=False)
navy = forms.BooleanField(required=False)
magenta = forms.BooleanField(required=False)
purple = forms.BooleanField(required=False)
prominence = forms.IntegerField(label="Increase colour area",
widget=RangeInput(attrs=PROM_ATTRS))
distance = forms.IntegerField(label="Broaden palette",
widget=RangeInput(attrs=DIST_ATTRS))
submitted = forms.CharField(widget=forms.HiddenInput())
class ColourChoice(object):
def __init__(self, selected_colours, colour):
self.selected_colours = selected_colours
self.colour = colour
@property
def artwork_count(self):
return self.colour.artwork__count
@property
def hex_value(self):
return self.colour.hex_value
@property
def query_string(self):
# Get new set of selected colours
if self.colour.hex_value in self.selected_colours:
new_selected_colours = self.selected_colours.difference(set([self.colour.hex_value]))
else:
new_selected_colours = self.selected_colours.union(set([self.colour.hex_value]))
if new_selected_colours:
return urllib.urlencode([
('colour', colour)
for colour in new_selected_colours
])
def index(request, institution=False):
"""
Search and browse colours
"""
DISTANCE = 20
artworks = Artwork.objects.select_related().all()
colours = Colour.objects.all()
req_colours = request.GET.getlist('colour', [])
startyear = request.GET.get('startyear', None)
endyear = request.GET.get('endyear', None)
colour_filters = {}
if startyear:
artworks = artworks.filter(year__gte=startyear)
colour_filters['artwork__year__gte'] = startyear
if endyear:
artworks = artworks.filter(year__lte=endyear)
colour_filters['artwork__year__lte'] = endyear
for hex_value in req_colours:
artworks = artworks.filter(
colours__hex_value=hex_value,
colourdistance__distance__lte=DISTANCE,
)
if institution:
artworks = artworks.filter(institution=institution)
colour_filters['artwork__institution'] = institution
artworks = artworks.annotate(
ave_distance=Avg('colourdistance__distance'),
ave_presence=Avg('colourdistance__presence'),
tot_presence=Sum('colourdistance__presence'),
tot_prominence=Sum('colourdistance__prominence'),
ave_prominence=Avg('colourdistance__prominence'),
)
artworks = artworks.order_by('-tot_presence').distinct()
artworks_count = artworks.count()
if req_colours:
colour_filters['artwork__id__in'] = [a.id for a in artworks[:990]]
colour_filters['colourdistance__distance__lte'] = DISTANCE
colours = colours.filter(**colour_filters)
colours = colours.annotate(Count('artwork', distinct=True)).order_by('hue')
colour_count = colours.count()
    if colour_count > 0:
        colour_width = 99.4 / colour_count
        total_palette = sum(c.artwork__count for c in colours)
    else:
        colour_width = 0
        total_palette = 0
institutions = Artwork.objects.all().values('institution').distinct()
return render(request, 'colour.html', {
'artworks': artworks[:40],
'colours': [ColourChoice(set(req_colours), colour) for colour in colours],
'colour_count': colour_count,
'colour_width': colour_width,
'total_palette': total_palette,
'found': artworks_count,
'institution': institution,
'institutions': institutions,
'req_colours': req_colours,
})
|
mit
| 9,175,406,315,224,728,000 | 33.210526 | 97 | 0.641731 | false |
bovenyan/hsa
|
hsa-python/net_plumbing/net_plumber.py
|
1
|
22912
|
'''
Created on Jun 25, 2012
@author: Peyman Kazemian
'''
from headerspace.hs import headerspace
from net_plumbing.net_plumber_nodes import RuleNode, SourceNode, SinkNode,\
ProbeNode,SourceReachabilityProbeNode
from net_plumbing.net_plumber_process import *
from multiprocessing import cpu_count, Event, JoinableQueue as jQueue
from utils.wildcard import wildcard_intersect
NUM_THREADS = 1  # 2*cpu_count() - 1
class NetPlumber(object):
'''
    This class maintains a live, up-to-date view of the network and the interaction
between rules installed in the network.
'''
def __init__(self, header_length):
'''
Constructor
'''
# length: L parameter in HSA
self.length = header_length
# topology: a dictionary from sourePort (string) to a list of destination
# ports (to allow one to many links)
self.topology = {}
self.inv_topology = {}
# Information about rules and tables in this network
# * tables: a dictionary mapping the table names to rules in the table
# in an ordered list
# * node_by_id: a dictionary mapping node_id to rules
self.tables = {}
self.sources = {}
self.probes = {}
self.node_by_id = {}
self.last_id_used_for_table = {}
# inport_to_node: input-port to node dictionary.
self.inport_to_node = {}
# outport_to_node: output-port to node dictionary map.
self.outport_to_node = {}
# global collector of all source probe node results
self.source_probes_result = {}
def get_source_probe_state(self,name=None):
if (name == None):
return self.source_probes_result
elif name in self.source_probes_result.keys():
return self.source_probes_result[name]
else:
return None
def add_link(self, sPort, dPort):
'''
adds a link to the topology of network from sPort to dPort.
'''
if "%d" % sPort in self.topology.keys():
self.topology["%d" % sPort].append(dPort)
else:
self.topology["%d" % sPort] = [dPort]
if "%d" % dPort in self.inv_topology.keys():
self.inv_topology["%d" % dPort].append(sPort)
else:
self.inv_topology["%d" % dPort] = [sPort]
self.__update_plumber_for_new_link(sPort, dPort)
def remove_link(self, sPort, dPort):
'''
removes the link between sPort and dPort (unidirectional), if exist.
'''
if sPort in self.topology.keys():
if dPort in self.topology["%d" % sPort]:
self.topology["%d" % sPort].remove(dPort)
if dPort in self.inv_topology.keys():
if sPort in self.inv_topology["%d" % dPort]:
self.inv_topology["%d" % dPort].remove(sPort)
self.__update_plumber_for_removed_link(sPort, dPort)
def get_dst_end_of_link(self, port):
'''
returns the list of port numbers at the dst end of link
'''
if "%d" % port not in self.topology.keys():
return []
else:
return self.topology["%d" % port]
def get_src_end_of_link(self, port):
'''
returns the list of port numbers at the src end of link
'''
if "%d" % port not in self.inv_topology.keys():
return []
else:
return self.inv_topology["%d" % port]
def get_topology(self):
'''
        For debugging.
returns a list of (sPort,[dPort]) pairs.
'''
results = []
for sPort in self.topology.keys():
results.append((int(sPort), self.topology[sPort]))
return results
def add_source(self, name, hs, ports):
'''
adds a source node named @name generating flow of @hs at @ports.
@hs: headerspace() object.
@ports: list of port numbers
'''
if name in self.node_by_id.keys():
return False
s = SourceNode(name, hs, ports, hs.length)
# set up outport_to_node pointers
for port in ports:
if str(port) not in self.outport_to_node.keys():
self.outport_to_node[str(port)] = []
self.outport_to_node[str(port)].append(s)
self.node_by_id[name] = s
self.sources[name] = s
#set up pipeline dependencies
self.__set_pipeline_dependencies(s)
#route source flow
self.__route_source_flow(s)
# TODO: route sink flow
def remove_source(self,name):
'''
remove source node named @name from the network
'''
if name not in self.sources.keys():
return False
s = self.node_by_id[name]
#clear pipelines
self.__remove_next_in_pipeline(s)
#remove source flows from network
self.__remove_source_flows_through_node(s,s.node_id)
#remove the source from the dics
for port in s.output_ports:
self.outport_to_node[str(port)].remove(s)
self.node_by_id.pop(name)
self.sources.pop(name)
def add_rule(self, table, index, in_ports, out_ports, match, mask, rewrite):
'''
@table: table name
@index: position in table
@in_ports: list of input ports to match on.
@out_ports: list of output ports to send to
@match: matching headerspace for this rule.
@mask: mask pattern (or None). should have 0 on all bits to be rewritten.
@rewrite: rewrite pattern. should rewrite only un-masked places.
'''
r = RuleNode().set_rule(table, in_ports, out_ports, \
match, mask, rewrite)
# If this is first time a rule added to this table, initialize it.
if table not in self.tables.keys():
self.tables[table] = []
self.last_id_used_for_table[table] = 0
# Update inport and outport lookup maps
for port in in_ports:
if str(port) not in self.inport_to_node.keys():
self.inport_to_node[str(port)] = []
self.inport_to_node[str(port)].append(r)
for port in out_ports:
if str(port) not in self.outport_to_node.keys():
self.outport_to_node[str(port)] = []
self.outport_to_node[str(port)].append(r)
# generate unique id for this rule
new_id = self.last_id_used_for_table[table] + 1
rule_id = "%s_%d" % (table, new_id)
self.last_id_used_for_table[table] = new_id
r._set_node_id(rule_id)
# add this rule to the correct table in tables map
if index < 0 or len(self.tables[table]) <= index:
self.tables[table].append(r)
else:
self.tables[table].insert(index, r)
# add this rule to node_by_id map
self.node_by_id[rule_id] = r
# setup table dependency
self.__set_influences(r)
# setup pipeline dependency
self.__set_pipeline_dependencies(r)
# route source flow through this node
self.__route_source_flow(r)
#TODO route sink flow
return rule_id
def remove_rule(self, rule_id):
'''
        removes the rule with id=@rule_id from the network.
'''
if rule_id not in self.node_by_id.keys():
return False
#clear influence_on and affected_by
rule = self.node_by_id[rule_id]
for r in rule.influence_on:
for i in reversed(range(len(r.affected_by))):
a = r.affected_by[i]
if a[0] == rule:
r.affected_by.remove(a)
for a in rule.affected_by:
a[0].influence_on.remove(rule)
#clear pipelines
self.__remove_next_in_pipeline(rule)
self.__remove_previous_pipeline(rule)
#remove source flow
self.__remove_source_flows_through_node(rule,rule.node_id)
#TODO: remove sink flow
#remove the rule from the tables, node_by_id and inport/outport_to_node
for port in rule.input_ports:
self.inport_to_node[str(port)].remove(rule)
for port in rule.output_ports:
self.outport_to_node[str(port)].remove(rule)
self.tables[rule.table].remove(rule)
self.node_by_id.pop(rule_id)
return True
def add_source_reachability_probe(self,probe_name,from_ports,to_ports,wc):
if probe_name in self.node_by_id.keys():
return False
self.source_probes_result[probe_name] = []
p = SourceReachabilityProbeNode(probe_name,self.source_probes_result,\
to_ports,from_ports,wc)
for port in to_ports:
if str(port) not in self.outport_to_node.keys():
self.outport_to_node[str(port)] = []
self.outport_to_node[str(port)].append(p)
self.probes[probe_name] = p
self.node_by_id[probe_name] = p
#set up pipeline dependencies
self.__set_pipeline_dependencies(p)
#route source flow
self.__route_source_flow(p)
def remove_source_reachability_probe(self,probe_id):
if probe_id not in self.probes.keys():
return False
p = self.node_by_id[probe_id]
#clear pipeline
self.__remove_previous_pipeline(p)
# clear port and id look-ups
for port in p.output_ports:
self.outport_to_node[str(port)].remove(p)
self.source_probes_result.pop(probe_id)
self.probes.pop(probe_id)
self.node_by_id.pop(probe_id)
def print_pluming_network(self, print_flow=False):
'''
        For debugging purposes
'''
for table in self.tables.keys():
print "*" * 20
print "table: %s" % table
print "*" * 20
for rule in self.tables[table]:
print "Rule: %s (match: %s, in_ports = %s, mask: %s, rewrite: %s, out_ports: %s)"%\
(rule.node_id, rule.match, rule.input_ports, \
rule.mask, rule.rewrite, rule.output_ports)
print "Pipelined To:"
for (r, wc, f_port,t_port) in rule.next_in_pipeline:
print "\t%s (%s,%d --> %d)" % (r.node_id, wc, f_port,t_port)
print "Pipelined From:"
for (r, wc, f_port, t_port) in rule.previous_in_pipeline:
print "\t%s (%s,%d --> %d)" % (r.node_id, wc, f_port, t_port)
print "Affected By:"
for (r, wc, ports) in rule.affected_by:
print "\t%s (%s,%s)" % (r.node_id, wc, ports)
if (print_flow):
print "Source Flow:"
for s_flow in rule.source_flow:
print " From port %d:"%s_flow[1]
lines = str(s_flow[0]).split("\n")
for line in lines:
print "\t",line
print "==" * 10
for sname in self.sources.keys():
s = self.sources[sname]
print "*" * 20
print "source: %s" % s.source_name
print "*" * 20
print "Pipelined To:"
for (r,wc,f_port,t_port) in s.next_in_pipeline:
print "\t%s (%s,%d --> %d)" % (r.node_id, wc, f_port, t_port)
if (print_flow):
print "Source Flow:"
for s_flow in s.source_flow:
print "\t%s"%(s_flow[0])
for pname in self.probes:
p = self.probes[pname]
print "*" * 20
print "probe: %s" % p.node_id
print "*" * 20
print "Pipelined From:"
for (r, wc, f_port, t_port) in p.previous_in_pipeline:
print "\t%s (%s,%d --> %d)" % (r.node_id, wc, f_port, t_port)
if (print_flow):
print "Source Flow:"
for s_flow in p.source_flow:
print " From port %d:"%s_flow[1]
lines = str(s_flow[0]).split("\n")
for line in lines:
print "\t",line
print "Violations:"
for s_flow in p.probes_results[p.node_id]:
path = ""
for applied_rule in s_flow[0].applied_rules:
path += "(%s, @p=%s)"%(applied_rule[1],str(applied_rule[2])) + "-->"
path += "(Probe, @p=%d)"%(s_flow[1])
print " Path: %s"%path
print " Header at Destination:"
lines = str(s_flow[0]).split("\n")
for line in lines:
print "\t",line
def __get_rules_by_input_port(self, port):
if "%d" % port in self.inport_to_node.keys():
return self.inport_to_node["%d" % port]
else:
return []
def __get_rules_by_output_port(self, port):
if "%d" % port in self.outport_to_node.keys():
return self.outport_to_node["%d" % port]
else:
return []
def __set_influences(self,rule):
higher_priority = True
table = rule.table
for r in self.tables[table]:
if rule.node_id == r.node_id:
higher_priority = False
else:
common_ports = [val for val in r.input_ports
if val in rule.input_ports]
if len(common_ports) == 0:
continue
common_headerspace = wildcard_intersect(rule.match, r.match)
if len(common_headerspace) == 0:
continue
if (higher_priority):
r.influenced_on_rule(rule)
rule.affected_by_rule(r, common_headerspace, common_ports)
else:
rule.influenced_on_rule(r)
r.affected_by_rule(rule, common_headerspace, common_ports)
def __set_pipeline_dependencies(self, node):
for port in node.output_ports:
next_ports = self.get_dst_end_of_link(port)
for next_port in next_ports:
potential_next_rules = self.__get_rules_by_input_port(next_port)
for r in potential_next_rules:
survived_hs = wildcard_intersect(r.match,node.inverse_match)
if not survived_hs.is_empty():
node.set_next_in_pipeline(r,survived_hs,port,next_port)
r.set_previous_in_pipeline(node,survived_hs,next_port,port)
for port in node.input_ports:
previous_ports = self.get_src_end_of_link(port)
for previous_port in previous_ports:
potential_back_rules = self.__get_rules_by_output_port(previous_port)
for r in potential_back_rules:
survived_hs = wildcard_intersect(node.match,r.inverse_match)
if not survived_hs.is_empty():
r.set_next_in_pipeline(node,survived_hs,previous_port,port)
node.set_previous_in_pipeline(r,survived_hs,port,previous_port)
def __update_plumber_for_new_link(self,sPort,dPort):
source_routing_tasks = []
potential_src_rules = self.__get_rules_by_output_port(sPort)
potential_dest_rules = self.__get_rules_by_input_port(dPort)
for s_rule in potential_src_rules:
for d_rule in potential_dest_rules:
survived_hs = wildcard_intersect(d_rule.match,s_rule.inverse_match)
if not survived_hs.is_empty():
s_rule.set_next_in_pipeline(d_rule,survived_hs,sPort,dPort)
d_rule.set_previous_in_pipeline(s_rule,survived_hs,dPort,sPort)
fwd_pipeline = s_rule.next_in_pipeline[-1]
for src_flow in s_rule.source_flow:
source_routing_tasks.append((fwd_pipeline,src_flow))
self.__perform_source_routing_tasks(source_routing_tasks)
def __route_source_flow(self, node):
tasks = []
if node.__class__ == SourceNode:
for pipeline in node.next_in_pipeline:
tasks.append((pipeline,node.source_flow[0]))
elif node.__class__ == RuleNode or issubclass(node.__class__,ProbeNode):
for (r,h,p1,p2) in node.previous_in_pipeline:
for pipeline in r.pipelines_to(node):
for s_flow in r.source_flow:
tasks.append((pipeline,s_flow))
self.__perform_source_routing_tasks(tasks)
def __perform_source_routing_tasks(self, tasks):
while len(tasks) > 0:
(pipeline,s_flow) = tasks.pop()
if (pipeline[2] == s_flow[1]):
continue
else:
f = s_flow[0].copy_intersect(pipeline[1])
if f.count() > 0:
new_source_flow = pipeline[0].process_source_flow(f,pipeline[3])
if new_source_flow == None:
continue
for next_pipeline in pipeline[0].next_in_pipeline:
tasks.append((next_pipeline,new_source_flow))
def __remove_source_flows_through_node(self,node,node_id):
seenHS = node.remove_source_flow_through_node(node_id)
if (seenHS):
for pipeline in node.next_in_pipeline:
self.__remove_source_flows_through_node(pipeline[0], node_id)
def __remove_source_flow_through_port(self,node,port):
seenHS = node.remove_source_flow_through_port(port)
if (seenHS):
for pipeline in node.next_in_pipeline:
self.__remove_source_flow_through_port(pipeline[0], port)
def __update_plumber_for_removed_link(self,sPort,dPort):
potential_src_rules = self.__get_rules_by_output_port(sPort)
potential_dest_rules = self.__get_rules_by_input_port(dPort)
for s_rule in potential_src_rules:
for i in reversed(range(len(s_rule.next_in_pipeline))):
fwd_pipeline = s_rule.next_in_pipeline[i]
if fwd_pipeline[2] == sPort and fwd_pipeline[3] == dPort:
self.__remove_source_flow_through_port(fwd_pipeline[0], dPort)
s_rule.next_in_pipeline.remove(fwd_pipeline)
for d_rule in potential_dest_rules:
for i in reversed(range(len(d_rule.previous_in_pipeline))):
rev_pipeline = d_rule.previous_in_pipeline[i]
if rev_pipeline[2] == dPort and rev_pipeline[3] == sPort:
#TODO: remove sink flow
d_rule.previous_in_pipeline.remove(rev_pipeline)
def __remove_previous_pipeline(self,node):
for pp in node.previous_in_pipeline:
prev_node_next_in_pipeline = pp[0].next_in_pipeline
for i in reversed(range(len(prev_node_next_in_pipeline))):
np = prev_node_next_in_pipeline[i]
if np[0] == node:
prev_node_next_in_pipeline.remove(np)
def __remove_next_in_pipeline(self,node):
for np in node.next_in_pipeline:
next_node_previous_in_pipeline = np[0].previous_in_pipeline
for i in reversed(range(len(next_node_previous_in_pipeline))):
pp = next_node_previous_in_pipeline[i]
if pp[0] == node:
next_node_previous_in_pipeline.remove(pp)
'''
Experimental
'''
def __set_influences_mp(self, rule):
'''
adds influence of all higher ranked rules to @rule.
add influence of @rule to all lower ranked rules.
@rule is newly added rule
'''
#setting up threads
dataQ = jQueue()
resultQ = jQueue()
sigterm = Event()
processess = []
for i in range(NUM_THREADS):
p = set_influence_process(rule,dataQ,resultQ,sigterm)
processess.append(p)
p.start()
table = rule.table
higherPriority = True
for r in self.tables[table]:
if rule.node_id == r.node_id:
higherPriority = False
else:
dataQ.put((r,higherPriority))
#waiting for threads to be done.
dataQ.join()
sigterm.set()
count = NUM_THREADS
while (count > 0):
next_result = resultQ.get()
if next_result == None:
count -= 1
continue
(rule_id,higher_priority,com_hs,com_ports) = next_result
r = self.node_by_id[rule_id]
if (higher_priority):
r.influenced_on_rule(rule)
rule.affected_by_rule(r, com_hs, com_ports)
else:
rule.influenced_on_rule(r)
r.affected_by_rule(rule, com_hs, com_ports)
for p in processess:
p.join()
def __set_pipeline_dependencies_mp(self, rule):
'''
@rule is newly added rule
'''
#setting up threads
dataQ = jQueue()
resultQ = jQueue()
sigterm = Event()
processess = []
for i in range(NUM_THREADS):
p = set_pipeline_process(rule,dataQ,resultQ,sigterm)
processess.append(p)
p.start()
for port in rule.output_ports:
next_ports = self.get_dst_end_of_link(port)
for next_port in next_ports:
potential_next_rules = self.__get_rules_by_input_port(next_port)
for r in potential_next_rules:
dataQ.put((r,port,next_port,False))
for port in rule.input_ports:
previous_ports = self.get_src_end_of_link(port)
for previous_port in previous_ports:
potential_back_rules = self.__get_rules_by_output_port(previous_port)
for r in potential_back_rules:
dataQ.put((r,port,previous_port,True))
dataQ.join()
sigterm.set()
count = NUM_THREADS
while (count > 0):
next_result = resultQ.get()
if next_result == None:
count -= 1
continue
(survived_hs,node_id,rule_port,r_port,back) = next_result
r = self.node_by_id[node_id]
if (back):
r.set_next_in_pipeline(rule,survived_hs,r_port,rule_port)
rule.set_previous_in_pipeline(r,survived_hs,rule_port,r_port)
else:
rule.set_next_in_pipeline(r,survived_hs,rule_port,r_port)
r.set_previous_in_pipeline(rule,survived_hs,r_port,rule_port)
for p in processess:
p.join()
def __route_source_flow_mp(self, rule):
'''
Note: node should already have all the pipeline and influence states
set up before calling this method.
@rule: the rule for which we want to route flow
'''
# taskQ: a queue of tasks.
# each task is (prev_rule_pipeline_to_rule, source_flow).
# source_flow should be routed from prev_rule to rule
print "route source flow"
taskQ = jQueue()
resultQ = jQueue()
# create thread
processess = []
sigterm = Event()
for i in range(NUM_THREADS):
p = route_source_flow_process(taskQ,resultQ,sigterm)
processess.append(p)
p.start()
if rule.__class__ == SourceNode:
for pipeline in rule.next_in_pipeline:
taskQ.put((pipeline,rule.source_flow[0]))
elif rule.__class__ == RuleNode:
for (r,h,p1,p2) in rule.previous_in_pipeline:
for pipeline in r.pipelines_to(rule):
for s_flow in r.source_flow:
taskQ.put((pipeline,s_flow))
taskQ.join()
sigterm.set()
count = NUM_THREADS
while (count > 0):
next_result = resultQ.get()
if next_result == None:
count -= 1
continue
(node_id,new_source_flow) = next_result
r = self.node_by_id[node_id]
r.source_flow.append(new_source_flow)
for p in processess:
p.join()
print "end: route source flow"
|
gpl-2.0
| 2,319,341,807,518,203,000 | 35.310618 | 93 | 0.573019 | false |
ThunderGemios10/The-Super-Duper-Script-Editor
|
script_file.py
|
1
|
4412
|
################################################################################
### Copyright © 2012-2013 BlackDragonHunt
###
### This file is part of the Super Duper Script Editor.
###
### The Super Duper Script Editor is free software: you can redistribute it
### and/or modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation, either version 3 of the License,
### or (at your option) any later version.
###
### The Super Duper Script Editor is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with the Super Duper Script Editor.
### If not, see <http://www.gnu.org/licenses/>.
################################################################################
import os
# import mecab_parser
import re
from text_files import load_text, save_text
from scene_info import SceneInfo
# Some regexes for our enjoyment.
LINE_BREAKS = re.compile(ur"\r\n?", re.UNICODE | re.DOTALL)
TEXT_PARSER = re.compile(ur"([^\u0000\u0001]+)[\u0000\u0001]+[\n\r]*([^\[]+)?[\n\r]*(\[(.+)\])?", re.UNICODE | re.DOTALL)
TAG_KILLER = re.compile(ur"\<CLT\>|\<CLT (?P<CLT_INDEX>\d+)\>|<DIG.*?>", re.UNICODE | re.DOTALL)
class ScriptFile():
def __init__(self, filename = None, scene_info = None):
self.translated = ""
self.translated_notags = ""
self.original = ""
self.original_notags = ""
self.comments = ""
self.filename = None
if not filename == None:
self.open(filename)
if scene_info == None:
# Squeeze the file ID out of the filename.
scene_info = SceneInfo(file_id = int(os.path.splitext(os.path.basename(filename))[0]))
self.scene_info = scene_info
def open(self, filename):
if not filename or not os.path.isfile(filename):
return
text = load_text(filename)
self.from_data(text)
self.filename = filename
def from_data(self, data):
self.filename = None
self.translated = ""
self.translated_notags = ""
self.original = ""
self.original_notags = ""
self.comments = ""
# Sanitize our line-breaks. The game handles \r\n in some instances,
# but not all. It always handles \n properly.
text = LINE_BREAKS.sub("\n", data)
match = TEXT_PARSER.search(text)
if match:
      # Remove any trailing linebreaks, because they would only cause trouble later.
first_part = match.group(1)
second_part = match.group(2)
third_part = match.group(4)
if not second_part:
self.original = first_part
else:
self.translated = first_part
self.original = second_part
if third_part:
self.comments = third_part
self.original_notags = TAG_KILLER.sub("", self.original)
self.translated_notags = TAG_KILLER.sub("", self.translated)
##############################################################################
### @fn pack()
### @desc Converts all the data into the script file format.
### @param for_game -- Whether to include the original, untranslated data.
### True = exclude untranslated, since we don't need it.
##############################################################################
def pack(self, for_game = False):
if self.translated == "":
output = u"\ufeff" + self.original + u"\u0000"
if self.comments != "" and not for_game:
output += "\n[" + self.comments + "]"
else:
output = u"\ufeff" + self.translated + u"\u0000"
if not for_game:
output += "\n" + self.original
if self.comments != "":
# We want a newline, but not a thousand.
if not self.original[-1] == '\n':
output += "\n"
output += "[" + self.comments + "]"
# Sanitize our line breaks, just in case.
output = LINE_BREAKS.sub("\n", output)
return output
def save(self, filename = None):
if filename == None:
if self.filename == None:
return
else:
filename = self.filename
output = self.pack(for_game = False)
save_text(output, filename)
### EOF ###
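# --- Editor's note: illustrative usage, not part of the original file. ---
# A minimal sketch of the intended workflow; the filename below is
# hypothetical (a numeric basename is assumed because ScriptFile derives a
# SceneInfo file_id from it):
#
#   script = ScriptFile("0001.txt")        # parses translated/original/comments
#   script.translated = u"Hello, world."   # edit the translation
#   script.save()                          # pack(for_game = False) + save_text()
#
# pack(for_game = True) instead omits the untranslated text and comments,
# leaving only what the game itself needs.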
|
gpl-3.0
| 2,786,544,307,136,137,000 | 30.956522 | 121 | 0.562259 | false |
codilime/cloudify-system-tests
|
cosmo_tester/framework/cfy_helper.py
|
1
|
13909
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import tempfile
import shutil
import json
import os
import logging
import sh
from path import path
from cloudify_cli.utils import (load_cloudify_working_dir_settings,
get_configuration_path,
update_wd_settings)
from cosmo_tester.framework.util import sh_bake, YamlPatcher
cfy = sh_bake(sh.cfy)
cfy_out = sh.cfy
DEFAULT_EXECUTE_TIMEOUT = 1800
INPUTS = 'inputs'
PARAMETERS = 'parameters'
class CfyHelper(object):
def __init__(self,
cfy_workdir=None,
management_ip=None,
management_user=None,
management_key=None):
self.logger = logging.getLogger('TESTENV')
self.logger.setLevel(logging.INFO)
self._cfy_workdir = cfy_workdir
self.tmpdir = False
if cfy_workdir is None:
self.tmpdir = True
self._cfy_workdir = tempfile.mkdtemp(prefix='cfy-')
self.workdir = path(self._cfy_workdir)
if management_ip is not None:
self.use(management_ip)
if management_user and management_key:
try:
self._set_management_creds(management_user, management_key)
except Exception as ex:
self.logger.warn(
'Failed to set management creds. Note that you will '
'not be able to perform ssh actions after bootstrap. '
'Reason: {0}'.format(ex))
def bootstrap(self,
blueprint_path,
inputs_file=None,
install_plugins=True,
keep_up_on_failure=False,
validate_only=False,
reset_config=False,
task_retries=5,
task_retry_interval=90,
subgraph_retries=2,
verbose=False,
debug=False):
with self.workdir:
cfy.init(reset_config=reset_config).wait()
with YamlPatcher(get_configuration_path()) as patch:
prop_path = ('local_provider_context.'
'cloudify.workflows.subgraph_retries')
patch.set_value(prop_path, subgraph_retries)
if not inputs_file:
inputs_file = self._get_inputs_in_temp_file({}, 'manager')
cfy.bootstrap(
blueprint_path=blueprint_path,
inputs=inputs_file,
install_plugins=install_plugins,
keep_up_on_failure=keep_up_on_failure,
validate_only=validate_only,
task_retries=task_retries,
task_retry_interval=task_retry_interval,
verbose=verbose,
debug=debug).wait()
def recover(self, snapshot_path, task_retries=5):
with self.workdir:
cfy.recover(force=True,
task_retries=task_retries,
snapshot_path=snapshot_path).wait()
def create_snapshot(self,
snapshot_id,
include_metrics=False,
exclude_credentials=False):
with self.workdir:
cfy.snapshots.create(
snapshot_id=snapshot_id,
include_metrics=include_metrics,
exclude_credentials=exclude_credentials).wait()
def download_snapshot(self, snapshot_id, output_path=''):
with self.workdir:
cfy.snapshots.download(
snapshot_id=snapshot_id,
output=output_path).wait()
def teardown(self,
ignore_deployments=True,
verbose=False):
with self.workdir:
cfy.teardown(
ignore_deployments=ignore_deployments,
force=True,
verbose=verbose).wait()
def uninstall(self, deployment_id, workflow_id, parameters,
allow_custom_parameters, timeout, include_logs):
parameters = self._get_parameters_in_temp_file(parameters, workflow_id)
with self.workdir:
cfy.uninstall(deployment_id=deployment_id,
workflow=workflow_id,
parameters=parameters,
allow_custom_parameters=allow_custom_parameters,
timeout=timeout,
include_logs=include_logs
).wait()
def install(
self,
blueprint_path,
blueprint_id,
deployment_id,
verbose=False,
include_logs=True,
execute_timeout=DEFAULT_EXECUTE_TIMEOUT,
inputs=None):
inputs_file = self._get_inputs_in_temp_file(inputs, deployment_id)
with self.workdir:
cfy.install(blueprint_path=blueprint_path,
blueprint_id=blueprint_id,
deployment_id=deployment_id,
inputs=inputs_file,
timeout=execute_timeout,
include_logs=include_logs,
verbose=verbose).wait()
upload_deploy_and_execute_install = install
def publish_archive(self,
blueprint_id,
archive_location,
verbose=False):
with self.workdir:
cfy.blueprints.publish_archive(
blueprint_id=blueprint_id,
archive_location=archive_location,
blueprint_filename='blueprint.yaml',
verbose=verbose).wait()
def create_deployment(self,
blueprint_id,
deployment_id,
verbose=False,
inputs=None):
with self.workdir:
inputs_file = self._get_inputs_in_temp_file(inputs, deployment_id)
cfy.deployments.create(
blueprint_id=blueprint_id,
deployment_id=deployment_id,
verbose=verbose,
inputs=inputs_file).wait()
def delete_deployment(self, deployment_id,
verbose=False,
ignore_live_nodes=False):
with self.workdir:
cfy.deployments.delete(
deployment_id=deployment_id,
ignore_live_nodes=ignore_live_nodes,
verbose=verbose).wait()
def delete_blueprint(self, blueprint_id,
verbose=False):
with self.workdir:
cfy.blueprints.delete(
blueprint_id=blueprint_id,
verbose=verbose).wait()
def list_blueprints(self, verbose=False):
with self.workdir:
cfy.blueprints.list(verbose=verbose).wait()
def list_deployments(self, verbose=False):
with self.workdir:
cfy.deployments.list(verbose=verbose).wait()
def list_executions(self, verbose=False):
with self.workdir:
cfy.executions.list(verbose=verbose).wait()
def list_events(self, execution_id, verbosity='', include_logs=True):
with self.workdir:
command = cfy_out.events.list.bake(
execution_id=execution_id,
include_logs=include_logs)
if verbosity:
command = command.bake(verbosity)
return command().stdout.strip()
def get_blueprint(self, blueprint_id, verbose=False):
with self.workdir:
cfy.blueprints.get(
blueprint_id=blueprint_id, verbose=verbose).wait()
def get_deployment(self, deployment_id, verbose=False):
with self.workdir:
cfy.deployments.get(
deployment_id=deployment_id, verbose=verbose).wait()
def get_execution(self, execution_id, verbose=False):
with self.workdir:
cfy.executions.get(
execution_id=execution_id, verbose=verbose).wait()
def cancel_execution(self, execution_id, verbose=False):
with self.workdir:
cfy.executions.cancel(
execution_id=execution_id, verbose=verbose).wait()
def execute_install(self,
deployment_id,
verbose=False,
include_logs=True,
execute_timeout=DEFAULT_EXECUTE_TIMEOUT):
self.execute_workflow(
workflow='install',
deployment_id=deployment_id,
execute_timeout=execute_timeout,
verbose=verbose,
include_logs=include_logs)
def execute_uninstall(self,
deployment_id,
verbose=False,
include_logs=True,
execute_timeout=DEFAULT_EXECUTE_TIMEOUT):
self.execute_workflow(
workflow='uninstall',
deployment_id=deployment_id,
execute_timeout=execute_timeout,
verbose=verbose,
include_logs=include_logs)
def upload_blueprint(self,
blueprint_id,
blueprint_path,
verbose=False):
with self.workdir:
cfy.blueprints.upload(
blueprint_path=blueprint_path,
blueprint_id=blueprint_id,
verbose=verbose).wait()
def download_blueprint(self, blueprint_id):
with self.workdir:
cfy.blueprints.download(blueprint_id=blueprint_id).wait()
def download_plugin(self, plugin_id, output_file):
with self.workdir:
cfy.plugins.download(plugin_id=plugin_id, output=output_file)\
.wait()
def use(self, management_ip):
with self.workdir:
cfy.use(management_ip=management_ip).wait()
def get_management_ip(self):
with self.workdir:
settings = load_cloudify_working_dir_settings()
return settings.get_management_server()
def _set_management_creds(self, user, key):
with self.workdir, update_wd_settings() as ws_settings:
ws_settings.set_management_user(user)
ws_settings.set_management_key(key)
def get_provider_context(self):
with self.workdir:
settings = load_cloudify_working_dir_settings()
return settings.get_provider_context()
def install_agents(self, deployment_id=None, include_logs=False):
with self.workdir:
cfy.agents.install(deployment_id=deployment_id,
include_logs=include_logs).wait()
def close(self):
if self.tmpdir:
shutil.rmtree(self._cfy_workdir)
def execute_workflow(self,
workflow,
deployment_id,
verbose=False,
include_logs=True,
execute_timeout=DEFAULT_EXECUTE_TIMEOUT,
parameters=None):
params_file = self._get_parameters_in_temp_file(parameters, workflow)
with self.workdir:
cfy.executions.start(
workflow=workflow,
deployment_id=deployment_id,
timeout=execute_timeout,
verbose=verbose,
include_logs=include_logs,
parameters=params_file).wait()
def get_logs(self, destination_path=os.getcwd()):
with self.workdir:
cfy.logs.get(
destination_path=destination_path,
verbose=True).wait()
def purge_logs(self, force=True, backup_first=False):
with self.workdir:
cfy.logs.purge(
force=force,
backup_first=backup_first,
verbose=True).wait()
def backup_logs(self):
with self.workdir:
cfy.logs.backup(verbose=True).wait()
def ssh_list(self):
with self.workdir:
return sh.cfy.ssh(list=True)
def ssh_run_command(self, command):
with self.workdir:
return sh.cfy.ssh(command=command)
def install_plugins_locally(self, blueprint_path):
cfy.local(
'install-plugins',
blueprint_path=blueprint_path).wait()
def _get_dict_in_temp_file(self, dictionary, prefix, suffix):
dictionary = dictionary or {}
file_ = tempfile.mktemp(prefix='{0}-'.format(prefix),
suffix=suffix,
dir=self.workdir)
with open(file_, 'w') as f:
f.write(json.dumps(dictionary))
return file_
def _get_inputs_in_temp_file(self, inputs, inputs_prefix):
return self._get_dict_in_temp_file(dictionary=inputs,
prefix=inputs_prefix,
suffix='-inputs.json')
def _get_parameters_in_temp_file(self, parameters, parameters_prefix):
return self._get_dict_in_temp_file(dictionary=parameters,
prefix=parameters_prefix,
suffix='-parameters.json')
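# --- Editor's note: illustrative usage, not part of the original file. ---
# A minimal sketch of how a test typically drives this helper; the blueprint
# paths, ids and inputs below are hypothetical:
#
#   cfy = CfyHelper()
#   cfy.bootstrap(blueprint_path='manager-blueprint.yaml',
#                 inputs_file='manager-inputs.json')
#   cfy.install(blueprint_path='blueprint.yaml', blueprint_id='bp',
#               deployment_id='dep', inputs={'image_id': 'some-image'})
#   cfy.execute_uninstall(deployment_id='dep')
#   cfy.teardown()
#   cfy.close()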
|
apache-2.0
| -3,246,946,880,898,750,500 | 34.940568 | 79 | 0.543892 | false |
google/loaner
|
loaner/web_app/backend/handlers/cron/run_reminder_events.py
|
1
|
5071
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for processing Reminder Events in a cron job."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from absl import logging
import webapp2
from loaner.web_app.backend.lib import events
from loaner.web_app.backend.models import config_model
from loaner.web_app.backend.models import device_model
from loaner.web_app.backend.models import event_models
_NO_REMINDER_EVENTS_MSG = 'No enabled reminder events.'
_DEVICE_ALREADY_NOTED_MSG = (
'Device %s already marked to be reminded for level %d.')
_DEVICE_MARKED_RETURNED_MSG = (
'Device %s is marked returned, and so will not be reminded for level %d.')
_DEVICE_REPEAT_WAITING_MSG = (
'Device %s already reminded for level %d, and repeat interval '
'of %d days has not yet elapsed.')
_DEVICE_SET_REMINDER_MSG = (
'Device %s will get a reminder level %d after %s.')
_DEVICE_REMINDING_NOW_MSG = 'Reminding for Device %s at level %s.'
_EVENT_ACTION_ERROR_MSG = (
'The following error occurred while trying to set a device reminder: %s')
class RunReminderEventsHandler(webapp2.RequestHandler):
"""Handler for processing Reminder Events."""
def __init__(self, *args, **kwargs):
super(RunReminderEventsHandler, self).__init__(*args, **kwargs)
self.reminder_delay_delta = datetime.timedelta(
hours=config_model.Config.get('reminder_delay'))
def get(self):
"""Process the Reminder Action task if need be."""
self.reminder_events = event_models.ReminderEvent.get_all_enabled()
if self.request.GET.get('find_remindable_devices') == 'true':
self._find_remindable_devices()
if self.request.GET.get('remind_for_devices') == 'true':
self._remind_for_devices()
def _find_remindable_devices(self):
"""Find devices in a remindable state and mark them so."""
if not self.reminder_events:
logging.error(_NO_REMINDER_EVENTS_MSG)
for reminder_event in self.reminder_events:
for device in reminder_event.get_matching_entities():
# Device has been marked pending return within the grace period.
if device.mark_pending_return_date:
logging.info(
_DEVICE_MARKED_RETURNED_MSG, device.identifier,
reminder_event.level)
continue
# Device already marked for a reminder at this level.
if device.next_reminder and (
device.next_reminder.level == reminder_event.level):
logging.info(
_DEVICE_ALREADY_NOTED_MSG, device.identifier,
reminder_event.level)
continue
# Device already had a reminder at this level.
if (
device.last_reminder and
device.last_reminder.level == reminder_event.level):
# We shouldn't remind again.
if not reminder_event.repeat_interval:
continue
else:
# We shouldn't remind again if insufficient time has elapsed.
time_since_reminder = (
datetime.datetime.utcnow() - device.last_reminder.time)
if (
time_since_reminder.total_seconds() <
                reminder_event.repeat_interval * 86400):
logging.info(
_DEVICE_REPEAT_WAITING_MSG, device.identifier,
reminder_event.level, reminder_event.repeat_interval)
continue
# We should set a reminder with the delay from configuration settings.
device.set_next_reminder(
reminder_event.level, self.reminder_delay_delta)
logging.info(
_DEVICE_SET_REMINDER_MSG, device.identifier,
reminder_event.level, str(device.next_reminder.time))
def _remind_for_devices(self):
"""Find devices marked as being in a remindable state and raise event."""
for device in device_model.Device.query(
device_model.Device.next_reminder.time <= datetime.datetime.utcnow()
).fetch():
logging.info(
_DEVICE_REMINDING_NOW_MSG, device.identifier,
device.next_reminder.level)
try:
events.raise_event(
event_name=event_models.ReminderEvent.make_name(
device.next_reminder.level),
device=device)
except events.EventActionsError as err:
# We log the error so that a single device does not disrupt all other
# devices that need reminders set.
logging.error(_EVENT_ACTION_ERROR_MSG, err)
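# --- Editor's note: illustrative wiring, not part of the original file. ---
# The two GET parameters checked in get() select which phase runs, so this
# handler is meant to be hit twice by App Engine cron. A hypothetical sketch
# of the routing (the real URL mapping and cron.yaml live elsewhere in the
# loaner app):
#
#   application = webapp2.WSGIApplication([
#       ('/cron/run_reminder_events', RunReminderEventsHandler),
#   ])
#
#   # cron.yaml entries (assumed):
#   #   /cron/run_reminder_events?find_remindable_devices=true
#   #   /cron/run_reminder_events?remind_for_devices=true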
|
apache-2.0
| -2,524,230,247,139,262,500 | 38.929134 | 78 | 0.667324 | false |
nvlbg/gepify
|
gepify/providers/playlists.py
|
1
|
6438
|
"""
gepify.providers.playlists
~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides information about downloaded playlists
as well as functionality to download playlists.
"""
from werkzeug.contrib.cache import RedisCache
from gepify.celery import celery_app
from . import songs, PLAYLISTS_DIRECTORY
from .songs import SUPPORTED_FORMATS
from celery import chord
from celery.utils.log import get_task_logger
import zipfile
from hashlib import md5
import os
import time
cache = RedisCache(
host=os.environ.get('REDIS_HOST', 'localhost'),
port=os.environ.get('REDIS_PORT', 6379),
password=os.environ.get('REDIS_PASS', ''),
key_prefix='playlist_',
default_timeout=0
)
logger = get_task_logger(__name__)
def get_playlist(service, playlist, format):
"""Return information about a playlists if it exists.
Parameters
----------
service : str
The service which provided the playlist (e.g. spotify).
playlist : str
The id of the playlist.
format : str
The format of the songs in the playlist.
Returns
-------
dict
If the playlist exists with the following information:
path - The path on the filesystem where the playlist is located.
checksum - The `checksum` of the tracks in the playlist.
None
If the playlist does not exist.
"""
playlist = cache.get('{}_{}_{}'.format(service, playlist, format))
if playlist == 'downloading':
return None
return playlist
def has_playlist(service, playlist, format):
"""Check if a playlist exists.
Parameters
----------
service : str
The service which provided the playlist (e.g. spotify).
playlist : str
The id of the playlist.
format : str
The format of the songs in the playlist.
Returns
-------
bool
True if the playlist is downloaded and exists, False otherwise.
"""
playlist = cache.get('{}_{}_{}'.format(service, playlist, format))
return playlist is not None and playlist != 'downloading'
def checksum(tracks):
"""Return the checksum of the tracks.
Parameters
----------
tracks : list
List of song names.
Returns
-------
str
A checksum for the given tracks.
"""
track_names = sorted([track['name'] for track in tracks])
return md5(''.join(track_names).encode('utf-8')).hexdigest()
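# Editor's note: a small worked example of checksum() above (illustrative).
# Sorting makes the result independent of track order:
#
#   checksum([{'name': 'b'}, {'name': 'a'}])
#   # == md5('ab'.encode('utf-8')).hexdigest()
#   # == checksum([{'name': 'a'}, {'name': 'b'}])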
@celery_app.task
def handle_error(playlist_cache_key):
logger.error('An error occured while trying to download a playlist.'
' Cache key: {}'.format(playlist_cache_key))
cache.delete(playlist_cache_key)
@celery_app.task
def create_zip_playlist(playlist, service, checksum, format='mp3'):
playlist_cache_key = '{}_{}_{}'.format(service, playlist['id'], format)
playlist_zip_filename = '{}/{}.zip'.format(PLAYLISTS_DIRECTORY, playlist_cache_key)
playlist_zip = zipfile.ZipFile(playlist_zip_filename, 'w')
playlist_m3u_contents = ['#EXTM3U']
for song_info in playlist['tracks']:
song = songs.get_song(song_info['name'])
playlist_zip.write(
song['files'][format], '{}.{}'.format(song['name'], format))
playlist_m3u_contents.append(
'#EXTINF:{},{}\n{}.{}\n'.format(
-1, song['name'], song['name'], format)
)
playlist_zip.writestr(
'{}.m3u'.format(playlist['name']),
bytes('\n'.join(playlist_m3u_contents), 'utf-8')
)
playlist_zip.close()
cache.set(playlist_cache_key, {
'path': playlist_zip_filename,
'checksum': checksum
})
@celery_app.task
def download_playlist(playlist, service, provider='youtube', format='mp3'):
"""Download a playlist.
Parameters
----------
playlist : dict
Contains information about the playlist:
id - The id of the playlist.
tracks - List of dicts with information about songs.
Each dict should have:
name - The song name
[provider] (optional) - Known id of the song by [provider].
If present song will not be searched and will be directly
downloaded by this id.
service : str
The service which provided the playlist (e.g. spotify).
provider : str
The provider to use when downloading the songs.
format : str
The format in which to convert the songs after downloading.
Raises
------
ValueError
If `format` is not supported.
"""
if format not in SUPPORTED_FORMATS:
raise ValueError('Format not supported: {}'.format(format))
playlist_cache_key = '{}_{}_{}'.format(service, playlist['id'], format)
playlist_data = cache.get(playlist_cache_key)
if playlist_data == 'downloading':
logger.info(
'Attempt to download a playlist in the process of downloading')
return
playlist_checksum = checksum(playlist['tracks'])
if (playlist_data is not None and
playlist_data['checksum'] == playlist_checksum):
logger.info('Attempt to download an already downloaded playlist')
return
cache.set(playlist_cache_key, 'downloading')
download_song_tasks = []
for song in playlist['tracks']:
if not songs.has_song_format(song['name'], format):
download_song_tasks.append(
songs.download_song.si(
song, provider, format
)
)
if len(download_song_tasks) == 0:
create_zip_playlist.apply_async(
args=(playlist, service, playlist_checksum, format),
link_error=handle_error.si(playlist_cache_key)
)
else:
chord(
download_song_tasks,
create_zip_playlist.si(
playlist, service, playlist_checksum, format
),
link_error=handle_error.si(playlist_cache_key)
).delay()
@celery_app.task(ignore_result=True)
def clean_playlists():
"""Delete old playlist files."""
for playlist in os.listdir(PLAYLISTS_DIRECTORY):
path_to_playlist = '{}/{}'.format(PLAYLISTS_DIRECTORY, playlist)
last_modified = os.path.getmtime(path_to_playlist)
now = time.time()
if now - last_modified > 30 * 60: # 30 minutes
os.remove(path_to_playlist)
cache.delete(playlist[:-4])
logger.info('Deleting old playlist: {}'.format(path_to_playlist))
|
mit
| -8,243,888,045,755,013,000 | 28.668203 | 87 | 0.614011 | false |
aamalev/aiohttp_apiset
|
tests/test_schema_file.py
|
1
|
3140
|
from collections import OrderedDict
from pathlib import Path
import pytest
from aiohttp_apiset.swagger.loader import (
AllOf,
DictLoader,
ExtendedSchemaFile,
FileLoader,
Loader,
SchemaFile,
yaml,
)
def test_load():
p = Path(__file__).parent / 'data/schema01.yaml'
f = SchemaFile(p)
assert f is SchemaFile(p)
assert f('../data/file.yaml#/basePath') == '/image'
assert 'get' in f['paths']['/pet']
@pytest.mark.parametrize('p', [
'data/schema01.yaml',
'data/root.yaml',
])
def test_paths(p):
d = Path(__file__).parent
f = ExtendedSchemaFile(p, dirs=[d, d / 'data'])
paths = f['paths']
items = list(paths.items())
assert len(items) == len([url for url in paths])
for url, m in items:
methods = paths[url]
assert m == methods
@pytest.mark.parametrize('p', [
'data/schema01.yaml',
'data/root.yaml',
])
def test_resolve(p):
d = Path(__file__).parent
f = ExtendedSchemaFile(p, [d, d / 'data'])
data = f.resolve()
assert data['paths']
def test_route_include(swagger_router):
paths = [route.url_for().human_repr()
for route in swagger_router.routes()]
assert '/api/1/include2/inc/image' in paths, paths
@pytest.mark.parametrize('loader', [
FileLoader,
DictLoader,
])
@pytest.mark.parametrize('p', [
'data/schema01.yaml',
'data/root.yaml',
])
def test_loader(loader, p):
loader_instance = loader()
d = Path(__file__).parent
loader_instance.add_search_dir(d)
loader_instance.add_search_dir(d / 'data')
assert loader_instance.load(p)
@pytest.mark.parametrize('loader', [
FileLoader,
DictLoader,
])
@pytest.mark.parametrize('p', [
'data/schema01.yaml',
'data/root.yaml',
])
def test_loader_resolve_data(loader, p):
loader_instance = loader()
d = Path(__file__).parent
loader_instance.add_search_dir(d)
assert '/api/1' == loader_instance(p + '#/basePath')
data = loader_instance.resolve_data({'$ref': p + '#/basePath'})
assert '/api/1' == data, data
data = loader_instance.resolve_data({'t': {'$ref': p + '#/definitions/g'}})
assert {'t': {'f': 1, 'd': 2}} == data, data
def test_allOf():
a = AllOf({'a': 1}, {'b': 2})
assert dict(a) == {'a': 1, 'b': 2}
with pytest.raises(KeyError):
print(a['c']) # noqa
assert len(a) == 2
def test_ordered_with_merge():
d = """
d: 1
a: 2
c: &c
f: 3
j: 4
t:
<<: *c
z: 5
"""
data = yaml.load(d, Loader)
assert isinstance(data, OrderedDict)
def test_local_refs():
class F(dict):
local_refs = {('x', 'y'): {'z': 3}}
f = F(x={'w': 1}, r=4)
loader = FileLoader()
assert loader._set_local_refs(f) == {'x': {'y': {'z': 3}, 'w': 1}, 'r': 4}
def test_load_local_refs(swagger_router):
loader = swagger_router._file_loader
result = loader.load('data/root.yaml')
assert str(Path('data/include.yaml')) in FileLoader.files
assert FileLoader.local_refs
assert ExtendedSchemaFile.files
assert 'Defi' in result['definitions']
|
apache-2.0
| 2,994,473,244,152,776,000 | 23.341085 | 79 | 0.581847 | false |
EclipseXuLu/DataHouse
|
DataHouse/jobcn/jobcn_spider.py
|
1
|
6201
|
import os
import time
import shutil
import requests
from lxml import etree
from openpyxl import Workbook
JOBCN_DATA_DIR = '../../DataSet/jobcn/'
base_url = 'http://www.jobcn.com/search/listalljob_servlet.ujson'
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.7 Safari/537.36'
class JobCn(object):
pass
def crawl(jobFunction, pagenum):
payload = {'p.keyword': '', 'p.keyword2': '', 'p.keywordType': '', 'p.pageNo': pagenum, 'p.pageSize': 40,
'p.sortBy': 'postdate', 'p.statistics': 'false', 'p.totalRow': '', 'p.cachePageNo': 1,
'p.cachePosIds': '', 'p.cachePosUpddates': '', 'p.jobFunction': jobFunction}
headers = {'user-agent': user_agent, 'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate', 'Host': 'www.jobcn.com', 'Origin': 'http://www.jobcn.com',
'Referer': 'http://www.jobcn.com/search/listalljob.xhtml'}
r = requests.post(base_url, data=payload, headers=headers)
if r.status_code == 200:
print(r.json())
return r.json()
elif r.status_code == 403:
print('Access Denied!')
return None
def get_max_page(jobFunction):
base_url = 'http://www.jobcn.com/search/listalljob_servlet.ujson'
payload = {'p.keyword': '', 'p.keyword2': '', 'p.keywordType': '', 'p.pageNo': 1, 'p.pageSize': 40,
'p.sortBy': 'postdate', 'p.statistics': 'false', 'p.totalRow': '', 'p.cachePageNo': 1,
'p.cachePosIds': '', 'p.cachePosUpddates': '', 'p.jobFunction': jobFunction}
headers = {'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate', 'Host': 'www.jobcn.com', 'Origin': 'http://www.jobcn.com',
'Referer': 'http://www.jobcn.com/search/listalljob.xhtml'}
r = requests.post(base_url, data=payload, headers=headers)
max_page = r.json()['pageCount']
return max_page
def get_xml_joblist(job_xml_path):
"""
    Read job parameters from job.xml.
:param job_xml_path:
:return:
"""
tree = etree.parse(job_xml_path)
job_info = {}
for each_job_node in tree.findall('//job'):
jobFunction = each_job_node.attrib['jobFunction']
jobname = each_job_node.text
job_info[jobFunction] = jobname
return job_info
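# Editor's note: get_xml_joblist() above assumes a job.xml roughly of this
# shape (element values below are placeholders, not the real file contents):
#
#   <jobs>
#       <job jobFunction="0501">Software Engineer</job>
#       <job jobFunction="0502">Test Engineer</job>
#   </jobs>
#
# which it turns into {'0501': 'Software Engineer', '0502': 'Test Engineer'}.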
"""
def json_file_to_list(each_job_json_dir):
joblist = []
for each_job_json in os.listdir(each_job_json_dir):
with open(each_job_json_dir + os.sep + each_job_json, mode='r', encoding='utf-8') as f:
for each_line in f.readlines():
try:
json_obj = json.loads(each_line, encoding='utf-8')
joblist.append(json_obj)
except:
pass
return joblist
"""
def write_excel(lists, filename):
wb = Workbook()
ws = wb.active
ws.title = "职位信息"
ws.cell(row=1, column=1).value = '职位ID'
ws.cell(row=1, column=2).value = '职位名称'
ws.cell(row=1, column=3).value = '所属部门'
ws.cell(row=1, column=4).value = '公司名称'
ws.cell(row=1, column=5).value = '薪资待遇'
ws.cell(row=1, column=6).value = '学历要求'
ws.cell(row=1, column=7).value = '公司福利'
ws.cell(row=1, column=8).value = '年龄要求'
ws.cell(row=1, column=9).value = '工作经验'
ws.cell(row=1, column=10).value = '招聘数量'
ws.cell(row=1, column=11).value = '工作地点'
ws.cell(row=1, column=12).value = '联系邮箱'
ws.cell(row=1, column=13).value = '联系电话'
ws.cell(row=1, column=14).value = '公司ID'
rownum = 2
for _ in lists:
ws.cell(row=rownum, column=1).value = _.posId
ws.cell(row=rownum, column=2).value = _.posName
ws.cell(row=rownum, column=3).value = _.deptName
ws.cell(row=rownum, column=4).value = _.comName
ws.cell(row=rownum, column=5).value = _.salaryDesc
ws.cell(row=rownum, column=6).value = _.reqDegree
ws.cell(row=rownum, column=7).value = _.benefitTags
ws.cell(row=rownum, column=8).value = _.ageDesc
ws.cell(row=rownum, column=9).value = _.workYearDesc
ws.cell(row=rownum, column=10).value = _.candidatesNum
ws.cell(row=rownum, column=11).value = _.jobLocation
ws.cell(row=rownum, column=12).value = _.email
ws.cell(row=rownum, column=13).value = _.contactTel
ws.cell(row=rownum, column=14).value = _.comId
rownum += 1
wb.save(os.path.join(JOBCN_DATA_DIR, '%s.xlsx' % filename))
print('Excel generates successfully......')
def start():
if os.path.exists(JOBCN_DATA_DIR):
shutil.rmtree(JOBCN_DATA_DIR)
os.makedirs(JOBCN_DATA_DIR)
job_info = get_xml_joblist('job.xml')
for key, value in job_info.items():
joblist = []
'''get the max page number via jobFunction value'''
max_page = get_max_page(key)
print('is crawling %s data......' % value)
index = 1
while index <= max_page:
json_obj = crawl(key, index)
if json_obj is not None:
for _ in json_obj['rows']:
jobcn = JobCn()
jobcn.posId = _['posId']
jobcn.posName = _['posName']
jobcn.deptName = _['deptName']
jobcn.comName = _['comName']
jobcn.salaryDesc = _['salaryDesc']
jobcn.reqDegree = _['reqDegree']
jobcn.benefitTags = _['benefitTags']
jobcn.ageDesc = _['ageDesc']
jobcn.workYearDesc = _['workYearDesc']
jobcn.candidatesNum = _['candidatesNum']
jobcn.jobLocation = _['jobLocation']
jobcn.email = _['email']
jobcn.contactTel = _['contactTel']
jobcn.comId = _['comId']
joblist.append(jobcn)
index += 1
print('%s\'s data is finished......' % value)
time.sleep(2)
write_excel(joblist, value)
if __name__ == '__main__':
start()
|
mit
| -219,106,271,899,237,340 | 36.128049 | 123 | 0.563804 | false |
Axelrod-Python/Axelrod-fingerprint
|
update_fingerprints.py
|
1
|
8815
|
"""
A script to obtain the Ashlock Fingerprints of all strategies in the Axelrod
library.
This writes a hash of the source code of each strategy to file: db.csv.
If the source code of a strategy changes **or** a new strategy is introduced
then the fingerprint is regenerated for that strategy.
"""
import inspect
import hashlib
import csv
import string
import numpy as np
import matplotlib.pyplot as plt
import axelrod as axl
def hash_strategy(strategy):
"""
Hash the source code of a strategy
"""
try:
source_code = "".join(inspect.getsourcelines(strategy)[0])
except OSError: # Some classes are dynamically created
source_code = "".join(inspect.getsourcelines(strategy.strategy)[0])
hash_object = hashlib.md5(source_code.encode("utf-8"))
hashed_source = hash_object.hexdigest()
return hashed_source
def write_strategy_to_db(strategy, filename="db.csv", fingerprint="Ashlock"):
"""
Write the hash of a strategy to the db
"""
hashed_source = hash_strategy(strategy)
with open(filename, "a") as db:
try:
db.write(
"{},{},{}\n".format(
strategy.original_name, fingerprint, hashed_source
)
)
except AttributeError:
db.write(
"{},{},{}\n".format(strategy.name, fingerprint, hashed_source)
)
def read_db(filename="db.csv"):
"""
Read filename and return a dictionary mapping string names to hash of source
code of a strategy
"""
with open(filename, "r") as db:
csvreader = csv.reader(db)
str_to_hash = {(row[0], row[1]): row[2] for row in csvreader}
return str_to_hash
def create_db(filename="db.csv"):
"""
Creates an empty db.csv file
"""
with open(filename, "w"):
pass
def write_data_to_file(fp, filename):
"""
Write the fingerprint data to a file.
"""
columns = ["x", "y", "score"]
with open(filename, "w") as f:
w = csv.writer(f)
w.writerow(columns)
for key, value in fp.data.items():
w.writerow([key.x, key.y, value])
def obtain_fingerprint(
strategy, turns, repetitions, probe=axl.TitForTat, processes=1
):
"""
Obtain the fingerprint for a given strategy and save the figure to the
assets dir
"""
fp = axl.AshlockFingerprint(strategy, probe)
fp.fingerprint(
turns=turns,
repetitions=repetitions,
progress_bar=False,
processes=processes,
)
plt.figure()
fp.plot()
try:
name = strategy.original_name
except AttributeError:
name = strategy.name
plt.tight_layout()
plt.savefig(
"assets/{}.png".format(format_filename(name)), bbox_inches="tight"
)
write_data_to_file(fp, "assets/{}.csv".format(format_filename(name)))
def obtain_transitive_fingerprint(strategy, turns, repetitions, processes=1):
"""
Obtain the transitive fingerprint
for a given strategy and save the figure to the assets dir
"""
fp = axl.TransitiveFingerprint(strategy, number_of_opponents=30)
fp.fingerprint(
turns=turns,
repetitions=repetitions,
progress_bar=False,
processes=processes,
)
plt.figure()
fp.plot()
try:
name = strategy.original_name
except AttributeError:
name = strategy.name
plt.tight_layout()
plt.savefig(
"assets/transitive_{}.png".format(format_filename(name)),
bbox_inches="tight",
)
np.savetxt(
"assets/transitive_{}.csv".format(format_filename(name)), fp.data
)
def obtain_transitive_fingerprint_v_short(
strategy, turns, repetitions, processes=1
):
"""
Obtain the transitive fingerprint against short run time
for a given strategy and save the figure to the assets dir
"""
short_run_time = [s() for s in axl.short_run_time_strategies]
fp = axl.TransitiveFingerprint(strategy, opponents=short_run_time)
fp.fingerprint(
turns=turns,
repetitions=repetitions,
progress_bar=False,
processes=processes,
)
plt.figure()
fp.plot(display_names=True)
try:
name = strategy.original_name
except AttributeError:
name = strategy.name
plt.tight_layout()
plt.savefig(
"assets/transitive_v_short_{}.png".format(format_filename(name)),
bbox_inches="tight",
)
np.savetxt(
"assets/transitive_v_short_{}.csv".format(format_filename(name)),
fp.data,
)
def format_filename(s):
"""
Take a string and return a valid filename constructed from the string.
Uses a whitelist approach: any characters not present in valid_chars are
removed. Also spaces are replaced with underscores.
Note: this method may produce invalid filenames such as ``, `.` or `..`
When I use this method I prepend a date string like '2009_01_15_19_46_32_'
and append a file extension like '.txt', so I avoid the potential of using
an invalid filename.
Borrowed from https://gist.github.com/seanh/93666
"""
valid_chars = "-_.() {}{}".format(string.ascii_letters, string.digits)
filename = "".join(c for c in s if c in valid_chars)
filename = filename.replace(" ", "_")
return filename
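# Editor's note: two worked examples of format_filename(), derived from the
# whitelist above (illustrative):
#
#   format_filename("Tit For Tat")  ->  "Tit_For_Tat"
#   format_filename("Random: 0.5")  ->  "Random_0.5"   (the ':' is dropped)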
def write_markdown(strategy):
"""
Write a markdown section of a strategy.
"""
try:
name = strategy.original_name
except AttributeError:
name = strategy.name
markdown = """
## {0}

[data (csv)](./assets/{1}.csv)

[data (csv)](./assets/transitive_{1}.csv)

[data (csv)](./assets/transitive_v_short_{1}.csv)
""".format(
name, format_filename(name)
)
return markdown
def main(
turns,
repetitions,
transitive_turns,
transitive_repetitions,
transitive_v_short_turns,
transitive_v_short_repetitions,
processes,
):
"""
    Fingerprint all strategies; if a strategy has already been fingerprinted it
does not get rerun.
"""
version = axl.__version__
markdown = """# Ashlock and transitive fingerprints
See:
[axelrod.readthedocs.io/en/latest/tutorials/further_topics/fingerprinting.html#fingerprinting](http://axelrod.readthedocs.io/en/latest/tutorials/further_topics/fingerprinting.html#fingerprinting)
All strategies included from Axelrod version {}.
This README.md file is autogenerated by running:
```
$ python update_fingerprints.py
```
Each individual fingerprint can be obtained by running:
```python
import axelrod as axl
fp = axl.AshlockFingerprint(strategy, probe)
fp.fingerprint(turns={}, repetitions={})
fp.plot()
```
# Axelrod library fingerprints
""".format(
version, turns, repetitions
)
try:
db = read_db()
except FileNotFoundError:
create_db()
db = read_db()
for strategy in axl.short_run_time_strategies:
name = strategy.name
signature = hash_strategy(strategy)
fp = "Ashlock"
if (name, fp) not in db or db[name, fp] != signature:
obtain_fingerprint(
strategy, turns, repetitions, processes=processes
)
write_strategy_to_db(strategy, fingerprint=fp)
fp = "Transitive"
if (name, fp) not in db or db[name, fp] != signature:
obtain_transitive_fingerprint(
strategy,
transitive_turns,
transitive_repetitions,
processes=processes,
)
write_strategy_to_db(strategy, fingerprint=fp)
fp = "Transitive_v_short"
if (name, fp) not in db or db[name, fp] != signature:
obtain_transitive_fingerprint_v_short(
strategy,
transitive_v_short_turns,
transitive_v_short_repetitions,
processes=processes,
)
write_strategy_to_db(strategy, fingerprint=fp)
markdown += write_markdown(strategy)
with open("README.md", "w") as outfile:
outfile.write(markdown)
if __name__ == "__main__":
turns, repetitions = 200, 20
transitive_turns, transitive_repetitions = 200, 20
transitive_v_short_turns, transitive_v_short_repetitions = 200, 20
processes = 20
main(
turns=turns,
repetitions=repetitions,
transitive_turns=transitive_turns,
transitive_repetitions=transitive_repetitions,
transitive_v_short_turns=transitive_v_short_turns,
transitive_v_short_repetitions=transitive_v_short_repetitions,
processes=processes,
)
|
mit
| 3,406,321,965,420,544,500 | 26.720126 | 195 | 0.630062 | false |
hwp-kiel/opencali
|
src/ui/mplwidget.py
|
1
|
1403
|
# Python Qt4 bindings for GUI objects
from PyQt4 import QtGui
# import the Qt4Agg FigureCanvas object, that binds Figure to
# Qt4Agg backend. It also inherits from QWidget
from matplotlib.backends.backend_qt4agg \
import FigureCanvasQTAgg as FigureCanvas
# Matplotlib Figure object
from matplotlib.figure import Figure
class MplCanvas(FigureCanvas):
"""Class to represent the FigureCanvas widget"""
def __init__(self):
# setup Matplotlib Figure and Axis
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
# initialization of the canvas
FigureCanvas.__init__(self, self.fig)
# we define the widget as expandable
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
# notify the system of updated policy
FigureCanvas.updateGeometry(self)
class MplWidget(QtGui.QWidget):
"""Widget defined in Qt Designer"""
def __init__(self, parent = None):
# initialization of Qt MainWindow widget
QtGui.QWidget.__init__(self, parent)
# set the canvas to the Matplotlib widget
self.canvas = MplCanvas()
# create a vertical box layout
self.vbl = QtGui.QVBoxLayout()
# add mpl widget to vertical box
self.vbl.addWidget(self.canvas)
        # set the layout to the vertical box
self.setLayout(self.vbl)
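# Editor's note: illustrative usage, not part of the original file.
# MplWidget is meant to be promoted onto a form in Qt Designer; plotting then
# goes through its canvas (the widget name below is hypothetical):
#
#   self.ui.mpl_widget.canvas.ax.plot([0, 1, 2], [0, 1, 4])
#   self.ui.mpl_widget.canvas.draw()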
|
gpl-2.0
| 3,067,225,847,199,053,300 | 35 | 61 | 0.679971 | false |
aosingh/lexpy
|
lexpy/tests/test_trie.py
|
1
|
10452
|
import unittest
import os
from lexpy.trie import Trie
from lexpy.utils import build_trie_from_file
from lexpy.exceptions import InvalidWildCardExpressionError
HERE = os.path.dirname(__file__)
large_dataset = os.path.join(HERE, 'data/words.txt')
small_dataset = os.path.join(HERE, 'data/words2.txt')
class TestWordCount(unittest.TestCase):
def test_word_count_greater_than_zero(self):
self.trie = Trie()
self.trie.add_all(['ash', 'ashley', 'ashes'])
self.assertGreater(self.trie.get_word_count(), 0, "The number of words should be greater than 0")
self.assertEqual(3, self.trie.get_word_count(), "Word count not equal")
def test_word_count_zero(self):
self.trie = Trie()
self.trie.add_all([])
self.assertEqual(0, self.trie.get_word_count(), "Word count not equal")
class TestTrieExactWordSearch(unittest.TestCase):
def test_word_in_trie(self):
self.trie = Trie()
self.trie.add_all(['ash', 'ashley'])
self.assertTrue('ash' in self.trie, "Word should be in trie")
def test_word_not_int_trie1(self):
self.trie = Trie()
self.trie.add_all(['ash', 'ashley'])
self.assertFalse('salary' in self.trie, "Word should not be in trie")
def test_word_not_int_trie2(self):
self.trie = Trie()
self.trie.add_all(['ash', 'ashley'])
self.assertFalse('mash lolley' in self.trie, "Word should not be in trie")
class TesTrieWordInsert(unittest.TestCase):
def test_word_add(self):
self.trie = Trie()
self.trie.add('axe')
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('axe' in self.trie, "Word should be in trie")
def test_word_add_all_list(self):
self.trie = Trie()
self.trie.add_all(['axe', 'kick']) #list
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('axe' in self.trie, "Word should be in trie")
self.assertTrue('kick' in self.trie, "Word should be in trie")
self.assertEqual(2, self.trie.get_word_count(), "Word count not equal")
def test_word_add_all_set(self):
self.trie = Trie()
self.trie.add_all({'axe', 'kick'}) #set
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('axe' in self.trie, "Word should be in trie")
self.assertTrue('kick' in self.trie, "Word should be in trie")
self.assertEqual(2, self.trie.get_word_count(), "Word count not equal")
def test_word_add_all_tuple(self):
self.trie = Trie()
self.trie.add_all(('axe', 'kick')) #tuple
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('axe' in self.trie, "Word should be in trie")
self.assertTrue('kick' in self.trie, "Word should be in trie")
self.assertEqual(2, self.trie.get_word_count(), "Word count not equal")
def test_word_add_all_with_number(self):
self.trie = Trie()
self.trie.add_all(('axe', 'kick', 3)) #tuple with one integer.
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('axe' in self.trie, "Word should be in trie")
self.assertTrue('kick' in self.trie, "Word should be in trie")
self.assertEqual(2, self.trie.get_word_count(), "Word count not equal")
def test_word_add_all_gen(self):
def gen_words():
a = ['ash', 'ashley', 'simpson']
for word in a:
yield word
self.trie = Trie()
self.trie.add_all(gen_words()) # generator
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('ash' in self.trie, "Word should be in trie")
self.assertTrue('ashley' in self.trie, "Word should be in trie")
self.assertTrue('simpson' in self.trie, "Word should be in trie")
self.assertEqual(3, self.trie.get_word_count(), "Word count not equal")
def test_word_add_all_file_path(self):
self.trie = Trie()
self.trie.add_all(small_dataset) # From a file
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('ash' in self.trie, "Word should be in trie")
self.assertTrue('ashley' in self.trie, "Word should be in trie")
self.assertTrue('simpson' in self.trie, "Word should be in trie")
self.assertEqual(8, self.trie.get_word_count(), "Word count not equal")
class TestTrieNodeCount(unittest.TestCase):
def test_trie_node_count(self):
self.trie = Trie()
self.trie.add_all(['ash', 'ashley'])
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('ash' in self.trie, "Word should be in trie")
self.assertTrue('ashley' in self.trie, "Word should be in trie")
self.assertEqual(2, self.trie.get_word_count(), "Word count not equal")
self.assertEqual(7, len(self.trie), "Number of nodes")
class TestTriePrefixExists(unittest.TestCase):
def test_trie_node_prefix_exists(self):
self.trie = Trie()
self.trie.add_all(['ash', 'ashley'])
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('ash' in self.trie, "Word should be in trie")
self.assertTrue('ashley' in self.trie, "Word should be in trie")
self.assertEqual(2, self.trie.get_word_count(), "Word count not equal")
self.assertTrue(self.trie.contains_prefix('ash'), "Prefix should be present in Trie")
self.assertTrue(self.trie.contains_prefix('as'), "Prefix should be present in Trie")
self.assertTrue(self.trie.contains_prefix('a'), "Prefix should be present in Trie")
def test_trie_node_prefix_not_exists(self):
self.trie = Trie()
self.trie.add_all(['ash', 'ashley'])
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('ash' in self.trie, "Word should be in trie")
self.assertTrue('ashley' in self.trie, "Word should be in trie")
self.assertEqual(2, self.trie.get_word_count(), "Word count not equal")
        self.assertFalse(self.trie.contains_prefix('xmas'), "Prefix should not be present in Trie")
        self.assertFalse(self.trie.contains_prefix('xor'), "Prefix should not be present in Trie")
        self.assertFalse(self.trie.contains_prefix('sh'), "Prefix should not be present in Trie")
class TestTriePrefixSearch(unittest.TestCase):
def test_trie_prefix_search(self):
self.trie = Trie()
self.trie.add_all(['ashlame', 'ashley', 'askoiu', 'ashlo'])
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertFalse('ash' in self.trie, "Word should not be in trie")
self.assertTrue('ashley' in self.trie, "Word should be in trie")
self.assertEqual(4, self.trie.get_word_count(), "Word count not equal")
self.assertTrue(self.trie.contains_prefix('ash'), "Prefix should be present in Trie")
self.assertEqual(sorted(self.trie.search_with_prefix('ash')), sorted(['ashlame', 'ashley', 'ashlo']), 'The lists should be equal')
class TestWildCardSearch(unittest.TestCase):
def test_trie_asterisk_search(self):
self.trie = Trie()
self.trie.add_all(['ash', 'ashley'])
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('ash' in self.trie, "Word should be in trie")
self.assertTrue('ashley' in self.trie, "Word should be in trie")
self.assertEqual(sorted(self.trie.search('a*')), sorted(['ash', 'ashley']), 'The lists should be equal')
def test_trie_question_search(self):
self.trie = Trie()
self.trie.add_all(['ab', 'as', 'ash', 'ashley'])
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('ash' in self.trie, "Word should be in trie")
self.assertTrue('ashley' in self.trie, "Word should be in trie")
self.assertEqual(sorted(self.trie.search('a?')), sorted(['ab', 'as']), 'The lists should be equal')
def test_trie_wildcard_search(self):
self.trie = Trie()
self.trie.add_all(['ab', 'as', 'ash', 'ashley'])
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('ash' in self.trie, "Word should be in trie")
self.assertTrue('ashley' in self.trie, "Word should be in trie")
self.assertEqual(sorted(self.trie.search('*a******?')), sorted(['ab', 'as', 'ash', 'ashley']), 'The lists should be equal')
def test_trie_wildcard_exception(self):
self.trie = Trie()
self.trie.add_all(['ab', 'as', 'ash', 'ashley'])
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('ash' in self.trie, "Word should be in trie")
self.assertTrue('ashley' in self.trie, "Word should be in trie")
self.assertRaises(InvalidWildCardExpressionError, self.trie.search, '#$%^a')
class TestBuildFromFile(unittest.TestCase):
def test_trie_build_from_file_path(self):
self.trie = build_trie_from_file(small_dataset)
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('ash' in self.trie, "Word should be in trie")
self.assertTrue('ashley' in self.trie, "Word should be in trie")
self.assertTrue('simpson' in self.trie, "Word should be in trie")
self.assertEqual(8, self.trie.get_word_count(), "Word count not equal")
def test_trie_build_from_file_object(self):
with open(small_dataset, 'r') as input_file:
self.trie = build_trie_from_file(input_file)
self.assertIsInstance(self.trie, Trie, "Object should be of type `lexpy.trie.Trie`")
self.assertTrue('ash' in self.trie, "Word should be in trie")
self.assertTrue('ashley' in self.trie, "Word should be in trie")
self.assertTrue('simpson' in self.trie, "Word should be in trie")
self.assertEqual(8, self.trie.get_word_count(), "Word count not equal")
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
| -8,322,138,378,117,097,000 | 46.949541 | 138 | 0.646575 | false |
harry-7/addons-server
|
src/olympia/constants/reviewers.py
|
1
|
6664
|
from django.utils.translation import ugettext_lazy as _
# Reviewer Tools
REVIEWER_VIEWING_INTERVAL = 8 # How often we ping for "who's watching?"
REVIEWER_REVIEW_LOCK_LIMIT = 3 # How many pages can a reviewer "watch"
# Types of Canned Responses for reviewer tools.
CANNED_RESPONSE_ADDON = 1
CANNED_RESPONSE_THEME = 2
CANNED_RESPONSE_PERSONA = 3
CANNED_RESPONSE_CHOICES = {
CANNED_RESPONSE_ADDON: _('Add-on'),
CANNED_RESPONSE_THEME: _('Static Theme'),
CANNED_RESPONSE_PERSONA: _('Persona'),
}
# Risk tiers for post-review weight.
POST_REVIEW_WEIGHT_HIGHEST_RISK = 150
POST_REVIEW_WEIGHT_HIGH_RISK = 100
POST_REVIEW_WEIGHT_MEDIUM_RISK = 20
# Reviewer Incentive Scores.
# Note: Don't change these since they're used as keys in the database.
REVIEWED_MANUAL = 0
REVIEWED_ADDON_FULL = 10
_REVIEWED_ADDON_PRELIM = 11 # Deprecated for new reviews - no more prelim.
REVIEWED_ADDON_UPDATE = 12
REVIEWED_DICT_FULL = 20
_REVIEWED_DICT_PRELIM = 21 # Deprecated for new reviews - no more prelim.
REVIEWED_DICT_UPDATE = 22
REVIEWED_LP_FULL = 30
_REVIEWED_LP_PRELIM = 31 # Deprecated for new reviews - no more prelim.
REVIEWED_LP_UPDATE = 32
REVIEWED_PERSONA = 40
REVIEWED_STATICTHEME = 41
# TODO: Leaving room for persona points based on queue.
REVIEWED_SEARCH_FULL = 50
_REVIEWED_SEARCH_PRELIM = 51 # Deprecated for new reviews - no more prelim.
REVIEWED_SEARCH_UPDATE = 52
REVIEWED_XUL_THEME_FULL = 60
_REVIEWED_XUL_THEME_PRELIM = 61 # Deprecated for new reviews - no more prelim.
REVIEWED_XUL_THEME_UPDATE = 62
REVIEWED_ADDON_REVIEW = 80
REVIEWED_ADDON_REVIEW_POORLY = 81
REVIEWED_CONTENT_REVIEW = 101
REVIEWED_EXTENSION_HIGHEST_RISK = 102
REVIEWED_EXTENSION_HIGH_RISK = 103
REVIEWED_EXTENSION_MEDIUM_RISK = 104
REVIEWED_EXTENSION_LOW_RISK = 105
# We need to keep the deprecated choices for existing points in the database.
REVIEWED_CHOICES = {
REVIEWED_MANUAL: _('Manual Reviewer Points'),
REVIEWED_ADDON_FULL: _('New Add-on Review'),
_REVIEWED_ADDON_PRELIM: _('Preliminary Add-on Review'),
REVIEWED_ADDON_UPDATE: _('Updated Add-on Review'),
REVIEWED_DICT_FULL: _('New Dictionary Review'),
_REVIEWED_DICT_PRELIM: _('Preliminary Dictionary Review'),
REVIEWED_DICT_UPDATE: _('Updated Dictionary Review'),
REVIEWED_LP_FULL: _('New Language Pack Review'),
_REVIEWED_LP_PRELIM: _('Preliminary Language Pack Review'),
REVIEWED_LP_UPDATE: _('Updated Language Pack Review'),
REVIEWED_PERSONA: _('Theme Review'),
REVIEWED_STATICTHEME: _('Theme (Static) Review'),
REVIEWED_SEARCH_FULL: _('New Search Provider Review'),
_REVIEWED_SEARCH_PRELIM: _('Preliminary Search Provider Review'),
REVIEWED_SEARCH_UPDATE: _('Updated Search Provider Review'),
REVIEWED_XUL_THEME_FULL: _('New Complete Theme Review'),
_REVIEWED_XUL_THEME_PRELIM: _('Preliminary Complete Theme Review'),
REVIEWED_XUL_THEME_UPDATE: _('Updated Complete Theme Review'),
REVIEWED_ADDON_REVIEW: _('Moderated Add-on Review'),
REVIEWED_ADDON_REVIEW_POORLY: _('Add-on Review Moderation Reverted'),
REVIEWED_CONTENT_REVIEW: _('Add-on Content Review'),
REVIEWED_EXTENSION_HIGHEST_RISK:
_('Post-Approval Add-on Review (Highest Risk)'),
REVIEWED_EXTENSION_HIGH_RISK:
_('Post-Approval Add-on Review (High Risk)'),
REVIEWED_EXTENSION_MEDIUM_RISK:
_('Post-Approval Add-on Review (Medium Risk)'),
REVIEWED_EXTENSION_LOW_RISK:
_('Post-Approval Add-on Review (Low Risk)'),
}
REVIEWED_OVERDUE_BONUS = 2
REVIEWED_OVERDUE_LIMIT = 7
REVIEWED_SCORES = {
REVIEWED_MANUAL: 0,
REVIEWED_ADDON_FULL: 120,
REVIEWED_ADDON_UPDATE: 80,
REVIEWED_DICT_FULL: 60,
REVIEWED_DICT_UPDATE: 60,
REVIEWED_LP_FULL: 60,
REVIEWED_LP_UPDATE: 60,
REVIEWED_PERSONA: 5,
REVIEWED_STATICTHEME: 5,
REVIEWED_SEARCH_FULL: 30,
REVIEWED_SEARCH_UPDATE: 30,
REVIEWED_XUL_THEME_FULL: 80,
REVIEWED_XUL_THEME_UPDATE: 80,
REVIEWED_ADDON_REVIEW: 1,
REVIEWED_ADDON_REVIEW_POORLY: -1, # -REVIEWED_ADDON_REVIEW,
REVIEWED_CONTENT_REVIEW: 10,
REVIEWED_EXTENSION_HIGHEST_RISK: 140,
REVIEWED_EXTENSION_HIGH_RISK: 120,
REVIEWED_EXTENSION_MEDIUM_RISK: 90,
REVIEWED_EXTENSION_LOW_RISK: 0,
}
REVIEWED_AMO = (
REVIEWED_ADDON_FULL,
REVIEWED_ADDON_UPDATE,
REVIEWED_DICT_FULL,
REVIEWED_DICT_UPDATE,
REVIEWED_LP_FULL,
REVIEWED_LP_UPDATE,
REVIEWED_SEARCH_FULL,
REVIEWED_SEARCH_UPDATE,
REVIEWED_XUL_THEME_FULL,
REVIEWED_XUL_THEME_UPDATE,
REVIEWED_STATICTHEME,
REVIEWED_ADDON_REVIEW,
REVIEWED_CONTENT_REVIEW,
REVIEWED_EXTENSION_HIGHEST_RISK,
REVIEWED_EXTENSION_HIGH_RISK,
REVIEWED_EXTENSION_MEDIUM_RISK,
REVIEWED_EXTENSION_LOW_RISK,
)
REVIEWED_LEVELS = [
{'name': _('Level 1'), 'points': 2160},
{'name': _('Level 2'), 'points': 4320},
{'name': _('Level 3'), 'points': 8700},
{'name': _('Level 4'), 'points': 21000},
{'name': _('Level 5'), 'points': 45000},
{'name': _('Level 6'), 'points': 96000},
{'name': _('Level 7'), 'points': 300000},
{'name': _('Level 8'), 'points': 1200000},
{'name': _('Level 9'), 'points': 3000000},
]
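# Editor's note (illustrative): one plausible way these thresholds are read,
# i.e. the highest level whose point total the reviewer has reached; the real
# lookup lives in the reviewer-score model, not in this constants module:
#
#   def level_for(points):
#       reached = [lvl for lvl in REVIEWED_LEVELS if points >= lvl['points']]
#       return reached[-1]['name'] if reached else None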
# Amount of hours to hide add-on reviews from users with permission
# Addons:DelayedReviews
REVIEW_LIMITED_DELAY_HOURS = 20
# Review queue pagination
REVIEWS_PER_PAGE = 200
REVIEWS_PER_PAGE_MAX = 400
# Theme review queue constants.
THEME_INITIAL_LOCKS = 5 # Initial number of themes to check out.
THEME_LOCK_EXPIRY = 30 # Minutes.
ACTION_MOREINFO = 0
ACTION_FLAG = 1
ACTION_DUPLICATE = 2
ACTION_REJECT = 3
ACTION_APPROVE = 4
REVIEW_ACTIONS = {
ACTION_MOREINFO: _('Request More Info'),
ACTION_FLAG: _('Flag'),
ACTION_DUPLICATE: _('Duplicate'),
ACTION_REJECT: _('Reject'),
ACTION_APPROVE: _('Approve')
}
THEME_REJECT_REASONS = {
# 0: _('Other rejection reason'),
1: _('Sexual or pornographic content'),
2: _('Inappropriate or offensive content'),
3: _('Violence, war, or weaponry images'),
4: _('Nazi or other hate content'),
5: _('Defamatory content'),
6: _('Online gambling'),
7: _('Spam content'),
8: _('Low-quality, stretched, or blank image'),
9: _('Header image alignment problem'),
}
WOULD_NOT_HAVE_BEEN_AUTO_APPROVED = 0
WOULD_HAVE_BEEN_AUTO_APPROVED = 1
AUTO_APPROVED = 2
NOT_AUTO_APPROVED = 3
AUTO_APPROVAL_VERDICT_CHOICES = (
    (WOULD_NOT_HAVE_BEEN_AUTO_APPROVED,
     'Would *not* have been auto-approved (dry-run mode was in effect)'),
    (WOULD_HAVE_BEEN_AUTO_APPROVED,
     'Would have been auto-approved (dry-run mode was in effect)'),
(AUTO_APPROVED, 'Was auto-approved'),
(NOT_AUTO_APPROVED, 'Was *not* auto-approved'),
)
|
bsd-3-clause
| 1,205,564,029,379,759,400 | 33.174359 | 79 | 0.686224 | false |
trailofbits/iverify-oss
|
vendor/iphone-dataprotection/python_scripts/crypto/aeswrap.py
|
1
|
3161
|
import struct
from Crypto.Cipher import AES
"""
http://www.ietf.org/rfc/rfc3394.txt
quick'n'dirty AES wrap implementation
used by iOS 4 KeyStore kernel extension for wrapping/unwrapping encryption keys
"""
def unpack64bit(s):
return struct.unpack(">Q",s)[0]
def pack64bit(s):
return struct.pack(">Q",s)
def AESUnwrap(kek, wrapped):
C = []
for i in xrange(len(wrapped)/8):
C.append(unpack64bit(wrapped[i*8:i*8+8]))
n = len(C) - 1
R = [0] * (n+1)
A = C[0]
for i in xrange(1,n+1):
R[i] = C[i]
for j in reversed(xrange(0,6)):
for i in reversed(xrange(1,n+1)):
todec = pack64bit(A ^ (n*j+i))
todec += pack64bit(R[i])
B = AES.new(kek).decrypt(todec)
A = unpack64bit(B[:8])
R[i] = unpack64bit(B[8:])
#assert A == 0xa6a6a6a6a6a6a6a6, "AESUnwrap: integrity check FAIL, wrong kek ?"
if A != 0xa6a6a6a6a6a6a6a6:
#print "AESUnwrap: integrity check FAIL, wrong kek ?"
return None
res = "".join(map(pack64bit, R[1:]))
return res
def AESwrap(kek, data):
A = 0xa6a6a6a6a6a6a6a6
R = [0]
for i in xrange(len(data)/8):
R.append(unpack64bit(data[i*8:i*8+8]))
n = len(R) - 1
for j in xrange(0,6):
for i in xrange(1,n+1):
B = AES.new(kek).encrypt(pack64bit(A) + pack64bit(R[i]))
A = unpack64bit(B[:8]) ^ (n*j+i)
R[i] = unpack64bit(B[8:])
res = pack64bit(A) + "".join(map(pack64bit, R[1:]))
return res
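# Illustrative sketch (not in the original module): wrap and unwrap are inverses
# for any KEK of 16/24/32 bytes and data whose length is a multiple of 8 bytes,
# i.e. AESUnwrap(kek, AESwrap(kek, data)) == data; the RFC 3394 test vectors
# below exercise exactly this round trip.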
if __name__ == "__main__":
#format (kek, data, expected_ciphertext)
test_vectors = [
("000102030405060708090A0B0C0D0E0F", "00112233445566778899AABBCCDDEEFF", "1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5"),
("000102030405060708090A0B0C0D0E0F1011121314151617", "00112233445566778899AABBCCDDEEFF", "96778B25AE6CA435F92B5B97C050AED2468AB8A17AD84E5D"),
("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", "00112233445566778899AABBCCDDEEFF", "64E8C3F9CE0F5BA263E9777905818A2A93C8191E7D6E8AE7"),
("000102030405060708090A0B0C0D0E0F1011121314151617", "00112233445566778899AABBCCDDEEFF0001020304050607", "031D33264E15D33268F24EC260743EDCE1C6C7DDEE725A936BA814915C6762D2"),
("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", "00112233445566778899AABBCCDDEEFF0001020304050607", "A8F9BC1612C68B3FF6E6F4FBE30E71E4769C8B80A32CB8958CD5D17D6B254DA1"),
("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", "00112233445566778899AABBCCDDEEFF000102030405060708090A0B0C0D0E0F", "28C9F404C4B810F4CBCCB35CFB87F8263F5786E2D80ED326CBC7F0E71A99F43BFB988B9B7A02DD21")
]
for kek, data, expected in test_vectors:
ciphertext = AESwrap(kek.decode("hex"), data.decode("hex"))
assert ciphertext == expected.decode("hex")
assert AESUnwrap(kek.decode("hex"), ciphertext) == data.decode("hex")
print "All tests OK !"
|
bsd-2-clause
| -9,087,806,445,580,642,000 | 43.157143 | 259 | 0.636191 | false |
mmagnus/rna-pdb-tools
|
rna_tools/tools/ClashCalc/ClashCalc.py
|
1
|
2343
|
#!/usr/bin/python
from Bio.PDB import NeighborSearch, PDBParser, Selection, Atom
from numpy import array
def check_clash(str_name, v=True):
"""check_clash, fract of clashes!
if zero contacts then error -> fix ->
Problem, contacts, str_name: 311 505 na-prot_13536.pdb
Sterical clashes 0.615841584158
c is counter
"""
print(str_name)
structure = open(str_name)
#model = structure[0]
atoms_A = []
atoms_B = []
for line in structure.readlines():
if line[:4] == "ATOM":
#print line
at_nam = line[12:16].strip()
coor = [float(line[30:38]),float(line[38:46]), float(line[46:54])]
at = Atom.Atom(at_nam,coor,0.0,1.0,' ',at_nam,1,at_nam[0])
if line[21] == "A":
atoms_A.append(at)
elif line[21] == "B":
atoms_B.append(at)
else: pass
#atoms_B = Selection.unfold_entities(structure[0]['B'], 'A')
#print len(atoms_A), len(atoms_B)
if len(atoms_A) > len(atoms_B):
less = atoms_B
more = atoms_A
else:
less = atoms_A
more = atoms_B
problem = 0
contacts = 0
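    # Atoms of the smaller chain are searched against the larger one: a
    # neighbour within 2.0 angstroms counts as a clash ("problem") and as a
    # contact; otherwise a neighbour within 4.0 angstroms counts as a contact
    # only.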
ns=NeighborSearch(more)
for at in less:
neighbors=ns.search(array(at.get_coord()),2.0,'A')
if neighbors != []:
problem +=1
contacts +=1
else:
neighbors1=ns.search(array(at.get_coord()),4.0,'A')
if neighbors1 != []:
contacts +=1
if v:
print('problem:', float(problem))
print('contacts:', float(contacts))
try:
fract = float(problem)/float(contacts)
except ZeroDivisionError:
fract = problem # or skip this structure
print('ZeroDivison -- skip:', problem, contacts, str_name)
return fract
#print 'Contacts, str_name:', problem, contacts, str_name, "Sterical clashes ", fract
return fract
if __name__ == '__main__':
print(check_clash('test_data/no_clash.pdb'))
print(check_clash('test_data/super_clash.pdb'))
print(check_clash('test_data/prot-na_2392.pdb'))
|
gpl-3.0
| 4,982,397,807,123,278,000 | 33.455882 | 93 | 0.502774 | false |
YannThorimbert/ThorPy-1.4.3
|
thorpy/elements/clickable.py
|
1
|
1761
|
from pygame.event import post, Event
from thorpy.elements.pressable import Pressable
from thorpy.elements.hoverable import Hoverable
from thorpy.miscgui.constants import STATE_NORMAL, STATE_PRESSED, EVENT_PRESS, THORPY_EVENT
class Clickable(Pressable, Hoverable):
"""Clickable Element (Pressable and hoverable)"""
def __init__(self, text="", elements=None, normal_params=None,
press_params=None):
"""Pressable and hoverable element.
<text>: the text of the element.
"""
super(Clickable, self).__init__(text, elements, normal_params,
press_params)
self.normal_params.polite_set("states hover",
list([STATE_NORMAL, STATE_PRESSED]))
def finish(self):
Pressable.finish(self)
self._set_hovered_states_auto()
## Hoverable.finish(self)
def _remove_help(self):
if self._help_element:
self._help_element.unblit()
self._help_element.update()
self._help_element.set_recursive("visible", False)
self._waited = 0
def _press(self):
self.change_state(STATE_PRESSED)
self._hover()
ev_press = Event(THORPY_EVENT, id=EVENT_PRESS, el=self)
post(ev_press)
self._remove_help()
def _unpress(self):
self.change_state(STATE_NORMAL)
def _reaction_unpress(self, pygame_event):
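        # Only react if the element is currently pressed; the user function
        # runs only when the mouse is released while still over the element.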
state_ok = self.current_state == self._states[
STATE_PRESSED]
if state_ok:
self._unpress()
if self.collide(pygame_event.pos, STATE_PRESSED):
self._hover()
self.run_user_func()
else:
self._unhover()
|
mit
| 2,201,228,198,744,322,300 | 32.226415 | 91 | 0.578648 | false |
ddurieux/alignak
|
test/test_bad_sat_realm_conf.py
|
1
|
2145
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Jean Gabes, naparuba@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
# Grégory Starck, g.starck@gmail.com
# Sebastien Coavoux, s.coavoux@free.fr
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from alignak_test import *
class TestBadSatRealmConf(AlignakTest):
def setUp(self):
self.setup_with_file('etc/alignak_bad_sat_realm_conf.cfg')
def test_badconf(self):
self.assertFalse(self.conf.conf_is_correct)
if __name__ == '__main__':
unittest.main()
|
agpl-3.0
| 7,821,338,763,417,088,000 | 33.031746 | 78 | 0.730877 | false |
SpaceGroupUCL/qgisSpaceSyntaxToolkit
|
esstoolkit/external/networkx/algorithms/hierarchy.py
|
4
|
1511
|
"""
Flow Hierarchy.
"""
import networkx as nx
__all__ = ["flow_hierarchy"]
def flow_hierarchy(G, weight=None):
"""Returns the flow hierarchy of a directed network.
Flow hierarchy is defined as the fraction of edges not participating
in cycles in a directed graph [1]_.
Parameters
----------
G : DiGraph or MultiDiGraph
A directed graph
    weight : key, optional (default=None)
        Edge attribute to use as weight. If None the weight defaults to 1.
Returns
-------
h : float
Flow hierarchy value
Notes
-----
The algorithm described in [1]_ computes the flow hierarchy through
exponentiation of the adjacency matrix. This function implements an
alternative approach that finds strongly connected components.
An edge is in a cycle if and only if it is in a strongly connected
component, which can be found in $O(m)$ time using Tarjan's algorithm.
References
----------
.. [1] Luo, J.; Magee, C.L. (2011),
Detecting evolving patterns of self-organizing networks by flow
hierarchy measurement, Complexity, Volume 16 Issue 6 53-61.
DOI: 10.1002/cplx.20368
http://web.mit.edu/~cmagee/www/documents/28-DetectingEvolvingPatterns_FlowHierarchy.pdf
"""
if not G.is_directed():
raise nx.NetworkXError("G must be a digraph in flow_hierarchy")
scc = nx.strongly_connected_components(G)
return 1.0 - sum(G.subgraph(c).size(weight) for c in scc) / float(G.size(weight))
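# Illustrative example (sketch, not part of the original module): in the
# digraph with edges (1, 2), (2, 3), (3, 1), (3, 4) only the edge (3, 4) lies
# outside a cycle, so:
#   >>> flow_hierarchy(nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4)]))
#   0.25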
|
gpl-3.0
| -9,176,417,389,095,951,000 | 31.148936 | 94 | 0.668432 | false |
asavonic/traftrack
|
traftrack/yamaps.py
|
1
|
2009
|
import image
import logging
import tempfile
from os import path
DEBUG = False
def make_map_url(ty, coord, area_size, zoom):
return \
('https://static-maps.yandex.ru/1.x/?lang=ru_RU'
'&ll={lat}%2C{lon}'
'&z={zoom}&l={ty}&size={size_x},{size_y}'
).format(
ty=ty,
lat=coord[0], lon=coord[1],
size_x=area_size[0], size_y=area_size[1],
zoom=zoom)
traffic_levels = [
'AllGreen', # no traffic at all
'Minor', # no red, just a moderate amount of yellow
'BeCareful', # yellow, a few red segments
'NoWay', # many red segments
'GetTheHellOutOfHere' # almost all red
]
def decide_traffic_level(red, yellow, green):
    '''Translate traffic colors given in percent to one of the
    `traffic_levels` constants.
'''
if red < 5:
# AllGreen or Minor
if yellow < 10:
return 'AllGreen'
else:
return 'Minor'
else:
if red < 15:
return 'BeCareful'
elif red < 30:
return 'NoWay'
else:
return 'GetTheHellOutOfHere'
assert False, \
'Cannot decide traffic for RYG: {}%, {}%, {}%'.format(
red, yellow, green)
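# Illustrative sketch (not part of the original module), following the rules above:
#   decide_traffic_level(2, 5, 93)   -> 'AllGreen'
#   decide_traffic_level(10, 40, 50) -> 'BeCareful'
#   decide_traffic_level(35, 30, 35) -> 'GetTheHellOutOfHere'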
def get_traffic(mask_path, coord, area_size, zoom):
if DEBUG:
log = logging.getLogger()
temp = tempfile.mkdtemp(prefix='traftrack')
log.debug('image dump directory: %s', temp)
image.DEBUG_DUMP_DIR = temp
map_img = image.load_img_url(make_map_url('trf', coord, area_size, zoom))
mask_img = image.load_img_file(mask_path)
red, yellow, green = image.compute_histo_RYG(map_img, mask_img)
total = red + yellow + green
red = 100 * red / total
yellow = 100 * yellow / total
green = 100 * green / total
if DEBUG:
log.debug('traffic stats: red = %d%%, yellow = %d%%, green = %d%%',
red, yellow, green)
return decide_traffic_level(red, yellow, green)
|
mit
| -6,032,079,998,426,408,000 | 25.090909 | 77 | 0.561473 | false |
seraphlnWu/observer
|
observer/node/controller.py
|
1
|
13942
|
# coding=utf8
'''
controller node
The controller node handles registration of child nodes and dispatches data tasks to them.
'''
import socket
import time
from uuid import uuid4
from collections import deque
from twisted.spread import pb
from twisted.internet.defer import (
inlineCallbacks,
returnValue,
Deferred,
succeed,
)
from twisted.internet import reactor
from twisted.application import service
from twisted.python import failure
import twisted.spread.banana
from observer.node import BANANA_SIZE_LIMIT
from observer.utils import log
from observer.node.errors import UnknownService
from observer.utils.twisted_utils import ReconnectingPBClientFactory
twisted.spread.banana.SIZE_LIMIT = BANANA_SIZE_LIMIT
MAX_DELAYED = 60 # max delay of a pb client
class PBClientFactory(ReconnectingPBClientFactory):
''' '''
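    # Reconnecting PB client factory: whenever a (re)connection succeeds, the
    # remote root object is handed to the callback supplied at construction.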
def __init__(self, rootCallback):
ReconnectingPBClientFactory.__init__(self)
self._rootCallback = rootCallback
self.maxDelay = MAX_DELAYED
def gotRootObject(self, root):
''' '''
self._rootCallback(root)
class ControllerServiceBase(service.Service):
''' controlelr node. '''
servicename = ''
version_major = 1
version_minor = 0
def __init__(self, cfg):
''' '''
# not used now
self.client_register_timeout = cfg.client_register_timeout
self.port = cfg.controller_port
self.clients = {}
self.client_timeout = {}
self.versions = cfg.versions # {'servicename': version_tuple}
self.client_request_timeout = cfg.client_request_timeout
self.push_id = 0
self.pull_id = 0
self.push_requests = {}
self.push_queue = {}
self.controller = None
self.pull_requests = {}
self.pull_queue = {}
self.processing_timeout = {}
self.task_queue = {}
self.processing_queue = {}
self.requestid = 1
def splitName(self, name):
''' split the given task name '''
names = name.split('.')
return ('observer.' + '.'.join(names[:-1]), names[-1])
def getService(self, name):
''' get the target service by given service name '''
return self.services.get(name, set())
def newClientId(self):
''' generate a new uuid4().int value '''
return uuid4().int
def newRequestId(self):
''' generate a new request id '''
reqid = self.requestid
self.requestid += 1
return reqid
def clientRegisterTimeout(self, clientid):
''' '''
del self.client_timeout[clientid]
def clientProcTimeout(self, requestid, clientid):
''' '''
try:
del self.processing_timeout[requestid]
servicename = self.push_requests[requestid]['servicename']
except KeyError: # already processed
return
self.clients[clientid]['processing'].discard(requestid)
self.addRequest(servicename, requestid)
def getController(self):
''' '''
if self.controller is not None:
return succeed(self.controller)
else:
d = Deferred()
return d
    def startService(self):
        ''' start the controller as a service '''
        for servicename in self.versions:
            self.task_queue[servicename] = [] # record tasks to crawl
            self.processing_queue[servicename] = [] # record which client ops
            # queues of pushed requests waiting for a client and of
            # clients waiting (pulling) for a request
            self.push_queue[servicename] = deque()
            self.pull_queue[servicename] = deque()
def stopService(self):
''' stop the controller '''
for timeout in self.client_timeout.values():
timeout.cancel()
def register(
self,
servicename,
version_major,
version_minor,
nodename,
client,
):
''' register the client to controller '''
clientid = self.newClientId()
        # if the given servicename is not registered in versions, the node is invalid
if servicename not in self.versions:
log.info("Added client: %s %s Failed. No such servicename" % (str(clientid), servicename))
return ('%s is not in a known service' % servicename, 0)
version = self.versions[servicename]
client_version = (version_major, version_minor)
if client_version < version[0]:
return ('version %s is below %s please update the client' % (
repr(client_version),
repr(version[0])),
0)
if client_version > version[1]:
return ('version %s is above %s please update the controller' % (
repr(client_version),
repr(version[1])),
0)
self.clients[clientid] = {
'client': client,
'id': clientid,
'servicename': servicename,
'name': nodename,
            'processing': set(),   # ids of requests this client is working on
            'pulling': set(),      # ids of this client's outstanding pulls
            'pushing': set(),      # ids of requests pushed by this client
'ip': client.broker.transport.getPeer().host,
'last_call': time.time(),
}
# called when disconnect
client.notifyOnDisconnect(lambda c: self.unregister(clientid))
log.info("Added client: %s %s" % (str(clientid), servicename))
return ('succeed', clientid)
def unregister(self, clientid):
''' unregister the given clientid '''
if clientid not in self.clients:
return False
del self.clients[clientid]
log.info("Removed client: " + str(clientid))
return True
def addRequest(self, servicename, requestid):
''' add a new request '''
pull_queue = self.pull_queue.get(servicename, deque())
if pull_queue:
            # A client is already waiting on a pull: pop its pull record,
            # move it from client['pulling'] to client['processing'],
            # then fire the pull's Deferred with the pushed request.
pull_id = pull_queue.popleft()
pull_request = self.pull_requests[pull_id]
client = self.clients[pull_request['clientid']]
client['pulling'].discard(pull_id)
client['processing'].add(requestid)
pull_defer = pull_request['defer']
request = self.push_requests[requestid]
pull_defer.callback(request)
else:
# add a push record into push_queue
self.push_queue[servicename].append(requestid)
@inlineCallbacks
def remoteRequest(self, name, *args, **kwargs):
''' '''
servicename, method = self.splitName(name)
if servicename not in self.versions:
raise UnknownService("Unknown Service: " + servicename)
requestid = self.newRequestId()
defer = Deferred()
request = {
'id': requestid,
'servicename': servicename,
            'clientid': None,
'method': method,
'args': args,
'kwargs': kwargs,
'attemps': 0,
            'defer': defer,
}
self.push_requests[requestid] = request
self.addRequest(servicename, requestid)
try:
result = yield defer
returnValue(result)
return
except pb.RemoteError, error:
log.error("Got Error: " + error.remoteType)
raise
def gotResult(self, *args, **kwargs):
''' '''
        raise NotImplementedError("Should be implemented in a subclass")
@inlineCallbacks
def clientPush(self, clientid, name, *args, **kwargs):
''' push a record into client[pushing] '''
# if the given method name does not exists in self.versions,
# raise an UnknownServiceError
client = self.clients[clientid]
servicename, method = self.splitName(name)
        if servicename not in self.versions:
raise UnknownService("Unknown Service " + servicename)
# generate a new requestId
requestid = self.newRequestId()
defer = Deferred()
defer.addCallback(self.gotResult, args, kwargs)
request = {
'id': requestid,
'servicename': servicename,
'clientid': clientid,
'method': method,
            'args': args,
'kwargs': kwargs,
'attemps': 0,
'defer': defer,
}
client['pushing'].add(requestid)
self.push_requests[requestid] = request
try:
result = {'status': 'success', 'message': ''}
returnValue(result)
except pb.RemoteError, error:
log.error("Got error: " + error.remoteType)
result = {'status': 'failure', 'message': error.remoteType}
returnValue(result)
raise
finally:
client['pushing'].discard(requestid)
@inlineCallbacks
def clientPull(self, clientid):
''' '''
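        # Long-poll style: if no pushed work is queued for this client's
        # service, park the pull on a Deferred until addRequest() hands it a
        # request; otherwise pop a queued request immediately.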
        client = self.clients[clientid]
        name = client['servicename']
        push_queue = self.push_queue[name]
# if there are no push_queue
if not push_queue:
# generate a new pull record and add into pull_queue
defer = Deferred()
pullid = self.newRequestId()
pull = {
'id': pullid,
'servicename': name,
'defer': defer,
'clientid': clientid,
}
client['pulling'].add(pullid)
self.pull_requests[pullid] = pull
self.pull_queue[name].append(pullid)
request = yield defer
del self.pull_requests[pullid]
requestid = request['id']
client['processing'].add(requestid)
else:
# get a request from push_queue and add into processing queue
requestid = push_queue.popleft()
client['processing'].add(requestid)
request = self.push_requests[requestid]
self.processing_timeout[requestid] = reactor.callLater(
self.client_request_timeout,
self.clientProcTimeout,
requestid,
clientid,
)
log.info("Sent To: clientid %s, requestid %s." % (
clientid,
request['id'],
))
        # return requestid, method, args, kwargs to the client,
        # which then runs the request.
returnValue((
request['id'],
request['method'],
request['args'],
request['kwargs'],
))
def clientReturn(self, clientid, requestid, result):
''' '''
log.info("Returned: clientid: %s, requestid: %s" % (
clientid,
requestid,
))
# remove this request from processing deque
client = self.clients[clientid]
client['processing'].discard(requestid)
        # Try to cancel the processing timeout.
        # If an exception is raised, the request has already been finished.
try:
self.processing_timeout[requestid].cancel()
del self.processing_timeout[requestid]
        except KeyError:  # already processed
pass
if requestid in self.push_requests:
push = self.push_requests[requestid]
if 'error' not in result:
push['defer'].callback(result['result'])
else:
error = result['error']
push['defer'].errback(failure.Failure(
pb.RemoteError(
error['type'],
error['value'],
error['traceback'],
)))
servicename = push['servicename']
# remove this request from push_queue
try:
self.push_queue[servicename].remove(requestid)
except:
pass
if push['clientid'] is not None:
try:
self.clients[push['clientid']]['pushing'].discard(requestid)
except:
pass
class ControllerChildNode(pb.Referenceable):
''' '''
def __init__(self, service):
self.service = service
def remote_preregister(self, clientid):
''' '''
return self.service.preregister(clientid)
class ControllerNode(pb.Root):
''' start the controller node as service '''
def __init__(self, service):
''' '''
self.service = service
service.node = self
#def request(self, name, *args, **kwargs):
# ''' '''
# return self.service.remoteRequest(name, *args, **kwargs)
#def remote_request(self, clientid, name, *args, **kwargs):
# ''' '''
# return self.service.clientPush(clientid, name, *args, **kwargs)
#def remote_pull(self, clientid):
# ''' '''
# return self.service.clientPull(clientid)
#def remote_return(self, clientid, requestid, result):
# ''' '''
# return self.service.clientReturn(clientid, requestid, result)
#def remote_push(self, requestid, name, *args, **kwargs):
# ''' '''
# return self.service.clientPush(requestid, name, *args, **kwargs)
def remote_register(
self,
service,
version_major,
version_minor,
nodename,
client,
):
''' '''
return self.service.register(
service,
version_major,
version_minor,
nodename,
client,
)
def remote_unregister(self, clientid):
''' '''
return self.service.unregister(clientid)
def remote_nextRequest(self):
''' '''
return self.service.nextRequest()
def remote_fail(self, name, *args, **kwargs):
''' '''
return self.service.clientFail(name, *args, **kwargs)
def remote_sendResult(self, requestid, skid, result):
''' '''
return self.service.sendResult(requestid, skid, result)
|
gpl-2.0
| -4,159,788,212,689,429,500 | 29.253275 | 102 | 0.556221 | false |
Evensgn/MNIST-learning
|
mnist_svm.py
|
1
|
1201
|
import numpy as np
import matplotlib.pyplot as plt
GRAY_SCALE_RANGE = 255
import pickle
data_filename = 'data_deskewed.pkl'
print('Loading data from file \'' + data_filename + '\' ...')
with open(data_filename, 'rb') as f:
train_labels = pickle.load(f)
train_images = pickle.load(f)
test_labels = pickle.load(f)
test_images = pickle.load(f)
num_pixel = pickle.load(f)
print('Data loading complete.')
train_images = np.array(train_images)
train_images.resize(train_images.size // num_pixel, num_pixel)
test_images = np.array(test_images)
test_images.resize(test_images.size // num_pixel, num_pixel)
test_labels = np.array(test_labels)
train_labels = np.array(train_labels)
## normalization
train_images = train_images / GRAY_SCALE_RANGE
test_images = test_images / GRAY_SCALE_RANGE
from sklearn import svm, metrics
# clf = svm.SVC(gamma = 0.001)
clf = svm.SVC(kernel = 'linear')
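# Only the first 1000 training samples are used below, presumably to keep
# SVM training time manageable on the full MNIST set.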
clf.fit(train_images[:1000], train_labels[:1000])
prediction = clf.predict(test_images)
print("Classification report for classifier %s:\n%s\n"
% (clf, metrics.classification_report(test_labels, prediction)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(test_labels, prediction))
|
mit
| 2,050,780,318,703,645,700 | 29.05 | 82 | 0.720233 | false |
Parnassos/Eupheme
|
eupheme/config.py
|
1
|
3763
|
"""Configuration module.
This module deals with the config object used by the Application class to
make some parts of the application's behaviour configurable.
"""
import yaml
import eupheme.mime as mime
class Config:
"""Config class.
A slightly neater way of accessing the yaml config file instead of
dictionaries.
"""
defaults = {
'charsets': {'utf-8', 'ascii'},
'methods': {'GET', 'POST', 'PUT'},
'default':
{
'mimetype': 'text/html',
'charset': 'utf-8'
}
}
def __init__(self, data):
"""Create a new Config instance."""
for key in data:
if isinstance(data[key], dict):
setattr(self, key, Config(data[key]))
else:
# Anything that isn't a dict we probably want as
# a final property.
setattr(self, key, data[key])
def __setattr__(self, name, value):
self.__dict__[name] = value
def __repr__(self):
return repr(self.__dict__)
def load(path=None):
"""Create a new config instance.
Create a new config instance that loads the contents of the
provided path and tries to parse it as yaml.
Returns a Config object.
"""
# If no path was provided just create an empty dict
if path is None:
data = {}
else:
yml = open(path, 'r')
data = yaml.safe_load(yml)
# Make sure defaults are set valid and then turn them into
# objects usable by our code.
data = _check_defaults(data)
_verify(data)
return _objectify(data)
def _verify(data):
"""Verify the contents of the config and points out any errors."""
# Make sure the lengths of all the keys are correct
assert len(data['charsets']) > 0, \
'Must support at least one charset'
assert len(data['methods']) > 0, \
'Must support at least one method'
# Make sure the default charset is in the list of supported charsets
assert data['default']['charset'] in data['charsets'], \
'Default charset has to be in the list of supported charsets'
def _objectify(data):
"""Transform the data into a proper Config object.
Takes a dict with information and returns a proper Config object.
"""
conf = Config(data)
# Convert the charsets into CharacterSet objects
conf.charsets = set(
mime.CharacterSet.parse(charset) for charset in conf.charsets
)
# Make sure methods is a set rather than a list
conf.methods = set(conf.methods)
conf.default.charset = mime.CharacterSet.parse(conf.default.charset)
conf.default.mimetype = mime.MimeType.parse(conf.default.mimetype)
return conf
def _check_defaults(data):
"""Make sure the default values are available in the dictionary.
Makes sure all the default values are filled, also makes sure
missing keys are added to the dict.
Returns a dict with default data filled where necessary.
"""
    # Make sure data is not None when we reach the actual checks
if data is None:
data = {}
# If there's no default key at all, add it
if 'default' not in data:
data['default'] = Config.defaults['default']
config = data['default']
    defaults = Config.defaults['default']
# Set the default values if any of the keys are missing
if 'mimetype' not in config:
config['mimetype'] = defaults['mimetype']
if 'charset' not in config:
config['charset'] = defaults['charset']
config = data
defaults = Config.defaults
if 'charsets' not in data:
config['charsets'] = defaults['charsets']
if 'methods' not in data:
config['methods'] = defaults['methods']
return config
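# Illustrative usage (sketch; the file name and its contents are hypothetical):
#   conf = load('eupheme.yaml')        # YAML containing e.g. "methods: [GET]"
#   conf.methods                       # -> {'GET'}
#   conf.default.charset               # -> a mime.CharacterSet instance
#   conf.default.mimetype              # -> a mime.MimeType instance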
|
bsd-3-clause
| -2,609,194,535,855,000,000 | 24.425676 | 73 | 0.62211 | false |
catalpainternational/OIPA
|
OIPA/geodata/migrations/0001_initial.py
|
1
|
10087
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('iati', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Adm1Region',
fields=[
('adm1_code', models.CharField(max_length=10, serialize=False, primary_key=True)),
('OBJECTID_1', models.IntegerField(null=True, blank=True)),
('diss_me', models.IntegerField(null=True, blank=True)),
('adm1_cod_1', models.CharField(max_length=20, null=True, blank=True)),
('iso_3166_2', models.CharField(max_length=2, null=True, blank=True)),
('wikipedia', models.CharField(max_length=150, null=True, blank=True)),
('adm0_sr', models.IntegerField(null=True, blank=True)),
('name', models.CharField(max_length=100, null=True, blank=True)),
('name_alt', models.CharField(max_length=200, null=True, blank=True)),
('name_local', models.CharField(max_length=100, null=True, blank=True)),
('type', models.CharField(max_length=100, null=True, blank=True)),
('type_en', models.CharField(max_length=100, null=True, blank=True)),
('code_local', models.CharField(max_length=100, null=True, blank=True)),
('code_hasc', models.CharField(max_length=100, null=True, blank=True)),
('note', models.TextField(null=True, blank=True)),
('hasc_maybe', models.CharField(max_length=100, null=True, blank=True)),
('region', models.CharField(max_length=100, null=True, blank=True)),
('region_cod', models.CharField(max_length=100, null=True, blank=True)),
('provnum_ne', models.IntegerField(null=True, blank=True)),
('gadm_level', models.IntegerField(null=True, blank=True)),
('check_me', models.IntegerField(null=True, blank=True)),
('scalerank', models.IntegerField(null=True, blank=True)),
('datarank', models.IntegerField(null=True, blank=True)),
('abbrev', models.CharField(max_length=100, null=True, blank=True)),
('postal', models.CharField(max_length=100, null=True, blank=True)),
('area_sqkm', models.CharField(max_length=100, null=True, blank=True)),
('sameascity', models.IntegerField(null=True, blank=True)),
('labelrank', models.IntegerField(null=True, blank=True)),
('featurecla', models.CharField(max_length=100, null=True, blank=True)),
('name_len', models.IntegerField(null=True, blank=True)),
('mapcolor9', models.IntegerField(null=True, blank=True)),
('mapcolor13', models.IntegerField(null=True, blank=True)),
('fips', models.CharField(max_length=100, null=True, blank=True)),
('fips_alt', models.CharField(max_length=100, null=True, blank=True)),
('woe_id', models.IntegerField(null=True, blank=True)),
('woe_label', models.CharField(max_length=100, null=True, blank=True)),
('woe_name', models.CharField(max_length=100, null=True, blank=True)),
('center_location', django.contrib.gis.db.models.fields.PointField(srid=4326, blank=True)),
('sov_a3', models.CharField(max_length=3, null=True, blank=True)),
('adm0_a3', models.CharField(max_length=3, null=True, blank=True)),
('adm0_label', models.IntegerField(null=True, blank=True)),
('admin', models.CharField(max_length=100, null=True, blank=True)),
('geonunit', models.CharField(max_length=100, null=True, blank=True)),
('gu_a3', models.CharField(max_length=3, null=True, blank=True)),
('gn_id', models.IntegerField(null=True, blank=True)),
('gn_name', models.CharField(max_length=100, null=True, blank=True)),
('gns_id', models.IntegerField(null=True, blank=True)),
('gns_name', models.CharField(max_length=100, null=True, blank=True)),
('gn_level', models.IntegerField(null=True, blank=True)),
('gn_region', models.CharField(max_length=100, null=True, blank=True)),
('gn_a1_code', models.CharField(max_length=100, null=True, blank=True)),
('region_sub', models.CharField(max_length=100, null=True, blank=True)),
('sub_code', models.CharField(max_length=100, null=True, blank=True)),
('gns_level', models.IntegerField(null=True, blank=True)),
('gns_lang', models.CharField(max_length=100, null=True, blank=True)),
('gns_adm1', models.CharField(max_length=100, null=True, blank=True)),
('gns_region', models.CharField(max_length=100, null=True, blank=True)),
('polygon', models.TextField(null=True, blank=True)),
('geometry_type', models.CharField(max_length=50, null=True, blank=True)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
],
options={
'verbose_name_plural': 'admin1 regions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Adm2Region',
fields=[
('code', models.CharField(max_length=10, serialize=False, primary_key=True)),
('name', models.CharField(max_length=100, null=True, blank=True)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
('region', models.ForeignKey(to='geodata.Adm1Region')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('geoname_id', models.IntegerField(null=True, blank=True)),
('name', models.CharField(max_length=200)),
('location', django.contrib.gis.db.models.fields.PointField(srid=4326, blank=True)),
('ascii_name', models.CharField(max_length=200, null=True, blank=True)),
('alt_name', models.CharField(max_length=200, null=True, blank=True)),
('namepar', models.CharField(max_length=200, null=True, blank=True)),
],
options={
'verbose_name_plural': 'cities',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Country',
fields=[
('code', models.CharField(max_length=2, serialize=False, primary_key=True)),
('numerical_code_un', models.IntegerField(null=True, blank=True)),
('name', models.CharField(max_length=100, db_index=True)),
('alt_name', models.CharField(max_length=100, null=True, blank=True)),
('language', models.CharField(max_length=2, null=True)),
('dac_country_code', models.IntegerField(null=True, blank=True)),
('iso3', models.CharField(max_length=3, null=True, blank=True)),
('alpha3', models.CharField(max_length=3, null=True, blank=True)),
('fips10', models.CharField(max_length=2, null=True, blank=True)),
('center_longlat', django.contrib.gis.db.models.fields.PointField(srid=4326, blank=True)),
('polygon', models.TextField(null=True, blank=True)),
('data_source', models.CharField(max_length=20, null=True, blank=True)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
('capital_city', models.ForeignKey(related_name='capital_city', blank=True, to='geodata.City', null=True)),
],
options={
'verbose_name_plural': 'countries',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Region',
fields=[
('code', models.SmallIntegerField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=80)),
('center_longlat', django.contrib.gis.db.models.fields.PointField(srid=4326, blank=True)),
('parental_region', models.ForeignKey(blank=True, to='geodata.Region', null=True)),
('region_vocabulary', models.ForeignKey(default=1, to='iati.RegionVocabulary')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='country',
name='region',
field=models.ForeignKey(blank=True, to='geodata.Region', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='country',
name='un_region',
field=models.ForeignKey(related_name='un_region', blank=True, to='geodata.Region', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='country',
name='unesco_region',
field=models.ForeignKey(related_name='unesco_region', blank=True, to='geodata.Region', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='city',
name='country',
field=models.ForeignKey(blank=True, to='geodata.Country', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='adm1region',
name='country',
field=models.ForeignKey(blank=True, to='geodata.Country', null=True),
preserve_default=True,
),
]
|
agpl-3.0
| 8,997,692,114,947,381,000 | 55.668539 | 123 | 0.564885 | false |
OCA/stock-logistics-barcode
|
stock_scanner/models/scanner_scenario_transition.py
|
1
|
3439
|
# © 2011 Sylvain Garancher <sylvain.garancher@syleam.fr>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import sys
import traceback
from odoo import models, api, fields, exceptions
from odoo import _
import logging
logger = logging.getLogger('stock_scanner')
class ScannerScenarioTransition(models.Model):
_name = 'scanner.scenario.transition'
_description = 'Transition for scenario'
_order = 'sequence'
@api.model
def _transition_type_get(self):
return [
('scanner', 'Scanner'),
('keyboard', 'Keyboard'),
]
# ===========================================================================
# COLUMNS
# ===========================================================================
name = fields.Char(
string='Name',
required=True,
help='Name of the transition.')
sequence = fields.Integer(
string='Sequence',
default=0,
required=False,
help='Sequence order.')
from_id = fields.Many2one(
comodel_name='scanner.scenario.step',
string='From',
required=True,
ondelete='cascade',
help='Step which launches this transition.')
to_id = fields.Many2one(
comodel_name='scanner.scenario.step',
string='To',
required=True,
ondelete='cascade',
help='Step which is reached by this transition.')
condition = fields.Char(
string='Condition',
required=True,
default='True',
help='The transition is followed only if this condition is evaluated '
'as True.')
transition_type = fields.Selection(
selection='_transition_type_get',
string='Transition Type',
default="keyboard",
help='Type of transition.')
tracer = fields.Char(
string='Tracer',
required=False,
default=False,
        help='Used to determine from which transition we arrived at the '
'destination step.')
scenario_id = fields.Many2one(
comodel_name='scanner.scenario',
string='Scenario',
required=False,
related="from_id.scenario_id",
store=True,
ondelete='cascade',
readonly=True)
@api.constrains('from_id', 'to_id')
def _check_scenario(self):
if self.from_id.scenario_id.id != self.to_id.scenario_id.id:
raise exceptions.UserError(
_('Error ! You can not create recursive scenarios.'),
)
return True
@api.constrains('condition')
def _check_condition_syntax(self):
"""
Syntax check the python condition of a transition
"""
for transition in self:
try:
compile(transition.condition, '<string>', 'eval')
except SyntaxError as exception:
logger.error(''.join(traceback.format_exception(
sys.exc_info()[0],
sys.exc_info()[1],
sys.exc_info()[2],
)))
raise exceptions.ValidationError(
_('Error in condition for transition "%s"'
' at line %d, offset %d:\n%s') % (
transition.name,
exception.lineno,
exception.offset,
exception.msg,
))
return True
|
agpl-3.0
| 3,440,874,921,048,147,000 | 31.130841 | 81 | 0.528505 | false |
rckclmbr/pyportify
|
pyportify/pkcs1/keys.py
|
1
|
5665
|
import fractions
from . import primitives
from . import exceptions
from .defaults import default_crypto_random
from .primes import get_prime, DEFAULT_ITERATION
class RsaPublicKey(object):
__slots__ = ('n', 'e', 'bit_size', 'byte_size')
def __init__(self, n, e):
self.n = n
self.e = e
self.bit_size = primitives.integer_bit_size(n)
self.byte_size = primitives.integer_byte_size(n)
def __repr__(self):
return '<RsaPublicKey n: %d e: %d bit_size: %d>' % \
(self.n, self.e, self.bit_size)
def rsavp1(self, s):
if not (0 <= s <= self.n-1):
raise exceptions.SignatureRepresentativeOutOfRange
return self.rsaep(s)
def rsaep(self, m):
if not (0 <= m <= self.n-1):
raise exceptions.MessageRepresentativeOutOfRange
return pow(m, self.e, self.n)
class RsaPrivateKey(object):
__slots__ = ('n', 'd', 'bit_size', 'byte_size')
def __init__(self, n, d):
self.n = n
self.d = d
self.bit_size = primitives.integer_bit_size(n)
self.byte_size = primitives.integer_byte_size(n)
def __repr__(self):
return '<RsaPrivateKey n: %d d: %d bit_size: %d>' % \
(self.n, self.d, self.bit_size)
def rsadp(self, c):
if not (0 <= c <= self.n-1):
raise exceptions.CiphertextRepresentativeOutOfRange
return pow(c, self.d, self.n)
def rsasp1(self, m):
if not (0 <= m <= self.n-1):
raise exceptions.MessageRepresentativeOutOfRange
return self.rsadp(m)
class MultiPrimeRsaPrivateKey(object):
__slots__ = ('primes', 'blind', 'blind_inv', 'n', 'e', 'exponents', 'crts',
'bit_size', 'byte_size')
def __init__(self, primes, e, blind=True, rnd=default_crypto_random):
self.primes = primes
self.n = primitives.product(*primes)
self.e = e
self.bit_size = primitives.integer_bit_size(self.n)
self.byte_size = primitives.integer_byte_size(self.n)
self.exponents = []
for prime in primes:
exponent, a, b = primitives.bezout(e, prime-1)
assert b == 1
if exponent < 0:
exponent += prime-1
self.exponents.append(exponent)
self.crts = [1]
R = primes[0]
for prime in primes[1:]:
crt, a, b = primitives.bezout(R, prime)
assert b == 1
R *= prime
self.crts.append(crt)
public = RsaPublicKey(self.n, self.e)
if blind:
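            # Classic RSA blinding: pick a random r with an inverse mod n and
            # store blind = r**e mod n and blind_inv = r**-1 mod n; rsadp()
            # multiplies the ciphertext by blind before the CRT exponentiation
            # and by blind_inv afterwards (commonly done to hinder timing
            # attacks).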
while True:
blind_factor = rnd.getrandbits(self.bit_size-1)
self.blind = public.rsaep(blind_factor)
u, v, gcd = primitives.bezout(blind_factor, self.n)
if gcd == 1:
self.blind_inv = u if u > 0 else u + self.n
assert (blind_factor * self.blind_inv) % self.n == 1
break
else:
self.blind = None
self.blind_inv = None
def __repr__(self):
return '<RsaPrivateKey n: %d primes: %s bit_size: %d>' % \
(self.n, self.primes, self.bit_size)
def rsadp(self, c):
if not (0 <= c <= self.n-1):
raise exceptions.CiphertextRepresentativeOutOfRange
R = 1
m = 0
if self.blind:
c = (c * self.blind) % self.n
contents = zip(self.primes, self.exponents, self.crts)
for prime, exponent, crt in contents:
m_i = primitives._pow(c, exponent, prime)
h = ((m_i - m) * crt) % prime
m += R * h
R *= prime
if self.blind_inv:
m = (m * self.blind_inv) % self.n
return m
def rsasp1(self, m):
if not (0 <= m <= self.n-1):
raise exceptions.MessageRepresentativeOutOfRange
return self.rsadp(m)
def generate_key_pair(size=512, number=2, rnd=default_crypto_random,
k=DEFAULT_ITERATION, primality_algorithm=None,
strict_size=True, e=0x10001):
'''Generates an RSA key pair.
size:
the bit size of the modulus, default to 512.
number:
the number of primes to use, default to 2.
rnd:
the random number generator to use, default to SystemRandom from the
random library.
k:
the number of iteration to use for the probabilistic primality
tests.
primality_algorithm:
the primality algorithm to use.
strict_size:
whether to use size as a lower bound or a strict goal.
e:
the public key exponent.
Returns the pair (public_key, private_key).
'''
primes = []
lbda = 1
bits = size // number + 1
n = 1
while len(primes) < number:
if number - len(primes) == 1:
bits = size - primitives.integer_bit_size(n) + 1
prime = get_prime(bits, rnd, k, algorithm=primality_algorithm)
if prime in primes:
continue
if e is not None and fractions.gcd(e, lbda) != 1:
continue
if (strict_size and number - len(primes) == 1 and
primitives.integer_bit_size(n*prime) != size):
continue
primes.append(prime)
n *= prime
lbda *= prime - 1
if e is None:
e = 0x10001
while e < lbda:
if fractions.gcd(e, lbda) == 1:
break
e += 2
assert 3 <= e <= n-1
public = RsaPublicKey(n, e)
private = MultiPrimeRsaPrivateKey(primes, e, blind=True, rnd=rnd)
return public, private
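# Illustrative round trip (sketch, not part of the original module):
#   pub, priv = generate_key_pair(size=512)
#   m = 0x1234                      # any message representative with 0 <= m < n
#   assert priv.rsadp(pub.rsaep(m)) == m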
|
apache-2.0
| -4,043,512,826,432,681,500 | 31.745665 | 79 | 0.538394 | false |
SimonSapin/tinycss
|
tinycss/decoding.py
|
2
|
9175
|
# coding: utf-8
"""
tinycss.decoding
----------------
Decoding stylesheets from bytes to Unicode.
http://www.w3.org/TR/CSS21/syndata.html#charset
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
import operator
import re
from binascii import unhexlify
__all__ = ['decode'] # Everything else is implementation detail
def decode(css_bytes, protocol_encoding=None,
linking_encoding=None, document_encoding=None):
"""
Determine the character encoding from the passed metadata and the
``@charset`` rule in the stylesheet (if any); and decode accordingly.
If no encoding information is available or decoding fails,
    decoding defaults to UTF-8 and then falls back on ISO-8859-1.
:param css_bytes:
a CSS stylesheet as a byte string
:param protocol_encoding:
The "charset" parameter of a "Content-Type" HTTP header (if any),
or similar metadata for other protocols.
:param linking_encoding:
``<link charset="">`` or other metadata from the linking mechanism
(if any)
:param document_encoding:
Encoding of the referring style sheet or document (if any)
:return:
        A tuple of a Unicode string, with any BOM removed, and the
encoding that was used.
"""
if protocol_encoding:
css_unicode = try_encoding(css_bytes, protocol_encoding)
if css_unicode is not None:
return css_unicode, protocol_encoding
for encoding, pattern in ENCODING_MAGIC_NUMBERS:
match = pattern(css_bytes)
if match:
has_at_charset = isinstance(encoding, tuple)
if has_at_charset:
extract, endianness = encoding
encoding = extract(match.group(1))
# Get an ASCII-only unicode value.
# This is the only thing that works on both Python 2 and 3
# for bytes.decode()
# Non-ASCII encoding names are invalid anyway,
# but make sure they stay invalid.
encoding = encoding.decode('ascii', 'replace')
encoding = encoding.replace('\ufffd', '?')
if encoding.replace('-', '').replace('_', '').lower() in [
'utf16', 'utf32']:
encoding += endianness
encoding = encoding.encode('ascii', 'replace').decode('ascii')
css_unicode = try_encoding(css_bytes, encoding)
if css_unicode and not (has_at_charset and not
css_unicode.startswith('@charset "')):
return css_unicode, encoding
break
for encoding in [linking_encoding, document_encoding]:
if encoding:
css_unicode = try_encoding(css_bytes, encoding)
if css_unicode is not None:
return css_unicode, encoding
css_unicode = try_encoding(css_bytes, 'UTF-8')
if css_unicode is not None:
return css_unicode, 'UTF-8'
return try_encoding(css_bytes, 'ISO-8859-1', fallback=False), 'ISO-8859-1'
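# Illustrative example (sketch, not part of the original module): with an
# explicit protocol encoding the bytes are decoded directly, e.g.
#   decode(b'a { color: red }', protocol_encoding='utf-8')
#   -> (u'a { color: red }', 'utf-8')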
def try_encoding(css_bytes, encoding, fallback=True):
if fallback:
try:
css_unicode = css_bytes.decode(encoding)
# LookupError means unknown encoding
except (UnicodeDecodeError, LookupError):
return None
else:
css_unicode = css_bytes.decode(encoding)
if css_unicode and css_unicode[0] == '\ufeff':
# Remove any Byte Order Mark
css_unicode = css_unicode[1:]
return css_unicode
def hex2re(hex_data):
return re.escape(unhexlify(hex_data.replace(' ', '').encode('ascii')))
class Slicer(object):
"""Slice()[start:stop:end] == slice(start, stop, end)"""
def __getitem__(self, slice_):
return operator.itemgetter(slice_)
Slice = Slicer()
# List of (bom_size, encoding, pattern)
# bom_size is in bytes and can be zero
# encoding is a string or (slice_, endianness) for "as specified"
# slice_ is a slice object: how to extract the specified encoding name from the matched bytes
ENCODING_MAGIC_NUMBERS = [
((Slice[:], ''), re.compile(
hex2re('EF BB BF 40 63 68 61 72 73 65 74 20 22') +
b'([^\x22]*?)' +
hex2re('22 3B')).match),
('UTF-8', re.compile(
hex2re('EF BB BF')).match),
((Slice[:], ''), re.compile(
hex2re('40 63 68 61 72 73 65 74 20 22') +
b'([^\x22]*?)' +
hex2re('22 3B')).match),
((Slice[1::2], '-BE'), re.compile(
hex2re('FE FF 00 40 00 63 00 68 00 61 00 72 00 73 00 65 00'
'74 00 20 00 22') +
b'((\x00[^\x22])*?)' +
hex2re('00 22 00 3B')).match),
((Slice[1::2], '-BE'), re.compile(
hex2re('00 40 00 63 00 68 00 61 00 72 00 73 00 65 00 74 00'
'20 00 22') +
b'((\x00[^\x22])*?)' +
hex2re('00 22 00 3B')).match),
((Slice[::2], '-LE'), re.compile(
hex2re('FF FE 40 00 63 00 68 00 61 00 72 00 73 00 65 00 74'
'00 20 00 22 00') +
b'(([^\x22]\x00)*?)' +
hex2re('22 00 3B 00')).match),
((Slice[::2], '-LE'), re.compile(
hex2re('40 00 63 00 68 00 61 00 72 00 73 00 65 00 74 00 20'
'00 22 00') +
b'(([^\x22]\x00)*?)' +
hex2re('22 00 3B 00')).match),
((Slice[3::4], '-BE'), re.compile(
hex2re('00 00 FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00'
'00 00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00'
'00 74 00 00 00 20 00 00 00 22') +
b'((\x00\x00\x00[^\x22])*?)' +
hex2re('00 00 00 22 00 00 00 3B')).match),
((Slice[3::4], '-BE'), re.compile(
hex2re('00 00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00'
'00 00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00'
'00 20 00 00 00 22') +
b'((\x00\x00\x00[^\x22])*?)' +
hex2re('00 00 00 22 00 00 00 3B')).match),
# Python does not support 2143 or 3412 endianness, AFAIK.
# I guess we could fix it up ourselves but meh. Patches welcome.
# ((Slice[2::4], '-2143'), re.compile(
# hex2re('00 00 FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00'
# '00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00'
# '74 00 00 00 20 00 00 00 22 00') +
# b'((\x00\x00[^\x22]\x00)*?)' +
# hex2re('00 00 22 00 00 00 3B 00')).match),
# ((Slice[2::4], '-2143'), re.compile(
# hex2re('00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00'
# '00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00'
# '20 00 00 00 22 00') +
# b'((\x00\x00[^\x22]\x00)*?)' +
# hex2re('00 00 22 00 00 00 3B 00')).match),
# ((Slice[1::4], '-3412'), re.compile(
# hex2re('FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00 00 00'
# '61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74'
# '00 00 00 20 00 00 00 22 00 00') +
# b'((\x00[^\x22]\x00\x00)*?)' +
# hex2re('00 22 00 00 00 3B 00 00')).match),
# ((Slice[1::4], '-3412'), re.compile(
# hex2re('00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00'
# '72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20'
# '00 00 00 22 00 00') +
# b'((\x00[^\x22]\x00\x00)*?)' +
# hex2re('00 22 00 00 00 3B 00 00')).match),
((Slice[::4], '-LE'), re.compile(
hex2re('FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00 00 61'
'00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74 00'
'00 00 20 00 00 00 22 00 00 00') +
b'(([^\x22]\x00\x00\x00)*?)' +
hex2re('22 00 00 00 3B 00 00 00')).match),
((Slice[::4], '-LE'), re.compile(
hex2re('40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00 72'
'00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20 00'
'00 00 22 00 00 00') +
b'(([^\x22]\x00\x00\x00)*?)' +
hex2re('22 00 00 00 3B 00 00 00')).match),
('UTF-32-BE', re.compile(
hex2re('00 00 FE FF')).match),
('UTF-32-LE', re.compile(
hex2re('FF FE 00 00')).match),
# ('UTF-32-2143', re.compile(
# hex2re('00 00 FF FE')).match),
# ('UTF-32-3412', re.compile(
# hex2re('FE FF 00 00')).match),
('UTF-16-BE', re.compile(
hex2re('FE FF')).match),
('UTF-16-LE', re.compile(
hex2re('FF FE')).match),
    # Some of these are supported by Python, but I didn’t bother.
# You know the story with patches ...
# # as specified, transcoded from EBCDIC to ASCII
# ('as_specified-EBCDIC', re.compile(
# hex2re('7C 83 88 81 99 A2 85 A3 40 7F')
# + b'([^\x7F]*?)'
# + hex2re('7F 5E')).match),
# # as specified, transcoded from IBM1026 to ASCII
# ('as_specified-IBM1026', re.compile(
# hex2re('AE 83 88 81 99 A2 85 A3 40 FC')
# + b'([^\xFC]*?)'
# + hex2re('FC 5E')).match),
# # as specified, transcoded from GSM 03.38 to ASCII
# ('as_specified-GSM_03.38', re.compile(
# hex2re('00 63 68 61 72 73 65 74 20 22')
# + b'([^\x22]*?)'
# + hex2re('22 3B')).match),
]
|
bsd-3-clause
| 8,937,828,354,006,947,000 | 35.256917 | 78 | 0.539736 | false |
kasmith/cbmm-project-christmas
|
SimulationModels/get_sim_data_v2.py
|
1
|
5112
|
from __future__ import division, print_function
from physicsTable import *
from physicsTable.constants import *
from physicsTable.models import PointSimulation
import os, json, glob
KAP_V_NORM = 20 # simulation for conditions with motion (small noise in velocity)
KAP_V_NOMOT = 1e-10 # simulation for no motion condition (very high noise, ie almost uniform distribution)
KAP_B = 25
KAP_M = 50000
P_ERR = 25
TIMEUP = 50.
N_SIMS = 5000
CPUS = 1
WRITE_JSON = True
# Regex used to list trials that are going to be simulated
TRIAL_REGEX_CONT = '*_*_*.json' # containment trials
TRIAL_REGEX_REG = 'regular_*.json' # regular trials
def run_single_sim(table, n_sims, kap_v, kap_b, kap_m, p_err, timeup, cpus):
ps = PointSimulation(table, kap_v, kap_b, kap_m, p_err, nsims=n_sims, cpus=cpus, maxtime=timeup)
ps.runSimulation()
outcomes = ps.getOutcomes()
bounces = ps.getBounces()
times = ps.getTimes()
p_green = outcomes[GREENGOAL]/n_sims
p_red = outcomes[REDGOAL]/n_sims
p_timeup = 1 - p_green - p_red
avg_bounces = sum(bounces) / len(bounces)
avg_time = sum(times) / len(times)
return p_green, p_red, p_timeup, avg_bounces, avg_time
def get_sim_data(n_sims=N_SIMS, kap_v_norm=KAP_V_NORM, kap_v_nomot = KAP_V_NOMOT, kap_b=KAP_B, kap_m=KAP_M, p_err=P_ERR, timeup = TIMEUP, cpus=CPUS):
goal_dict = get_goal_dict()
with open('sim_data_full.csv', 'w') as csv_out:
csv_out.write('Trial,IsContained,Direction,Goal,PGreen,PRed,PTimeUp,AvgBounces,AvgTime\n')
json_dict = {}
os_path_c = os.path.join('..', 'psiturk-rg-cont', 'templates', 'trials', TRIAL_REGEX_CONT)
for f in glob.iglob(os_path_c):
trial_name = f.split(os.path.sep)[-1][:-5]
print('Running simulations for: ' + trial_name)
tr = loadFromJSON(f)
json_dict[trial_name] = {}
for dir in ['forward','reverse','none']:
tab = tr.makeTable()
if dir == 'reverse':
tab.balls.setvel(map(lambda x: -x, tab.balls.getvel()))
if dir == 'none':
kap_v = kap_v_nomot
else:
tab.step(.5)
kap_v = kap_v_norm
p_green, p_red, p_timeup, avg_bounces, avg_time = run_single_sim(tab, n_sims, kap_v, kap_b, kap_m, p_err, timeup, cpus)
goal = goal_dict[trial_name]
csv_line = ','.join(
(trial_name, 'contained',dir, goal, str(p_green), str(p_red), str(p_timeup), str(avg_bounces), str(avg_time))) + '\n'
csv_out.write(csv_line)
if WRITE_JSON:
json_dict[trial_name][dir] = {'goal': goal, 'p_green': p_green, 'p_red': p_red, 'avg_bounces': avg_bounces, 'avg_time': avg_time}
os_path_r = os.path.join('..', 'psiturk-rg-cont', 'templates', 'trials', TRIAL_REGEX_REG)
for f in glob.iglob(os_path_r):
trial_name = f.split(os.path.sep)[-1][:-5]
print('Running simulations for: ' + trial_name)
tr = loadFromJSON(f)
json_dict[trial_name] = {}
for dir in ['forward', 'none']:
tab = tr.makeTable()
if dir == 'none':
kap_v = kap_v_nomot
else:
tab.step(.5)
kap_v = kap_v_norm
p_green, p_red, p_timeup, avg_bounces, avg_time = run_single_sim(tab, n_sims, kap_v, kap_b, kap_m,
p_err, timeup, cpus)
goal = goal_dict[trial_name]
csv_line = ','.join(
(trial_name, 'regular', dir, goal, str(p_green), str(p_red), str(p_timeup), str(avg_bounces),
str(avg_time))) + '\n'
csv_out.write(csv_line)
if WRITE_JSON:
json_dict[trial_name][dir] = {'goal': goal, 'p_green': p_green, 'p_red': p_red,
'avg_bounces': avg_bounces, 'avg_time': avg_time}
if WRITE_JSON:
with open('sim_data_full.json', 'w') as json_out:
json.dump(json_dict, json_out)
def loadFromJSON(jsonfl):
with open(jsonfl,'rU') as jfl:
j = json.load(jfl)
tr = RedGreenTrial(j['Name'], j['Dims'], j['ClosedEnds'])
b = j['Ball']
tr.addBall(b[0],b[1],b[2],b[3],b[4])
for w in j['Walls']:
tr.addWall(w[0],w[1],w[2],w[3])
for o in j['Occluders']:
tr.addOcc(o[0],o[1],o[2])
for g in j['Goals']:
tr.addGoal(g[0],g[1],g[2],g[3])
return tr
def get_goal_dict():
goal_dict = {}
data_path = os.path.join('..', 'ContainmentAnalysis', 'rawdata.csv')
with open(data_path, 'r') as f:
for line in f:
line_split = line.split(',')
trial_name = line_split[2]
if trial_name not in goal_dict:
goal_dict[trial_name] = line_split[-4]
return goal_dict
if __name__ == '__main__':
get_sim_data()
|
mit
| -7,143,892,807,414,389,000 | 38.022901 | 149 | 0.530516 | false |
rmrector/script.artwork.beef
|
lib/advancedsettings.py
|
1
|
4861
|
import xbmc
import xbmcgui
import xbmcvfs
import xml.etree.ElementTree as ET
from contextlib import closing
from lib.libs.pykodi import log, localize as L
FILENAME = 'special://userdata/advancedsettings.xml'
FILENAME_BAK = FILENAME + '.beef.bak'
ROOT_TAG = 'advancedsettings'
VIDEO_TAG = 'videolibrary'
MUSIC_TAG = 'musiclibrary'
ARTTYPE_TAG = 'arttype'
# 0: mediatype tag name, 1: library tag name, 2: Kodi's hardcoded art types to exclude from AS.xml
mediatype_map = {'tvshow': ('tvshowextraart', VIDEO_TAG, ('poster', 'banner', 'fanart')),
'season': ('tvshowseasonextraart', VIDEO_TAG, ('poster', 'banner', 'fanart')),
'episode': ('episodeextraart', VIDEO_TAG, ('thumb',)),
'movie': ('movieextraart', VIDEO_TAG, ('poster', 'fanart')),
'set': ('moviesetextraart', VIDEO_TAG, ('poster', 'fanart')),
'musicvideo': ('musicvideoextraart', VIDEO_TAG, ('poster', 'fanart')),
'artist': ('artistextraart', MUSIC_TAG, ('thumb', 'fanart')),
'album': ('albumextraart', MUSIC_TAG, ('thumb',))}
unsupported_types = ('song',)
MALFORMED = 32654
BACKUP_SUCCESSFUL = 32655
BACKUP_UNSUCCESSFUL = 32656
RESTORE_SUCCESSFUL = 32657
RESTORE_UNSUCCESSFUL = 32658
RESTART_KODI = 32659
def save_arttypes(arttype_map):
root = read_xml()
if root is None:
xbmcgui.Dialog().notification("Artwork Beef", L(MALFORMED), xbmcgui.NOTIFICATION_WARNING)
return False
set_arttypes(root, arttype_map)
if save_backup():
save_xml(root)
xbmcgui.Dialog().ok("Artwork Beef", L(RESTART_KODI))
def set_arttypes(root, arttype_map):
for key, artlist in arttype_map.items():
if key not in mediatype_map:
if key not in unsupported_types:
log("Can't set arttypes for '{0}' in advancedsettings.xml".format(key), xbmc.LOGNOTICE)
continue
typemap = mediatype_map[key]
library_elem = root.find(typemap[1])
if library_elem is None:
library_elem = ET.SubElement(root, typemap[1])
mediatype_elem = library_elem.find(typemap[0])
if artlist:
if mediatype_elem is None:
mediatype_elem = ET.SubElement(library_elem, typemap[0])
else:
mediatype_elem.clear()
for arttype in artlist:
if arttype in typemap[2]:
continue
arttype_elem = ET.SubElement(mediatype_elem, ARTTYPE_TAG)
arttype_elem.text = arttype
elif mediatype_elem is not None:
library_elem.remove(mediatype_elem)
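# For example (sketch, not part of the original module), passing
# arttype_map = {'movie': ['poster', 'fanart', 'clearlogo']} to save_arttypes()
# would produce roughly:
#   <advancedsettings>
#     <videolibrary>
#       <movieextraart>
#         <arttype>clearlogo</arttype>
#       </movieextraart>
#     </videolibrary>
#   </advancedsettings>
# ('poster' and 'fanart' are skipped because Kodi already handles them natively).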
def read_xml():
if not xbmcvfs.exists(FILENAME):
return ET.Element(ROOT_TAG)
parser = ET.XMLParser(target=CommentedTreeBuilder())
with closing(xbmcvfs.File(FILENAME)) as as_xml:
try:
return ET.parse(as_xml, parser).getroot()
except ET.ParseError:
log("Can't parse advancedsettings.xml", xbmc.LOGWARNING)
def save_xml(advancedsettings):
indent(advancedsettings)
with closing(xbmcvfs.File(FILENAME, 'w')) as as_xml:
ET.ElementTree(advancedsettings).write(as_xml, 'utf-8', True)
def save_backup():
if xbmcvfs.exists(FILENAME):
xbmcvfs.copy(FILENAME, FILENAME_BAK)
result = xbmcvfs.exists(FILENAME_BAK)
if result:
xbmcgui.Dialog().notification("Artwork Beef", L(BACKUP_SUCCESSFUL))
else:
xbmcgui.Dialog().notification("Artwork Beef", L(BACKUP_UNSUCCESSFUL), xbmc.LOGWARNING)
return result
log("advancedsettings.xml doesn't exist, can't save backup", xbmc.LOGNOTICE)
return True
def restore_backup():
if xbmcvfs.exists(FILENAME_BAK):
xbmcvfs.copy(FILENAME_BAK, FILENAME)
xbmcvfs.delete(FILENAME_BAK)
result = xbmcvfs.exists(FILENAME)
if result:
xbmcgui.Dialog().notification("Artwork Beef", L(RESTORE_SUCCESSFUL))
else:
xbmcgui.Dialog().notification("Artwork Beef", L(RESTORE_UNSUCCESSFUL), xbmc.LOGWARNING)
return result
log("advancedsettings.xml.beef.bak doesn't exist, can't restore backup", xbmc.LOGWARNING)
return False
def has_backup():
return xbmcvfs.exists(FILENAME_BAK)
def indent(element, level=0):
i = "\n" + level*"\t"
if len(element):
if not element.text or not element.text.strip():
element.text = i + "\t"
if not element.tail or not element.tail.strip():
element.tail = i
for element in element:
indent(element, level + 1)
if not element.tail or not element.tail.strip():
element.tail = i
else:
if level and (not element.tail or not element.tail.strip()):
element.tail = i
class CommentedTreeBuilder(ET.TreeBuilder):
def comment(self, data):
self.start(ET.Comment, {})
self.data(data)
self.end(ET.Comment)
|
mit
| 9,046,435,007,444,909,000 | 35.276119 | 103 | 0.637112 | false |
haf/puppet-dak
|
daklib/config.py
|
1
|
4474
|
#!/usr/bin/env python
"""
Config access class
@contact: Debian FTPMaster <ftpmaster@debian.org>
@copyright: 2008 Mark Hymers <mhy@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
# <NCommander> mhy, how about "Now with 20% more monty python references"
################################################################################
import os
import apt_pkg
import socket
################################################################################
default_config = "/etc/dak/dak.conf" #: default dak config, defines host properties
# suppress some deprecation warnings in squeeze related to apt_pkg
# module
import warnings
warnings.filterwarnings('ignore', ".*apt_pkg.* is deprecated.*", DeprecationWarning)
################################################################################
def which_conf_file():
return os.getenv("DAK_CONFIG", default_config)
class Config(object):
"""
A Config object is a singleton containing
information about the DAK configuration
"""
__shared_state = {}
def __init__(self, *args, **kwargs):
self.__dict__ = self.__shared_state
if not getattr(self, 'initialised', False):
self.initialised = True
self._readconf()
self._setup_routines()
def _readconf(self):
apt_pkg.init()
self.Cnf = apt_pkg.Configuration()
apt_pkg.read_config_file_isc(self.Cnf, which_conf_file())
# Check whether our dak.conf was the real one or
# just a pointer to our main one
res = socket.gethostbyaddr(socket.gethostname())
conffile = self.Cnf.get("Config::" + res[0] + "::DakConfig")
if conffile:
apt_pkg.read_config_file_isc(self.Cnf, conffile)
# Rebind some functions
# TODO: Clean this up
self.get = self.Cnf.get
self.subtree = self.Cnf.subtree
self.value_list = self.Cnf.value_list
self.find = self.Cnf.find
self.find_b = self.Cnf.find_b
self.find_i = self.Cnf.find_i
def has_key(self, name):
return name in self.Cnf
def __contains__(self, name):
return name in self.Cnf
def __getitem__(self, name):
return self.Cnf[name]
def __setitem__(self, name, value):
self.Cnf[name] = value
@staticmethod
def get_db_value(name, default=None, rettype=None):
from daklib.dbconn import DBConfig, DBConn, NoResultFound
try:
res = DBConn().session().query(DBConfig).filter(DBConfig.name == name).one()
except NoResultFound:
return default
if rettype:
return rettype(res.value)
else:
return res.value
def _setup_routines(self):
"""
This routine is the canonical list of which fields need to exist in
the config table. If your dak instance is to work, we suggest reading it
Of course, what the values do is another matter
"""
for field in [('db_revision', None, int),
('defaultsuitename', 'unstable', str),
('exportpath', '', str)
]:
setattr(self, 'get_%s' % field[0], lambda s=None, x=field[0], y=field[1], z=field[2]: self.get_db_value(x, y, z))
setattr(Config, '%s' % field[0], property(fget=getattr(self, 'get_%s' % field[0])))
def get_defaultsuite(self):
from daklib.dbconn import get_suite
suitename = self.defaultsuitename
if not suitename:
return None
else:
return get_suite(suitename)
defaultsuite = property(get_defaultsuite)
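# Illustrative usage sketch (not part of dak): Config uses the Borg pattern
# (shared __dict__), so every instance sees the same parsed configuration.
# The key names and the default path below are examples only.
def _example_config_usage():
    cfg = Config()
    if "Dir::Root" in cfg:          # __contains__ checks the parsed apt_pkg config
        return cfg["Dir::Root"]     # __getitem__ returns the raw value
    # 'find' is rebound from apt_pkg.Configuration in _readconf() and takes a default
    return cfg.find("Dir::Root", "/srv/ftp")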
|
gpl-2.0
| 216,816,975,558,764,000 | 31.897059 | 125 | 0.583371 | false |
dr-jpk/saltefficiency
|
weekly/weekly_summary_plots.py
|
1
|
8536
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 10:06:20 2015
@author: jpk
ToDo: automate the subsystems check. A query that checks all the subsystems in
case things change in the future should prevent issues with the pie chart
colours
"""
import sys
import os
import pandas as pd
import pandas.io.sql as psql
import MySQLdb
import matplotlib.pyplot as pl
import report_queries as rq
import numpy as np
import matplotlib.dates as mdates
def priority_breakdown_pie_chart(x, ds, dirname='./logs/'):
'''
make a pie chart from the dataframe
'''
temp = list(x['Priority'])
no_blocks = map(int, list(x['No. Blocks']))
labels = ['P'+str(temp[i])+' - ' + str(no_blocks[i]) for i in range(0,len(temp))]
values = list(x['Tsec'])
# set colours for the priorities
colours = ['b','c','g','m','r']
fig = pl.figure(facecolor='w', figsize=[5, 5])
ax = fig.add_subplot(111)
ax.set_aspect=1
pie_wedge_collection = ax.pie(values,
colors=colours,
pctdistance=0.8,
radius = 0.95,
autopct='%1.1f%%',
textprops = {'fontsize':10,
'color':'w'},
wedgeprops = {'edgecolor':'white'})
ax.legend(labels=labels, frameon=False, loc=(-0.15,0.7), fontsize=8)
title_txt = 'Weekly Priority Breakdown - ' + str(int(x['No. Blocks'].sum())) + ' Blocks Total' + '\n {}'.format(ds)
ax.set_title(title_txt, fontsize=12)
filename = dirname+'priority_breakdown_pie_chart_' +'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(filename, dpi=100)
# pl.show()
def weekly_total_time_breakdown_pie_chart(x, ds, dirname='./logs/'):
labels = ['Science - {}'.format(x['ScienceTime'][0]),
'Engineering - {}'.format(x['EngineeringTime'][0]),
'Weather - {}'.format(x['TimeLostToWeather'][0]),
'Problems - {}'.format(x['TimeLostToProblems'][0])]
values = [int(x['Science']),
int(x['Engineering']),
int(x['Weather']),
int(x['Problems'])]
colours = ['b','c','g','r']
fig = pl.figure(facecolor='w', figsize=[5, 5])
ax = fig.add_subplot(111)
ax.set_aspect=1
pie_wedge_collection = ax.pie(values,
colors=colours,
pctdistance=0.8,
radius = 0.95,
autopct='%1.1f%%',
textprops = {'fontsize':10,
'color':'w'},
wedgeprops = {'edgecolor':'white'})
ax.legend(labels=labels, frameon=False, loc=(-0.15,0.8), fontsize=8)
title_txt = 'Weekly Time Breakdown - {} Total\n{}'.format(x['NightLength'][0], ds)
ax.set_title(title_txt, fontsize=12)
filename = 'weekly_total_time_breakdown_pie_chart_' + '-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(dirname+filename, dpi=100)
# pl.show()
def weekly_subsystem_breakdown_pie_chart(x, y, col_dict, ds, dirname='./logs/'):
subsystem = list(x['SaltSubsystem'])
time = list(x['TotalTime'])
labels = [subsystem[i] + ' - ' + time[i] for i in range(0,len(subsystem))]
values = list(x['Time'])
colours = [col_dict[i] for i in subsystem]
fig = pl.figure(facecolor='w', figsize=[5, 5])
ax = fig.add_subplot(111)
ax.set_aspect=1
pie_wedge_collection = ax.pie(values,
colors=colours,
pctdistance=0.8,
radius = 0.95,
autopct='%1.1f%%',
textprops = {'fontsize':10,
'color':'k'},
wedgeprops = {'edgecolor':'white'})
ax.legend(labels=labels, frameon=False, loc=(-0.15,0.65), fontsize=8)
title_txt = 'Weekly Problems Breakdown - {}\n{}'.format(y['TotalTime'][0], ds)
ax.set_title(title_txt, fontsize=12)
filename = 'weekly_subsystem_breakdown_pie_chart_'+'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(dirname+filename, dpi=100)
# pl.show()
def weekly_time_breakdown(x, ds, dirname='./logs/'):
'''
    produce a stacked bar chart of the time breakdown per day for the
    past week.
'''
fig = pl.figure(figsize=(10,4),facecolor='w')
ax = fig.add_subplot(111)
width = 0.55
ax.grid(which='major', axis='y')
# science time per day
s = ax.bar(x['Date'],
x['Science'],
width,
color = 'b',
edgecolor='w')
# engineering time per day
e = ax.bar(x['Date'],
x['Engineering'],
width,
bottom = x['Science'],
color = 'c',
edgecolor='w')
# weather time per day
w = ax.bar(x['Date'],
x['Weather'],
width,
bottom = x['Science'] + x['Engineering'],
color = 'g',
edgecolor='w')
# problem time per day
p = ax.bar(x['Date'],
x['Problems'],
width,
bottom = x['Science'] + x['Engineering'] + x['Weather'],
color = 'r',
edgecolor='w')
ax.set_ylabel('Hours', fontsize=11)
ax.set_xlabel('Date', fontsize=11)
fig.legend((s[0], e[0], w[0], p[0]),
('Science Time',
'Engineering Time',
'Time lost to Weather',
'Time lost to Problems'),
frameon=False,
fontsize=10,
loc=(0.0,0.70))
title_txt = 'Weekly Time Breakdown - {}'.format(ds)
ax.set_title(title_txt, fontsize=11)
ax.xaxis_date()
date_formatter = mdates.DateFormatter('%a \n %Y-%m-%d')
ax.xaxis.set_major_formatter(date_formatter)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(8)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(8)
fig.autofmt_xdate(rotation=0, ha = 'left')
fig.subplots_adjust(left=0.22, bottom=0.20, right=0.96, top=None,
wspace=None, hspace=None)
pl.autoscale()
filename = 'weekly_time_breakdown_'+'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(dirname+filename, dpi=100)
# pl.show()
if __name__=='__main__':
# set the colours for all the subsystems:
subsystems_list = ['BMS', 'DOME', 'TC', 'PMAS', 'SCAM', 'TCS', 'STRUCT',
'TPC', 'HRS', 'PFIS','Proposal', 'Operations',
'ELS', 'ESKOM']
cmap = pl.cm.jet
colour_map = cmap(np.linspace(0.0, 1.0, len(subsystems_list)))
col_dict = {}
for i in range(0, len(subsystems_list)):
col_dict[subsystems_list[i]] = colour_map[i]
# open mysql connection to the sdb
mysql_con = MySQLdb.connect(host='sdb.cape.saao.ac.za',
port=3306,user=os.environ['SDBUSER'],
passwd=os.environ['SDBPASS'], db='sdb')
obsdate = sys.argv[1]
date = '{}-{}-{}'.format(obsdate[0:4], obsdate[4:6], obsdate[6:8])
interval = sys.argv[2]
# use the connection to get the required data: _d
dr_d = rq.date_range(mysql_con, date, interval=interval)
wpb_d = rq.weekly_priority_breakdown(mysql_con, date, interval=interval)
wtb_d = rq.weekly_time_breakdown(mysql_con, date, interval=interval)
wttb_d = rq.weekly_total_time_breakdown(mysql_con, date, interval=interval)
wsb_d = rq.weekly_subsystem_breakdown(mysql_con, date, interval=interval)
wsbt_d = rq.weekly_subsystem_breakdown_total(mysql_con, date, interval=interval)
wtb_d = rq.weekly_time_breakdown(mysql_con, date, interval=interval)
date_string = '{} - {}'.format(dr_d['StartDate'][0], dr_d['EndDate'][0])
# testing the pie_chart method
priority_breakdown_pie_chart(wpb_d, date_string)
weekly_total_time_breakdown_pie_chart(wttb_d, date_string)
weekly_subsystem_breakdown_pie_chart(wsb_d, wsbt_d, col_dict, date_string)
weekly_time_breakdown(wtb_d, date_string)
mysql_con.close()
|
bsd-3-clause
| -7,651,931,648,000,412,000 | 34.566667 | 137 | 0.526828 | false |
googlefonts/ufo2ft
|
Lib/ufo2ft/filters/base.py
|
1
|
7630
|
import logging
from types import SimpleNamespace
from fontTools.misc.loggingTools import Timer
from ufo2ft.util import _GlyphSet, _LazyFontName
logger = logging.getLogger(__name__)
class BaseFilter:
# tuple of strings listing the names of required positional arguments
# which will be set as attributes of the filter instance
_args = ()
# dictionary containing the names of optional keyword arguments and
# their default values, which will be set as instance attributes
_kwargs = {}
# pre-filter when True, post-filter when False, meaning before or after default
# filters
_pre = False
def __init__(self, *args, **kwargs):
self.options = options = SimpleNamespace()
num_required = len(self._args)
num_args = len(args)
# process positional arguments as keyword arguments
if num_args < num_required:
args = (
*args,
*(kwargs.pop(a) for a in self._args[num_args:] if a in kwargs),
)
num_args = len(args)
duplicated_args = [k for k in self._args if k in kwargs]
if duplicated_args:
num_duplicated = len(duplicated_args)
raise TypeError(
"got {} duplicated positional argument{}: {}".format(
num_duplicated,
"s" if num_duplicated > 1 else "",
", ".join(duplicated_args),
)
)
# process positional arguments
if num_args < num_required:
missing = [repr(a) for a in self._args[num_args:]]
num_missing = len(missing)
raise TypeError(
"missing {} required positional argument{}: {}".format(
num_missing, "s" if num_missing > 1 else "", ", ".join(missing)
)
)
elif num_args > num_required:
extra = [repr(a) for a in args[num_required:]]
num_extra = len(extra)
raise TypeError(
"got {} unsupported positional argument{}: {}".format(
num_extra, "s" if num_extra > 1 else "", ", ".join(extra)
)
)
for key, value in zip(self._args, args):
setattr(options, key, value)
# process optional keyword arguments
for key, default in self._kwargs.items():
setattr(options, key, kwargs.pop(key, default))
# process special pre argument
self.pre = kwargs.pop("pre", self._pre)
# process special include/exclude arguments
include = kwargs.pop("include", None)
exclude = kwargs.pop("exclude", None)
if include is not None and exclude is not None:
raise ValueError("'include' and 'exclude' arguments are mutually exclusive")
if callable(include):
# 'include' can be a function (e.g. lambda) that takes a
# glyph object and returns True/False based on some test
self.include = include
self._include_repr = lambda: repr(include)
elif include is not None:
# or it can be a list of glyph names to be included
included = set(include)
self.include = lambda g: g.name in included
self._include_repr = lambda: repr(include)
elif exclude is not None:
# alternatively one can provide a list of names to not include
excluded = set(exclude)
self.include = lambda g: g.name not in excluded
self._exclude_repr = lambda: repr(exclude)
else:
# by default, all glyphs are included
self.include = lambda g: True
# raise if any unsupported keyword arguments
if kwargs:
num_left = len(kwargs)
raise TypeError(
"got {}unsupported keyword argument{}: {}".format(
"an " if num_left == 1 else "",
"s" if len(kwargs) > 1 else "",
", ".join(f"'{k}'" for k in kwargs),
)
)
# run the filter's custom initialization code
self.start()
def __repr__(self):
items = []
if self._args:
items.append(
", ".join(repr(getattr(self.options, arg)) for arg in self._args)
)
if self._kwargs:
items.append(
", ".join(
"{}={!r}".format(k, getattr(self.options, k))
for k in sorted(self._kwargs)
)
)
if hasattr(self, "_include_repr"):
items.append(f"include={self._include_repr()}")
elif hasattr(self, "_exclude_repr"):
items.append(f"exclude={self._exclude_repr()}")
return "{}({})".format(type(self).__name__, ", ".join(items))
def start(self):
"""Subclasses can perform here custom initialization code."""
pass
def set_context(self, font, glyphSet):
"""Populate a `self.context` namespace, which is reset before each
new filter call.
Subclasses can override this to provide contextual information
which depends on other data in the font that is not available in
the glyphs objects currently being filtered, or set any other
temporary attributes.
The default implementation simply sets the current font and glyphSet,
and initializes an empty set that keeps track of the names of the
glyphs that were modified.
Returns the namespace instance.
"""
self.context = SimpleNamespace(font=font, glyphSet=glyphSet)
self.context.modified = set()
return self.context
def filter(self, glyph):
"""This is where the filter is applied to a single glyph.
Subclasses must override this method, and return True
when the glyph was modified.
"""
raise NotImplementedError
@property
def name(self):
return self.__class__.__name__
def __call__(self, font, glyphSet=None):
"""Run this filter on all the included glyphs.
Return the set of glyph names that were modified, if any.
If `glyphSet` (dict) argument is provided, run the filter on
the glyphs contained therein (which may be copies).
Otherwise, run the filter in-place on the font's default
glyph set.
"""
fontName = _LazyFontName(font)
if glyphSet is not None and getattr(glyphSet, "name", None):
logger.info("Running %s on %s-%s", self.name, fontName, glyphSet.name)
else:
logger.info("Running %s on %s", self.name, fontName)
if glyphSet is None:
glyphSet = _GlyphSet.from_layer(font)
context = self.set_context(font, glyphSet)
filter_ = self.filter
include = self.include
modified = context.modified
with Timer() as t:
# we sort the glyph names to make loop deterministic
for glyphName in sorted(glyphSet.keys()):
if glyphName in modified:
continue
glyph = glyphSet[glyphName]
if include(glyph) and filter_(glyph):
modified.add(glyphName)
num = len(modified)
if num > 0:
logger.debug(
"Took %.3fs to run %s on %d glyph%s",
t,
self.name,
len(modified),
"" if num == 1 else "s",
)
return modified
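# Illustrative sketch (not part of ufo2ft): a minimal concrete filter showing the
# subclassing pattern described above. The class name, the 'minWidth' option and
# the values used in the usage comment are hypothetical.
class _ExampleMinWidthFilter(BaseFilter):
    """Clamp advance widths to a minimum value (illustration only)."""
    _kwargs = {"minWidth": 0}
    def filter(self, glyph):
        # must return True only when the glyph was actually modified
        if glyph.width < self.options.minWidth:
            glyph.width = self.options.minWidth
            return True
        return False
# Hypothetical usage: modified = _ExampleMinWidthFilter(minWidth=10, exclude=["space"])(ufo)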
|
mit
| 3,577,641,209,452,104,700 | 35.859903 | 88 | 0.553342 | false |
apanda/modeling
|
tests/examples/ContentCacheTest.py
|
1
|
1667
|
import components
def ContentCacheTest ():
"""Learning firewall test"""
ctx = components.Context (['a', 'b', 'c', 'd', 'cc'],\
['ip_a', 'ip_b', 'ip_c', 'ip_d', 'ip_cc'])
net = components.Network (ctx)
a = components.EndHost(ctx.a, net, ctx)
b = components.EndHost(ctx.b, net, ctx)
c = components.EndHost(ctx.c, net, ctx)
d = components.EndHost(ctx.d, net, ctx)
cc = components.ContentCache(ctx.cc, net, ctx)
net.setAddressMappings([(a, ctx.ip_a), \
(b, ctx.ip_b), \
(c, ctx.ip_c), \
(d, ctx.ip_d), \
(cc, ctx.ip_cc)])
addresses = [ctx.ip_a, ctx.ip_b, ctx.ip_c, ctx.ip_d, ctx.ip_cc]
net.RoutingTable(a, [(x, cc) for x in addresses])
net.RoutingTable(b, [(x, cc) for x in addresses])
net.RoutingTable(c, [(x, cc) for x in addresses])
net.RoutingTable(d, [(x, cc) for x in addresses])
net.RoutingTable(cc, [(ctx.ip_a, a), \
(ctx.ip_b, b), \
(ctx.ip_c, c), \
(ctx.ip_d, d)])
net.Attach(a, b, c, d, cc)
endhosts = [a, b, c, d]
class ContentCacheTestReturn (object):
def __init__ (self, net, ctx, a, b, c, d, cc):
self.net = net
self.ctx = ctx
self.a = a
self.b = b
self.c = c
self.d = d
self.cc = cc
self.check = components.PropertyChecker (ctx, net)
return ContentCacheTestReturn(net, ctx, a, b, c, d, cc)
|
bsd-3-clause
| -5,992,749,475,282,901,000 | 39.658537 | 72 | 0.472705 | false |
HyperloopTeam/FullOpenMDAO
|
cantera-2.0.2/interfaces/python/Cantera/stoich.py
|
1
|
1370
|
from Cantera import exceptions
from Cantera.num import array
from Cantera.elements import elementMoles
def det3(A):
"""Determinant of a 3x3 matrix."""
return (A[0,0]*(A[1,1]*A[2,2] - A[1,2]*A[2,1])
- A[0,1]*(A[1,0]*A[2,2] - A[1,2]*A[2,0])
+ A[0,2]*(A[1,0]*A[2,1] - A[2,0]*A[1,1]))
def stoich_fuel_to_oxidizer(mix, fuel, oxidizer):
"""Fuel to oxidizer ratio for stoichiometric combustion.
This function only works for fuels composed of carbon, hydrogen,
    and/or oxygen. The fuel to oxidizer ratio that results in complete
    (stoichiometric) combustion to CO2 and H2O is returned.
    """
# fuel
mix.setMoleFractions(fuel)
f_carbon = elementMoles(mix, 'C')
f_oxygen = elementMoles(mix, 'O')
f_hydrogen = elementMoles(mix, 'H')
#oxidizer
mix.setMoleFractions(oxidizer)
o_carbon = elementMoles(mix, 'C')
o_oxygen = elementMoles(mix, 'O')
o_hydrogen = elementMoles(mix, 'H')
B = array([f_carbon, f_hydrogen, f_oxygen],'d')
A = array([[1.0, 0.0, -o_carbon],
[0.0, 2.0, -o_hydrogen],
[2.0, 1.0, -o_oxygen]], 'd')
num = array(A,'d')
num[:,2] = B
r = det3(num)/det3(A)
if r <= 0.0:
        raise exceptions.CanteraError('negative or zero computed stoichiometric fuel/oxidizer ratio!')
return 1.0/r
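# Illustrative note (not in the original Cantera source): with fuel C/H/O mole
# counts (f_C, f_H, f_O) and oxidizer counts (o_C, o_H, o_O), complete combustion
# to CO2 and H2O with r moles of oxidizer per mole of fuel must satisfy
#     n_CO2           = f_C + r*o_C
#     2*n_H2O         = f_H + r*o_H
#     2*n_CO2 + n_H2O = f_O + r*o_O
# The 3x3 system above solves for (n_CO2, n_H2O, r) via Cramer's rule, and the
# stoichiometric fuel/oxidizer ratio is then 1/r.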
if __name__ == "__main__":
    from Cantera import GRI30  # assumed import for the legacy Cantera interface
    g = GRI30()
print stoich_fuel_to_oxidizer(g, 'CH4:1', 'O2:1')
|
gpl-2.0
| -2,443,989,361,483,333,000 | 29.444444 | 91 | 0.584672 | false |
goedzo/PokemonGo-Bot
|
pokemongo_bot/inventory.py
|
1
|
52151
|
from __future__ import print_function
import json
import logging
import os
from collections import OrderedDict
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.services.item_recycle_worker import ItemRecycler
'''
Helper class for updating/retrieving Inventory data
Interesting info and formulas:
https://drive.google.com/file/d/0B0TeYGBPiuzaenhUNE5UWnRCVlU/view
https://www.reddit.com/r/pokemongodev/comments/4w7mdg/combat_damage_calculation_formula_exactly/
'''
class FileIOException(Exception):
pass
#
# Abstraction
class _StaticInventoryComponent(object):
# optionally load static data from file,
# dropping the data in a static variable named STATIC_DATA
STATIC_DATA_FILE = None
STATIC_DATA = None
def __init__(self):
if self.STATIC_DATA_FILE is not None:
self.init_static_data()
@classmethod
def init_static_data(cls):
if not hasattr(cls, 'STATIC_DATA') or cls.STATIC_DATA is None:
cls.STATIC_DATA = cls.process_static_data(
json.load(open(cls.STATIC_DATA_FILE)))
@classmethod
def process_static_data(cls, data):
# optional hook for processing the static data
# default is to use the data directly
return data
class _BaseInventoryComponent(_StaticInventoryComponent):
TYPE = None # base key name for items of this type
ID_FIELD = None # identifier field for items of this type
def __init__(self):
self._data = {}
super(_BaseInventoryComponent, self).__init__()
def parse(self, item):
# optional hook for parsing the dict for this item
# default is to use the dict directly
return item
def retrieve_data(self, inventory):
assert self.TYPE is not None
assert self.ID_FIELD is not None
ret = {}
for item in inventory:
data = item['inventory_item_data']
if self.TYPE in data:
item = data[self.TYPE]
key = item[self.ID_FIELD]
ret[key] = self.parse(item)
return ret
def refresh(self, inventory):
self._data = self.retrieve_data(inventory)
def get(self, object_id):
return self._data.get(object_id)
def all(self):
return list(self._data.values())
#
# Inventory Components
class Player(_BaseInventoryComponent):
TYPE = 'player_stats'
def __init__(self, bot):
self.bot = bot
self._exp = None
self._level = None
self.next_level_xp = None
self.pokemons_captured = None
self.poke_stop_visits = None
self.player_stats = None
super(_BaseInventoryComponent, self).__init__()
@property
def level(self):
return self._level
@level.setter
def level(self, value):
self._level = value
@property
def exp(self):
return self._exp
@exp.setter
def exp(self, value):
# if new exp is larger than or equal to next_level_xp
if value >= self.next_level_xp:
self.level = self._level + 1
# increase next_level_xp to a big amount
            # will be fixed on the next heartbeat
self.next_level_xp += 10000000
self._exp = value
def refresh(self,inventory):
self.player_stats = self.retrieve_data(inventory)
def parse(self, item):
if not item:
item = {}
self.next_level_xp = item.get('next_level_xp', 0)
self.exp = item.get('experience', 0)
self.level = item.get('level', 0)
self.pokemons_captured = item.get('pokemons_captured', 0)
self.poke_stop_visits = item.get('poke_stop_visits', 0)
def retrieve_data(self, inventory):
ret = {}
for item in inventory:
data = item['inventory_item_data']
if self.TYPE in data:
item = data[self.TYPE]
ret = item
self.parse(item)
return ret
class Candies(_BaseInventoryComponent):
TYPE = 'candy'
ID_FIELD = 'family_id'
@classmethod
def family_id_for(cls, pokemon_id):
return Pokemons.candyid_for(pokemon_id)
def get(self, pokemon_id):
family_id = self.family_id_for(pokemon_id)
return self._data.setdefault(family_id, Candy(family_id, 0))
def parse(self, item):
candy = item['candy'] if 'candy' in item else 0
return Candy(item['family_id'], candy)
class Pokedex(_BaseInventoryComponent):
TYPE = 'pokedex_entry'
ID_FIELD = 'pokemon_id'
def seen(self, pokemon_id):
return pokemon_id in self._data
def captured(self, pokemon_id):
return self.seen(pokemon_id) and self._data.get(pokemon_id, {}).get('times_captured', 0) > 0
def shiny_seen(self, pokemon_id):
return self._data.get(pokemon_id, {}).get('encountered_shiny', False)
def shiny_captured(self, pokemon_id):
return self._data.get(pokemon_id, {}).get('captured_shiny', False)
class Item(object):
"""
Representation of an item.
"""
def __init__(self, item_id, item_count):
"""
Representation of an item
:param item_id: ID of the item
:type item_id: int
:param item_count: Quantity of the item
:type item_count: int
:return: An item
:rtype: Item
"""
self.id = item_id
self.name = Items.name_for(self.id)
self.count = item_count
def remove(self, amount):
"""
Remove a specified amount of an item from the cached inventory.
Note that it does **NOT** removes it in the server, it only removes it from the local cached inventory.
:param amount: Amount to remove
:type amount: int
:return: Nothing
:rtype: None
"""
if self.count < amount:
raise Exception('Tried to remove more {} than you have'.format(self.name))
self.count -= amount
def recycle(self, amount_to_recycle):
"""
Recycle (discard) the specified amount of item from the item inventory.
It is making a call to the server to request a recycling as well as updating the cached inventory.
:param amount_to_recycle: The amount to recycle.
:type amount_to_recycle: int
:return: Returns whether or not the task went well
:rtype: worker_result.WorkerResult
"""
if self.count < amount_to_recycle:
raise Exception('Tried to remove more {} than you have'.format(self.name))
item_recycler = ItemRecycler(_inventory.bot, self, amount_to_recycle)
item_recycler_work_result = item_recycler.work()
if item_recycler.is_recycling_success():
self.remove(amount_to_recycle)
return item_recycler_work_result
def add(self, amount):
"""
Add a specified amount of the item to the local cached inventory
:param amount: Amount to add
:type amount: int
:return: Nothing.
:rtype: None
"""
if amount < 0:
raise Exception('Must add positive amount of {}'.format(self.name))
self.count += amount
def __str__(self):
return self.name + " : " + str(self.count)
class Items(_BaseInventoryComponent):
TYPE = 'item'
ID_FIELD = 'item_id'
STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'items.json')
def parse(self, item_data):
"""
Make an instance of an Item from raw item data.
:param item_data: Item data to make an item from
:return: Instance of the Item.
:rtype: Item
"""
item_id = item_data.get(Items.ID_FIELD, None)
item_count = item_data['count'] if 'count' in item_data else 0
return Item(item_id, item_count)
def all(self):
"""
Get EVERY Item from the cached inventory.
        :return: List of every item in the cached inventory
:rtype: list of Item
"""
return list(self._data.values())
def get(self, item_id):
"""
Get ONE Item from the cached inventory.
:param item_id: Item's ID to search for.
:return: Instance of the item from the cached inventory
:rtype: Item
"""
return self._data.setdefault(item_id, Item(item_id, 0))
@classmethod
def name_for(cls, item_id):
"""
Search the name for an item from its ID.
:param item_id: Item's ID to search for.
:return: Item's name.
:rtype: str
"""
return cls.STATIC_DATA[str(item_id)]
@classmethod
def get_space_used(cls):
"""
Counts the space used in item inventory.
:return: The space used in item inventory.
:rtype: int
"""
space_used = 1
for item_in_inventory in _inventory.items.all():
space_used += item_in_inventory.count
return space_used
@classmethod
def get_space_left(cls):
"""
Compute the space left in item inventory.
        :return: The space left in item inventory. 0 if the player has more items than the item inventory can carry.
:rtype: int
"""
_inventory.retrieve_inventories_size()
space_left = _inventory.item_inventory_size - cls.get_space_used()
# Space left should never be negative. Returning 0 if the computed value is negative.
return space_left if space_left >= 0 else 0
@classmethod
def has_space_for_loot(cls):
"""
Returns a value indicating whether or not the item inventory has enough space to loot a fort
:return: True if the item inventory has enough space; otherwise, False.
:rtype: bool
"""
max_number_of_items_looted_at_stop = 5
return cls.get_space_left() >= max_number_of_items_looted_at_stop
class AppliedItem(object):
"""
Representation of an applied item, like incense.
"""
def __init__(self, item_id, expire_ms, applied_ms):
"""
Representation of an applied item
:param item_id: ID of the item
:type item_id: int
:param expire_ms: expire in ms
        :type expire_ms: int
:param applied_ms: applied at
:type applied_ms: int
:return: An applied item
        :rtype: AppliedItem
"""
self.id = item_id
self.name = Items.name_for(self.id)
self.applied_ms = applied_ms
self.expire_ms = expire_ms
def refresh(self,inventory):
self.retrieve_data(inventory)
def parse(self, item):
if not item:
item = {}
self.id = item.get('id', 0)
self.name = Items.name_for(self.id)
self.expire_ms = item.get('expire_ms', 0)
self.applied_ms = item.get('applied_ms', 0)
def retrieve_data(self, inventory):
ret = {}
for item in inventory:
data = item['inventory_item_data']
if self.TYPE in data:
item = data[self.TYPE]
ret = item
self.parse(item)
return ret
def __str__(self):
return self.name
class AppliedItems(_BaseInventoryComponent):
TYPE='applied_items'
ID_FIELD = 'item_id'
STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'items.json')
def all(self):
"""
Get EVERY Item from the cached inventory.
:return: List of evey item in the cached inventory
:rtype: list of Item
"""
return list(self._data.values())
def get(self, item_id):
"""
Get ONE Item from the cached inventory.
:param item_id: Item's ID to search for.
:return: Instance of the item from the cached inventory
:rtype: Item
"""
return self._data.setdefault(item_id, Item(item_id, 0))
@classmethod
def name_for(cls, item_id):
"""
Search the name for an item from its ID.
:param item_id: Item's ID to search for.
:return: Item's name.
:rtype: str
"""
return cls.STATIC_DATA[str(item_id)]
class Pokemons(_BaseInventoryComponent):
TYPE = 'pokemon_data'
ID_FIELD = 'id'
STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'pokemon.json')
@classmethod
def process_static_data(cls, data):
data = [PokemonInfo(d) for d in data]
# process evolution info
for p in data:
next_all = p.next_evolutions_all
if len(next_all) <= 0:
continue
# only next level evolutions, not all possible
p.next_evolution_ids = [idx for idx in next_all
if data[idx-1].prev_evolution_id == p.id]
# only final evolutions
p.last_evolution_ids = [idx for idx in next_all
if not data[idx-1].has_next_evolution]
assert len(p.last_evolution_ids) > 0
return data
@classmethod
def get_space_used(cls):
"""
Counts the space used in pokemon inventory.
:return: The space used in pokemon inventory.
:rtype: int
"""
return len(_inventory.pokemons.all_with_eggs())
@classmethod
def get_space_left(cls):
"""
Compute the space left in pokemon inventory.
:return: The space left in pokemon inventory.
:rtype: int
"""
_inventory.retrieve_inventories_size()
space_left = _inventory.pokemon_inventory_size - cls.get_space_used()
return space_left
@classmethod
def data_for(cls, pokemon_id):
# type: (int) -> PokemonInfo
return cls.STATIC_DATA[pokemon_id - 1]
@classmethod
def name_for(cls, pokemon_id):
return cls.data_for(pokemon_id).name
@classmethod
def candyid_for(cls, pokemon_id):
return cls.data_for(pokemon_id).candyid
@classmethod
def id_for(cls, pokemon_name):
# TODO: Use a better searching algorithm. This one is O(n)
for data in cls.STATIC_DATA:
if data.name.lower() == pokemon_name.lower():
return data.id
raise Exception('Could not find pokemon named {}'.format(pokemon_name))
@classmethod
def first_evolution_id_for(cls, pokemon_id):
return cls.data_for(pokemon_id).first_evolution_id
@classmethod
def prev_evolution_id_for(cls, pokemon_id):
return cls.data_for(pokemon_id).prev_evolution_id
@classmethod
def next_evolution_ids_for(cls, pokemon_id):
return cls.data_for(pokemon_id).next_evolution_ids
@classmethod
def last_evolution_ids_for(cls, pokemon_id):
return cls.data_for(pokemon_id).last_evolution_ids
@classmethod
def has_next_evolution(cls, pokemon_id):
return cls.data_for(pokemon_id).has_next_evolution
@classmethod
def evolution_cost_for(cls, pokemon_id):
return cls.data_for(pokemon_id).evolution_cost
@classmethod
def evolution_item_for(cls, pokemon_id):
return cls.data_for(pokemon_id).evolution_item
@classmethod
def evolution_items_needed_for(cls, pokemon_id):
return cls.data_for(pokemon_id).evolution_item_needed
def parse(self, item):
if 'is_egg' in item:
return Egg(item)
return Pokemon(item)
def all(self):
# by default don't include eggs in all pokemon (usually just
# makes caller's lives more difficult)
return [p for p in super(Pokemons, self).all() if not isinstance(p, Egg)]
def all_with_eggs(self):
# count pokemon AND eggs, since eggs are counted as bag space
return super(Pokemons, self).all()
def add(self, pokemon):
if pokemon.unique_id <= 0:
raise ValueError("Can't add a pokemon without id")
if pokemon.unique_id in self._data:
raise ValueError("Pokemon already present in the inventory")
self._data[pokemon.unique_id] = pokemon
def get_from_unique_id(self, pokemon_unique_id):
if pokemon_unique_id not in self._data:
raise ValueError("Pokemon not present in the inventory")
return self._data[pokemon_unique_id]
def remove(self, pokemon_unique_id):
if pokemon_unique_id not in self._data:
raise ValueError("Pokemon not present in the inventory")
self._data.pop(pokemon_unique_id)
#
# Static Components
class Types(_StaticInventoryComponent):
"""
Types of attacks and pokemons
See more information:
https://i.redd.it/oy7lrixl8r9x.png
https://www.reddit.com/r/TheSilphRoad/comments/4t8seh/pokemon_go_type_advantage_chart/
https://github.com/jehy/Pokemon-Go-Weakness-calculator/blob/master/app/src/main/java/ru/jehy/pokemonweaknesscalculator/MainActivity.java#L31
"""
STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'types.json')
@classmethod
def process_static_data(cls, data):
# create instances
ret = OrderedDict()
for t in sorted(data, key=lambda x: x["name"]):
name = str(t["name"])
ret[name] = Type(name, t["effectiveAgainst"], t["weakAgainst"])
# additional manipulations
size = len(ret)
by_effectiveness = {}
by_resistance = {}
for t in ret.itervalues(): # type: Type
t.attack_effective_against = [ret[name] for name in t.attack_effective_against]
t.attack_weak_against = [ret[name] for name in t.attack_weak_against]
# group types effective against, weak against specific types
for l, d in (t.attack_effective_against, by_effectiveness), \
(t.attack_weak_against, by_resistance):
for tt in l:
if tt not in d:
d[tt] = set()
d[tt].add(t)
# calc average factor for damage of this type relative to all types
t.rate = (size
+ ((EFFECTIVENESS_FACTOR-1) * len(t.attack_effective_against))
- ((1-RESISTANCE_FACTOR) * len(t.attack_weak_against))) / size
# set pokemon type resistance/weakness info
for t in ret.itervalues(): # type: Type
t.pokemon_resistant_to = by_resistance[t]
t.pokemon_vulnerable_to = by_effectiveness[t]
return ret
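    # Illustrative example (made-up counts): with the usual 18 types, a type whose
    # attacks are effective against 3 types and weak against 2 gets
    #     rate = (18 + (1.4 - 1)*3 - (1 - 0.714)*2) / 18 ~= 1.035
    # i.e. slightly better than the neutral average damage factor of 1.0.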
@classmethod
def get(cls, type_name):
# type: (Union[string, Type]) -> Type
type_name = str(type_name)
if type_name not in cls.STATIC_DATA:
raise ValueError("Unknown type: {}".format(type_name))
return cls.STATIC_DATA[type_name]
@classmethod
def all(cls):
return cls.STATIC_DATA.values()
@classmethod
def rating(cls):
return sorted(cls.all(), key=lambda x: x.rate, reverse=True)
class LevelToCPm(_StaticInventoryComponent):
"""
Data for the CP multipliers at different levels
See http://pokemongo.gamepress.gg/cp-multiplier
See https://github.com/justinleewells/pogo-optimizer/blob/edd692d/data/game/level-to-cpm.json
"""
STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'level_to_cpm.json')
MAX_LEVEL = 40
MAX_CPM = .0
@classmethod
def init_static_data(cls):
super(LevelToCPm, cls).init_static_data()
cls.MAX_CPM = cls.cp_multiplier_for(cls.MAX_LEVEL)
assert cls.MAX_CPM > .0
@classmethod
def cp_multiplier_for(cls, level):
return cls.STATIC_DATA[int(2 * (level - 1))]
@classmethod
def level_from_cpm(cls, cp_multiplier):
return min(range(len(cls.STATIC_DATA)), key=lambda i: abs(cls.STATIC_DATA[i] - cp_multiplier)) * 0.5 + 1
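    # Illustrative note (assuming the usual half-level table): index 0 of
    # STATIC_DATA corresponds to level 1, index 1 to level 1.5, ..., index 78
    # to level 40; level_from_cpm() simply picks the nearest tabulated multiplier.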
class _Attacks(_StaticInventoryComponent):
BY_NAME = {} # type: Dict[string, Attack]
BY_TYPE = {} # type: Dict[List[Attack]]
BY_DPS = [] # type: List[Attack]
@classmethod
def process_static_data(cls, moves):
ret = {}
by_type = {}
by_name = {}
fast = cls is FastAttacks
for attack in moves:
attack = Attack(attack) if fast else ChargedAttack(attack)
ret[attack.id] = attack
by_name[attack.name] = attack
attack_type = str(attack.type)
if attack_type not in by_type:
by_type[attack_type] = []
by_type[attack_type].append(attack)
for t in by_type.iterkeys():
attacks = sorted(by_type[t], key=lambda m: m.dps, reverse=True)
min_dps = attacks[-1].dps
max_dps = attacks[0].dps - min_dps
if max_dps > .0:
for attack in attacks: # type: Attack
attack.rate_in_type = (attack.dps - min_dps) / max_dps
by_type[t] = attacks
cls.BY_NAME = by_name
cls.BY_TYPE = by_type
cls.BY_DPS = sorted(ret.values(), key=lambda m: m.dps, reverse=True)
return ret
@classmethod
def data_for(cls, attack_id):
# type: (int) -> Attack
if attack_id not in cls.STATIC_DATA:
raise ValueError("Attack {} not found in {}".format(
attack_id, cls.__name__))
return cls.STATIC_DATA[attack_id]
@classmethod
def by_name(cls, name):
# type: (string) -> Attack
return cls.BY_NAME[name]
@classmethod
def list_for_type(cls, type_name):
# type: (Union[string, Type]) -> List[Attack]
"""
:return: Attacks sorted by DPS in descending order
"""
return cls.BY_TYPE[str(type_name)]
@classmethod
def all(cls):
return cls.STATIC_DATA.values()
@classmethod
def all_by_dps(cls):
return cls.BY_DPS
class FastAttacks(_Attacks):
STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'fast_moves.json')
class ChargedAttacks(_Attacks):
STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'charged_moves.json')
#
# Instances
class Type(object):
def __init__(self, name, effective_against, weak_against):
# type: (string, Iterable[Type], Iterable[Type]) -> None
self.name = name
# effective way to represent type with one character
# for example it's very useful for nicknaming pokemon
# using its attack types
#
        # if the first char is unique - use it, otherwise
        # use a suitable substitute
type_to_one_char_map = {
'Bug': 'B',
'Dark': 'K',
'Dragon': 'D',
'Electric': 'E',
'Fairy': 'Y',
'Fighting': 'T',
'Fire': 'F',
'Flying': 'L',
'Ghost': 'H',
'Grass': 'A',
'Ground': 'G',
'Ice': 'I',
'Normal': 'N',
'Poison': 'P',
'Psychic': 'C',
'Rock': 'R',
'Steel': 'S',
'Water': 'W',
}
self.as_one_char = type_to_one_char_map[name]
# attack of this type is effective against ...
self.attack_effective_against = set(effective_against)
# attack of this type is weak against ...
self.attack_weak_against = set(weak_against)
# pokemon of this type is resistant to ...
self.pokemon_resistant_to = set() # type: Set[Type]
# pokemon of this type is vulnerable to ...
self.pokemon_vulnerable_to = set() # type: Set[Type]
# average factor for damage of this type relative to all types
self.rate = 1.
def __str__(self):
return self.name
def __repr__(self):
return self.name
class Candy(object):
def __init__(self, family_id, quantity):
self.type = Pokemons.name_for(family_id)
self.quantity = quantity
def consume(self, amount):
if self.quantity < amount:
raise Exception('Tried to consume more {} candy than you have'.format(self.type))
self.quantity -= amount
def add(self, amount):
if amount < 0:
raise Exception('Must add positive amount of candy')
self.quantity += amount
class Egg(object):
def __init__(self, data):
self._data = data
def has_next_evolution(self):
return False
class PokemonInfo(object):
"""
Static information about pokemon kind
"""
def __init__(self, data):
self._data = data
self.id = int(data["Number"])
self.name = data['Name'] # type: string
self.classification = data['Classification'] # type: string
# prepare types
self.type1 = Types.get(data['Type I'][0])
self.type2 = None
self.types = [self.type1] # required type
for t in data.get('Type II', []):
self.type2 = Types.get(t)
self.types.append(self.type2) # second type
break
# base chance to capture pokemon
self.capture_rate = data['CaptureRate']
# chance of the pokemon to flee away
self.flee_rate = data['FleeRate']
# km needed for buddy reward
self.buddy_distance_needed = data['BuddyDistanceNeeded']
# prepare attacks (moves)
self.fast_attacks = self._process_attacks()
self.charged_attack = self._process_attacks(charged=True)
# prepare movesets
self.movesets = self._process_movesets()
# Basic Values of the pokemon (identical for all pokemons of one kind)
self.base_attack = data['BaseAttack']
self.base_defense = data['BaseDefense']
self.base_stamina = data['BaseStamina']
# calculate maximum CP for the pokemon (best IVs, lvl 40)
self.max_cp = _calc_cp(self.base_attack, self.base_defense,
self.base_stamina)
#
# evolutions info for this pokemon
# id of the very first level evolution
self.first_evolution_id = self.id
# id of the previous evolution (one level only)
self.prev_evolution_id = None
# ids of all available previous evolutions in the family
self.prev_evolutions_all = []
if 'Previous evolution(s)' in data:
ids = [int(e['Number']) for e in data['Previous evolution(s)']]
self.first_evolution_id = ids[0]
self.prev_evolution_id = ids[-1]
self.prev_evolutions_all = ids
# Number of candies for the next evolution (if possible)
self.evolution_cost = 0
# Next evolution doesn't need a special item
self.evolution_item = None
self.evolution_item_needed = 0
# next evolution flag
self.has_next_evolution = 'Next evolution(s)' in data \
or 'Next Evolution Requirements' in data
# ids of the last level evolutions
self.last_evolution_ids = [self.id]
# ids of the next possible evolutions (one level only)
self.next_evolution_ids = []
#candies
self.candyid = int(data['Candy']['FamilyID'])
self.candyName = (data['Candy']['Name'])
self.next_evolutions_all = []
if self.has_next_evolution:
ids = [int(e['Number']) for e in data['Next evolution(s)']]
self.next_evolutions_all = ids
self.evolution_cost = int(data['Next Evolution Requirements']['Amount'])
if 'EvoItem' in data['Next Evolution Requirements']:
self.evolution_item = int(data['Next Evolution Requirements']['EvoItem'])
self.evolution_item_needed = int(data['Next Evolution Requirements']['EvoItemNeeded'])
@property
def family_id(self):
return self.first_evolution_id
@property
def is_seen(self):
return pokedex().seen(self.id)
@property
def is_captured(self):
return pokedex().captured(self.id)
def _process_movesets(self):
# type: () -> List[Moveset]
"""
The optimal moveset is the combination of two moves, one quick move
and one charge move, that deals the most damage over time.
Because each quick move gains a certain amount of energy (different
for different moves) and each charge move requires a different amount
of energy to use, sometimes, a quick move with lower DPS will be
better since it charges the charge move faster. On the same note,
sometimes a charge move that has lower DPS will be more optimal since
it may require less energy or it may last for a longer period of time.
        The attacker gets STAB (Same-type attack bonus, x1.25) when the pokemon
        has the same type as the attack. So we add it to the "Combo DPS" of the moveset.
The defender attacks in intervals of 1 second for the first 2 attacks,
and then in intervals of 2 seconds for the remainder of the attacks.
This explains why we see two consecutive quick attacks at the beginning
of the match. As a result, we add +2 seconds to the DPS calculation
for defender DPS output.
So to determine an optimal defensive moveset, we follow the same method
as we did for optimal offensive movesets, but instead calculate the
highest "Combo DPS" with an added 2 seconds to the quick move cool down.
Note: critical hits have not yet been implemented in the game
See http://pokemongo.gamepress.gg/optimal-moveset-explanation
See http://pokemongo.gamepress.gg/defensive-tactics
"""
# Prepare movesets
movesets = []
for fm in self.fast_attacks:
for chm in self.charged_attack:
movesets.append(Moveset(fm, chm, self.types, self.id))
assert len(movesets) > 0
# Calculate attack perfection for each moveset
movesets = sorted(movesets, key=lambda m: m.dps_attack)
worst_dps = movesets[0].dps_attack
best_dps = movesets[-1].dps_attack
if best_dps > worst_dps:
for moveset in movesets:
current_dps = moveset.dps_attack
moveset.attack_perfection = \
(current_dps - worst_dps) / (best_dps - worst_dps)
# Calculate defense perfection for each moveset
movesets = sorted(movesets, key=lambda m: m.dps_defense)
worst_dps = movesets[0].dps_defense
best_dps = movesets[-1].dps_defense
if best_dps > worst_dps:
for moveset in movesets:
current_dps = moveset.dps_defense
moveset.defense_perfection = \
(current_dps - worst_dps) / (best_dps - worst_dps)
return sorted(movesets, key=lambda m: m.dps, reverse=True)
def _process_attacks(self, charged=False):
# type: (bool) -> List[Attack]
key = 'Fast Attack(s)' if not charged else 'Special Attack(s)'
moves_dict = (ChargedAttacks if charged else FastAttacks).BY_NAME
moves = []
for name in self._data[key]:
if name not in moves_dict:
raise KeyError('Unknown {} attack: "{}"'.format(
'charged' if charged else 'fast', name))
moves.append(moves_dict[name])
moves = sorted(moves, key=lambda m: m.dps, reverse=True)
assert len(moves) > 0
return moves
class Pokemon(object):
def __init__(self, data):
self._data = data
# Unique ID for this particular Pokemon
self.unique_id = data.get('id', 0)
# Let's try this
self.encounter_id = data.get('encounter_id')
# Id of the such pokemons in pokedex
self.pokemon_id = data['pokemon_id']
# Static information
self.static = Pokemons.data_for(self.pokemon_id)
# Shiny information
self.display_data = data.get('pokemon_display')
self.shiny = self.display_data.get('shiny', False)
# self.form = self.display_data.get('form', )
# Combat points value
self.cp = data['cp']
# Base CP multiplier, fixed at the catch time
self.cp_bm = data['cp_multiplier']
# Changeable part of the CP multiplier, increasing at power up
self.cp_am = data.get('additional_cp_multiplier', .0)
# Resulting CP multiplier
self.cp_m = self.cp_bm + self.cp_am
# Current pokemon level (half of level is a normal value)
self.level = LevelToCPm.level_from_cpm(self.cp_m)
if 'level' not in self._data:
self._data['level'] = self.level
# Maximum health points
self.hp_max = data['stamina_max']
# Current health points
self.hp = data.get('stamina', 0) #self.hp_max)
assert 0 <= self.hp <= self.hp_max
# Individial Values of the current specific pokemon (different for each)
self.iv_attack = data.get('individual_attack', 0)
self.iv_defense = data.get('individual_defense', 0)
self.iv_stamina = data.get('individual_stamina', 0)
# Basic Values of the pokemon (identical for all pokemons of one kind)
base_attack = self.static.base_attack
base_defense = self.static.base_defense
base_stamina = self.static.base_stamina
self.name = self.static.name
self.nickname_raw = data.get('nickname', '')
self.nickname = self.nickname_raw or self.name
self.in_fort = 'deployed_fort_id' in data
if 'deployed_fort_id' in data:
self.fort_id = data['deployed_fort_id']
        self.is_favorite = data.get('favorite', 0) == 1
self.buddy_candy = data.get('buddy_candy_awarded', 0)
self.is_bad = data.get('is_bad', False)
self.buddy_distance_needed = self.static.buddy_distance_needed
self.fast_attack = FastAttacks.data_for(data['move_1'])
self.charged_attack = ChargedAttacks.data_for(data['move_2']) # type: ChargedAttack
# Individial values (IV) perfection percent
self.iv = self._compute_iv_perfection()
# IV CP perfection - kind of IV perfection percent but calculated
# using weight of each IV in its contribution to CP of the best
# evolution of current pokemon
# So it tends to be more accurate than simple IV perfection
self.ivcp = self._compute_cp_perfection()
# Exact value of current CP (not rounded)
self.cp_exact = _calc_cp(
base_attack, base_defense, base_stamina,
self.iv_attack, self.iv_defense, self.iv_stamina, self.cp_m)
#assert max(int(self.cp_exact), 10) == self.cp
# Percent of maximum possible CP
self.cp_percent = self.cp_exact / self.static.max_cp
# Get moveset instance with calculated DPS and perfection percents
self.moveset = self._get_moveset()
def __str__(self):
return self.name
def __repr__(self):
return self.name
def update_nickname(self, new_nickname):
self.nickname_raw = new_nickname
self.nickname = self.nickname_raw or self.name
def can_evolve_now(self):
if self.evolution_item is None:
return self.has_next_evolution() and \
self.candy_quantity >= self.evolution_cost
else:
evo_items = items().get(self.evolution_item).count
return self.has_next_evolution() and \
self.candy_quantity >= self.evolution_cost and \
evo_items >= self.evolution_items_needed
def has_next_evolution(self):
return self.static.has_next_evolution
def has_seen_next_evolution(self):
for pokemon_id in self.next_evolution_ids:
if pokedex().captured(pokemon_id):
return True
return False
@property
def family_id(self):
return self.static.family_id
@property
def first_evolution_id(self):
return self.static.first_evolution_id
@property
def prev_evolution_id(self):
return self.static.prev_evolution_id
@property
def next_evolution_ids(self):
return self.static.next_evolution_ids
@property
def last_evolution_ids(self):
return self.static.last_evolution_ids
@property
def candy_quantity(self):
return candies().get(self.pokemon_id).quantity
@property
def evolution_cost(self):
return self.static.evolution_cost
@property
def evolution_item(self):
return self.static.evolution_item
@property
def evolution_items_needed(self):
return self.static.evolution_item_needed
@property
def iv_display(self):
return '{}/{}/{}'.format(self.iv_attack, self.iv_defense, self.iv_stamina)
def _compute_iv_perfection(self):
total_iv = self.iv_attack + self.iv_defense + self.iv_stamina
iv_perfection = round((total_iv / 45.0), 2)
return iv_perfection
def _compute_cp_perfection(self):
"""
CP perfect percent is more accurate than IV perfect
        We know attack plays an important role in CP, and different
        pokemons have different base values; that means 15/14/15 is
        better than 14/15/15 for a lot of pokemons, and if a pokemon's
        base def is higher than its base sta, 15/15/14 is better than 15/14/15.
See https://github.com/jabbink/PokemonGoBot/issues/469
So calculate CP perfection at final level for the best of the final
evolutions of the pokemon.
"""
variants = []
iv_attack = self.iv_attack
iv_defense = self.iv_defense
iv_stamina = self.iv_stamina
cp_m = LevelToCPm.MAX_CPM
last_evolution_ids = self.last_evolution_ids
for pokemon_id in last_evolution_ids:
poke_info = Pokemons.data_for(pokemon_id)
base_attack = poke_info.base_attack
base_defense = poke_info.base_defense
base_stamina = poke_info.base_stamina
# calculate CP variants at maximum level
worst_cp = _calc_cp(base_attack, base_defense, base_stamina,
0, 0, 0, cp_m)
perfect_cp = _calc_cp(base_attack, base_defense, base_stamina,
cp_multiplier=cp_m)
current_cp = _calc_cp(base_attack, base_defense, base_stamina,
iv_attack, iv_defense, iv_stamina, cp_m)
cp_perfection = (current_cp - worst_cp) / (perfect_cp - worst_cp)
variants.append(cp_perfection)
# get best value (probably for the best evolution)
cp_perfection = max(variants)
return cp_perfection
def _get_moveset(self):
move1 = self.fast_attack
move2 = self.charged_attack
movesets = self.static.movesets
current_moveset = None
for moveset in movesets: # type: Moveset
if moveset.fast_attack == move1 and moveset.charged_attack == move2:
current_moveset = moveset
break
if current_moveset is None:
error = "Unexpected moveset [{}, {}] for #{} {}," \
" please update info in pokemon.json and create issue/PR" \
.format(move1, move2, self.pokemon_id, self.name)
# raise ValueError(error)
logging.getLogger(type(self).__name__).error(error)
current_moveset = Moveset(
move1, move2, self.static.types, self.pokemon_id)
return current_moveset
class Attack(object):
def __init__(self, data):
# self._data = data # Not needed - all saved in fields
self.id = data['id']
self.name = data['name']
self.type = Types.get(data['type'])
self.damage = data['damage']
self.duration = data['duration'] / 1000.0 # duration in seconds
# Energy addition for fast attack
# Energy cost for charged attack
self.energy = data['energy']
# Damage Per Second
# recalc for better precision
self.dps = self.damage / self.duration
# Perfection of the attack in it's type (from 0 to 1)
self.rate_in_type = .0
@property
def damage_with_stab(self):
# damage with STAB (Same-type attack bonus)
return self.damage * STAB_FACTOR
@property
def dps_with_stab(self):
# DPS with STAB (Same-type attack bonus)
return self.dps * STAB_FACTOR
@property
def effective_against(self):
return self.type.attack_effective_against
@property
def weak_against(self):
return self.type.attack_weak_against
@property
def energy_per_second(self):
return self.energy / self.duration
@property
def dodge_window(self):
# TODO: Attack Dodge Window
return NotImplemented
@property
def is_charged(self):
return False
def __str__(self):
return self.name
def __repr__(self):
return self.name
class ChargedAttack(Attack):
def __init__(self, data):
super(ChargedAttack, self).__init__(data)
@property
def is_charged(self):
return True
class Moveset(object):
def __init__(self, fm, chm, pokemon_types=(), pokemon_id=-1):
# type: (Attack, ChargedAttack, List[Type], int) -> None
if len(pokemon_types) <= 0 < pokemon_id:
pokemon_types = Pokemons.data_for(pokemon_id).types
self.pokemon_id = pokemon_id
self.fast_attack = fm
self.charged_attack = chm
# See Pokemons._process_movesets()
# See http://pokemongo.gamepress.gg/optimal-moveset-explanation
# See http://pokemongo.gamepress.gg/defensive-tactics
fm_number = 100 # for simplicity we use 100
fm_energy = fm.energy * fm_number
fm_damage = fm.damage * fm_number
fm_secs = fm.duration * fm_number
# Defender attacks in intervals of 1 second for the
# first 2 attacks, and then in intervals of 2 seconds
# So add 1.95 seconds to the quick move cool down for defense
# 1.95 is something like an average here
# TODO: Do something better?
fm_defense_secs = (fm.duration + 1.95) * fm_number
chm_number = fm_energy / chm.energy
chm_damage = chm.damage * chm_number
chm_secs = chm.duration * chm_number
damage_sum = fm_damage + chm_damage
# raw Damage-Per-Second for the moveset
self.dps = damage_sum / (fm_secs + chm_secs)
# average DPS for defense
self.dps_defense = damage_sum / (fm_defense_secs + chm_secs)
# apply STAB (Same-type attack bonus)
if fm.type in pokemon_types:
fm_damage *= STAB_FACTOR
if chm.type in pokemon_types:
chm_damage *= STAB_FACTOR
# DPS for attack (counting STAB)
self.dps_attack = (fm_damage + chm_damage) / (fm_secs + chm_secs)
# Moveset perfection percent for attack and for defense
# Calculated for current pokemon kind only, not between all pokemons
# So 100% perfect moveset can be weak if pokemon is weak (e.g. Caterpie)
self.attack_perfection = .0
self.defense_perfection = .0
# TODO: True DPS for real combat (floor(Attack/200 * MovePower * STAB) + 1)
# See http://pokemongo.gamepress.gg/pokemon-attack-explanation
def __str__(self):
return '[{}, {}]'.format(self.fast_attack, self.charged_attack)
def __repr__(self):
return '[{}, {}]'.format(self.fast_attack, self.charged_attack)
class Inventory(object):
def __init__(self, bot):
self.bot = bot
self.pokedex = Pokedex()
self.candy = Candies()
self.items = Items()
self.applied_items = AppliedItems()
self.pokemons = Pokemons()
self.player = Player(self.bot) # include inventory inside Player?
self.egg_incubators = None
self.refresh()
self.item_inventory_size = None
self.pokemon_inventory_size = None
def refresh(self, inventory=None):
if inventory is None:
request = self.bot.api.create_request()
request.get_inventory()
inventory = request.call()
inventory = inventory['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']
for i in (self.pokedex, self.candy, self.items, self.pokemons, self.player):
i.refresh(inventory)
# self.applied_items = [x["inventory_item_data"] for x in inventory if "applied_items" in x["inventory_item_data"]]
self.egg_incubators = [x["inventory_item_data"] for x in inventory if "egg_incubators" in x["inventory_item_data"]]
self.update_web_inventory()
def init_inventory_outfile(self):
web_inventory = os.path.join(_base_dir, "web", "inventory-%s.json" % self.bot.config.username)
if not os.path.exists(web_inventory):
self.bot.logger.info('No inventory file %s found. Creating a new one' % web_inventory)
json_inventory = []
with open(web_inventory, "w") as outfile:
json.dump(json_inventory, outfile)
def update_web_inventory(self):
web_inventory = os.path.join(_base_dir, "web", "inventory-%s.json" % self.bot.config.username)
if not os.path.exists(web_inventory):
self.init_inventory_outfile()
json_inventory = self.jsonify_inventory()
try:
with open(web_inventory, "w") as outfile:
json.dump(json_inventory, outfile)
except (IOError, ValueError) as e:
self.bot.logger.info('[x] Error while opening inventory file for write: %s' % e, 'red')
except:
raise FileIOException("Unexpected error writing to {}".format(web_inventory))
def jsonify_inventory(self):
json_inventory = []
json_inventory.append({"inventory_item_data": {"player_stats": self.player.player_stats}})
for pokedex in self.pokedex.all():
json_inventory.append({"inventory_item_data": {"pokedex_entry": pokedex}})
for family_id, candy in self.candy._data.items():
json_inventory.append({"inventory_item_data": {"candy": {"family_id": family_id, "candy": candy.quantity}}})
for item_id, item in self.items._data.items():
json_inventory.append({"inventory_item_data": {"item": {"item_id": item_id, "count": item.count}}})
for pokemon in self.pokemons.all_with_eggs():
json_inventory.append({"inventory_item_data": {"pokemon_data": pokemon._data}})
for inc in self.egg_incubators:
json_inventory.append({"inventory_item_data": inc})
# for item in self.applied_items:
# json_inventory.append({"inventory_applied_item_data": {"applied_item": {"item_id": item.item_id, "applied_ms": item.applied_ms, "expire_ms": item.expire_ms}}})
return json_inventory
def retrieve_inventories_size(self):
"""
Retrieves the item and pokemon inventory sizes
:return: Nothing.
:rtype: None
"""
# TODO: Force to update it if the player upgrades its size
if self.item_inventory_size is None or self.pokemon_inventory_size is None:
request = self.bot.api.create_request()
request.get_player()
player_data = request.call()['responses']['GET_PLAYER']['player_data']
self.item_inventory_size = player_data['max_item_storage']
self.pokemon_inventory_size = player_data['max_pokemon_storage']
#
# Other
# STAB (Same-type attack bonus)
# Factor applied to attack of the same type as pokemon
STAB_FACTOR = 1.2
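# e.g. (illustration only): a 10-damage attack matching the attacker's own
# type is counted as 10 * 1.2 = 12 damage once STAB is applied.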
# Factor applied to attack when it's effective against defending pokemon type
EFFECTIVENESS_FACTOR = 1.4
# Factor applied to attack when it's weak against defending pokemon type
RESISTANCE_FACTOR = 0.714
IMMUNITY_FACTOR = 0.51
_inventory = None # type: Inventory
def _calc_cp(base_attack, base_defense, base_stamina,
iv_attack=15, iv_defense=15, iv_stamina=15,
cp_multiplier=.0):
"""
CP calculation
CP = (Attack * Defense^0.5 * Stamina^0.5 * CP_Multiplier^2) / 10
CP = (BaseAtk+AtkIV) * (BaseDef+DefIV)^0.5 * (BaseStam+StamIV)^0.5 * Lvl(CPScalar)^2 / 10
See https://www.reddit.com/r/TheSilphRoad/comments/4t7r4d/exact_pokemon_cp_formula/
See https://www.reddit.com/r/pokemongodev/comments/4t7xb4/exact_cp_formula_from_stats_and_cpm_and_an_update/
See http://pokemongo.gamepress.gg/pokemon-stats-advanced
See http://pokemongo.gamepress.gg/cp-multiplier
See http://gaming.stackexchange.com/questions/280491/formula-to-calculate-pokemon-go-cp-and-hp
:param base_attack: Pokemon BaseAttack
:param base_defense: Pokemon BaseDefense
:param base_stamina: Pokemon BaseStamina
:param iv_attack: Pokemon IndividualAttack (0..15)
:param iv_defense: Pokemon IndividualDefense (0..15)
:param iv_stamina: Pokemon IndividualStamina (0..15)
:param cp_multiplier: CP Multiplier (0.79030001 is max - value for level 40)
:return: CP as float
"""
assert base_attack > 0
assert base_defense > 0
assert base_stamina > 0
if cp_multiplier <= .0:
cp_multiplier = LevelToCPm.MAX_CPM
assert cp_multiplier > .0
return (base_attack + iv_attack) \
* ((base_defense + iv_defense)**0.5) \
* ((base_stamina + iv_stamina)**0.5) \
* (cp_multiplier ** 2) / 10
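# Minimal usage sketch (the base stats below are placeholders, not real game data):
#   _calc_cp(base_attack=126, base_defense=126, base_stamina=90)
# evaluates the formula above with perfect IVs (15/15/15) and the level-40
# CP multiplier, since no explicit cp_multiplier is passed.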
# Initialize static data in the right order
Types() # init Types
LevelToCPm() # init LevelToCPm
FastAttacks() # init FastAttacks
ChargedAttacks() # init ChargedAttacks
Pokemons() # init Pokemons
#
# Usage helpers
# TODO: Complete the doc
# Only return types have been filled in for now. This helps the IDE suggest methods of the class.
def init_inventory(bot):
"""
Initialises the cached inventory, retrieves data from the server.
:param bot: Instance of the bot.
:type bot: pokemongo_bot.PokemonGoBot
:return: Nothing.
:rtype: None
"""
global _inventory
_inventory = Inventory(bot)
def refresh_inventory(data=None):
"""
Refreshes the cached inventory, retrieves data from the server.
:return: Nothing.
:rtype: None
"""
try:
_inventory.refresh(data)
except AttributeError:
print('_inventory was not initialized')
def jsonify_inventory():
try:
return _inventory.jsonify_inventory()
except AttributeError:
print('_inventory was not initialized')
return []
def update_web_inventory():
_inventory.update_web_inventory()
def get_item_inventory_size():
"""
Access to the Item inventory size.
:return: Item inventory size.
:rtype: int
"""
_inventory.retrieve_inventories_size()
return _inventory.item_inventory_size
def get_pokemon_inventory_size():
"""
Access to the Pokemon inventory size.
:return: Pokemon inventory size.
:rtype: int
"""
_inventory.retrieve_inventories_size()
return _inventory.pokemon_inventory_size
def pokedex():
"""
:return:
:rtype: Pokedex
"""
# Are new pokemons added to the pokedex?
return _inventory.pokedex
def player():
return _inventory.player
def candies():
"""
:return:
:rtype: Candies
"""
return _inventory.candy
def pokemons():
"""
:return:
:rtype: Pokemons
"""
return _inventory.pokemons
def items():
"""
Access to the cached item inventory.
:return: Instance of the cached item inventory.
:rtype: Items
"""
return _inventory.items
def applied_items():
"""
Access to the cached applied item inventory.
:return: Instance of the cached applied item inventory.
:rtype: Items
"""
return _inventory.applied_items
def types_data():
"""
:return:
:rtype: Types
"""
return Types
def levels_to_cpm():
"""
:return:
:rtype: LevelToCPm
"""
return LevelToCPm
def fast_attacks():
"""
:return:
:rtype: FastAttacks
"""
return FastAttacks
def charged_attacks():
"""
:return:
:rtype: ChargedAttacks
"""
return ChargedAttacks
|
mit
| 5,517,166,461,981,990,000 | 31.553683 | 173 | 0.602769 | false |
zappyk-github/zappyk-python
|
lib/lib_external/postgresql/test/test_lib.py
|
2
|
4170
|
##
# .test.test_lib - test the .lib package
##
import sys
import os
import unittest
import tempfile
from .. import exceptions as pg_exc
from .. import lib as pg_lib
from .. import sys as pg_sys
from ..temporal import pg_tmp
ilf = """
preface
[sym]
select 1
[sym_ref]
*[sym]
[sym_ref_trail]
*[sym] WHERE FALSE
[sym_first::first]
select 1
[sym_rows::rows]
select 1
[sym_chunks::chunks]
select 1
[sym_declare::declare]
select 1
[sym_const:const:first]
select 1
[sym_const_rows:const:rows]
select 1
[sym_const_chunks:const:chunks]
select 1
[sym_const_column:const:column]
select 1
[sym_const_ddl:const:]
create temp table sym_const_dll (i int);
[sym_preload:preload:first]
select 1
[sym_proc:proc]
test_ilf_proc(int)
[sym_srf_proc:proc]
test_ilf_srf_proc(int)
[&sym_reference]
SELECT 'SELECT 1';
[&sym_reference_params]
SELECT 'SELECT ' || $1::text;
[&sym_reference_first::first]
SELECT 'SELECT 1::int4';
[&sym_reference_const:const:first]
SELECT 'SELECT 1::int4';
[&sym_reference_proc:proc]
SELECT 'test_ilf_proc(int)'::text
"""
class test_lib(unittest.TestCase):
# NOTE: Module libraries are implicitly tested
# in postgresql.test.test_driver; much functionality
# depends on the `sys` library.
def _testILF(self, lib):
self.assertTrue('preface' in lib.preface)
db.execute("CREATE OR REPLACE FUNCTION test_ilf_proc(int) RETURNS int language sql as 'select $1';")
db.execute("CREATE OR REPLACE FUNCTION test_ilf_srf_proc(int) RETURNS SETOF int language sql as 'select $1';")
b = pg_lib.Binding(db, lib)
self.assertEqual(b.sym_ref(), [(1,)])
self.assertEqual(b.sym_ref_trail(), [])
self.assertEqual(b.sym(), [(1,)])
self.assertEqual(b.sym_first(), 1)
self.assertEqual(list(b.sym_rows()), [(1,)])
self.assertEqual([list(x) for x in b.sym_chunks()], [[(1,)]])
c = b.sym_declare()
self.assertEqual(c.read(), [(1,)])
c.seek(0)
self.assertEqual(c.read(), [(1,)])
self.assertEqual(b.sym_const, 1)
self.assertEqual(b.sym_const_column, [1])
self.assertEqual(b.sym_const_rows, [(1,)])
self.assertEqual(b.sym_const_chunks, [[(1,)]])
self.assertEqual(b.sym_const_ddl, ('CREATE TABLE', None))
self.assertEqual(b.sym_preload(), 1)
# now stored procs
self.assertEqual(b.sym_proc(2,), 2)
self.assertEqual(list(b.sym_srf_proc(2,)), [2])
self.assertRaises(AttributeError, getattr, b, 'LIES')
# reference symbols
self.assertEqual(b.sym_reference()(), [(1,)])
self.assertEqual(b.sym_reference_params('1::int')(), [(1,)])
self.assertEqual(b.sym_reference_params("'foo'::text")(), [('foo',)])
self.assertEqual(b.sym_reference_first()(), 1)
self.assertEqual(b.sym_reference_const(), 1)
self.assertEqual(b.sym_reference_proc()(2,), 2)
@pg_tmp
def testILF_from_lines(self):
lib = pg_lib.ILF.from_lines([l + '\n' for l in ilf.splitlines()])
self._testILF(lib)
@pg_tmp
def testILF_from_file(self):
f = tempfile.NamedTemporaryFile(
delete = False, mode = 'w', encoding = 'utf-8'
)
n = f.name
try:
f.write(ilf)
f.flush()
f.seek(0)
lib = pg_lib.ILF.open(n, encoding = 'utf-8')
self._testILF(lib)
f.close()
finally:
# so annoying...
os.unlink(n)
@pg_tmp
def testLoad(self):
# gotta test it in the cwd...
pid = os.getpid()
frag = 'temp' + str(pid)
fn = 'lib' + frag + '.sql'
try:
with open(fn, 'w') as f:
f.write("[foo]\nSELECT 1")
pg_sys.libpath.insert(0, os.path.curdir)
l = pg_lib.load(frag)
b = pg_lib.Binding(db, l)
self.assertEqual(b.foo(), [(1,)])
finally:
os.remove(fn)
@pg_tmp
def testCategory(self):
lib = pg_lib.ILF.from_lines([l + '\n' for l in ilf.splitlines()])
# XXX: evil, careful..
lib._name = 'name'
c = pg_lib.Category(lib)
c(db)
self.assertEqual(db.name.sym_first(), 1)
c = pg_lib.Category(renamed = lib)
c(db)
self.assertEqual(db.renamed.sym_first(), 1)
@pg_tmp
def testCategoryAliases(self):
lib = pg_lib.ILF.from_lines([l + '\n' for l in ilf.splitlines()])
# XXX: evil, careful..
lib._name = 'name'
c = pg_lib.Category(lib, renamed = lib)
c(db)
self.assertEqual(db.name.sym_first(), 1)
self.assertEqual(db.renamed.sym_first(), 1)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| 6,338,807,228,543,037,000 | 23.529412 | 112 | 0.654916 | false |
popara/jonny-api
|
matching/tests.py
|
1
|
6443
|
from fixures import *
from freezegun import freeze_time
from datetime import datetime
from time import sleep
from django.core import mail
from django.conf import settings
from funcy import is_list, all, str_join
from . import tasks as T
import fixures
from .models import JobStatus, job_status, get_questions, get_anon, \
get_anons_answers, get_details, zipthem, answer_as_str
job_id = "simplelogin:190"
def data(r):
return r.content
def job_start(api, job_id):
return api.post('/api/job/start/%s' % job_id)
def job_apply(api, job_id, user_id):
return api.get('/api/job/apply/%s/%s' % (job_id, user_id))
def test_job_start(api, ok, get_job, super_fresh_job, fire_app):
status = 'status'
super_fresh_job(job_id)
job = get_job(job_id)
assert status not in job
r = job_start(api, job_id)
sleep(14)
assert r.status_code == ok
assert r.data == job_id
j = get_job(job_id)
assert j[status] == 'drafting'
def test_applying_for_job(api, ok, get_job, patch_job, fresh_job):
fresh_job(job_id)
user = "simplelogin:3"
r = job_apply(api, job_id, user)
sleep(2)
assert r.status_code == ok
assert 'first' in data(r)
j = get_job(job_id)
assert len(j['applicants']) == 1
def test_applying_for_job_nd(api, ok, get_job, patch_job, fresh_job):
fresh_job(job_id)
user = "simplelogin:3"
user2 = "simplelogin:4"
user3 = "simplelogin:5"
r = job_apply(api, job_id, user)
r2 = job_apply(api, job_id, user2)
r3 = job_apply(api, job_id, user3)
assert r.status_code == ok
assert r2.status_code == ok
assert r3.status_code == ok
assert 'first' in data(r)
assert 'second' in data(r2)
assert 'third' in data(r3)
j = get_job(job_id)
assert len(j['applicants']) == 3
def test_already_applied(api, ok, get_job, patch_job, fresh_job):
fresh_job(job_id)
user = "simplelogin:3"
r = job_apply(api, job_id, user)
assert r.status_code == ok
assert 'first' in data(r)
j = get_job(job_id)
assert len(j['applicants']) == 1
r = job_apply(api, job_id, user)
assert r.status_code == ok
assert 'already' in data(r)
j = get_job(job_id)
assert len(j['applicants']) == 1
def test_getting_expert(available_experts):
u = T.get_experts()
assert len(u) > 1
def test_sending_emails():
e1 = "test@test.te"
ex1 = {"name": "Joe", "email": e1, "id": "simplelogin:1"}
m = T.dispatch_initial_email(job_id, ex1, {})
m1 = mail.outbox[0]
assert len(mail.outbox) == 1
assert m1.to == [e1]
def test_notify_experts(available_experts):
m = T.notify_experts(available_experts, job_id)
assert len(available_experts) == len(mail.outbox)
def test_hard_limit_no_applicants(get_job, patch_job):
patch_job(job_id, {'applicants': []})
T.hard_limit(job_id)
j = get_job(job_id)
#teardown
patch_job(job_id, {'applicants': []})
assert len(mail.outbox) == 1
assert len(j['applicants']) == 1
def test_hard_limit_with_applicants(get_job, patch_job):
A = 'applicants'
apl = {"user_id": "simplelogin:1"}
patch_job(job_id, {A: [apl]})
T.hard_limit(job_id)
j = get_job(job_id)
#teardown
patch_job(job_id, {'applicants': []})
assert len(mail.outbox) == 0
assert len(j['applicants']) == 1
def test_soft_limit_expires(api, get_job, fresh_job, apply_for_job):
fresh_job(job_id)
A = 'applicants'
u1 = "simplelogin:1"
u2 = "simplelogin:2"
r = job_apply(api, job_id, u1)
r2 = job_apply(api, job_id, u2)
assert len(mail.outbox) == 0
j = get_job(job_id)
assert j["status"] == "drafting"
T.soft_limit(job_id)
assert len(mail.outbox) == 1
m1 = mail.outbox[0]
assert m1.to == ['zeljko@jonnyibiza.com']
def test_queue_filled(api, ok, get_job, fresh_job, apply_for_job):
fresh_job(job_id)
settings.SOFT_LIMIT_PERIOD = 2
def u(id):
return "simplelogin:%s" % id
for applicant in range(0, settings.QUEUE_SIZE):
job_apply(api, job_id, u(applicant+1))
r = job_apply(api, job_id, u(settings.QUEUE_SIZE+2))
assert r.status_code != ok
assert "too late" in data(r)
def test_job_state(empty_job, applied_job, full_job):
assert empty_job["status"] == JobStatus.drafting
def test_getting_questions():
fields = ['id', 'name', 'category', 'text', 'type', 'label']
qs = get_questions()
def is_q(q):
return all(map(lambda x: x in q, fields))
assert is_list(qs)
assert all(map(is_q, qs))
def test_get_anon_answers():
a = "anonymous:-Jr8CygRdKAANrQ5ENax"
fields = ['at', 'value', 'id']
ans = get_anons_answers(a)
def is_ans(an):
return all(map(lambda x: x in an, fields))
assert is_list(ans)
assert all(map(is_ans, ans))
def test_match_questions():
ans = [{'id': 'a', 'value': 'aa'}, {'id': 'b', 'value': 'bb'}]
qs = [{'id': 'a', 'text': 'what', 'type': 'freeform'}, {'id': 'b', 'text': 'who', 'type': 'bingo'}]
r = zipthem(qs, ans)
fields = ['question', 'answer']
def is_z(z):
return all(map(lambda a: a in z, fields))
assert is_list(r)
assert all(map(is_z, r))
def test_zippingthem(questions, user_answers):
r = zipthem(questions, user_answers)
assert is_list(r)
def test_answer_as_string(typed_answers):
AT = typed_answers
keys = AT.keys()
o = AT['checklist']['value']
t = 'check-list'
v = answer_as_str(o, t)
assert v == str_join(', ', o)
o = AT['checklist_2']['value']
v = answer_as_str(o, t)
assert v == 'Somez, Thingzsz'
o = AT['bingo']['value']
v = answer_as_str(o, 'bingo')
assert v == str_join(', ', o)
o = AT['about']['value']
v = answer_as_str(o, 'about')
assert v == 'male, 31, straight'
o = AT['companydetails']['value']
v = answer_as_str(o, 'company-details')
assert '3 Boys' in v
assert '34 Girls' in v
assert 'partner' not in v
assert 'Male friends: 4' in v
o = AT['rolling']['value']
v = answer_as_str(o, 'rolling')
assert 'Rock star!' == v
o = AT['dates']['value']
v = answer_as_str(o, 'dates')
assert 'To June 24' in v
assert 'flexible' in v
o = AT['freeform']['value']
v = answer_as_str(o, 'freeform')
assert v == 'Knock the blast'
def test_dummy(fire_app):
assert fire_app.get('/levels', None)
assert fire_app.get('/users', 'simplelogin:190')
|
mit
| -7,788,216,686,586,730,000 | 24.669323 | 103 | 0.601273 | false |
openstack/heat
|
heat/engine/parameters.py
|
1
|
20706
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import itertools
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import strutils
from heat.common import exception
from heat.common.i18n import _
from heat.common import param_utils
from heat.engine import constraints as constr
PARAMETER_KEYS = (
TYPE, DEFAULT, NO_ECHO, ALLOWED_VALUES, ALLOWED_PATTERN,
MAX_LENGTH, MIN_LENGTH, MAX_VALUE, MIN_VALUE,
DESCRIPTION, CONSTRAINT_DESCRIPTION, LABEL
) = (
'Type', 'Default', 'NoEcho', 'AllowedValues', 'AllowedPattern',
'MaxLength', 'MinLength', 'MaxValue', 'MinValue',
'Description', 'ConstraintDescription', 'Label'
)
class Schema(constr.Schema):
"""Parameter schema."""
KEYS = (
TYPE, DESCRIPTION, DEFAULT, SCHEMA, CONSTRAINTS, HIDDEN,
LABEL, IMMUTABLE, TAGS,
) = (
'Type', 'Description', 'Default', 'Schema', 'Constraints', 'NoEcho',
'Label', 'Immutable', 'Tags',
)
PARAMETER_KEYS = PARAMETER_KEYS
# For Parameters the type name for Schema.LIST is CommaDelimitedList
# and the type name for Schema.MAP is Json
TYPES = (
STRING, NUMBER, LIST, MAP, BOOLEAN,
) = (
'String', 'Number', 'CommaDelimitedList', 'Json', 'Boolean',
)
def __init__(self, data_type, description=None, default=None, schema=None,
constraints=None, hidden=False, label=None, immutable=False,
tags=None):
super(Schema, self).__init__(data_type=data_type,
description=description,
default=default,
schema=schema,
required=default is None,
constraints=constraints,
label=label,
immutable=immutable)
self.hidden = hidden
self.tags = tags
# Schema class validates default value for lists assuming list type. For
# comma delimited list string supported in parameters Schema class, the
# default value has to be parsed into a list if necessary so that
# validation works.
def _validate_default(self, context):
if self.default is not None:
default_value = self.default
if self.type == self.LIST and not isinstance(self.default, list):
try:
default_value = self.default.split(',')
except (KeyError, AttributeError) as err:
raise exception.InvalidSchemaError(
message=_('Default must be a comma-delimited list '
'string: %s') % err)
elif self.type == self.LIST and isinstance(self.default, list):
default_value = [(str(x)) for x in self.default]
try:
self.validate_constraints(default_value, context,
[constr.CustomConstraint])
except (ValueError, TypeError,
exception.StackValidationFailed) as exc:
raise exception.InvalidSchemaError(
message=_('Invalid default %(default)s (%(exc)s)') %
dict(default=self.default, exc=exc))
def set_default(self, default=None):
super(Schema, self).set_default(default)
self.required = default is None
@staticmethod
def get_num(key, context):
val = context.get(key)
if val is not None:
val = Schema.str_to_num(val)
return val
@staticmethod
def _check_dict(schema_dict, allowed_keys, entity):
if not isinstance(schema_dict, dict):
raise exception.InvalidSchemaError(
message=_("Invalid %s, expected a mapping") % entity)
for key in schema_dict:
if key not in allowed_keys:
raise exception.InvalidSchemaError(
message=_("Invalid key '%(key)s' for %(entity)s") % {
"key": key, "entity": entity})
@classmethod
def _validate_dict(cls, param_name, schema_dict):
cls._check_dict(schema_dict,
cls.PARAMETER_KEYS,
"parameter (%s)" % param_name)
if cls.TYPE not in schema_dict:
raise exception.InvalidSchemaError(
message=_("Missing parameter type for parameter: %s") %
param_name)
if not isinstance(schema_dict.get(cls.TAGS, []), list):
raise exception.InvalidSchemaError(
message=_("Tags property should be a list for parameter: %s") %
param_name)
@classmethod
def from_dict(cls, param_name, schema_dict):
"""Return a Parameter Schema object from a legacy schema dictionary.
:param param_name: name of the parameter owning the schema; used
for more verbose logging
:type param_name: str
"""
cls._validate_dict(param_name, schema_dict)
def constraints():
desc = schema_dict.get(CONSTRAINT_DESCRIPTION)
if MIN_VALUE in schema_dict or MAX_VALUE in schema_dict:
yield constr.Range(Schema.get_num(MIN_VALUE, schema_dict),
Schema.get_num(MAX_VALUE, schema_dict),
desc)
if MIN_LENGTH in schema_dict or MAX_LENGTH in schema_dict:
yield constr.Length(Schema.get_num(MIN_LENGTH, schema_dict),
Schema.get_num(MAX_LENGTH, schema_dict),
desc)
if ALLOWED_VALUES in schema_dict:
yield constr.AllowedValues(schema_dict[ALLOWED_VALUES], desc)
if ALLOWED_PATTERN in schema_dict:
yield constr.AllowedPattern(schema_dict[ALLOWED_PATTERN], desc)
# make update_allowed true by default on TemplateResources
# as the template should deal with this.
return cls(schema_dict[TYPE],
description=schema_dict.get(DESCRIPTION),
default=schema_dict.get(DEFAULT),
constraints=list(constraints()),
hidden=str(schema_dict.get(NO_ECHO,
'false')).lower() == 'true',
label=schema_dict.get(LABEL))
def validate_value(self, value, context=None):
super(Schema, self).validate_constraints(value, context)
def __getitem__(self, key):
if key == self.TYPE:
return self.type
if key == self.HIDDEN:
return self.hidden
else:
return super(Schema, self).__getitem__(key)
class Parameter(object):
"""A template parameter."""
def __new__(cls, name, schema, value=None):
"""Create a new Parameter of the appropriate type."""
if cls is not Parameter:
return super(Parameter, cls).__new__(cls)
# Check for fully-fledged Schema objects
if not isinstance(schema, Schema):
schema = Schema.from_dict(name, schema)
if schema.type == schema.STRING:
ParamClass = StringParam
elif schema.type == schema.NUMBER:
ParamClass = NumberParam
elif schema.type == schema.LIST:
ParamClass = CommaDelimitedListParam
elif schema.type == schema.MAP:
ParamClass = JsonParam
elif schema.type == schema.BOOLEAN:
ParamClass = BooleanParam
else:
raise ValueError(_('Invalid Parameter type "%s"') % schema.type)
return super(Parameter, cls).__new__(ParamClass)
__slots__ = ('name', 'schema', 'user_value', 'user_default')
def __init__(self, name, schema, value=None):
"""Initialise the parameter.
Initialise the Parameter with a name, schema and optional user-supplied
value.
"""
self.name = name
self.schema = schema
self.user_value = value
self.user_default = None
def validate(self, validate_value=True, context=None):
"""Validates the parameter.
This method validates if the parameter's schema is valid,
and if the default value - if present - or the user-provided
value for the parameter comply with the schema.
"""
err_msg = _("Parameter '%(name)s' is invalid: %(exp)s")
try:
self.schema.validate(context)
if not validate_value:
return
if self.user_value is not None:
self._validate(self.user_value, context)
elif self.has_default():
self._validate(self.default(), context)
else:
raise exception.UserParameterMissing(key=self.name)
except exception.StackValidationFailed as ex:
msg = err_msg % dict(name=self.name, exp=str(ex))
raise exception.StackValidationFailed(message=msg)
except exception.InvalidSchemaError as ex:
msg = err_msg % dict(name=self.name, exp=str(ex))
raise exception.InvalidSchemaError(message=msg)
def value(self):
"""Get the parameter value, optionally sanitising it for output."""
if self.user_value is not None:
return self.user_value
if self.has_default():
return self.default()
raise exception.UserParameterMissing(key=self.name)
def has_value(self):
"""Parameter has a user or default value."""
return self.user_value is not None or self.has_default()
def hidden(self):
"""Return whether the parameter is hidden.
Hidden parameters should be sanitised in any output to the user.
"""
return self.schema.hidden
def description(self):
"""Return the description of the parameter."""
return self.schema.description or ''
def label(self):
"""Return the label or param name."""
return self.schema.label or self.name
def tags(self):
"""Return the tags associated with the parameter"""
return self.schema.tags or []
def has_default(self):
"""Return whether the parameter has a default value."""
return (self.schema.default is not None or
self.user_default is not None)
def default(self):
"""Return the default value of the parameter."""
if self.user_default is not None:
return self.user_default
return self.schema.default
def set_default(self, value):
self.user_default = value
@classmethod
def _value_as_text(cls, value):
return str(value)
def __str__(self):
"""Return a string representation of the parameter."""
value = self.value()
if self.hidden():
return str('******')
else:
return self._value_as_text(value)
class NumberParam(Parameter):
"""A template parameter of type "Number"."""
__slots__ = tuple()
def __int__(self):
"""Return an integer representation of the parameter."""
return int(super(NumberParam, self).value())
def __float__(self):
"""Return a float representation of the parameter."""
return float(super(NumberParam, self).value())
def _validate(self, val, context):
try:
Schema.str_to_num(val)
except (ValueError, TypeError) as ex:
raise exception.StackValidationFailed(message=str(ex))
self.schema.validate_value(val, context)
def value(self):
return Schema.str_to_num(super(NumberParam, self).value())
class BooleanParam(Parameter):
"""A template parameter of type "Boolean"."""
__slots__ = tuple()
def _validate(self, val, context):
try:
strutils.bool_from_string(val, strict=True)
except ValueError as ex:
raise exception.StackValidationFailed(message=str(ex))
self.schema.validate_value(val, context)
def value(self):
if self.user_value is not None:
raw_value = self.user_value
else:
raw_value = self.default()
return strutils.bool_from_string(str(raw_value), strict=True)
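# e.g. the bool_from_string(..., strict=True) call above maps strings such as
# 'true', '1' or 'yes' to True and 'false', '0' or 'no' to False; anything
# else raises ValueError.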
class StringParam(Parameter):
"""A template parameter of type "String"."""
__slots__ = tuple()
def _validate(self, val, context):
self.schema.validate_value(val, context=context)
def value(self):
return self.schema.to_schema_type(super(StringParam, self).value())
class ParsedParameter(Parameter):
"""A template parameter with cached parsed value."""
__slots__ = ('_parsed',)
def __init__(self, name, schema, value=None):
super(ParsedParameter, self).__init__(name, schema, value)
self._parsed = None
@property
def parsed(self):
if self._parsed is None:
if self.has_value():
if self.user_value is not None:
self._parsed = self.parse(self.user_value)
else:
self._parsed = self.parse(self.default())
else:
self._parsed = self.default_parsed()
return self._parsed
class CommaDelimitedListParam(ParsedParameter, collections.abc.Sequence):
"""A template parameter of type "CommaDelimitedList"."""
__slots__ = tuple()
def default_parsed(self):
return []
def parse(self, value):
# only parse when value is not already a list
if isinstance(value, list):
return [(str(x)) for x in value]
try:
return param_utils.delim_string_to_list(value)
except (KeyError, AttributeError) as err:
message = _('Value must be a comma-delimited list string: %s')
raise ValueError(message % str(err))
return value
def value(self):
if self.has_value():
return self.parsed
raise exception.UserParameterMissing(key=self.name)
def __len__(self):
"""Return the length of the list."""
return len(self.parsed)
def __getitem__(self, index):
"""Return an item from the list."""
return self.parsed[index]
@classmethod
def _value_as_text(cls, value):
return ",".join(value)
def _validate(self, val, context):
try:
parsed = self.parse(val)
except ValueError as ex:
raise exception.StackValidationFailed(message=str(ex))
self.schema.validate_value(parsed, context)
class JsonParam(ParsedParameter):
"""A template parameter who's value is map or list."""
__slots__ = tuple()
def default_parsed(self):
return {}
def parse(self, value):
try:
val = value
if not isinstance(val, str):
# turn off oslo_serialization's clever to_primitive()
val = jsonutils.dumps(val, default=None)
if val:
return jsonutils.loads(val)
except (ValueError, TypeError) as err:
message = _('Value must be valid JSON: %s') % err
raise ValueError(message)
return value
def value(self):
if self.has_value():
return self.parsed
raise exception.UserParameterMissing(key=self.name)
def __getitem__(self, key):
return self.parsed[key]
def __iter__(self):
return iter(self.parsed)
def __len__(self):
return len(self.parsed)
@classmethod
def _value_as_text(cls, value):
return encodeutils.safe_decode(jsonutils.dumps(value))
def _validate(self, val, context):
try:
parsed = self.parse(val)
except ValueError as ex:
raise exception.StackValidationFailed(message=str(ex))
self.schema.validate_value(parsed, context)
class Parameters(collections.abc.Mapping, metaclass=abc.ABCMeta):
"""Parameters of a stack.
The parameters of a stack, with type checking, defaults, etc. specified by
the stack's template.
"""
def __init__(self, stack_identifier, tmpl, user_params=None,
param_defaults=None):
"""Initialisation of the parameter.
Create the parameter container for a stack from the stack name and
template, optionally setting the user-supplied parameter values.
"""
user_params = user_params or {}
param_defaults = param_defaults or {}
def user_parameter(schema_item):
name, schema = schema_item
return Parameter(name, schema,
user_params.get(name))
self.tmpl = tmpl
self.user_params = user_params
schemata = self.tmpl.param_schemata()
user_parameters = (user_parameter(si) for si in
schemata.items())
pseudo_parameters = self._pseudo_parameters(stack_identifier)
self.params = dict((p.name,
p) for p in itertools.chain(pseudo_parameters,
user_parameters))
self.non_pseudo_param_keys = [p for p in self.params if p not in
self.PSEUDO_PARAMETERS]
for pd_name, param_default in param_defaults.items():
if pd_name in self.params:
self.params[pd_name].set_default(param_default)
def validate(self, validate_value=True, context=None):
"""Validates all parameters.
This method validates if all user-provided parameters are actually
defined in the template, and if all parameters are valid.
"""
self._validate_user_parameters()
for param in self.params.values():
param.validate(validate_value, context)
def __contains__(self, key):
"""Return whether the specified parameter exists."""
return key in self.params
def __iter__(self):
"""Return an iterator over the parameter names."""
return iter(self.params)
def __len__(self):
"""Return the number of parameters defined."""
return len(self.params)
def __getitem__(self, key):
"""Get a parameter value."""
return self.params[key].value()
def map(self, func, filter_func=lambda p: True):
"""Map the supplied function onto each Parameter.
Map the supplied function onto each Parameter (with an optional filter
function) and return the resulting dictionary.
"""
return dict((n, func(p))
for n, p in self.params.items() if filter_func(p))
def set_stack_id(self, stack_identifier):
"""Set the StackId pseudo parameter value."""
if stack_identifier is not None:
self.params[self.PARAM_STACK_ID].schema.set_default(
stack_identifier.arn())
return True
return False
def _validate_user_parameters(self):
schemata = self.tmpl.param_schemata()
for param in self.user_params:
if param not in schemata:
raise exception.UnknownUserParameter(key=param)
@abc.abstractmethod
def _pseudo_parameters(self, stack_identifier):
pass
def immutable_params_modified(self, new_parameters, input_params):
# A parameter must have been present in the old stack for its
# immutability to be enforced
common_params = list(set(new_parameters.non_pseudo_param_keys)
& set(self.non_pseudo_param_keys))
invalid_params = []
for param in common_params:
old_value = self.params[param]
if param in input_params:
new_value = input_params[param]
else:
new_value = new_parameters[param]
immutable = new_parameters.params[param].schema.immutable
if immutable and old_value.value() != new_value:
invalid_params.append(param)
if invalid_params:
return invalid_params
|
apache-2.0
| 5,806,063,626,160,475,000 | 33.858586 | 79 | 0.585772 | false |
dhalleine/tensorflow
|
tensorflow/examples/skflow/iris.py
|
1
|
1465
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import metrics, cross_validation
import tensorflow as tf
from tensorflow.contrib import learn
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = learn.DNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
score = metrics.accuracy_score(y_test, classifier.predict(x_test))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
| -4,256,267,123,883,611,000 | 35.625 | 75 | 0.730375 | false |
marshallmcdonnell/interactive_plotting
|
TraitsUI/examples/FileEditor_demo.py
|
1
|
1168
|
#!/usr/bin/env python
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
"""
Implementation of a FileEditor demo plugin for Traits UI demo program.
This demo shows each of the four styles of the FileEditor
"""
# Imports:
from traits.api \
import HasTraits, File
from traitsui.api \
import Item, Group, View
# Define the demo class:
class FileEditorDemo(HasTraits):
""" Defines the main FileEditor demo class. """
# Define a File trait to view:
file_name = File
# Display specification (one Item per editor style):
file_group = Group(
Item('file_name', style='simple', label='Simple'),
Item('_'),
Item('file_name', style='custom', label='Custom'),
Item('_'),
Item('file_name', style='text', label='Text'),
Item('_'),
Item('file_name', style='readonly', label='ReadOnly')
)
# Demo view:
view = View(
file_group,
title='FileEditor',
buttons=['OK'],
resizable=True
)
# Create the demo:
demo = FileEditorDemo()
# Run the demo (if invoked from the command line):
if __name__ == '__main__':
demo.configure_traits()
|
mit
| -8,845,989,099,445,362,000 | 21.461538 | 70 | 0.607877 | false |
EdinburghGenomics/clarity_scripts
|
scripts/generate_hamilton_input_uct.py
|
1
|
2391
|
#!/usr/bin/env python
from EPPs.common import GenerateHamiltonInputEPP, InvalidStepError
class GenerateHamiltonInputUCT(GenerateHamiltonInputEPP):
""""Generate a CSV containing the necessary information for the KAPA make libraries method"""
_use_load_config = False # prevent the loading of the config
csv_column_headers = ['Input Plate', 'Input Well', 'Sample Name', 'Adapter Well']
output_file_name = 'KAPA_MAKE_LIBRARIES.csv'
# Define the number of input containers that are permitted
_max_nb_input_containers = 1
# Define the number of output containers that are permitted
_max_nb_output_containers = 1
def _generate_csv_dict(self):
# csv_dict will be a dictionary that consists of the lines to be present in the Hamilton input file.
csv_dict = {}
# find all the inputs for the step that are analytes (i.e. samples and not associated files)
for art in self.artifacts:
if art.type == 'Analyte':
output = self.process.outputs_per_input(art.id, Analyte=True)
# the script is only compatible with 1 output for each input i.e. replicates are not allowed
if len(output) > 1:
raise InvalidStepError('Multiple outputs found for an input %s. '
'This step is not compatible with replicates.' % art.name)
# remove semi-colon from locations as this is not compatible with Hamilton Venus software
row, column = art.location[1].split(":")
input_location = row + column
# obtain well location of reagent_label (i.e. index/barcode)
# which is stored as the third charater (column) and fourth character (row)
adapter_well = output[0].reagent_labels[0][3]+output[0].reagent_labels[0][2]
# assemble each line of the Hamilton input file in the correct structure for the Hamilton
csv_line = [art.container.name, input_location, art.name, adapter_well]
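# e.g. (made-up values, for illustration only): a resulting line could look
# like ['LP1234567-DNA', 'A1', 'sample_1', 'B2']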
# build a dictionary of the lines for the Hamilton input file with a key that facilitates the lines being
# sorted by input container then column then row
csv_dict[art.location[1]] = csv_line
return csv_dict
if __name__ == '__main__':
GenerateHamiltonInputUCT().run()
|
mit
| 5,375,435,361,011,427,000 | 45.882353 | 121 | 0.637808 | false |
OCA/l10n-brazil
|
l10n_br_nfe/models/product_product.py
|
1
|
2017
|
# Copyright 2020 Akretion (Raphaël Valyi <raphael.valyi@akretion.com>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# The generateDS prod mixin (prod XML tag) cannot be injected in
# the product.product object because the tag includes attributes from the
# Odoo fiscal document line. So a part of the mapping is done
# in the fiscal document line:
# from Odoo -> XML by using related fields/_compute
# from XML -> Odoo by overriding the product create method
from odoo import api, models
class ProductProduct(models.Model):
_inherit = "product.product"
_nfe_search_keys = ["default_code", "barcode"]
@api.model
def create(self, values):
parent_dict = self._context.get("parent_dict", {})
if parent_dict.get("nfe40_xProd"):
values["name"] = parent_dict["nfe40_xProd"]
# Price Unit
if parent_dict.get("nfe40_vUnCom"):
values["standard_price"] = parent_dict.get("nfe40_vUnCom")
values["list_price"] = parent_dict.get("nfe40_vUnCom")
# Barcode
if parent_dict.get("nfe40_cEAN") and parent_dict["nfe40_cEAN"] != "SEM GTIN":
values["barcode"] = parent_dict["nfe40_cEAN"]
# NCM
if parent_dict.get("nfe40_NCM"):
ncm = self.env["l10n_br_fiscal.ncm"].search(
[("code_unmasked", "=", parent_dict["nfe40_NCM"])], limit=1
)
values["ncm_id"] = ncm.id
if not ncm: # FIXME should not happen with prod data
ncm = (
self.env["l10n_br_fiscal.ncm"]
.sudo()
.create(
{
"name": parent_dict["nfe40_NCM"],
"code": parent_dict["nfe40_NCM"],
}
)
)
values["ncm_id"] = ncm.id
product = super().create(values)
product.product_tmpl_id._onchange_ncm_id()
return product
|
agpl-3.0
| 2,735,946,614,346,512,400 | 35.654545 | 85 | 0.550099 | false |
gurneyalex/odoo
|
addons/mail/models/mail_activity.py
|
4
|
44705
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from collections import defaultdict
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
import logging
import pytz
from odoo import api, exceptions, fields, models, _
from odoo.tools.misc import clean_context
from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
_logger = logging.getLogger(__name__)
class MailActivityType(models.Model):
""" Activity Types are used to categorize activities. Each type is a different
kind of activity e.g. call, mail, meeting. An activity can be generic i.e.
available for all models using activities; or specific to a model in which
case res_model_id field should be used. """
_name = 'mail.activity.type'
_description = 'Activity Type'
_rec_name = 'name'
_order = 'sequence, id'
@api.model
def default_get(self, fields):
if not self.env.context.get('default_res_model_id') and self.env.context.get('default_res_model'):
self = self.with_context(
default_res_model_id=self.env['ir.model']._get(self.env.context.get('default_res_model'))
)
return super(MailActivityType, self).default_get(fields)
name = fields.Char('Name', required=True, translate=True)
summary = fields.Char('Default Summary', translate=True)
sequence = fields.Integer('Sequence', default=10)
active = fields.Boolean(default=True)
create_uid = fields.Many2one('res.users', index=True)
delay_count = fields.Integer(
'Scheduled Date', default=0,
help='Number of days/week/month before executing the action. It allows to plan the action deadline.')
delay_unit = fields.Selection([
('days', 'days'),
('weeks', 'weeks'),
('months', 'months')], string="Delay units", help="Unit of delay", required=True, default='days')
delay_from = fields.Selection([
('current_date', 'after validation date'),
('previous_activity', 'after previous activity deadline')], string="Delay Type", help="Type of delay", required=True, default='previous_activity')
icon = fields.Char('Icon', help="Font awesome icon e.g. fa-tasks")
decoration_type = fields.Selection([
('warning', 'Alert'),
('danger', 'Error')], string="Decoration Type",
help="Change the background color of the related activities of this type.")
res_model_id = fields.Many2one(
'ir.model', 'Model', index=True,
domain=['&', ('is_mail_thread', '=', True), ('transient', '=', False)],
help='Specify a model if the activity should be specific to a model'
' and not available when managing activities for other models.')
default_next_type_id = fields.Many2one('mail.activity.type', 'Default Next Activity',
domain="['|', ('res_model_id', '=', False), ('res_model_id', '=', res_model_id)]")
force_next = fields.Boolean("Trigger Next Activity", default=False)
next_type_ids = fields.Many2many(
'mail.activity.type', 'mail_activity_rel', 'activity_id', 'recommended_id',
domain="['|', ('res_model_id', '=', False), ('res_model_id', '=', res_model_id)]",
string='Recommended Next Activities')
previous_type_ids = fields.Many2many(
'mail.activity.type', 'mail_activity_rel', 'recommended_id', 'activity_id',
domain="['|', ('res_model_id', '=', False), ('res_model_id', '=', res_model_id)]",
string='Preceding Activities')
category = fields.Selection([
('default', 'None'), ('upload_file', 'Upload Document')
], default='default', string='Action to Perform',
help='Actions may trigger specific behavior like opening calendar view or automatically mark as done when a document is uploaded')
mail_template_ids = fields.Many2many('mail.template', string='Email templates')
default_user_id = fields.Many2one("res.users", string="Default User")
default_description = fields.Html(string="Default Description", translate=True)
#Fields for display purpose only
initial_res_model_id = fields.Many2one('ir.model', 'Initial model', compute="_compute_initial_res_model_id", store=False,
help='Technical field to keep track of the model at the beginning of editing, for UX-related behaviour')
res_model_change = fields.Boolean(string="Model has changed", help="Technical field for UX-related behaviour", default=False, store=False)
@api.onchange('res_model_id')
def _onchange_res_model_id(self):
self.mail_template_ids = self.mail_template_ids.filtered(lambda template: template.model_id == self.res_model_id)
self.res_model_change = self.initial_res_model_id and self.initial_res_model_id != self.res_model_id
def _compute_initial_res_model_id(self):
for activity_type in self:
activity_type.initial_res_model_id = activity_type.res_model_id
def unlink(self):
if any(self.get_external_id().values()) and not self._context.get(MODULE_UNINSTALL_FLAG):
raise exceptions.ValidationError("You cannot delete activity types that are used as master data.")
return super(MailActivityType, self).unlink()
class MailActivity(models.Model):
""" An actual activity to perform. Activities are linked to
documents using res_id and res_model_id fields. Activities have a deadline
that can be used in kanban view to display a status. Once done activities
are unlinked and a message is posted. This message has a new activity_type_id
field that indicates the activity linked to the message. """
_name = 'mail.activity'
_description = 'Activity'
_order = 'date_deadline ASC'
_rec_name = 'summary'
@api.model
def default_get(self, fields):
res = super(MailActivity, self).default_get(fields)
if not fields or 'res_model_id' in fields and res.get('res_model'):
res['res_model_id'] = self.env['ir.model']._get(res['res_model']).id
return res
# owner
res_model_id = fields.Many2one(
'ir.model', 'Document Model',
index=True, ondelete='cascade', required=True)
res_model = fields.Char(
'Related Document Model',
index=True, related='res_model_id.model', compute_sudo=True, store=True, readonly=True)
res_id = fields.Many2oneReference(string='Related Document ID', index=True, required=True, model_field='res_model')
res_name = fields.Char(
'Document Name', compute='_compute_res_name', compute_sudo=True, store=True,
help="Display name of the related document.", readonly=True)
# activity
activity_type_id = fields.Many2one(
'mail.activity.type', string='Activity Type',
domain="['|', ('res_model_id', '=', False), ('res_model_id', '=', res_model_id)]", ondelete='restrict')
activity_category = fields.Selection(related='activity_type_id.category', readonly=True)
activity_decoration = fields.Selection(related='activity_type_id.decoration_type', readonly=True)
icon = fields.Char('Icon', related='activity_type_id.icon', readonly=True)
summary = fields.Char('Summary')
note = fields.Html('Note', sanitize_style=True)
date_deadline = fields.Date('Due Date', index=True, required=True, default=fields.Date.context_today)
automated = fields.Boolean(
'Automated activity', readonly=True,
help='Indicates this activity has been created automatically and not by any user.')
# description
user_id = fields.Many2one(
'res.users', 'Assigned to',
default=lambda self: self.env.user,
index=True, required=True)
state = fields.Selection([
('overdue', 'Overdue'),
('today', 'Today'),
('planned', 'Planned')], 'State',
compute='_compute_state')
recommended_activity_type_id = fields.Many2one('mail.activity.type', string="Recommended Activity Type")
previous_activity_type_id = fields.Many2one('mail.activity.type', string='Previous Activity Type', readonly=True)
has_recommended_activities = fields.Boolean(
'Next activities available',
compute='_compute_has_recommended_activities',
help='Technical field for UX purpose')
mail_template_ids = fields.Many2many(related='activity_type_id.mail_template_ids', readonly=True)
force_next = fields.Boolean(related='activity_type_id.force_next', readonly=True)
# access
can_write = fields.Boolean(compute='_compute_can_write', help='Technical field to hide buttons if the current user has no access.')
@api.onchange('previous_activity_type_id')
def _compute_has_recommended_activities(self):
for record in self:
record.has_recommended_activities = bool(record.previous_activity_type_id.next_type_ids)
@api.onchange('previous_activity_type_id')
def _onchange_previous_activity_type_id(self):
for record in self:
if record.previous_activity_type_id.default_next_type_id:
record.activity_type_id = record.previous_activity_type_id.default_next_type_id
@api.depends('res_model', 'res_id')
def _compute_res_name(self):
for activity in self:
activity.res_name = activity.res_model and \
self.env[activity.res_model].browse(activity.res_id).display_name
@api.depends('date_deadline')
def _compute_state(self):
for record in self.filtered(lambda activity: activity.date_deadline):
tz = record.user_id.sudo().tz
date_deadline = record.date_deadline
record.state = self._compute_state_from_date(date_deadline, tz)
@api.model
def _compute_state_from_date(self, date_deadline, tz=False):
date_deadline = fields.Date.from_string(date_deadline)
today_default = date.today()
today = today_default
if tz:
today_utc = pytz.UTC.localize(datetime.utcnow())
today_tz = today_utc.astimezone(pytz.timezone(tz))
today = date(year=today_tz.year, month=today_tz.month, day=today_tz.day)
diff = (date_deadline - today)
if diff.days == 0:
return 'today'
elif diff.days < 0:
return 'overdue'
else:
return 'planned'
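# e.g. a deadline equal to today (in the user's timezone) yields 'today',
# an earlier date yields 'overdue' and a later date yields 'planned'.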
@api.depends('res_model', 'res_id', 'user_id')
def _compute_can_write(self):
valid_records = self._filter_access_rules('write')
for record in self:
record.can_write = record in valid_records
@api.onchange('activity_type_id')
def _onchange_activity_type_id(self):
if self.activity_type_id:
if self.activity_type_id.summary:
self.summary = self.activity_type_id.summary
self.date_deadline = self._calculate_date_deadline(self.activity_type_id)
self.user_id = self.activity_type_id.default_user_id or self.env.user
if self.activity_type_id.default_description:
self.note = self.activity_type_id.default_description
def _calculate_date_deadline(self, activity_type):
# Date.context_today is correct because date_deadline is a Date and is meant to be
# expressed in user TZ
base = fields.Date.context_today(self)
if activity_type.delay_from == 'previous_activity' and 'activity_previous_deadline' in self.env.context:
base = fields.Date.from_string(self.env.context.get('activity_previous_deadline'))
return base + relativedelta(**{activity_type.delay_unit: activity_type.delay_count})
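# e.g. (sketch): delay_count=2 with delay_unit='weeks' adds
# relativedelta(weeks=2) to the base date computed above.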
@api.onchange('recommended_activity_type_id')
def _onchange_recommended_activity_type_id(self):
if self.recommended_activity_type_id:
self.activity_type_id = self.recommended_activity_type_id
def _filter_access_rules(self, operation):
# write / unlink: valid for creator / assigned
if operation in ('write', 'unlink'):
valid = super(MailActivity, self)._filter_access_rules(operation)
if valid and valid == self:
return self
else:
valid = self.env[self._name]
return self._filter_access_rules_remaining(valid, operation, '_filter_access_rules')
def _filter_access_rules_python(self, operation):
# write / unlink: valid for creator / assigned
if operation in ('write', 'unlink'):
valid = super(MailActivity, self)._filter_access_rules_python(operation)
if valid and valid == self:
return self
else:
valid = self.env[self._name]
return self._filter_access_rules_remaining(valid, operation, '_filter_access_rules_python')
def _filter_access_rules_remaining(self, valid, operation, filter_access_rules_method):
""" Return the subset of ``self`` for which ``operation`` is allowed.
A custom implementation is used for activities because this model has some
access rules of its own, and falls back on the related document's access
rights for activities that are not covered by those rules.
Access on activities are the following :
* create: (``mail_post_access`` or write) right on related documents;
* read: read rights on related documents;
* write: access rule OR
(``mail_post_access`` or write) rights on related documents);
* unlink: access rule OR
(``mail_post_access`` or write) rights on related documents);
"""
# compute remaining for hand-tailored rules
remaining = self - valid
remaining_sudo = remaining.sudo()
# fall back on related document access right checks. Use the same as defined for mail.thread
# if available; otherwise fall back on read for read, write for other operations.
activity_to_documents = dict()
for activity in remaining_sudo:
# write / unlink: if not updating self or assigned, limit to automated activities to avoid
# updating other people's activities. As unlinking a document bypasses access rights checks
# on related activities this will not prevent people from deleting documents with activities
# create / read: just check rights on related document
activity_to_documents.setdefault(activity.res_model, list()).append(activity.res_id)
for doc_model, doc_ids in activity_to_documents.items():
if hasattr(self.env[doc_model], '_mail_post_access'):
doc_operation = self.env[doc_model]._mail_post_access
elif operation == 'read':
doc_operation = 'read'
else:
doc_operation = 'write'
right = self.env[doc_model].check_access_rights(doc_operation, raise_exception=False)
if right:
valid_doc_ids = getattr(self.env[doc_model].browse(doc_ids), filter_access_rules_method)(doc_operation)
valid += remaining.filtered(lambda activity: activity.res_model == doc_model and activity.res_id in valid_doc_ids.ids)
return valid
def _check_access_assignation(self):
""" Check assigned user (user_id field) has access to the document. Purpose
is to allow assigned user to handle their activities. For that purpose
assigned user should be able to at least read the document. We therefore
raise an UserError if the assigned user has no access to the document. """
for activity in self:
model = self.env[activity.res_model].with_user(activity.user_id).with_context(allowed_company_ids=activity.user_id.company_ids.ids)
try:
model.check_access_rights('read')
except exceptions.AccessError:
raise exceptions.UserError(
_('Assigned user %s has no access to the document and is not able to handle this activity.') %
activity.user_id.display_name)
else:
try:
target_user = activity.user_id
target_record = self.env[activity.res_model].browse(activity.res_id)
if hasattr(target_record, 'company_id') and (
target_record.company_id != target_user.company_id and (
len(target_user.sudo().company_ids) > 1)):
return # in that case we skip the check, assuming it would fail because of the company
model.browse(activity.res_id).check_access_rule('read')
except exceptions.AccessError:
raise exceptions.UserError(
_('Assigned user %s has no access to the document and is not able to handle this activity.') %
activity.user_id.display_name)
# ------------------------------------------------------
# ORM overrides
# ------------------------------------------------------
@api.model
def create(self, values):
activity = super(MailActivity, self).create(values)
need_sudo = False
try: # in multicompany, reading the partner might break
partner_id = activity.user_id.partner_id.id
except exceptions.AccessError:
need_sudo = True
partner_id = activity.user_id.sudo().partner_id.id
# send a notification to assigned user; in case of manually done activity also check
# target has rights on document otherwise we prevent its creation. Automated activities
# are checked since they are integrated into business flows that should not crash.
if activity.user_id != self.env.user:
if not activity.automated:
activity._check_access_assignation()
if not self.env.context.get('mail_activity_quick_update', False):
if need_sudo:
activity.sudo().action_notify()
else:
activity.action_notify()
self.env[activity.res_model].browse(activity.res_id).message_subscribe(partner_ids=[partner_id])
if activity.date_deadline <= fields.Date.today():
self.env['bus.bus'].sendone(
(self._cr.dbname, 'res.partner', activity.user_id.partner_id.id),
{'type': 'activity_updated', 'activity_created': True})
return activity
def write(self, values):
if values.get('user_id'):
user_changes = self.filtered(lambda activity: activity.user_id.id != values.get('user_id'))
pre_responsibles = user_changes.mapped('user_id.partner_id')
res = super(MailActivity, self).write(values)
if values.get('user_id'):
if values['user_id'] != self.env.uid:
to_check = user_changes.filtered(lambda act: not act.automated)
to_check._check_access_assignation()
if not self.env.context.get('mail_activity_quick_update', False):
user_changes.action_notify()
for activity in user_changes:
self.env[activity.res_model].browse(activity.res_id).message_subscribe(partner_ids=[activity.user_id.partner_id.id])
if activity.date_deadline <= fields.Date.today():
self.env['bus.bus'].sendone(
(self._cr.dbname, 'res.partner', activity.user_id.partner_id.id),
{'type': 'activity_updated', 'activity_created': True})
for activity in user_changes:
if activity.date_deadline <= fields.Date.today():
for partner in pre_responsibles:
self.env['bus.bus'].sendone(
(self._cr.dbname, 'res.partner', partner.id),
{'type': 'activity_updated', 'activity_deleted': True})
return res
def unlink(self):
for activity in self:
if activity.date_deadline <= fields.Date.today():
self.env['bus.bus'].sendone(
(self._cr.dbname, 'res.partner', activity.user_id.partner_id.id),
{'type': 'activity_updated', 'activity_deleted': True})
return super(MailActivity, self).unlink()
def name_get(self):
res = []
for record in self:
name = record.summary or record.activity_type_id.display_name
res.append((record.id, name))
return res
# ------------------------------------------------------
# Business Methods
# ------------------------------------------------------
def action_notify(self):
if not self:
return
original_context = self.env.context
body_template = self.env.ref('mail.message_activity_assigned')
for activity in self:
if activity.user_id.lang:
# Send the notification in the assigned user's language
self = self.with_context(lang=activity.user_id.lang)
body_template = body_template.with_context(lang=activity.user_id.lang)
activity = activity.with_context(lang=activity.user_id.lang)
model_description = self.env['ir.model']._get(activity.res_model).display_name
body = body_template.render(
dict(activity=activity, model_description=model_description),
engine='ir.qweb',
minimal_qcontext=True
)
record = self.env[activity.res_model].browse(activity.res_id)
if activity.user_id:
record.message_notify(
partner_ids=activity.user_id.partner_id.ids,
body=body,
subject=_('%s: %s assigned to you') % (activity.res_name, activity.summary or activity.activity_type_id.name),
record_name=activity.res_name,
model_description=model_description,
email_layout_xmlid='mail.mail_notification_light',
)
body_template = body_template.with_context(original_context)
self = self.with_context(original_context)
def action_done(self):
""" Wrapper without feedback because web button add context as
parameter, therefore setting context to feedback """
messages, next_activities = self._action_done()
return messages.ids and messages.ids[0] or False
def action_feedback(self, feedback=False, attachment_ids=None):
self = self.with_context(clean_context(self.env.context))
messages, next_activities = self._action_done(feedback=feedback, attachment_ids=attachment_ids)
return messages.ids and messages.ids[0] or False
def action_done_schedule_next(self):
""" Wrapper without feedback because web button add context as
parameter, therefore setting context to feedback """
return self.action_feedback_schedule_next()
def action_feedback_schedule_next(self, feedback=False):
ctx = dict(
clean_context(self.env.context),
default_previous_activity_type_id=self.activity_type_id.id,
activity_previous_deadline=self.date_deadline,
default_res_id=self.res_id,
default_res_model=self.res_model,
)
messages, next_activities = self._action_done(feedback=feedback) # will unlink activity, don't access self after that
if next_activities:
return False
return {
'name': _('Schedule an Activity'),
'context': ctx,
'view_mode': 'form',
'res_model': 'mail.activity',
'views': [(False, 'form')],
'type': 'ir.actions.act_window',
'target': 'new',
}
def _action_done(self, feedback=False, attachment_ids=None):
""" Private implementation of marking activity as done: posting a message, deleting activity
(since done), and eventually create the automatical next activity (depending on config).
:param feedback: optional feedback from user when marking activity as done
:param attachment_ids: list of ir.attachment ids to attach to the posted mail.message
:returns (messages, activities) where
- messages is a recordset of posted mail.message
- activities is a recordset of mail.activity of forced automically created activities
"""
# marking as 'done'
messages = self.env['mail.message']
next_activities_values = []
# Search for all attachments linked to the activities we are about to unlink. This way, we
# can link them to the message posted and prevent their deletion.
attachments = self.env['ir.attachment'].search_read([
('res_model', '=', self._name),
('res_id', 'in', self.ids),
], ['id', 'res_id'])
activity_attachments = defaultdict(list)
for attachment in attachments:
activity_id = attachment['res_id']
activity_attachments[activity_id].append(attachment['id'])
for activity in self:
# extract value to generate next activities
if activity.force_next:
Activity = self.env['mail.activity'].with_context(activity_previous_deadline=activity.date_deadline) # context key is required in the onchange to set deadline
vals = Activity.default_get(Activity.fields_get())
vals.update({
'previous_activity_type_id': activity.activity_type_id.id,
'res_id': activity.res_id,
'res_model': activity.res_model,
'res_model_id': self.env['ir.model']._get(activity.res_model).id,
})
virtual_activity = Activity.new(vals)
virtual_activity._onchange_previous_activity_type_id()
virtual_activity._onchange_activity_type_id()
next_activities_values.append(virtual_activity._convert_to_write(virtual_activity._cache))
# post message on activity, before deleting it
record = self.env[activity.res_model].browse(activity.res_id)
record.message_post_with_view(
'mail.message_activity_done',
values={
'activity': activity,
'feedback': feedback,
'display_assignee': activity.user_id != self.env.user
},
subtype_id=self.env['ir.model.data'].xmlid_to_res_id('mail.mt_activities'),
mail_activity_type_id=activity.activity_type_id.id,
attachment_ids=[(4, attachment_id) for attachment_id in attachment_ids] if attachment_ids else [],
)
# Moving the attachments in the message
# TODO: Fix void res_id on attachment when you create an activity with an image
# directly, see route /web_editor/attachment/add
activity_message = record.message_ids[0]
message_attachments = self.env['ir.attachment'].browse(activity_attachments[activity.id])
if message_attachments:
message_attachments.write({
'res_id': activity_message.id,
'res_model': activity_message._name,
})
activity_message.attachment_ids = message_attachments
messages |= activity_message
next_activities = self.env['mail.activity'].create(next_activities_values)
self.unlink() # will unlink activity, don't access `self` after that
return messages, next_activities
def action_close_dialog(self):
return {'type': 'ir.actions.act_window_close'}
def activity_format(self):
activities = self.read()
mail_template_ids = set([template_id for activity in activities for template_id in activity["mail_template_ids"]])
mail_template_info = self.env["mail.template"].browse(mail_template_ids).read(['id', 'name'])
mail_template_dict = dict([(mail_template['id'], mail_template) for mail_template in mail_template_info])
for activity in activities:
activity['mail_template_ids'] = [mail_template_dict[mail_template_id] for mail_template_id in activity['mail_template_ids']]
return activities
@api.model
def get_activity_data(self, res_model, domain):
activity_domain = [('res_model', '=', res_model)]
if domain:
res = self.env[res_model].search(domain)
activity_domain.append(('res_id', 'in', res.ids))
grouped_activities = self.env['mail.activity'].read_group(
activity_domain,
['res_id', 'activity_type_id', 'ids:array_agg(id)', 'date_deadline:min(date_deadline)'],
['res_id', 'activity_type_id'],
lazy=False)
# filter out unreadable records
if not domain:
res_ids = tuple(a['res_id'] for a in grouped_activities)
res = self.env[res_model].search([('id', 'in', res_ids)])
grouped_activities = [a for a in grouped_activities if a['res_id'] in res.ids]
res_id_to_deadline = {}
activity_data = defaultdict(dict)
for group in grouped_activities:
res_id = group['res_id']
activity_type_id = (group.get('activity_type_id') or (False, False))[0]
res_id_to_deadline[res_id] = group['date_deadline'] if (res_id not in res_id_to_deadline or group['date_deadline'] < res_id_to_deadline[res_id]) else res_id_to_deadline[res_id]
state = self._compute_state_from_date(group['date_deadline'], self.user_id.sudo().tz)
activity_data[res_id][activity_type_id] = {
'count': group['__count'],
'ids': group['ids'],
'state': state,
'o_closest_deadline': group['date_deadline'],
}
activity_type_infos = []
activity_type_ids = self.env['mail.activity.type'].search(['|', ('res_model_id.model', '=', res_model), ('res_model_id', '=', False)])
for elem in sorted(activity_type_ids, key=lambda item: item.sequence):
mail_template_info = []
for mail_template_id in elem.mail_template_ids:
mail_template_info.append({"id": mail_template_id.id, "name": mail_template_id.name})
activity_type_infos.append([elem.id, elem.name, mail_template_info])
return {
'activity_types': activity_type_infos,
'activity_res_ids': sorted(res_id_to_deadline, key=lambda item: res_id_to_deadline[item]),
'grouped_activities': activity_data,
}
class MailActivityMixin(models.AbstractModel):
""" Mail Activity Mixin is a mixin class to use if you want to add activities
management on a model. It works like the mail.thread mixin. It defines
an activity_ids one2many field toward activities using res_id and res_model_id.
Various related / computed fields are also added to have a global status of
activities on documents.
Activities come with a new JS widget for the form view. It is integrated in the
Chatter widget although it is a separate widget. It displays the activities linked
to the current record and allows scheduling, editing and marking activities as done.
Use widget="mail_activity" on the activity_ids field in the form view to use it.
There is also a kanban widget: a small widget meant to be integrated in kanban
vignettes that allows managing activities directly from the kanban view. Use
widget="kanban_activity" on the activity_ids field in the kanban view to use it.
Some context keys allow controlling the mixin behaviour. Use them in specific
cases such as imports:
* ``mail_activity_automation_skip``: skip activities automation; it means
no automated activities will be generated, updated or unlinked, which saves
computation and avoids generating unwanted activities;
"""
_name = 'mail.activity.mixin'
_description = 'Activity Mixin'
activity_ids = fields.One2many(
'mail.activity', 'res_id', 'Activities',
auto_join=True,
groups="base.group_user",)
activity_state = fields.Selection([
('overdue', 'Overdue'),
('today', 'Today'),
('planned', 'Planned')], string='Activity State',
compute='_compute_activity_state',
groups="base.group_user",
help='Status based on activities\nOverdue: Due date is already passed\n'
'Today: Activity date is today\nPlanned: Future activities.')
activity_user_id = fields.Many2one(
'res.users', 'Responsible User',
related='activity_ids.user_id', readonly=False,
search='_search_activity_user_id',
groups="base.group_user")
activity_type_id = fields.Many2one(
'mail.activity.type', 'Next Activity Type',
related='activity_ids.activity_type_id', readonly=False,
search='_search_activity_type_id',
groups="base.group_user")
activity_date_deadline = fields.Date(
'Next Activity Deadline',
compute='_compute_activity_date_deadline', search='_search_activity_date_deadline',
readonly=True, store=False,
groups="base.group_user")
activity_summary = fields.Char(
'Next Activity Summary',
related='activity_ids.summary', readonly=False,
search='_search_activity_summary',
groups="base.group_user",)
activity_exception_decoration = fields.Selection([
('warning', 'Alert'),
('danger', 'Error')],
compute='_compute_activity_exception_type',
search='_search_activity_exception_decoration',
help="Type of the exception activity on record.")
activity_exception_icon = fields.Char('Icon', help="Icon to indicate an exception activity.",
compute='_compute_activity_exception_type')
@api.depends('activity_ids.activity_type_id.decoration_type', 'activity_ids.activity_type_id.icon')
def _compute_activity_exception_type(self):
# prefetch all activity types for all activities, this will avoid any query in loops
self.mapped('activity_ids.activity_type_id.decoration_type')
for record in self:
activity_type_ids = record.activity_ids.mapped('activity_type_id')
exception_activity_type_id = False
for activity_type_id in activity_type_ids:
if activity_type_id.decoration_type == 'danger':
exception_activity_type_id = activity_type_id
break
if activity_type_id.decoration_type == 'warning':
exception_activity_type_id = activity_type_id
record.activity_exception_decoration = exception_activity_type_id and exception_activity_type_id.decoration_type
record.activity_exception_icon = exception_activity_type_id and exception_activity_type_id.icon
def _search_activity_exception_decoration(self, operator, operand):
return [('activity_ids.activity_type_id.decoration_type', operator, operand)]
@api.depends('activity_ids.state')
def _compute_activity_state(self):
for record in self:
states = record.activity_ids.mapped('state')
if 'overdue' in states:
record.activity_state = 'overdue'
elif 'today' in states:
record.activity_state = 'today'
elif 'planned' in states:
record.activity_state = 'planned'
else:
record.activity_state = False
@api.depends('activity_ids.date_deadline')
def _compute_activity_date_deadline(self):
for record in self:
record.activity_date_deadline = record.activity_ids[:1].date_deadline
def _search_activity_date_deadline(self, operator, operand):
if operator == '=' and not operand:
return [('activity_ids', '=', False)]
return [('activity_ids.date_deadline', operator, operand)]
@api.model
def _search_activity_user_id(self, operator, operand):
return [('activity_ids.user_id', operator, operand)]
@api.model
def _search_activity_type_id(self, operator, operand):
return [('activity_ids.activity_type_id', operator, operand)]
@api.model
def _search_activity_summary(self, operator, operand):
return [('activity_ids.summary', operator, operand)]
def write(self, vals):
# Delete activities of archived records.
if 'active' in vals and vals['active'] is False:
self.env['mail.activity'].sudo().search(
[('res_model', '=', self._name), ('res_id', 'in', self.ids)]
).unlink()
return super(MailActivityMixin, self).write(vals)
def unlink(self):
""" Override unlink to delete records activities through (res_model, res_id). """
record_ids = self.ids
result = super(MailActivityMixin, self).unlink()
self.env['mail.activity'].sudo().search(
[('res_model', '=', self._name), ('res_id', 'in', record_ids)]
).unlink()
return result
def toggle_active(self):
""" Before archiving the record we should also remove its ongoing
activities. Otherwise they stay in the systray and concerning archived
records it makes no sense. """
record_to_deactivate = self.filtered(lambda rec: rec.active)
if record_to_deactivate:
# use a sudo to bypass every access rights; all activities should be removed
self.env['mail.activity'].sudo().search([
('res_model', '=', self._name),
('res_id', 'in', record_to_deactivate.ids)
]).unlink()
return super(MailActivityMixin, self).toggle_active()
def activity_send_mail(self, template_id):
""" Automatically send an email based on the given mail.template, given
its ID. """
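# Hedged usage sketch (assumption, not original code): 'my_module.mail_template_reminder'
# is a hypothetical template xmlid and `records` a recordset of a model using this mixin.
#     template = self.env.ref('my_module.mail_template_reminder')
#     records.activity_send_mail(template.id)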
template = self.env['mail.template'].browse(template_id).exists()
if not template:
return False
for record in self.with_context(mail_post_autofollow=True):
record.message_post_with_template(
template_id,
composition_mode='comment'
)
return True
def activity_schedule(self, act_type_xmlid='', date_deadline=None, summary='', note='', **act_values):
""" Schedule an activity on each record of the current record set.
This method allow to provide as parameter act_type_xmlid. This is an
xml_id of activity type instead of directly giving an activity_type_id.
It is useful to avoid having various "env.ref" in the code and allow
to let the mixin handle access rights.
:param date_deadline: the day the activity must be scheduled on
the timezone of the user must be considered to set the correct deadline
"""
if self.env.context.get('mail_activity_automation_skip'):
return False
if not date_deadline:
date_deadline = fields.Date.context_today(self)
if isinstance(date_deadline, datetime):
_logger.warning("Scheduled deadline should be a date (got %s)", date_deadline)
if act_type_xmlid:
activity_type = self.sudo().env.ref(act_type_xmlid)
else:
activity_type = self.env['mail.activity.type'].sudo().browse(act_values['activity_type_id'])
model_id = self.env['ir.model']._get(self._name).id
activities = self.env['mail.activity']
for record in self:
create_vals = {
'activity_type_id': activity_type.id,
'summary': summary or activity_type.summary,
'automated': True,
'note': note or activity_type.default_description,
'date_deadline': date_deadline,
'res_model_id': model_id,
'res_id': record.id,
'user_id': act_values.get('user_id') or activity_type.default_user_id.id or self.env.uid
}
create_vals.update(act_values)
activities |= self.env['mail.activity'].create(create_vals)
return activities
def activity_schedule_with_view(self, act_type_xmlid='', date_deadline=None, summary='', views_or_xmlid='', render_context=None, **act_values):
""" Helper method: Schedule an activity on each record of the current record set.
This method allow to the same mecanism as `activity_schedule`, but provide
2 additionnal parameters:
:param views_or_xmlid: record of ir.ui.view or string representing the xmlid
of the qweb template to render
:type views_or_xmlid: string or recordset
:param render_context: the values required to render the given qweb template
:type render_context: dict
"""
if self.env.context.get('mail_activity_automation_skip'):
return False
render_context = render_context or dict()
if isinstance(views_or_xmlid, str):
views = self.env.ref(views_or_xmlid, raise_if_not_found=False)
else:
views = views_or_xmlid
if not views:
return
activities = self.env['mail.activity']
for record in self:
render_context['object'] = record
note = views.render(render_context, engine='ir.qweb', minimal_qcontext=True)
activities |= record.activity_schedule(act_type_xmlid=act_type_xmlid, date_deadline=date_deadline, summary=summary, note=note, **act_values)
return activities
def activity_reschedule(self, act_type_xmlids, user_id=None, date_deadline=None, new_user_id=None):
""" Reschedule some automated activities. Activities to reschedule are
selected based on type xml ids and optionally by user. The purpose is to be
able to
* update the deadline to date_deadline;
* update the responsible to new_user_id;
"""
if self.env.context.get('mail_activity_automation_skip'):
return False
Data = self.env['ir.model.data'].sudo()
activity_types_ids = [Data.xmlid_to_res_id(xmlid) for xmlid in act_type_xmlids]
domain = [
'&', '&', '&',
('res_model', '=', self._name),
('res_id', 'in', self.ids),
('automated', '=', True),
('activity_type_id', 'in', activity_types_ids)
]
if user_id:
domain = ['&'] + domain + [('user_id', '=', user_id)]
activities = self.env['mail.activity'].search(domain)
if activities:
write_vals = {}
if date_deadline:
write_vals['date_deadline'] = date_deadline
if new_user_id:
write_vals['user_id'] = new_user_id
activities.write(write_vals)
return activities
def activity_feedback(self, act_type_xmlids, user_id=None, feedback=None):
""" Set activities as done, limiting to some activity types and
optionally to a given user. """
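# Hedged example (assumption): marking all automated to-dos on `records` as done
# with a short feedback note.
#     records.activity_feedback(['mail.mail_activity_data_todo'],
#                               feedback='Handled automatically')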
if self.env.context.get('mail_activity_automation_skip'):
return False
Data = self.env['ir.model.data'].sudo()
activity_types_ids = [Data.xmlid_to_res_id(xmlid) for xmlid in act_type_xmlids]
domain = [
'&', '&', '&',
('res_model', '=', self._name),
('res_id', 'in', self.ids),
('automated', '=', True),
('activity_type_id', 'in', activity_types_ids)
]
if user_id:
domain = ['&'] + domain + [('user_id', '=', user_id)]
activities = self.env['mail.activity'].search(domain)
if activities:
activities.action_feedback(feedback=feedback)
return True
def activity_unlink(self, act_type_xmlids, user_id=None):
""" Unlink activities, limiting to some activity types and optionally
to a given user. """
if self.env.context.get('mail_activity_automation_skip'):
return False
Data = self.env['ir.model.data'].sudo()
activity_types_ids = [Data.xmlid_to_res_id(xmlid) for xmlid in act_type_xmlids]
domain = [
'&', '&', '&',
('res_model', '=', self._name),
('res_id', 'in', self.ids),
('automated', '=', True),
('activity_type_id', 'in', activity_types_ids)
]
if user_id:
domain = ['&'] + domain + [('user_id', '=', user_id)]
self.env['mail.activity'].search(domain).unlink()
return True
|
agpl-3.0
| -903,450,253,741,445,000 | 48.617092 | 188 | 0.616441 | false |
nullzero/wpcgi
|
wpcgi/tools/wikitranslator/form.py
|
1
|
1097
|
#!/data/project/nullzerobot/python/bin/python
from flask.ext.wtf import Form
import wtforms.validators as v
from wtforms import TextField, TextAreaField, HiddenField, SubmitField
from messages import msg
class WikiTranslatorForm(Form):
pass
def getForm():
FormCl = WikiTranslatorForm
FormCl.title = TextField(msg['wikitranslator-title-label'],
id='txt-title')
FormCl.siteDest = TextField(msg['wikitranslator-siteDest-label'],
id='txt-siteDest',
validators=[v.Required(), v.IgnoreMe()])
FormCl.siteSource = TextField(msg['wikitranslator-siteSource-label'],
id='txt-siteSource',
validators=[v.Required(), v.IgnoreMe()])
FormCl.content = TextAreaField(msg['wikitranslator-content-label'],
id="txt-content")
FormCl.tabStatus = HiddenField(id="tab-active", validators=[v.IgnoreMe()])
FormCl.proceed = SubmitField(msg['wikitranslator-button-submit'])
return FormCl
|
mit
| -4,713,215,222,985,740,000 | 42.92 | 78 | 0.614403 | false |
endlessm/chromium-browser
|
third_party/angle/third_party/VK-GL-CTS/src/scripts/verify/message.py
|
7
|
1372
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# Vulkan CTS
# ----------
#
# Copyright (c) 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
class ValidationMessage:
TYPE_ERROR = 0
TYPE_WARNING = 1
def __init__ (self, type, filename, message):
self.type = type
self.filename = filename
self.message = message
def __str__ (self):
prefix = {self.TYPE_ERROR: "ERROR: ", self.TYPE_WARNING: "WARNING: "}
return prefix[self.type] + os.path.basename(self.filename) + ": " + self.message
def error (filename, message):
return ValidationMessage(ValidationMessage.TYPE_ERROR, filename, message)
def warning (filename, message):
return ValidationMessage(ValidationMessage.TYPE_WARNING, filename, message)
|
bsd-3-clause
| 2,389,402,020,114,998,300 | 31.666667 | 82 | 0.642128 | false |
yeyanchao/calibre
|
src/calibre/gui2/preferences/emailp.py
|
1
|
7877
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import textwrap
from PyQt4.Qt import QAbstractTableModel, QVariant, QFont, Qt
from calibre.gui2.preferences import ConfigWidgetBase, test_widget, \
AbortCommit
from calibre.gui2.preferences.email_ui import Ui_Form
from calibre.utils.config import ConfigProxy
from calibre.gui2 import NONE
from calibre.utils.smtp import config as smtp_prefs
class EmailAccounts(QAbstractTableModel): # {{{
def __init__(self, accounts, subjects):
QAbstractTableModel.__init__(self)
self.accounts = accounts
self.subjects = subjects
self.account_order = sorted(self.accounts.keys())
self.headers = map(QVariant, [_('Email'), _('Formats'), _('Subject'), _('Auto send')])
self.default_font = QFont()
self.default_font.setBold(True)
self.default_font = QVariant(self.default_font)
self.tooltips =[NONE] + list(map(QVariant, map(textwrap.fill,
[_('Formats to email. The first matching format will be sent.'),
_('Subject of the email to use when sending. When left blank '
'the title will be used for the subject. Also, the same '
'templates used for "Save to disk" such as {title} and '
'{author_sort} can be used here.'),
'<p>'+_('If checked, downloaded news will be automatically '
'mailed <br>to this email address '
'(provided it is in one of the listed formats).')])))
def rowCount(self, *args):
return len(self.account_order)
def columnCount(self, *args):
return len(self.headers)
def headerData(self, section, orientation, role):
if role == Qt.DisplayRole and orientation == Qt.Horizontal:
return self.headers[section]
return NONE
def data(self, index, role):
row, col = index.row(), index.column()
if row < 0 or row >= self.rowCount():
return NONE
account = self.account_order[row]
if account not in self.accounts:
return NONE
if role == Qt.UserRole:
return (account, self.accounts[account])
if role == Qt.ToolTipRole:
return self.tooltips[col]
if role in [Qt.DisplayRole, Qt.EditRole]:
if col == 0:
return QVariant(account)
if col == 1:
return QVariant(self.accounts[account][0])
if col == 2:
return QVariant(self.subjects.get(account, ''))
if role == Qt.FontRole and self.accounts[account][2]:
return self.default_font
if role == Qt.CheckStateRole and col == 3:
return QVariant(Qt.Checked if self.accounts[account][1] else Qt.Unchecked)
return NONE
def flags(self, index):
if index.column() == 3:
return QAbstractTableModel.flags(self, index)|Qt.ItemIsUserCheckable
else:
return QAbstractTableModel.flags(self, index)|Qt.ItemIsEditable
def setData(self, index, value, role):
if not index.isValid():
return False
row, col = index.row(), index.column()
account = self.account_order[row]
if col == 3:
self.accounts[account][1] ^= True
elif col == 2:
self.subjects[account] = unicode(value.toString())
elif col == 1:
self.accounts[account][0] = unicode(value.toString()).upper()
elif col == 0:
na = unicode(value.toString())
from email.utils import parseaddr
addr = parseaddr(na)[-1]
if not addr:
return False
self.accounts[na] = self.accounts.pop(account)
self.account_order[row] = na
if '@kindle.com' in addr:
self.accounts[na][0] = 'AZW, MOBI, TPZ, PRC, AZW1'
self.dataChanged.emit(
self.index(index.row(), 0), self.index(index.row(), 3))
return True
def make_default(self, index):
if index.isValid():
row = index.row()
for x in self.accounts.values():
x[2] = False
self.accounts[self.account_order[row]][2] = True
self.reset()
def add(self):
x = _('new email address')
y = x
c = 0
while y in self.accounts:
c += 1
y = x + str(c)
auto_send = len(self.accounts) < 1
self.accounts[y] = ['MOBI, EPUB', auto_send,
len(self.account_order) == 0]
self.account_order = sorted(self.accounts.keys())
self.reset()
return self.index(self.account_order.index(y), 0)
def remove(self, index):
if index.isValid():
row = index.row()
account = self.account_order[row]
self.accounts.pop(account)
self.account_order = sorted(self.accounts.keys())
has_default = False
for account in self.account_order:
if self.accounts[account][2]:
has_default = True
break
if not has_default and self.account_order:
self.accounts[self.account_order[0]][2] = True
self.reset()
# }}}
class ConfigWidget(ConfigWidgetBase, Ui_Form):
supports_restoring_to_defaults = False
def genesis(self, gui):
self.gui = gui
self.proxy = ConfigProxy(smtp_prefs())
self.send_email_widget.initialize(self.preferred_to_address)
self.send_email_widget.changed_signal.connect(self.changed_signal.emit)
opts = self.send_email_widget.smtp_opts
self._email_accounts = EmailAccounts(opts.accounts, opts.subjects)
self._email_accounts.dataChanged.connect(lambda x,y:
self.changed_signal.emit())
self.email_view.setModel(self._email_accounts)
self.email_add.clicked.connect(self.add_email_account)
self.email_make_default.clicked.connect(self.make_default)
self.email_view.resizeColumnsToContents()
self.email_remove.clicked.connect(self.remove_email_account)
def preferred_to_address(self):
if self._email_accounts.account_order:
return self._email_accounts.account_order[0]
def initialize(self):
ConfigWidgetBase.initialize(self)
# Initializing all done in genesis
def restore_defaults(self):
ConfigWidgetBase.restore_defaults(self)
# No defaults to restore to
def commit(self):
to_set = bool(self._email_accounts.accounts)
if not self.send_email_widget.set_email_settings(to_set):
raise AbortCommit('abort')
self.proxy['accounts'] = self._email_accounts.accounts
self.proxy['subjects'] = self._email_accounts.subjects
return ConfigWidgetBase.commit(self)
def make_default(self, *args):
self._email_accounts.make_default(self.email_view.currentIndex())
self.changed_signal.emit()
def add_email_account(self, *args):
index = self._email_accounts.add()
self.email_view.setCurrentIndex(index)
self.email_view.resizeColumnsToContents()
self.email_view.edit(index)
self.changed_signal.emit()
def remove_email_account(self, *args):
idx = self.email_view.currentIndex()
self._email_accounts.remove(idx)
self.changed_signal.emit()
def refresh_gui(self, gui):
from calibre.gui2.email import gui_sendmail
gui_sendmail.calculate_rate_limit()
if __name__ == '__main__':
from PyQt4.Qt import QApplication
app = QApplication([])
test_widget('Sharing', 'Email')
|
gpl-3.0
| -4,766,216,360,129,009,000 | 35.637209 | 95 | 0.59477 | false |
OCA/stock-logistics-workflow
|
stock_change_price_at_date/tests/test_change_price_at_date.py
|
1
|
9189
|
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from datetime import timedelta
from odoo.tests import common
from odoo import fields
from odoo.osv import expression
from odoo.exceptions import ValidationError
class TestChangePriceAtDate(common.TransactionCase):
def setUp(self):
super(TestChangePriceAtDate, self).setUp()
self.wizard_obj = self.env['stock.change.standard.price']
self.move_line_obj = self.env['account.move.line']
self.account_obj = self.env['account.account']
self.product_obj = self.env['product.product']
self.product = self.product_obj.create(
{'name': 'Ice Cream',
'type': 'product',
'standard_price': 70.0})
self.product.categ_id.property_stock_journal = self.env[
'account.journal'].create({
'name': 'Stock journal',
'type': 'sale',
'code': 'STK00'})
# Defining accounts
self.account = self.account_obj.create({
'name': 'TEST ACCOUNT',
'code': 'TEST',
'user_type_id': self.env.ref(
'account.data_account_type_expenses').id,
})
self.loc_account = self.account_obj.create({
'name': 'STOCK LOC ACCOUNT',
'code': 'STOCK',
'user_type_id': self.env.ref(
'account.data_account_type_expenses').id,
})
self.inventory_account = self.account_obj.create({
'name': 'Inventory ACCOUNT',
'code': 'INV',
'user_type_id': self.env.ref(
'account.data_account_type_expenses').id,
})
self.product.categ_id.property_account_income_categ_id = self.account
self.product.categ_id.property_stock_valuation_account_id = \
self.loc_account
self.product.property_stock_inventory.valuation_out_account_id = \
self.inventory_account
self.product.valuation = 'real_time'
self.location = self.browse_ref('stock.warehouse0').lot_stock_id
self.location.valuation_out_account_id = self.loc_account
self.location.valuation_in_account_id = self.loc_account
def _set_inventory(self, product, date_time=None):
vals = {
'name': 'Test stock available for reservation',
'location_id': self.location.id,
'filter': 'none'
}
inventory_obj = self.env['stock.inventory']
# Set context to simulate the date
if date_time:
vals.update({
'date': date_time,
})
inventory_obj = self.env['stock.inventory'].with_context(
date=date_time,
force_period_date=fields.Date.to_string(date_time)
)
inventory = inventory_obj.create(vals)
inventory.action_start()
self.env['stock.inventory.line'].create({
'inventory_id': inventory.id,
'product_id': product.id,
'location_id': self.location.id,
'product_qty': 10.0})
inventory._action_done()
def _change_price(self, new_price, move_date=None, template=False):
"""
Launch wizard to change price
Set date in context for moves if filled in
:param new_price: New Price
:param move_date: The date of the account move
"""
if template:
active_id = self.product.product_tmpl_id.id
active_model = 'product.template'
else:
active_id = self.product.id
active_model = 'product.product'
wizard_context = self.wizard_obj.with_context(
active_id=active_id,
active_model=active_model)
if move_date:
wizard_context = self.wizard_obj.with_context(
active_id=active_id,
active_model=active_model,
date=move_date,
force_period_date=fields.Date.to_string(move_date),
)
self.wizard = wizard_context.create({
'counterpart_account_id': self.account.id,
'new_price': new_price,
'date': move_date or False})
self.wizard.change_price()
def _get_move_line_domain(self, template=False):
product_domain = [('product_id', '=', self.product.id)]
if template:
product_domain = [
('product_id',
'in',
self.product.product_tmpl_id.product_variant_ids.ids)
]
return expression.AND([
product_domain,
[('move_id.state', '=', 'posted')],
])
def _create_variant(self):
vals = {
'name': 'Strawberry',
'product_tmpl_id': self.product.product_tmpl_id.id,
'standard_price': 75.0,
}
return self.product_obj.create(vals)
def test_change_price(self):
# Change Price to 80.0 (standard one)
# Change Price to 60.0 3 days ago
# Check if product standard price is 80.0
# Check if move line is created
self._set_inventory(self.product)
# Product price before tests was 70.0
self._change_price(80.0)
move_lines = self.move_line_obj.search(
self._get_move_line_domain(),
)
self.assertEquals(
80.0,
self.product.standard_price,
)
move_date = fields.Date.today() - timedelta(days=3)
self._change_price(60.0, move_date)
new_lines = self.move_line_obj.search(
self._get_move_line_domain()
) - move_lines
self.assertEquals(
80.0,
self.product.standard_price,
)
self.assertEquals(
2,
len(new_lines)
)
def test_change_template_price(self):
# Create a variant on template
# Change Price to 80.0 (standard one) for template
# Change Price to 60.0 3 days ago for template
# Check if product variants standard price is 80.0
# Check if move lines are created
# Set inventory on product 2
self._set_inventory(self.product)
# Product price before tests was 70.0
product_2 = self._create_variant()
self._change_price(80.0, template=True)
move_lines = self.move_line_obj.search(
self._get_move_line_domain(template=True),
)
self.assertEquals(
80.0,
self.product.standard_price,
)
self.assertEquals(
80.0,
product_2.standard_price,
)
move_date = fields.Date.today() - timedelta(days=3)
self._change_price(60.0, move_date, template=True)
new_lines = self.move_line_obj.search(
self._get_move_line_domain(template=True)
) - move_lines
self.assertEquals(
80.0,
self.product.standard_price,
)
self.assertEquals(
60.0,
product_2.standard_price,
)
self.assertEquals(
2,
len(new_lines)
)
# Only product 1 has inventory, so move lines are generated for that
# one only
self.assertEquals(
self.product,
new_lines.mapped('product_id'),
)
# Set Inventory on product 2
self._set_inventory(product_2)
move_lines = self.move_line_obj.search(
self._get_move_line_domain(template=True),
)
self._change_price(50.0, move_date, template=True)
new_lines = self.move_line_obj.search(
self._get_move_line_domain(template=True)
) - move_lines
self.assertEquals(
4,
len(new_lines)
)
# Product 2 standard price hasn't changed
self.assertEquals(
60.0,
product_2.standard_price,
)
def test_change_one_product_in_template_price(self):
# Create a variant on template
# Set inventory on that product 3 days ago
# Change Price to 80.0 3 days ago for template
# Check if product variants standard price is 80.0
self._set_inventory(self.product)
# Product price before tests was 70.0
product_2 = self._create_variant()
now_value = fields.Datetime.now() - timedelta(days=3)
self._set_inventory(
product_2, date_time=now_value)
# Change prices
move_date = fields.Date.today() - timedelta(days=2)
self._change_price(80.0, move_date=move_date, template=True)
# Just product 2 standard price should have been changed
self.assertEquals(
80.0,
product_2.standard_price,
)
self.assertEquals(
70.0,
self.product.standard_price,
)
def test_change_future_price(self):
# Try to change price to future date
move_date = fields.Date.today() + timedelta(days=3)
with self.assertRaises(ValidationError):
self._change_price(60.0, move_date, template=True)
|
agpl-3.0
| 9,129,164,995,711,200,000 | 33.159851 | 77 | 0.558385 | false |