repo_name (string, lengths 5-92) | path (string, lengths 4-232) | copies (string, 19 classes) | size (string, lengths 4-7) | content (string, lengths 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
vinhqdang/algorithms_for_interviews
|
chapter1/problem1_3.py
|
1
|
1291
|
# given a sorted array and a key k,
# find the index of the first occurrence of an element not smaller than k
# return -1 if k is outside the range spanned by the array
from problem0 import binary_search
def test_find_first_larger_1 ():
assert (find_first_larger([1,2,3,4,5,5,6],7) == -1)
def test_find_first_larger_2 ():
assert (find_first_larger([1,2,3,4,5,6,7,8],4) == 3)
def test_find_first_larger_3 ():
assert (find_first_larger([1,2,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,6,7,8],4) == 3)
def test_find_first_larger_4 ():
assert (find_first_larger([1,2,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,6,7,8],5) == 17)
def find_first_larger (_arr, _k):
if (len(_arr) == 0):
return -1
if _k < _arr[0] or _k >_arr[-1]:
return -1
l = 0
u = len(_arr) - 1
new_k = _k - 0.5
while u>l:
m = l + int ((u-l)/2)
if _arr[m] == new_k:
return m # can never happen: new_k is offset by 0.5, so it never equals an integer element
elif _arr[m] < new_k:
l=m+1
elif _arr[m] > new_k:
u=m-1
m = l + int ((u-l)/2)
# print (u)
# print (l)
# print (m)
# print (_arr[m])
# print (_k)
if _arr[m] == _k:
return m
if _arr[m] < _k:
return m+1
if _arr[m] > _k:
return m-1
if __name__ == '__main__':
print (find_first_larger([1,2,3,4,5,6,7,8],4) == 3)
|
gpl-3.0
| 55,772,695,387,996,910 | 24.333333 | 83 | 0.492641 | false |
iynaix/manga-downloader-flask
|
manga/spiders/animea.py
|
1
|
1663
|
import datetime
from scrapy.selector import Selector
from .base import BaseSpider as Spider
from manga.items import MangaItem, MangaChapterItem
from utils import extract_link
class AnimeA(Spider):
name = "animea"
allowed_domains = ["animea.net"]
start_urls = [
"http://manga.animea.net/series_old.php",
]
def parse(self, resp):
hxs = Selector(resp)
for manga in hxs.css("a.tooltip_manga"):
item = MangaItem()
item['name'], item['link'] = extract_link(manga)
yield item
class AnimeAChapterSpider(Spider):
name = "animea_chapter"
allowed_domains = ["animea.net"]
# parses the date format
def parsedate(self, s):
# date is in number of days / weeks / months / years ago
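# illustrative examples of the expected input (assuming the site's phrasing):
#   "3 days ago"  -> today minus 3 days
#   "2 weeks ago" -> today minus 14 days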
s = s.strip().lower().split()
val = int(s[0])
unit = s[1]
if "day" in unit:
delta = val
elif "week" in unit:
delta = val * 7
elif "month" in unit:
delta = val * 30
elif "year" in unit:
delta = val * 365
else:
raise ValueError("Unrecognised unit: %s" % unit)
return datetime.date.today() - datetime.timedelta(delta)
def parse(self, resp):
hxs = Selector(resp)
for row in hxs.css("ul.chapterlistfull > li"):
item = MangaChapterItem()
try:
item["name"], item["link"] = extract_link(row.xpath("a")[0])
dt = row.css("span.date::text")
item["date"] = self.parsedate(dt.extract()[0])
except IndexError:
continue
yield item
|
mit
| -8,047,761,475,780,702,000 | 26.716667 | 76 | 0.549008 | false |
jessrenteria/flowbot
|
src/data/preprocessor.py
|
1
|
4786
|
"""Module for preprocessing Cornell dialog data.
"""
import os
import pickle
import nltk
from tqdm import tqdm
class Preprocessor:
"""Class for parsing and preprocessing dialogs.
"""
def __init__(self, config):
self._config = config
self._lines, self._token2id, self._id2token = self._preprocess_lines()
self._conversations = self._preprocess_conversations()
def _preprocess_lines(self):
if (self._config['reuse_data'] and
os.path.exists(self._config['preprocessed_lines'])):
with open(self._config['preprocessed_lines'], 'rb') as f:
data = pickle.load(f)
return data['lines'], data['token2id'], data['id2token']
else:
print('Preprocessing lines...')
lines = {}
token2id = {}
id2token = {}
next_id = 0
def getId(token):
nonlocal next_id
if token in token2id:
return token2id[token]
else:
token2id[token] = next_id
id2token[next_id] = token
next_id += 1
return token2id[token]
for token in ['<unk>', '<start>', '<end>', '<pad>']:
getId(token)
max_length = max(self._config['encoder_length'],
self._config['decoder_length'])
with open(self._config['lines'], 'r', encoding='iso-8859-1') as f:
for line in tqdm(f):
line = line.split('+++$+++')
tokens = nltk.word_tokenize(line[-1])
if len(tokens) <= max_length:
tokens = map(lambda x: x.lower(), tokens)
lines[line[0].strip()] = list(map(getId, tokens))
with open(self._config['preprocessed_lines'], 'wb') as f:
data = {
'lines' : lines,
'token2id': token2id,
'id2token': id2token
}
pickle.dump(data, f, -1)
return lines, token2id, id2token
def _preprocess_conversations(self):
if (self._config['reuse_data'] and
os.path.exists(self._config['preprocessed_conversations'])):
with open(self._config['preprocessed_conversations'], 'rb') as f:
return pickle.load(f)
else:
print('Preprocessing conversations...')
conversations = []
def valid_conversation(c):
c1, c2 = c
return (c1 in self._lines and
len(self._lines[c1]) <= self._config['encoder_length'] and
c2 in self._lines and
len(self._lines[c2]) + 2 <= self._config['decoder_length'])
with open(self._config['conversations'], 'r', encoding='iso-8859-1') as f:
for line in tqdm(f):
conversation = line.split('+++$+++')[-1].strip()[2:-2].split("', '")
candidates = zip(conversation[:-1], conversation[1:])
candidates = list(filter(valid_conversation, candidates))
conversations += candidates
with open(self._config['preprocessed_conversations'], 'wb') as f:
pickle.dump(conversations, f, -1)
return conversations
def get_data(self):
return self._lines, self._token2id, self._id2token, self._conversations
def start_id(self):
return self._token2id['<start>']
def end_id(self):
return self._token2id['<end>']
def pad_id(self):
return self._token2id['<pad>']
def unk_id(self):
return self._token2id['<unk>']
def get_vocabulary_size(self):
return len(self._token2id)
def encode(self, s):
def sanitize(w):
w = w.lower()
return self._token2id[w] if w in self._token2id else self.unk_id()
tokens = nltk.word_tokenize(s)
if len(tokens) > self._config['encoder_length']:
return None
return list(map(sanitize, tokens))
def decode(self, lst):
return ' '.join(map(lambda x: self._id2token[x], lst))
def decode_pretty(self, lst):
result = []
bad_set = set(['<start>', '<pad>'])
for tokenId in lst:
token = self._id2token[tokenId[0][0]]
if token in bad_set:
continue
elif token == '<end>':
break
elif token == '<unk>':
result.append('???')
else:
result.append(token)
if result == None:
return None
return ' '.join(result)
|
apache-2.0
| -3,789,931,468,041,623,600 | 32.943262 | 88 | 0.497075 | false |
libravatar/libravatar
|
libravatar/account/urls.py
|
1
|
5296
|
# Copyright (C) 2011, 2013, 2015, 2016 Francois Marier <francois@libravatar.org>
# Copyright (C) 2010 Francois Marier <francois@libravatar.org>
# Jonathan Harker <jon@jon.geek.nz>
# Brett Wilkins <bushido.katana@gmail.com>
#
# This file is part of Libravatar
#
# Libravatar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Libravatar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url, patterns
# pylint: disable=invalid-name
urlpatterns = patterns('',
url('login/$', 'django.contrib.auth.views.login',
{'template_name': 'account/login.html'},
name='login'),
url('logout/$', 'django.contrib.auth.views.logout',
{'next_page': '/'},
name='logout'), # must be the last pattern using this view!
url('password_change/$',
'django.contrib.auth.views.password_change',
{'template_name': 'account/password_change.html'},
name='password_change'),
url('password_change_done/$',
'django.contrib.auth.views.password_change_done',
{'template_name': 'account/password_change_done.html'},
name='password_change_done'),
url('password_set/$',
'libravatar.account.views.password_set'),
url('add_email/$',
'libravatar.account.views.add_email'),
url('add_openid/$',
'libravatar.account.views.add_openid'),
url('confirm_email/$',
'libravatar.account.views.confirm_email'),
url(r'^(?P<openid_id>\d+)/confirm_openid/$',
'libravatar.account.views.confirm_openid'),
url(r'^(?P<openid_id>\d+)/redirect_openid/$',
'libravatar.account.views.redirect_openid'),
url(r'^(?P<email_id>\d+)/remove_confirmed_email/$',
'libravatar.account.views.remove_confirmed_email'),
url(r'^(?P<email_id>\d+)/remove_unconfirmed_email/$',
'libravatar.account.views.remove_unconfirmed_email'),
url(r'^(?P<openid_id>\d+)/remove_confirmed_openid/$',
'libravatar.account.views.remove_confirmed_openid'),
url(r'^(?P<openid_id>\d+)/remove_unconfirmed_openid/$',
'libravatar.account.views.remove_unconfirmed_openid'),
url('delete/$', 'libravatar.account.views.delete'),
url('export/$', 'libravatar.account.views.export'),
url('new/$', 'libravatar.account.views.new'),
url('password_reset/$',
'libravatar.account.views.password_reset',
name='password_reset'),
url('password_reset_confirm/$',
'libravatar.account.views.password_reset_confirm',
name='password_reset_confirm'),
url('profile/$', 'libravatar.account.views.profile'),
url('profile_success/$',
'libravatar.account.views.successfully_authenticated'),
url(r'^(?P<email_id>\d+)/assign_photo_email/$',
'libravatar.account.views.assign_photo_email'),
url(r'^(?P<openid_id>\d+)/assign_photo_openid/$',
'libravatar.account.views.assign_photo_openid'),
url(r'^(?P<user_id>\d+)/import_photo/$',
'libravatar.account.views.import_photo'),
url('upload_photo/$',
'libravatar.account.views.upload_photo'),
url('crop_photo/$',
'libravatar.account.views.crop_photo'),
url(r'^(?P<photo_id>\d+)/crop_photo/?$',
'libravatar.account.views.crop_photo'),
url(r'^(?P<photo_id>\d+)/auto_crop/?$',
'libravatar.account.views.auto_crop'),
url(r'^(?P<photo_id>\d+)/delete_photo/$',
'libravatar.account.views.delete_photo'),
# Default page
url(r'^$', 'libravatar.account.views.profile'),
)
|
agpl-3.0
| -5,768,908,466,926,725,000 | 55.946237 | 87 | 0.498301 | false |
natsheh/semantic_query
|
api.py
|
1
|
4747
|
# -*- coding: utf-8 -*-
#
# This file is part of semantic_query.
# Copyright (C) 2016 CIAPPLE.
#
# This is a free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
# Semantic Query API
# Author: Hussein AL-NATSHEH <h.natsheh@ciapple.com>
# Affiliation: CIAPPLE, Jordan
import os, argparse, pickle, json
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.pipeline import Pipeline
from collections import OrderedDict
from itertools import islice
from bs4 import BeautifulSoup
from flask import Flask, request, make_response
from flask_httpauth import HTTPBasicAuth
from flask_restful import Resource, Api, reqparse
def top(n, sorted_results):
return list(islice(sorted_results.iteritems(), n))
def query_by_text(transformer, transformed, documents, index, query_text, url, n_results=10):
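# ranks every document by cosine similarity to the transformed query and
# returns the top n_results as dicts holding the reference URL, score,
# doc_id, title and full answer text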
query = transformer.transform(query_text)
sims = cosine_similarity(query.reshape(1,-1), transformed)
scores = sims[0][:].reshape(-1,1)
results= dict()
for i in range(len(transformed)):
results[i] = scores[i]
sorted_results = OrderedDict(sorted(results.items(), key=lambda k: k[1], reverse=True))
topn = top(n_results, sorted_results)
results = np.array(range(n_results), dtype=np.object)
for rank, (answer, score) in enumerate(topn):
title = documents[answer].split('\n__')[0]
title_t = title.replace (" ", "_")
doc_id = str(index[answer])
reference = url + title_t
results[rank] = {'reference': reference, 'score': str(score), 'doc_id': doc_id, 'title': title, 'answer': documents[answer]}
return results.tolist()
class Query(Resource):
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('question', type=str, required=True, help='Query text')
parser.add_argument('userId', type=str, required=False, help='User ID')
parser.add_argument('questionId', type=str, required=False, help='Question ID')
parser.add_argument('limit', type=int, required=False, help='Size of the returned results')
args = parser.parse_args()
q = request.args.get('question')
question = BeautifulSoup(q, "lxml").p.contents
try:
size = request.args.get('limit')
n_results = int(size)
if n_results > 100:
n_results = 100
except:
n_results = 3
user_id = request.args.get('userId')
question_id = request.args.get('questionId')
response = {}
response['userId'] = user_id
response['questionId'] = question_id
response['limit'] = n_results
response['interesteId'] = 'future_feature'
response['results'] = query_by_text(transformer, transformed, documents, index, question, url, n_results=n_results)
if str(type(question)) == "<type 'list'>":
question = question[0]
response['question'] = question
resp = make_response()
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers['content-type'] = 'application/json'
resp.data = response
return response
except Exception as e:
return {'error': str(e)}
def get(self):
try:
q = request.args.get('question')
question = BeautifulSoup(q, "lxml").p.contents
try:
user_id = request.args.get('userId')
except:
user_id = 'uid1'
try:
question_id = request.args.get('questionId')
except:
question_id = 'qid1'
try:
size = request.args.get('limit')
n_results = int(size)
if n_results > 100:
n_results = 100
except:
n_results = 3
response = dict()
response['userId'] = user_id
response['questionId'] = question_id
response['limit'] = n_results
response['interesteId'] = 'future_feature'
results = query_by_text(transformer, transformed, documents, index, question, url, n_results=n_results)
response['results'] = results
if str(type(question)) == "<type 'list'>":
question = question[0]
response['question'] = question
return response
except Exception as e:
return {'error': str(e)}
app = Flask(__name__, static_url_path="")
auth = HTTPBasicAuth()
api = Api(app)
api.add_resource(Query, '/Query/')
if __name__ == '__main__':
transformed_file = 'transformed.pickle'
docs_file = 'documents.pickle'
index_file = 'index.pickle'
transformer_file = 'transformer.pickle'
transformed = np.load(transformed_file)
index = pickle.load(open(index_file,'rb'))
documents = pickle.load(open(docs_file,'rb'))
print 'number of documents :', len(index)
transformer = pickle.load(open(transformer_file,'rb'))
url_config = json.load(open('url_config.json', 'r'))
url = url_config['url']
print 'Ready to call!!'
app.run(host='0.0.0.0', threaded=True)
|
bsd-3-clause
| 6,488,388,960,674,293,000 | 30.230263 | 127 | 0.688224 | false |
SitiBanc/1061_NCTU_IOMDS
|
1108/HW7/HW7.py
|
1
|
3177
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 8 20:25:18 2017
@author: sitibanc
"""
import numpy as np
from scipy import signal
from PIL import Image
def gen2DGaussian(stdv, mean, h, w):
x, y = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
d = np.sqrt(x ** 2 + y ** 2)
sigma, mu = stdv, mean
g = np.exp(-((d - mu) ** 2 / (2.0 * sigma ** 2)))
return g
def applyMask(M, I_array):
R = I_array[:, :, 0]
R2 = signal.convolve2d(R, M, mode = 'same', boundary = 'symm')
G = I_array[:, :, 1]
G2 = signal.convolve2d(G, M, mode = 'same', boundary = 'symm')
B = I_array[:, :, 2]
B2 = signal.convolve2d(B, M, mode = 'same', boundary = 'symm')
data = I_array.copy()
data[:, :, 0] = R2.astype('uint8')
data[:, :, 1] = G2.astype('uint8')
data[:, :, 2] = B2.astype('uint8')
return data
# Read the image
I = Image.open('sample.jpg')
data = np.asarray(I)
# =============================================================================
# HW7-1: Gaussian Blur
# =============================================================================
# Generate 2D Gaussian Array
M1 = gen2DGaussian(1.0, 0.0, 10, 10)
M1 = M1 / M1.sum()
# Apply Mask
masked1 = applyMask(M1, data)
I1 = Image.fromarray(masked1.astype('uint8'), 'RGB')
I1.show()
# =============================================================================
# HW7-2: Motion Blur
# =============================================================================
M2 = np.ones((20, 1))
M2 = M2 / M2.sum()
# Apply Mask
masked2 = applyMask(M2, data)
I2 = Image.fromarray(masked2.astype('uint8'), 'RGB')
I2.show()
# =============================================================================
# HW7-3: Sharpen Filter (subtract two Gaussians with different standard deviations)
# =============================================================================
# Generate Mask
#sig1 = gen2DGaussian(1.0, 0.0, 3, 3)
#sig2 = gen2DGaussian(2.0, 0.0, 3, 3)
#M3 = sig1 - sig2
#M3 = M3 / M3.sum()
# Another Mask
M3 = np.array([[-1, -1, -1], [-1, 16, -1], [-1, -1, -1]])
M3 = M3 / 8
# Apply Mask
masked3 = applyMask(M3, data)
I3 = Image.fromarray(masked3.astype('uint8'), 'RGB')
I3.show()
# =============================================================================
# HW7-4: Sobel Filter (edge enhancement, sketch-like effect)
# =============================================================================
# Gray-scale image
I0 = I.convert('L')
data0 = np.asarray(I0)
# Generate Mask
sobel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
sobel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
# Apply Mask
Ix = signal.convolve2d(data0, sobel_x, mode = 'same', boundary = 'symm')
Iy = signal.convolve2d(data0, sobel_y, mode = 'same', boundary = 'symm')
masked4 = Ix ** 2 + Iy ** 2
# Adjust Color
tmp = masked4.flatten()
tmp[::-1].sort() # sorting in descending order
n = 0.2
idx = int(len(tmp) * n)
for h in range(masked4.shape[0]):
for w in range(masked4.shape[1]):
if masked4[h, w] >= tmp[idx]:
masked4[h, w] = 0
else:
masked4[h, w] = 255
I4 = Image.fromarray(masked4.astype('uint8'), 'L')
I4.show()
|
apache-2.0
| 7,698,695,683,274,534,000 | 30.2 | 79 | 0.460083 | false |
wufangjie/leetcode
|
015. 3Sum.py
|
1
|
1747
|
'''
Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
Note: The solution set must not contain duplicate triplets.
For example, given array S = [-1, 0, 1, 2, -1, -4],
A solution set is:
[
[-1, 0, 1],
[-1, -1, 2]
]
'''
def _move_right(sorted_list, lo, hi, val):
while lo < hi:
lo += 1
if sorted_list[lo] != val:
break
return lo
def _move_left(sorted_list, lo, hi, val):
while lo < hi:
hi -= 1
if sorted_list[hi] != val:
break
return hi
def twoSum(sorted_list, lo, hi, theSum):
while lo < hi:
test = sorted_list[lo] + sorted_list[hi]
if test == theSum:
yield [sorted_list[lo], sorted_list[hi]]
lo = _move_right(sorted_list, lo, hi, sorted_list[lo])
hi = _move_left(sorted_list, lo, hi, sorted_list[hi])
elif test > theSum:
hi = _move_left(sorted_list, lo, hi, sorted_list[hi])
else:
lo = _move_right(sorted_list, lo, hi, sorted_list[lo])
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nums = sorted(nums)
theMax = len(nums) - 1
pre = float('inf')
results = []
for i, a in enumerate(nums[:-2], 1):
if a > 0:
break
if a != pre:
pre = a
for comb in twoSum(nums, i, theMax, -a):
results.append([a] + comb)
return results
if __name__ == '__main__':
assert Solution().threeSum([-1, 0, 1, 2, -1, -4]) == [[-1, 0, 1], [-1, -1, 2]]
|
gpl-3.0
| 6,233,434,110,019,889,000 | 25.876923 | 155 | 0.502003 | false |
satterly/alerta5
|
alerta/app/database/backends/mongodb/base.py
|
1
|
40457
|
import json
import pytz
import re
from datetime import datetime, timedelta
from flask import current_app, g
from pymongo import MongoClient, ASCENDING, TEXT, ReturnDocument
from pymongo.errors import ConnectionFailure
from alerta.app.models import status_code
from alerta.app.utils.format import DateTime
from alerta.app import severity
from alerta.app.exceptions import NoCustomerMatch, ApiError
# See https://github.com/MongoEngine/flask-mongoengine/blob/master/flask_mongoengine/__init__.py
# See https://github.com/dcrosta/flask-pymongo/blob/master/flask_pymongo/__init__.py
class Backend:
def connect(self, config):
conn = MongoClient(config.get('MONGO_URI', 'mongodb://localhost:27017/monitoring'))
if config.get('MONGO_DATABASE', None):
db = conn[config['MONGO_DATABASE']]
else:
db = conn.get_database()
# create unique indexes
db.alerts.create_index(
[('environment', ASCENDING), ('customer', ASCENDING), ('resource', ASCENDING), ('event', ASCENDING)],
unique=True
)
db.alerts.create_index([('$**', TEXT)])
db.heartbeats.create_index([('origin', ASCENDING), ('customer', ASCENDING)], unique=True)
db.metrics.create_index([('group', ASCENDING), ('name', ASCENDING)], unique=True)
return conn, db
@property
def cx(self):
return current_app.extensions['mongodb'][0]
@property
def db(self):
return current_app.extensions['mongodb'][1]
@property
def version(self):
return self.db.client.server_info()['version']
@property
def is_alive(self):
try:
self.db.client.admin.command('ismaster')
except ConnectionFailure:
return False
return True
def close(self):
self.db.close()
def destroy(self, name=None):
name = name or self.db.name
self.cx.drop_database(name)
def build_query(self, params):
query_time = datetime.utcnow()
# q
if params.get('q', None):
query = json.loads(params.pop('q'))
else:
query = dict()
# customer
if g.get('customer', None):
query['customer'] = g.get('customer')
# from-date, to-date
from_date = params.get('from-date', default=None, type=DateTime.parse)
to_date = params.get('to-date', default=query_time, type=DateTime.parse)
if from_date and to_date:
query['lastReceiveTime'] = {'$gt': from_date.replace(tzinfo=pytz.utc), '$lte': to_date.replace(tzinfo=pytz.utc)}
elif to_date:
query['lastReceiveTime'] = {'$lte': to_date.replace(tzinfo=pytz.utc)}
# duplicateCount, repeat
if params.get('duplicateCount', None):
query['duplicateCount'] = params.get('duplicateCount', int)
if params.get('repeat', None):
query['repeat'] = params.get('repeat', default=True, type=lambda x: x == 'true')
# sort-by
sort = list()
direction = 1
if params.get('reverse', None):
direction = -1
if params.get('sort-by', None):
for sort_by in params.getlist('sort-by'):
if sort_by in ['createTime', 'receiveTime', 'lastReceiveTime']:
sort.append((sort_by, -direction)) # reverse chronological
else:
sort.append((sort_by, direction))
else:
sort.append(('lastReceiveTime', -direction))
# group-by
group = params.getlist('group-by')
# page, page-size, limit (deprecated)
page = params.get('page', 1, int)
limit = params.get('limit', current_app.config['DEFAULT_PAGE_SIZE'], int)
page_size = params.get('page-size', limit, int)
# id
ids = params.getlist('id')
if len(ids) == 1:
query['$or'] = [{'_id': {'$regex': '^' + ids[0]}}, {'lastReceiveId': {'$regex': '^' + ids[0]}}]
elif ids:
query['$or'] = [{'_id': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}},
{'lastReceiveId': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}}]
EXCLUDE_QUERY = ['q', 'id', 'from-date', 'to-date', 'repeat', 'sort-by', 'reverse', 'group-by', 'page', 'page-size', 'limit']
# fields
for field in params:
if field in EXCLUDE_QUERY:
continue
value = params.getlist(field)
if len(value) == 1:
value = value[0]
if field.endswith('!'):
if value.startswith('~'):
query[field[:-1]] = dict()
query[field[:-1]]['$not'] = re.compile(value[1:], re.IGNORECASE)
else:
query[field[:-1]] = dict()
query[field[:-1]]['$ne'] = value
else:
if value.startswith('~'):
query[field] = dict()
query[field]['$regex'] = re.compile(value[1:], re.IGNORECASE)
else:
query[field] = value
else:
if field.endswith('!'):
if '~' in [v[0] for v in value]:
value = '|'.join([v.lstrip('~') for v in value])
query[field[:-1]] = dict()
query[field[:-1]]['$not'] = re.compile(value, re.IGNORECASE)
else:
query[field[:-1]] = dict()
query[field[:-1]]['$nin'] = value
else:
if '~' in [v[0] for v in value]:
value = '|'.join([v.lstrip('~') for v in value])
query[field] = dict()
query[field]['$regex'] = re.compile(value, re.IGNORECASE)
else:
query[field] = dict()
query[field]['$in'] = value
return query, sort, group, page, page_size, query_time
#### ALERTS
def get_severity(self, alert):
"""
Get severity of correlated alert. Used to determine previous severity.
"""
query = {
"environment": alert.environment,
"resource": alert.resource,
'$or': [
{
"event": alert.event,
"severity": {'$ne': alert.severity}
},
{
"event": {'$ne': alert.event},
"correlate": alert.event
}],
"customer": alert.customer
}
return self.db.alerts.find_one(query, projection={"severity": 1, "_id": 0})['severity']
def get_status(self, alert):
"""
Get status of correlated or duplicate alert. Used to determine previous status.
"""
query = {
"environment": alert.environment,
"resource": alert.resource,
'$or': [
{
"event": alert.event
},
{
"correlate": alert.event,
}
],
"customer": alert.customer
}
return self.db.alerts.find_one(query, projection={"status": 1, "_id": 0})['status']
def is_duplicate(self, alert):
query = {
"environment": alert.environment,
"resource": alert.resource,
"event": alert.event,
"severity": alert.severity,
"customer": alert.customer
}
return bool(self.db.alerts.find_one(query))
def is_correlated(self, alert):
query = {
"environment": alert.environment,
"resource": alert.resource,
'$or': [
{
"event": alert.event,
"severity": {'$ne': alert.severity}
},
{
"event": {'$ne': alert.event},
"correlate": alert.event
}],
"customer": alert.customer
}
return bool(self.db.alerts.find_one(query))
def is_flapping(self, alert, window=1800, count=2):
"""
Return true if alert severity has changed more than X times in Y seconds
"""
pipeline = [
{'$match': {"environment": alert.environment, "resource": alert.resource, "event": alert.event}},
{'$unwind': '$history'},
{'$match': {
"history.updateTime": {'$gt': datetime.utcnow() - timedelta(seconds=window)},
"history.type": "severity"
}},
{
'$group': {
"_id": '$history.type',
"count": {'$sum': 1}
}
}
]
responses = self.db.alerts.aggregate(pipeline)
for r in responses:
if r['count'] > count:
return True
return False
def dedup_alert(self, alert):
"""
Update alert value, text and rawData, increment duplicate count and set repeat=True, and
keep track of last receive id and time but don't append to history unless status changes.
"""
previous_status = self.get_status(alert)
if alert.status != status_code.UNKNOWN and alert.status != previous_status:
status = alert.status
else:
status = status_code.status_from_severity(alert.severity, alert.severity, previous_status)
query = {
"environment": alert.environment,
"resource": alert.resource,
"event": alert.event,
"severity": alert.severity,
"customer": alert.customer
}
now = datetime.utcnow()
update = {
'$set': {
"status": status,
"value": alert.value,
"text": alert.text,
"rawData": alert.raw_data,
"repeat": True,
"lastReceiveId": alert.id,
"lastReceiveTime": now
},
'$addToSet': {"tags": {'$each': alert.tags}},
'$inc': {"duplicateCount": 1}
}
# only update those attributes that are specifically defined
attributes = {'attributes.'+k: v for k, v in alert.attributes.items()}
update['$set'].update(attributes)
if status != previous_status:
update['$push'] = {
"history": {
'$each': [{
"event": alert.event,
"status": status,
"type": "status",
"text": "duplicate alert status change",
"id": alert.id,
"updateTime": now
}],
'$slice': -abs(current_app.config['HISTORY_LIMIT'])
}
}
return self.db.alerts.find_one_and_update(
query,
update=update,
projection={"history": 0},
return_document=ReturnDocument.AFTER
)
def correlate_alert(self, alert):
"""
Update alert key attributes, reset duplicate count and set repeat=False, keep track of last
receive id and time, appending all to history. Append to history again if status changes.
"""
previous_severity = self.get_severity(alert)
previous_status = self.get_status(alert)
trend_indication = severity.trend(previous_severity, alert.severity)
if alert.status == status_code.UNKNOWN:
status = status_code.status_from_severity(previous_severity, alert.severity, previous_status)
else:
status = alert.status
query = {
"environment": alert.environment,
"resource": alert.resource,
'$or': [
{
"event": alert.event,
"severity": {'$ne': alert.severity}
},
{
"event": {'$ne': alert.event},
"correlate": alert.event
}],
"customer": alert.customer
}
now = datetime.utcnow()
update = {
'$set': {
"event": alert.event,
"severity": alert.severity,
"status": status,
"value": alert.value,
"text": alert.text,
"createTime": alert.create_time,
"rawData": alert.raw_data,
"duplicateCount": 0,
"repeat": False,
"previousSeverity": previous_severity,
"trendIndication": trend_indication,
"receiveTime": now,
"lastReceiveId": alert.id,
"lastReceiveTime": now
},
'$addToSet': {"tags": {'$each': alert.tags}},
'$push': {
"history": {
'$each': [{
"event": alert.event,
"severity": alert.severity,
"value": alert.value,
"type": "severity",
"text": alert.text,
"id": alert.id,
"updateTime": now
}],
'$slice': -abs(current_app.config['HISTORY_LIMIT'])
}
}
}
# only update those attributes that are specifically defined
attributes = {'attributes.'+k: v for k, v in alert.attributes.items()}
update['$set'].update(attributes)
if status != previous_status:
update['$push']['history']['$each'].append({
"event": alert.event,
"status": status,
"type": "status",
"text": "correlated alert status change",
"id": alert.id,
"updateTime": now
})
return self.db.alerts.find_one_and_update(
query,
update=update,
projection={"history": 0},
return_document=ReturnDocument.AFTER
)
def create_alert(self, alert):
data = {
"_id": alert.id,
"resource": alert.resource,
"event": alert.event,
"environment": alert.environment,
"severity": alert.severity,
"correlate": alert.correlate,
"status": alert.status,
"service": alert.service,
"group": alert.group,
"value": alert.value,
"text": alert.text,
"tags": alert.tags,
"attributes": alert.attributes,
"origin": alert.origin,
"type": alert.event_type,
"createTime": alert.create_time,
"timeout": alert.timeout,
"rawData": alert.raw_data,
"customer": alert.customer,
"duplicateCount": alert.duplicate_count,
"repeat": alert.repeat,
"previousSeverity": alert.previous_severity,
"trendIndication": alert.trend_indication,
"receiveTime": alert.receive_time,
"lastReceiveId": alert.last_receive_id,
"lastReceiveTime": alert.last_receive_time,
"history": [h.serialize for h in alert.history]
}
if self.db.alerts.insert_one(data).inserted_id == alert.id:
return data
def get_alert(self, id, customer=None):
if len(id) == 8:
query = {'$or': [{'_id': {'$regex': '^' + id}}, {'lastReceiveId': {'$regex': '^' + id}}]}
else:
query = {'$or': [{'_id': id}, {'lastReceiveId': id}]}
if customer:
query['customer'] = customer
return self.db.alerts.find_one(query)
#### STATUS, TAGS, ATTRIBUTES
def set_status(self, id, status, text=None):
"""
Set status and update history.
"""
query = {'_id': {'$regex': '^' + id}}
event = self.db.alerts.find_one(query, projection={"event": 1, "_id": 0})['event']
if not event:
return False
now = datetime.utcnow()
update = {
'$set': {"status": status},
'$push': {
"history": {
'$each': [{
"event": event,
"status": status,
"type": "status",
"text": text,
"id": id,
"updateTime": now
}],
'$slice': -abs(current_app.config['HISTORY_LIMIT'])
}
}
}
return self.db.alerts.find_one_and_update(
query,
update=update,
projection={"history": 0},
return_document=ReturnDocument.AFTER
)
def tag_alert(self, id, tags):
"""
Append tags to tag list. Don't add same tag more than once.
"""
response = self.db.alerts.update_one({'_id': {'$regex': '^' + id}}, {'$addToSet': {"tags": {'$each': tags}}})
return response.matched_count > 0
def untag_alert(self, id, tags):
"""
Remove tags from tag list.
"""
response = self.db.alerts.update_one({'_id': {'$regex': '^' + id}}, {'$pullAll': {"tags": tags}})
return response.matched_count > 0
def update_attributes(self, id, attrs):
"""
Set all attributes (including private attributes) and unset attributes by using a value of 'null'.
"""
update = dict()
set_value = {'attributes.' + k: v for k, v in attrs.items() if v is not None}
if set_value:
update['$set'] = set_value
unset_value = {'attributes.' + k: v for k, v in attrs.items() if v is None}
if unset_value:
update['$unset'] = unset_value
response = self.db.alerts.update_one({'_id': {'$regex': '^' + id}}, update=update)
return response.matched_count > 0
def delete_alert(self, id):
response = self.db.alerts.delete_one({'_id': {'$regex': '^' + id}})
return True if response.deleted_count == 1 else False
#### SEARCH & HISTORY
def get_alerts(self, query=None, sort=None, page=1, page_size=0):
return self.db.alerts.find(query, sort=sort).skip((page-1)*page_size).limit(page_size)
def get_history(self, query=None, fields=None):
if not fields:
fields = {
"resource": 1,
"event": 1,
"environment": 1,
"customer": 1,
"service": 1,
"group": 1,
"tags": 1,
"attributes": 1,
"origin": 1,
"type": 1,
"history": 1
}
pipeline = [
{'$match': query},
{'$unwind': '$history'},
{'$project': fields},
{'$limit': current_app.config['HISTORY_LIMIT']},
{'$sort': {'history.updateTime': 1}}
]
responses = self.db.alerts.aggregate(pipeline)
history = list()
for response in responses:
if 'severity' in response['history']:
history.append(
{
"id": response['_id'], # or response['history']['id']
"resource": response['resource'],
"event": response['history']['event'],
"environment": response['environment'],
"severity": response['history']['severity'],
"service": response['service'],
"group": response['group'],
"value": response['history']['value'],
"text": response['history']['text'],
"tags": response['tags'],
"attributes": response['attributes'],
"origin": response['origin'],
"updateTime": response['history']['updateTime'],
"type": response['history'].get('type', 'unknown'),
"customer": response.get('customer', None)
}
)
elif 'status' in response['history']:
history.append(
{
"id": response['_id'], # or response['history']['id']
"resource": response['resource'],
"event": response['event'],
"environment": response['environment'],
"status": response['history']['status'],
"service": response['service'],
"group": response['group'],
"text": response['history']['text'],
"tags": response['tags'],
"attributes": response['attributes'],
"origin": response['origin'],
"updateTime": response['history']['updateTime'],
"type": response['history'].get('type', 'unknown'),
"customer": response.get('customer', None)
}
)
return history
#### COUNTS
def get_count(self, query=None):
"""
Return total number of alerts that meet the query filter.
"""
return self.db.alerts.find(query).count()
def get_counts(self, query=None, fields=None, group=None):
pipeline = [
{'$match': query},
{'$project': fields or {}},
{'$group': {"_id": "$" + group, "count": {'$sum': 1}}}
]
responses = self.db.alerts.aggregate(pipeline)
counts = dict()
for response in responses:
counts[response['_id']] = response['count']
return counts
def get_counts_by_severity(self, query=None):
return self.get_counts(query, fields={"severity": 1}, group="severity")
def get_counts_by_status(self, query=None):
return self.get_counts(query, fields={"status": 1}, group="status")
def get_topn_count(self, query=None, group="event", topn=10):
pipeline = [
{'$match': query},
{'$unwind': '$service'},
{
'$group': {
"_id": "$%s" % group,
"count": {'$sum': 1},
"duplicateCount": {'$sum': "$duplicateCount"},
"environments": {'$addToSet': "$environment"},
"services": {'$addToSet': "$service"},
"resources": {'$addToSet': {"id": "$_id", "resource": "$resource"}}
}
},
{'$sort': {"count": -1, "duplicateCount": -1}},
{'$limit': topn}
]
responses = self.db.alerts.aggregate(pipeline)
top = list()
for response in responses:
top.append(
{
"%s" % group: response['_id'],
"environments": response['environments'],
"services": response['services'],
"resources": response['resources'],
"count": response['count'],
"duplicateCount": response['duplicateCount']
}
)
return top
def get_topn_flapping(self, query=None, group="event", topn=10):
pipeline = [
{'$match': query},
{'$unwind': '$service'},
{'$unwind': '$history'},
{'$match': {"history.type": "severity"}},
{
'$group': {
"_id": "$%s" % group,
"count": {'$sum': 1},
"duplicateCount": {'$max': "$duplicateCount"},
"environments": {'$addToSet': "$environment"},
"services": {'$addToSet': "$service"},
"resources": {'$addToSet': {"id": "$_id", "resource": "$resource"}}
}
},
{'$sort': {"count": -1, "duplicateCount": -1}},
{'$limit': topn}
]
responses = self.db.alerts.aggregate(pipeline)
top = list()
for response in responses:
top.append(
{
"%s" % group: response['_id'],
"environments": response['environments'],
"services": response['services'],
"resources": response['resources'],
"count": response['count'],
"duplicateCount": response['duplicateCount']
}
)
return top
#### ENVIRONMENTS
def get_environments(self, query=None, topn=100):
pipeline = [
{'$match': query},
{'$project': {"environment": 1}},
{'$limit': topn},
{'$group': {"_id": "$environment", "count": {'$sum': 1}}}
]
responses = self.db.alerts.aggregate(pipeline)
environments = list()
for response in responses:
environments.append(
{
"environment": response['_id'],
"count": response['count']
}
)
return environments
#### SERVICES
def get_services(self, query=None, topn=100):
pipeline = [
{'$unwind': '$service'},
{'$match': query},
{'$project': {"environment": 1, "service": 1}},
{'$limit': topn},
{'$group': {"_id": {"environment": "$environment", "service": "$service"}, "count": {'$sum': 1}}}
]
responses = self.db.alerts.aggregate(pipeline)
services = list()
for response in responses:
services.append(
{
"environment": response['_id']['environment'],
"service": response['_id']['service'],
"count": response['count']
}
)
return services
#### BLACKOUTS
def create_blackout(self, blackout):
data = {
"_id": blackout.id,
"priority": blackout.priority,
"environment": blackout.environment,
"startTime": blackout.start_time,
"endTime": blackout.end_time,
"duration": blackout.duration
}
if blackout.service:
data["service"] = blackout.service
if blackout.resource:
data["resource"] = blackout.resource
if blackout.event:
data["event"] = blackout.event
if blackout.group:
data["group"] = blackout.group
if blackout.tags:
data["tags"] = blackout.tags
if blackout.customer:
data["customer"] = blackout.customer
if self.db.blackouts.insert_one(data).inserted_id == blackout.id:
return data
def get_blackout(self, id, customer=None):
query = {'_id': id}
if customer:
query['customer'] = customer
return self.db.blackouts.find_one(query)
def get_blackouts(self, query=None, page=1, page_size=0):
return self.db.blackouts.find(query).skip((page - 1) * page_size).limit(page_size)
def is_blackout_period(self, alert):
now = datetime.utcnow()
query = dict()
query['startTime'] = {'$lte': now}
query['endTime'] = {'$gt': now}
query['environment'] = alert.environment
query['$or'] = [
{
"resource": {'$exists': False},
"service": {'$exists': False},
"event": {'$exists': False},
"group": {'$exists': False},
"tags": {'$exists': False}
},
{
"resource": alert.resource,
"service": {'$exists': False},
"event": {'$exists': False},
"group": {'$exists': False},
"tags": {'$exists': False}
},
{
"resource": {'$exists': False},
"service": {"$not": {"$elemMatch": {"$nin": alert.service}}},
"event": {'$exists': False},
"group": {'$exists': False},
"tags": {'$exists': False}
},
{
"resource": {'$exists': False},
"service": {'$exists': False},
"event": alert.event,
"group": {'$exists': False},
"tags": {'$exists': False}
},
{
"resource": {'$exists': False},
"service": {'$exists': False},
"event": {'$exists': False},
"group": alert.group,
"tags": {'$exists': False}
},
{
"resource": alert.resource,
"service": {'$exists': False},
"event": alert.event,
"group": {'$exists': False},
"tags": {'$exists': False}
},
{
"resource": {'$exists': False},
"service": {'$exists': False},
"event": {'$exists': False},
"group": {'$exists': False},
"tags": {"$not": {"$elemMatch": {"$nin": alert.tags}}}
}
]
if self.db.blackouts.find_one(query):
return True
if current_app.config['CUSTOMER_VIEWS']:
query['customer'] = alert.customer
if self.db.blackouts.find_one(query):
return True
return False
def delete_blackout(self, id):
response = self.db.blackouts.delete_one({"_id": id})
return True if response.deleted_count == 1 else False
#### HEARTBEATS
def upsert_heartbeat(self, heartbeat):
return self.db.heartbeats.find_one_and_update(
{
"origin": heartbeat.origin,
"customer": heartbeat.customer
},
{
'$setOnInsert': {
"_id": heartbeat.id
},
'$set': {
"origin": heartbeat.origin,
"tags": heartbeat.tags,
"type": heartbeat.event_type,
"createTime": heartbeat.create_time,
"timeout": heartbeat.timeout,
"receiveTime": heartbeat.receive_time,
"customer": heartbeat.customer
}
},
upsert=True,
return_document=ReturnDocument.AFTER
)
def get_heartbeat(self, id, customer=None):
if len(id) == 8:
query = {'_id': {'$regex': '^' + id}}
else:
query = {'_id': id}
if customer:
query['customer'] = customer
return self.db.heartbeats.find_one(query)
def get_heartbeats(self, query=None, page=1, page_size=0):
return self.db.heartbeats.find(query).skip((page - 1) * page_size).limit(page_size)
def delete_heartbeat(self, id):
response = self.db.heartbeats.delete_one({'_id': {'$regex': '^' + id}})
return True if response.deleted_count == 1 else False
#### API KEYS
# save
def create_key(self, key):
data = {
"_id": key.key,
"user": key.user,
"scopes": key.scopes,
"text": key.text,
"expireTime": key.expire_time,
"count": key.count,
"lastUsedTime": key.last_used_time
}
if key.customer:
data['customer'] = key.customer
if self.db.keys.insert_one(data).inserted_id == key.key:
return data
# get
def get_key(self, key, customer=None):
query = {'$or': [{'key': key}, {'_id': key}]}
if customer:
query['customer'] = customer
return self.db.keys.find_one(query)
# list
def get_keys(self, query=None, page=1, page_size=0):
return self.db.keys.find(query).skip((page - 1) * page_size).limit(page_size)
# update
def update_key_last_used(self, key):
return self.db.keys.update_one(
{'$or': [{'key': key}, {'_id': key}]},
{
'$set': {"lastUsedTime": datetime.utcnow()},
'$inc': {"count": 1}
}
).matched_count == 1
# delete
def delete_key(self, key):
query = {'$or': [{'key': key}, {'_id': key}]}
response = self.db.keys.delete_one(query)
return True if response.deleted_count == 1 else False
#### USERS
def create_user(self, user):
data = {
"_id": user.id,
"name": user.name,
"password": user.password,
"email": user.email,
"createTime": user.create_time,
"lastLogin": user.last_login,
"text": user.text,
"email_verified": user.email_verified
}
if self.db.users.insert_one(data).inserted_id == user.id:
return data
# get
def get_user(self, id, customer=None):
query = {'_id': id}
if customer:
query['customer'] = customer
return self.db.users.find_one(query)
# list
def get_users(self, query=None, page=1, page_size=0):
return self.db.users.find(query).skip((page - 1) * page_size).limit(page_size)
def get_user_by_email(self, email):
query = {"email": email}
return self.db.users.find_one(query)
def get_user_by_hash(self, hash):
query = {"hash": hash}
return self.db.users.find_one(query)
def get_user_password(self, id):
return
def update_last_login(self, id):
return self.db.users.update_one(
{"_id": id},
update={'$set': {"lastLogin": datetime.utcnow()}}
).matched_count == 1
def set_email_hash(self, id, hash):
return self.db.users.update_one(
{"_id": id},
update={'$set': {'hash': hash, 'updateTime': datetime.utcnow()}}
).matched_count == 1
def update_user(self, id, **kwargs):
return self.db.users.find_one_and_update(
{"_id": id},
update={'$set': kwargs},
return_document=ReturnDocument.AFTER
)
def delete_user(self, id):
response = self.db.users.delete_one({"_id": id})
return True if response.deleted_count == 1 else False
#### PERMISSIONS
def create_perm(self, perm):
data = {
"_id": perm.id,
"match": perm.match,
"scopes": perm.scopes
}
if self.db.perms.insert_one(data).inserted_id == perm.id:
return data
def get_perm(self, id):
query = {'_id': id}
return self.db.perms.find_one(query)
def get_perms(self, query=None, page=1, page_size=0):
return self.db.perms.find(query).skip((page - 1) * page_size).limit(page_size)
def delete_perm(self, id):
response = self.db.perms.delete_one({"_id": id})
return True if response.deleted_count == 1 else False
def get_scopes_by_match(self, login, matches):
if login in current_app.config['ADMIN_USERS']:
return ['admin', 'read', 'write']
scopes = list()
for match in matches:
response = self.db.perms.find_one({"match": match}, projection={"scopes": 1, "_id": 0})
if response:
scopes.extend(response['scopes'])
return set(scopes) or current_app.config['USER_DEFAULT_SCOPES']
#### CUSTOMERS
def create_customer(self, customer):
data = {
"_id": customer.id,
"match": customer.match,
"customer": customer.customer
}
if self.db.customers.insert_one(data).inserted_id == customer.id:
return data
def get_customer(self, id):
query = {'_id': id}
return self.db.customers.find_one(query)
def get_customers(self, query=None, page=1, page_size=0):
return self.db.customers.find(query).skip((page - 1) * page_size).limit(page_size)
def delete_customer(self, id):
response = self.db.customers.delete_one({"_id": id})
return True if response.deleted_count == 1 else False
def get_customers_by_match(self, login, matches):
if login in current_app.config['ADMIN_USERS']:
return '*' # all customers
for match in [login] + matches:
response = self.db.customers.find_one({"match": match}, projection={"customer": 1, "_id": 0})
if response:
return response['customer']
raise NoCustomerMatch("No customer lookup configured for user '%s' or '%s'" % (login, ','.join(matches)))
#### METRICS
def get_metrics(self, type=None):
query = {"type": type} if type else {}
return list(self.db.metrics.find(query, {"_id": 0}))
def set_gauge(self, group, name, title=None, description=None, value=0):
return self.db.metrics.find_one_and_update(
{
"group": group,
"name": name
},
{
'$set': {
"group": group,
"name": name,
"title": title,
"description": description,
"value": value,
"type": "gauge"
}
},
upsert=True,
return_document=ReturnDocument.AFTER
)['value']
def get_gauges(self):
from alerta.app.models.metrics import Gauge
return [
Gauge(
group=g.get('group'),
name=g.get('name'),
title=g.get('title', ''),
description=g.get('description', ''),
value=g.get('value', 0)
) for g in self.db.metrics.find({"type": "gauge"}, {"_id": 0})
]
def inc_counter(self, group, name, title=None, description=None, count=1):
return self.db.metrics.find_one_and_update(
{
"group": group,
"name": name
},
{
'$set': {
"group": group,
"name": name,
"title": title,
"description": description,
"type": "counter"
},
'$inc': {"count": count}
},
upsert=True,
return_document=ReturnDocument.AFTER
)['count']
def get_counters(self):
from alerta.app.models.metrics import Counter
return [
Counter(
group=c.get('group'),
name=c.get('name'),
title=c.get('title', ''),
description=c.get('description', ''),
count=c.get('count', 0)
) for c in self.db.metrics.find({"type": "counter"}, {"_id": 0})
]
def update_timer(self, group, name, title=None, description=None, count=1, duration=0):
return self.db.metrics.find_one_and_update(
{
"group": group,
"name": name
},
{
'$set': {
"group": group,
"name": name,
"title": title,
"description": description,
"type": "timer"
},
'$inc': {"count": count, "totalTime": duration}
},
upsert=True,
return_document=ReturnDocument.AFTER
)
def get_timers(self):
from alerta.app.models.metrics import Timer
return [
Timer(
group=t.get('group'),
name=t.get('name'),
title=t.get('title', ''),
description=t.get('description', ''),
count=t.get('count', 0),
total_time=t.get('totalTime', 0)
) for t in self.db.metrics.find({"type": "timer"}, {"_id": 0})
]
|
apache-2.0
| -2,205,730,669,139,260,000 | 33.756873 | 133 | 0.470994 | false |
ckan/ckanext-archiver
|
ckanext/archiver/lib.py
|
1
|
1725
|
import os
import logging
import ckan.plugins as p
from ckanext.archiver.tasks import update_package, update_resource
log = logging.getLogger(__name__)
def compat_enqueue(name, fn, queue, args=None):
u'''
Enqueue a background job using Celery or RQ.
'''
try:
# Try to use RQ
from ckan.plugins.toolkit import enqueue_job
enqueue_job(fn, args=args, queue=queue)
except ImportError:
# Fallback to Celery
import uuid
from ckan.lib.celery_app import celery
celery.send_task(name, args=args + [queue], task_id=str(uuid.uuid4()))
def create_archiver_resource_task(resource, queue):
from pylons import config
if p.toolkit.check_ckan_version(max_version='2.2.99'):
# earlier CKANs had ResourceGroup
package = resource.resource_group.package
else:
package = resource.package
ckan_ini_filepath = os.path.abspath(config['__file__'])
compat_enqueue('archiver.update_resource', update_resource, queue, [ckan_ini_filepath, resource.id])
log.debug('Archival of resource put into celery queue %s: %s/%s url=%r',
queue, package.name, resource.id, resource.url)
def create_archiver_package_task(package, queue):
from pylons import config
ckan_ini_filepath = os.path.abspath(config['__file__'])
compat_enqueue('archiver.update_package', update_package, queue, [ckan_ini_filepath, package.id])
log.debug('Archival of package put into celery queue %s: %s',
queue, package.name)
def get_extra_from_pkg_dict(pkg_dict, key, default=None):
for extra in pkg_dict.get('extras', []):
if extra['key'] == key:
return extra['value']
return default
|
mit
| 7,860,166,736,346,997,000 | 30.944444 | 104 | 0.663188 | false |
jhermann/rituals
|
src/rituals/util/shell.py
|
1
|
2127
|
# -*- coding: utf-8 -*-
# pylint: disable=bad-continuation
""" Shell command calls.
"""
# Copyright ⓒ 2015 Jürgen Hermann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full LICENSE file and source are available at
# https://github.com/jhermann/rituals
from __future__ import absolute_import, unicode_literals, print_function
import sys
from invoke import run as invoke_run
from invoke import exceptions
from . import notify
def capture(cmd, **kw):
"""Run a command and return its stripped captured output."""
kw = kw.copy()
kw['hide'] = 'out'
if not kw.get('echo', False):
kw['echo'] = False
ignore_failures = kw.pop('ignore_failures', False)
try:
return invoke_run(cmd, **kw).stdout.strip()
except exceptions.Failure as exc:
if not ignore_failures:
notify.error("Command `{}` failed with RC={}!".format(cmd, exc.result.return_code,))
raise
def run(cmd, **kw):
"""Run a command and flush its output."""
kw = kw.copy()
kw.setdefault('warn', False) # make extra sure errors don't get silenced
report_error = kw.pop('report_error', True)
runner = kw.pop('runner', invoke_run)
try:
return runner(cmd, **kw)
except exceptions.Failure as exc:
sys.stdout.flush()
sys.stderr.flush()
if report_error:
notify.error("Command `{}` failed with RC={}!".format(cmd, exc.result.return_code,))
raise
finally:
sys.stdout.flush()
sys.stderr.flush()
|
gpl-2.0
| 1,718,451,169,136,169,700 | 31.676923 | 96 | 0.669021 | false |
google-research/google-research
|
task_set/tasks/conv_fc_test.py
|
1
|
1058
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for task_set.tasks.conv_fc."""
from task_set.tasks import conv_fc
from task_set.tasks import family_test_utils
import tensorflow.compat.v1 as tf
class ConvFCTest(family_test_utils.TaskFamilyTestCase):
def __init__(self, *args, **kwargs):
super(ConvFCTest,
self).__init__(conv_fc.sample_conv_fc_family_cfg,
conv_fc.get_conv_fc_family, *args, **kwargs)
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
| -8,215,526,058,761,472,000 | 33.129032 | 74 | 0.710775 | false |
Nth-iteration-labs/streamingbandit
|
app/handlers/evalhandlers.py
|
1
|
3985
|
# -*- coding: utf-8 -*-
import json
import numpy as np
from handlers.basehandler import BaseHandler, ExceptionHandler
from core.experiment import Experiment
global numpy
global random
class Simulate(BaseHandler):
def get(self, exp_id):
""" Simulate your experiment based on four scripts, which create a closed feedback loop.
+--------------------------------------------------------------------+
| Example |
+====================================================================+
|http://example.com/eval/EXP_ID/simulate?N=1000&log_stats=True |
|&verbose=True&seed=10 |
+--------------------------------------------------------------------+
:requires: A secure cookie, obtained by logging in.
:param int exp_id: Experiment ID as specified in the url.
:param int N: The number of simulation draws.
:param bool log_stats: Flag for logging the results in the database (default is False)
:param bool verbose: Flag for displaying the results in the returning JSON object (default is True)
:param int seed (optional): Set numpy seed.
:returns: A JSON indicating success when verbose flag is False, and a JSON with all the data when verbose flag is True.
:raises 400: If the experiment does not belong to this user or the exp_id is wrong.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
if self.get_current_user():
if self.validate_user_experiment(exp_id):
N = int(self.get_argument("N", default = 1000))
log_stats = self.get_argument("log_stats", default = False)
verbose = self.get_argument("verbose", default = True)
seed = self.get_argument("seed", default = None)
if seed is None:
seed = np.random.randint(2**32-1, dtype=np.uint32)
if verbose == "True":
verbose = True
else:
verbose = False
if log_stats == "True":
log_stats = True
else:
log_stats = False
__EXP__ = Experiment(exp_id)
data = {}
with self.temp_seed(int(seed)):
for i in range(N):
# Generate context
context = __EXP__.run_context_code()
# Get action
action = __EXP__.run_action_code(context, {})
# Generate reward
reward = __EXP__.run_get_reward_code(context, action)
# Set reward
__EXP__.run_reward_code(context, action, reward)
# Get theta
theta = __EXP__.get_theta()
# Save stats
data[str(i)] = {'context' : context.copy(), 'action' : action.copy(), 'reward' : reward.copy(), 'theta' : theta.copy()}
context.clear()
action.clear()
reward.clear()
if log_stats == True:
__EXP__.log_simulation_data(data.copy())
data_tmp = data.copy()
data.clear()
if verbose == True:
self.write(json.dumps({'simulate':'success', 'experiment':exp_id, 'data':data_tmp}))
else:
self.write(json.dumps({'simulate':'success', 'experiment':exp_id, 'theta':theta}))
else:
raise ExceptionHandler(reason="Experiment could not be validated.", status_code=401)
else:
raise ExceptionHandler(reason="Could not validate user.", status_code=401)
|
mit
| -3,311,606,737,012,722,000 | 42.791209 | 143 | 0.473275 | false |
RobertKolner/hastebin-client
|
tests/test_haste_utils.py
|
1
|
1819
|
import pytest
import requests
from hastebin_client.utils import create_url, read_data, upload
from unittest import mock
def test_create_url():
assert create_url('key') == 'https://hastebin.com/key'
with pytest.raises(ValueError):
create_url(None)
def test_read_data_stdin():
with mock.patch('sys.stdin.read', mock.Mock(return_value='data')) as mock_read:
result = read_data()
mock_read.assert_called_once_with()
assert result == 'data'
def test_read_data_stdin_abort():
with mock.patch('sys.stdin.read', mock.Mock(side_effect=KeyboardInterrupt)):
result = read_data()
assert result == ''
def test_read_data_file():
with mock.patch('builtins.open', mock.mock_open(read_data='data')) as mock_open:
result = read_data('file')
mock_open.assert_called_once_with('file', 'r')
assert result == 'data'
def test_upload():
with mock.patch('requests.post') as post_function:
post_function.return_value.json.return_value = {'key': 'lonely-ranger'}
result = upload('test data')
assert result == 'lonely-ranger'
def test_upload_timeout():
post_mock = mock.Mock(side_effect=requests.exceptions.Timeout('Request timed out!'))
with mock.patch('requests.post', post_mock):
result = upload('test data')
assert result is None
def test_upload_error():
with mock.patch('requests.post') as post_function:
post_function.return_value.json.side_effect = ValueError('Invalid JSON')
result = upload('test data')
assert result is None
def test_upload_too_big():
with mock.patch('requests.post') as post_function:
post_function.return_value.json.return_value = {'message': 'Document exceeds maximum length.'}
result = upload('test data')
assert result is None
|
mit
| 7,434,796,016,962,102,000 | 29.830508 | 102 | 0.6663 | false |
brettcs/diffoscope
|
diffoscope/comparators/utils/archive.py
|
1
|
3833
|
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2016 Chris Lamb <lamby@debian.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import abc
import logging
from diffoscope.profiling import profile
from diffoscope.tempfiles import get_temporary_directory
from ..missing_file import MissingFile
from .file import File
from .container import Container
logger = logging.getLogger(__name__)
class Archive(Container, metaclass=abc.ABCMeta):
def __new__(cls, source, *args, **kwargs):
if isinstance(source, MissingFile):
return super(Container, MissingArchive).__new__(MissingArchive)
else:
return super(Container, cls).__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
with profile('open_archive', self):
self._archive = self.open_archive()
def __del__(self):
with profile('close_archive', self):
self.close_archive()
@property
def archive(self):
return self._archive
@abc.abstractmethod
def open_archive(self):
raise NotImplementedError()
@abc.abstractmethod
def close_archive(self):
raise NotImplementedError()
@abc.abstractmethod
def get_member_names(self):
raise NotImplementedError()
@abc.abstractmethod
def extract(self, member_name, dest_dir):
raise NotImplementedError()
def get_member(self, member_name):
return ArchiveMember(self, member_name)
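# Illustrative sketch (not part of diffoscope): a minimal concrete Archive
# subclass showing how the abstract hooks above are meant to be implemented.
# The class name and the use of the standard-library zipfile module are
# assumptions made purely for demonstration.
class _ExampleZipArchive(Archive):
    def open_archive(self):
        import zipfile
        return zipfile.ZipFile(self.source.path, 'r')
    def close_archive(self):
        self._archive.close()
    def get_member_names(self):
        return self._archive.namelist()
    def extract(self, member_name, dest_dir):
        # ZipFile.extract returns the path of the file it wrote under dest_dir
        return self._archive.extract(member_name, path=dest_dir)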
class ArchiveMember(File):
def __init__(self, container, member_name):
super().__init__(container=container)
self._name = member_name
self._temp_dir = None
self._path = None
@property
def path(self):
if self._path is None:
logger.debug("Unpacking %s", self._name)
assert self._temp_dir is None
self._temp_dir = get_temporary_directory()
with profile('container_extract', self.container):
self._path = self.container.extract(self._name, self._temp_dir.name)
return self._path
def cleanup(self):
if self._path is not None:
self._path = None
if self._temp_dir is not None:
self._temp_dir.cleanup()
self._temp_dir = None
super().cleanup()
def is_directory(self):
return False
def is_symlink(self):
return False
def is_device(self):
return False
class MissingArchiveLikeObject(object):
def getnames(self):
return []
def list(self, *args, **kwargs):
return ''
def close(self):
pass
class MissingArchive(Archive):
@property
def source(self):
return None
def open_archive(self):
return MissingArchiveLikeObject()
def close_archive(self):
pass
def get_member_names(self):
return []
def extract(self, member_name, dest_dir):
# should never be called
raise NotImplementedError()
def get_member(self, member_name):
return MissingFile('/dev/null')
# Be nice to gzip and the likes
@property
def path(self):
return '/dev/null'
|
gpl-3.0
| 5,791,410,520,121,028,000 | 25.611111 | 84 | 0.63857 | false |
edx/pyrasite
|
pyrasite/inspector.py
|
1
|
1168
|
# This file is part of pyrasite.
#
# pyrasite is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyrasite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyrasite. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011, 2012 Red Hat, Inc., Luke Macken <lmacken@redhat.com>
import subprocess
def inspect(pid, address):
"Return the value of an object in a given process at the specified address"
cmd = ' '.join([
'gdb --quiet -p %s -batch' % pid,
'-eval-command="print (PyObject *)%s"' % address,
])
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
for line in p.communicate()[0].split('\n'):
if line.startswith('$1 = '):
return line[5:]
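# Illustrative usage sketch (not part of the original module): the pid and the
# address are taken from the command line; any concrete values are placeholders
# and gdb must be installed for the call to succeed.
if __name__ == '__main__':
    import sys
    print(inspect(sys.argv[1], sys.argv[2]))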
|
gpl-3.0
| 1,011,668,733,586,748,900 | 39.275862 | 79 | 0.694349 | false |
googleapis/python-asset
|
google/cloud/asset_v1/types/asset_service.py
|
1
|
63408
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.asset_v1.types import assets as gca_assets
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import expr_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.asset.v1",
manifest={
"ContentType",
"ExportAssetsRequest",
"ExportAssetsResponse",
"ListAssetsRequest",
"ListAssetsResponse",
"BatchGetAssetsHistoryRequest",
"BatchGetAssetsHistoryResponse",
"CreateFeedRequest",
"GetFeedRequest",
"ListFeedsRequest",
"ListFeedsResponse",
"UpdateFeedRequest",
"DeleteFeedRequest",
"OutputConfig",
"OutputResult",
"GcsOutputResult",
"GcsDestination",
"BigQueryDestination",
"PartitionSpec",
"PubsubDestination",
"FeedOutputConfig",
"Feed",
"SearchAllResourcesRequest",
"SearchAllResourcesResponse",
"SearchAllIamPoliciesRequest",
"SearchAllIamPoliciesResponse",
"IamPolicyAnalysisQuery",
"AnalyzeIamPolicyRequest",
"AnalyzeIamPolicyResponse",
"IamPolicyAnalysisOutputConfig",
"AnalyzeIamPolicyLongrunningRequest",
"AnalyzeIamPolicyLongrunningResponse",
},
)
class ContentType(proto.Enum):
r"""Asset content type."""
CONTENT_TYPE_UNSPECIFIED = 0
RESOURCE = 1
IAM_POLICY = 2
ORG_POLICY = 4
ACCESS_POLICY = 5
OS_INVENTORY = 6
class ExportAssetsRequest(proto.Message):
r"""Export asset request.
Attributes:
parent (str):
Required. The relative name of the root
asset. This can only be an organization number
(such as "organizations/123"), a project ID
            (such as "projects/my-project-id"), a project
number (such as "projects/12345"), or a folder
number (such as "folders/123").
read_time (google.protobuf.timestamp_pb2.Timestamp):
Timestamp to take an asset snapshot. This can
only be set to a timestamp between the current
time and the current time minus 35 days
(inclusive). If not specified, the current time
will be used. Due to delays in resource data
collection and indexing, there is a volatile
window during which running the same query may
get different results.
asset_types (Sequence[str]):
A list of asset types to take a snapshot for. For example:
"compute.googleapis.com/Disk".
Regular expressions are also supported. For example:
- "compute.googleapis.com.*" snapshots resources whose
asset type starts with "compute.googleapis.com".
- ".*Instance" snapshots resources whose asset type ends
with "Instance".
- ".*Instance.*" snapshots resources whose asset type
contains "Instance".
See `RE2 <https://github.com/google/re2/wiki/Syntax>`__ for
all supported regular expression syntax. If the regular
expression does not match any supported asset type, an
INVALID_ARGUMENT error will be returned.
If specified, only matching assets will be returned,
otherwise, it will snapshot all asset types. See
`Introduction to Cloud Asset
Inventory <https://cloud.google.com/asset-inventory/docs/overview>`__
for all supported asset types.
content_type (google.cloud.asset_v1.types.ContentType):
Asset content type. If not specified, no
content but the asset name will be returned.
output_config (google.cloud.asset_v1.types.OutputConfig):
Required. Output configuration indicating
where the results will be output to.
"""
parent = proto.Field(proto.STRING, number=1,)
read_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
asset_types = proto.RepeatedField(proto.STRING, number=3,)
content_type = proto.Field(proto.ENUM, number=4, enum="ContentType",)
output_config = proto.Field(proto.MESSAGE, number=5, message="OutputConfig",)
class ExportAssetsResponse(proto.Message):
r"""The export asset response. This message is returned by the
[google.longrunning.Operations.GetOperation][google.longrunning.Operations.GetOperation]
method in the returned
[google.longrunning.Operation.response][google.longrunning.Operation.response]
field.
Attributes:
read_time (google.protobuf.timestamp_pb2.Timestamp):
Time the snapshot was taken.
output_config (google.cloud.asset_v1.types.OutputConfig):
Output configuration indicating where the
results were output to.
output_result (google.cloud.asset_v1.types.OutputResult):
Output result indicating where the assets were exported to.
For example, a set of actual Google Cloud Storage object
uris where the assets are exported to. The uris can be
different from what [output_config] has specified, as the
service will split the output object into multiple ones once
it exceeds a single Google Cloud Storage object limit.
"""
read_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
output_config = proto.Field(proto.MESSAGE, number=2, message="OutputConfig",)
output_result = proto.Field(proto.MESSAGE, number=3, message="OutputResult",)
class ListAssetsRequest(proto.Message):
r"""ListAssets request.
Attributes:
parent (str):
Required. Name of the organization or project the assets
belong to. Format: "organizations/[organization-number]"
(such as "organizations/123"), "projects/[project-id]" (such
as "projects/my-project-id"), or "projects/[project-number]"
(such as "projects/12345").
read_time (google.protobuf.timestamp_pb2.Timestamp):
Timestamp to take an asset snapshot. This can
only be set to a timestamp between the current
time and the current time minus 35 days
(inclusive). If not specified, the current time
will be used. Due to delays in resource data
collection and indexing, there is a volatile
window during which running the same query may
get different results.
asset_types (Sequence[str]):
A list of asset types to take a snapshot for. For example:
"compute.googleapis.com/Disk".
Regular expression is also supported. For example:
- "compute.googleapis.com.*" snapshots resources whose
asset type starts with "compute.googleapis.com".
- ".*Instance" snapshots resources whose asset type ends
with "Instance".
- ".*Instance.*" snapshots resources whose asset type
contains "Instance".
See `RE2 <https://github.com/google/re2/wiki/Syntax>`__ for
all supported regular expression syntax. If the regular
expression does not match any supported asset type, an
INVALID_ARGUMENT error will be returned.
If specified, only matching assets will be returned,
otherwise, it will snapshot all asset types. See
`Introduction to Cloud Asset
Inventory <https://cloud.google.com/asset-inventory/docs/overview>`__
for all supported asset types.
content_type (google.cloud.asset_v1.types.ContentType):
Asset content type. If not specified, no
content but the asset name will be returned.
page_size (int):
The maximum number of assets to be returned
in a single response. Default is 100, minimum is
1, and maximum is 1000.
page_token (str):
The ``next_page_token`` returned from the previous
``ListAssetsResponse``, or unspecified for the first
``ListAssetsRequest``. It is a continuation of a prior
``ListAssets`` call, and the API should return the next page
of assets.
"""
parent = proto.Field(proto.STRING, number=1,)
read_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
asset_types = proto.RepeatedField(proto.STRING, number=3,)
content_type = proto.Field(proto.ENUM, number=4, enum="ContentType",)
page_size = proto.Field(proto.INT32, number=5,)
page_token = proto.Field(proto.STRING, number=6,)
class ListAssetsResponse(proto.Message):
r"""ListAssets response.
Attributes:
read_time (google.protobuf.timestamp_pb2.Timestamp):
Time the snapshot was taken.
assets (Sequence[google.cloud.asset_v1.types.Asset]):
Assets.
next_page_token (str):
Token to retrieve the next page of results.
It expires 72 hours after the page token for the
first page is generated. Set to empty if there
are no remaining results.
"""
@property
def raw_page(self):
return self
read_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
assets = proto.RepeatedField(proto.MESSAGE, number=2, message=gca_assets.Asset,)
next_page_token = proto.Field(proto.STRING, number=3,)
class BatchGetAssetsHistoryRequest(proto.Message):
r"""Batch get assets history request.
Attributes:
parent (str):
Required. The relative name of the root
asset. It can only be an organization number
(such as "organizations/123"), a project ID
            (such as "projects/my-project-id"), or a
project number (such as "projects/12345").
asset_names (Sequence[str]):
A list of the full names of the assets. See:
https://cloud.google.com/asset-inventory/docs/resource-name-format
Example:
``//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1``.
The request becomes a no-op if the asset name list is empty,
and the max size of the asset name list is 100 in one
request.
content_type (google.cloud.asset_v1.types.ContentType):
Optional. The content type.
read_time_window (google.cloud.asset_v1.types.TimeWindow):
Optional. The time window for the asset history. Both
start_time and end_time are optional and if set, it must be
after the current time minus 35 days. If end_time is not
set, it is default to current timestamp. If start_time is
not set, the snapshot of the assets at end_time will be
returned. The returned results contain all temporal assets
whose time window overlap with read_time_window.
"""
parent = proto.Field(proto.STRING, number=1,)
asset_names = proto.RepeatedField(proto.STRING, number=2,)
content_type = proto.Field(proto.ENUM, number=3, enum="ContentType",)
read_time_window = proto.Field(
proto.MESSAGE, number=4, message=gca_assets.TimeWindow,
)
class BatchGetAssetsHistoryResponse(proto.Message):
r"""Batch get assets history response.
Attributes:
assets (Sequence[google.cloud.asset_v1.types.TemporalAsset]):
A list of assets with valid time windows.
"""
assets = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_assets.TemporalAsset,
)
class CreateFeedRequest(proto.Message):
r"""Create asset feed request.
Attributes:
parent (str):
Required. The name of the
project/folder/organization where this feed
should be created in. It can only be an
organization number (such as
"organizations/123"), a folder number (such as
"folders/123"), a project ID (such as
"projects/my-project-id")", or a project number
(such as "projects/12345").
feed_id (str):
Required. This is the client-assigned asset
feed identifier and it needs to be unique under
a specific parent project/folder/organization.
feed (google.cloud.asset_v1.types.Feed):
Required. The feed details. The field ``name`` must be empty
and it will be generated in the format of:
projects/project_number/feeds/feed_id
folders/folder_number/feeds/feed_id
organizations/organization_number/feeds/feed_id
"""
parent = proto.Field(proto.STRING, number=1,)
feed_id = proto.Field(proto.STRING, number=2,)
feed = proto.Field(proto.MESSAGE, number=3, message="Feed",)
class GetFeedRequest(proto.Message):
r"""Get asset feed request.
Attributes:
name (str):
Required. The name of the Feed and it must be in the format
of: projects/project_number/feeds/feed_id
folders/folder_number/feeds/feed_id
organizations/organization_number/feeds/feed_id
"""
name = proto.Field(proto.STRING, number=1,)
class ListFeedsRequest(proto.Message):
r"""List asset feeds request.
Attributes:
parent (str):
Required. The parent
project/folder/organization whose feeds are to
be listed. It can only be using
project/folder/organization number (such as
"folders/12345")", or a project ID (such as
"projects/my-project-id").
"""
parent = proto.Field(proto.STRING, number=1,)
class ListFeedsResponse(proto.Message):
r"""
Attributes:
feeds (Sequence[google.cloud.asset_v1.types.Feed]):
A list of feeds.
"""
feeds = proto.RepeatedField(proto.MESSAGE, number=1, message="Feed",)
class UpdateFeedRequest(proto.Message):
r"""Update asset feed request.
Attributes:
feed (google.cloud.asset_v1.types.Feed):
Required. The new values of feed details. It must match an
existing feed and the field ``name`` must be in the format
of: projects/project_number/feeds/feed_id or
folders/folder_number/feeds/feed_id or
organizations/organization_number/feeds/feed_id.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. Only updates the ``feed`` fields indicated by this
mask. The field mask must not be empty, and it must not
contain fields that are immutable or only set by the server.
"""
feed = proto.Field(proto.MESSAGE, number=1, message="Feed",)
update_mask = proto.Field(
proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
)
class DeleteFeedRequest(proto.Message):
r"""
Attributes:
name (str):
Required. The name of the feed and it must be in the format
of: projects/project_number/feeds/feed_id
folders/folder_number/feeds/feed_id
organizations/organization_number/feeds/feed_id
"""
name = proto.Field(proto.STRING, number=1,)
class OutputConfig(proto.Message):
r"""Output configuration for export assets destination.
Attributes:
gcs_destination (google.cloud.asset_v1.types.GcsDestination):
Destination on Cloud Storage.
bigquery_destination (google.cloud.asset_v1.types.BigQueryDestination):
Destination on BigQuery. The output table
stores the fields in asset proto as columns in
BigQuery.
"""
gcs_destination = proto.Field(
proto.MESSAGE, number=1, oneof="destination", message="GcsDestination",
)
bigquery_destination = proto.Field(
proto.MESSAGE, number=2, oneof="destination", message="BigQueryDestination",
)
class OutputResult(proto.Message):
r"""Output result of export assets.
Attributes:
gcs_result (google.cloud.asset_v1.types.GcsOutputResult):
Export result on Cloud Storage.
"""
gcs_result = proto.Field(
proto.MESSAGE, number=1, oneof="result", message="GcsOutputResult",
)
class GcsOutputResult(proto.Message):
r"""A Cloud Storage output result.
Attributes:
uris (Sequence[str]):
List of uris of the Cloud Storage objects. Example:
"gs://bucket_name/object_name".
"""
uris = proto.RepeatedField(proto.STRING, number=1,)
class GcsDestination(proto.Message):
r"""A Cloud Storage location.
Attributes:
uri (str):
The uri of the Cloud Storage object. It's the same uri that
is used by gsutil. Example: "gs://bucket_name/object_name".
See `Viewing and Editing Object
Metadata <https://cloud.google.com/storage/docs/viewing-editing-metadata>`__
for more information.
If the specified Cloud Storage object already exists and
there is no
`hold <https://cloud.google.com/storage/docs/object-holds>`__,
it will be overwritten with the exported result.
uri_prefix (str):
The uri prefix of all generated Cloud Storage objects.
Example: "gs://bucket_name/object_name_prefix". Each object
            uri is in format: "gs://bucket_name/object_name_prefix/<asset type>/<shard number>" and
            only contains assets for that type. <shard number> starts from 0. Example:
"gs://bucket_name/object_name_prefix/compute.googleapis.com/Disk/0"
is the first shard of output objects containing all
compute.googleapis.com/Disk assets. An INVALID_ARGUMENT
error will be returned if file with the same name
"gs://bucket_name/object_name_prefix" already exists.
"""
uri = proto.Field(proto.STRING, number=1, oneof="object_uri",)
uri_prefix = proto.Field(proto.STRING, number=2, oneof="object_uri",)
class BigQueryDestination(proto.Message):
r"""A BigQuery destination for exporting assets to.
Attributes:
dataset (str):
Required. The BigQuery dataset in format
"projects/projectId/datasets/datasetId", to which the
snapshot result should be exported. If this dataset does not
exist, the export call returns an INVALID_ARGUMENT error.
table (str):
Required. The BigQuery table to which the
snapshot result should be written. If this table
does not exist, a new table with the given name
will be created.
force (bool):
If the destination table already exists and this flag is
``TRUE``, the table will be overwritten by the contents of
assets snapshot. If the flag is ``FALSE`` or unset and the
destination table already exists, the export call returns an
INVALID_ARGUMEMT error.
partition_spec (google.cloud.asset_v1.types.PartitionSpec):
[partition_spec] determines whether to export to partitioned
table(s) and how to partition the data.
If [partition_spec] is unset or
[partition_spec.partition_key] is unset or
``PARTITION_KEY_UNSPECIFIED``, the snapshot results will be
exported to non-partitioned table(s). [force] will decide
whether to overwrite existing table(s).
If [partition_spec] is specified. First, the snapshot
results will be written to partitioned table(s) with two
additional timestamp columns, readTime and requestTime, one
of which will be the partition key. Secondly, in the case
when any destination table already exists, it will first try
to update existing table's schema as necessary by appending
additional columns. Then, if [force] is ``TRUE``, the
corresponding partition will be overwritten by the snapshot
results (data in different partitions will remain intact);
if [force] is unset or ``FALSE``, it will append the data.
An error will be returned if the schema update or data
            appending fails.
separate_tables_per_asset_type (bool):
If this flag is ``TRUE``, the snapshot results will be
written to one or multiple tables, each of which contains
results of one asset type. The [force] and [partition_spec]
fields will apply to each of them.
            Field [table] will be concatenated with "_" and the asset
type names (see
https://cloud.google.com/asset-inventory/docs/supported-asset-types
for supported asset types) to construct per-asset-type table
            names, in which all non-alphanumeric characters like "." and
            "/" will be substituted by "_". Example: if field [table] is
"mytable" and snapshot results contain
"storage.googleapis.com/Bucket" assets, the corresponding
table name will be "mytable_storage_googleapis_com_Bucket".
If any of these tables does not exist, a new table with the
concatenated name will be created.
When [content_type] in the ExportAssetsRequest is
``RESOURCE``, the schema of each table will include
RECORD-type columns mapped to the nested fields in the
Asset.resource.data field of that asset type (up to the 15
nested level BigQuery supports
(https://cloud.google.com/bigquery/docs/nested-repeated#limitations)).
The fields in >15 nested levels will be stored in JSON
format string as a child column of its parent RECORD column.
If error occurs when exporting to any table, the whole
export call will return an error but the export results that
already succeed will persist. Example: if exporting to
table_type_A succeeds when exporting to table_type_B fails
during one export call, the results in table_type_A will
persist and there will not be partial results persisting in
a table.
"""
dataset = proto.Field(proto.STRING, number=1,)
table = proto.Field(proto.STRING, number=2,)
force = proto.Field(proto.BOOL, number=3,)
partition_spec = proto.Field(proto.MESSAGE, number=4, message="PartitionSpec",)
separate_tables_per_asset_type = proto.Field(proto.BOOL, number=5,)
class PartitionSpec(proto.Message):
r"""Specifications of BigQuery partitioned table as export
destination.
Attributes:
partition_key (google.cloud.asset_v1.types.PartitionSpec.PartitionKey):
The partition key for BigQuery partitioned
table.
"""
class PartitionKey(proto.Enum):
r"""This enum is used to determine the partition key column when
exporting assets to BigQuery partitioned table(s). Note that, if the
partition key is a timestamp column, the actual partition is based
on its date value (expressed in UTC. see details in
https://cloud.google.com/bigquery/docs/partitioned-tables#date_timestamp_partitioned_tables).
"""
PARTITION_KEY_UNSPECIFIED = 0
READ_TIME = 1
REQUEST_TIME = 2
partition_key = proto.Field(proto.ENUM, number=1, enum=PartitionKey,)
class PubsubDestination(proto.Message):
r"""A Pub/Sub destination.
Attributes:
topic (str):
The name of the Pub/Sub topic to publish to. Example:
``projects/PROJECT_ID/topics/TOPIC_ID``.
"""
topic = proto.Field(proto.STRING, number=1,)
class FeedOutputConfig(proto.Message):
r"""Output configuration for asset feed destination.
Attributes:
pubsub_destination (google.cloud.asset_v1.types.PubsubDestination):
Destination on Pub/Sub.
"""
pubsub_destination = proto.Field(
proto.MESSAGE, number=1, oneof="destination", message="PubsubDestination",
)
class Feed(proto.Message):
    r"""An asset feed used to export asset updates to a destination.
An asset feed filter controls what updates are exported. The
asset feed must be created within a project, organization, or
folder. Supported destinations are:
Pub/Sub topics.
Attributes:
name (str):
Required. The format will be
projects/{project_number}/feeds/{client-assigned_feed_identifier}
or
folders/{folder_number}/feeds/{client-assigned_feed_identifier}
or
organizations/{organization_number}/feeds/{client-assigned_feed_identifier}
The client-assigned feed identifier must be unique within
the parent project/folder/organization.
asset_names (Sequence[str]):
A list of the full names of the assets to receive updates.
You must specify either or both of asset_names and
asset_types. Only asset updates matching specified
asset_names or asset_types are exported to the feed.
Example:
``//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1``.
See `Resource
Names <https://cloud.google.com/apis/design/resource_names#full_resource_name>`__
for more info.
asset_types (Sequence[str]):
A list of types of the assets to receive updates. You must
specify either or both of asset_names and asset_types. Only
asset updates matching specified asset_names or asset_types
are exported to the feed. Example:
``"compute.googleapis.com/Disk"``
See `this
topic <https://cloud.google.com/asset-inventory/docs/supported-asset-types>`__
for a list of all supported asset types.
content_type (google.cloud.asset_v1.types.ContentType):
Asset content type. If not specified, no
content but the asset name and type will be
returned.
feed_output_config (google.cloud.asset_v1.types.FeedOutputConfig):
Required. Feed output configuration defining
where the asset updates are published to.
condition (google.type.expr_pb2.Expr):
A condition which determines whether an asset update should
be published. If specified, an asset will be returned only
when the expression evaluates to true. When set,
``expression`` field in the ``Expr`` must be a valid [CEL
expression] (https://github.com/google/cel-spec) on a
TemporalAsset with name ``temporal_asset``. Example: a Feed
with expression ("temporal_asset.deleted == true") will only
publish Asset deletions. Other fields of ``Expr`` are
optional.
See our `user
guide <https://cloud.google.com/asset-inventory/docs/monitoring-asset-changes#feed_with_condition>`__
for detailed instructions.
"""
name = proto.Field(proto.STRING, number=1,)
asset_names = proto.RepeatedField(proto.STRING, number=2,)
asset_types = proto.RepeatedField(proto.STRING, number=3,)
content_type = proto.Field(proto.ENUM, number=4, enum="ContentType",)
feed_output_config = proto.Field(
proto.MESSAGE, number=5, message="FeedOutputConfig",
)
condition = proto.Field(proto.MESSAGE, number=6, message=expr_pb2.Expr,)
class SearchAllResourcesRequest(proto.Message):
r"""Search all resources request.
Attributes:
scope (str):
Required. A scope can be a project, a folder, or an
organization. The search is limited to the resources within
the ``scope``. The caller must be granted the
```cloudasset.assets.searchAllResources`` <https://cloud.google.com/asset-inventory/docs/access-control#required_permissions>`__
permission on the desired scope.
The allowed values are:
- projects/{PROJECT_ID} (e.g., "projects/foo-bar")
- projects/{PROJECT_NUMBER} (e.g., "projects/12345678")
- folders/{FOLDER_NUMBER} (e.g., "folders/1234567")
- organizations/{ORGANIZATION_NUMBER} (e.g.,
"organizations/123456")
query (str):
Optional. The query statement. See `how to construct a
query <https://cloud.google.com/asset-inventory/docs/searching-resources#how_to_construct_a_query>`__
for more information. If not specified or empty, it will
search all the resources within the specified ``scope``.
Examples:
- ``name:Important`` to find Cloud resources whose name
contains "Important" as a word.
- ``name=Important`` to find the Cloud resource whose name
is exactly "Important".
- ``displayName:Impor*`` to find Cloud resources whose
display name contains "Impor" as a prefix of any word in
the field.
- ``location:us-west*`` to find Cloud resources whose
location contains both "us" and "west" as prefixes.
- ``labels:prod`` to find Cloud resources whose labels
contain "prod" as a key or value.
- ``labels.env:prod`` to find Cloud resources that have a
label "env" and its value is "prod".
- ``labels.env:*`` to find Cloud resources that have a
label "env".
- ``kmsKey:key`` to find Cloud resources encrypted with a
customer-managed encryption key whose name contains the
word "key".
- ``state:ACTIVE`` to find Cloud resources whose state
contains "ACTIVE" as a word.
            - ``NOT state:ACTIVE`` to find Cloud resources whose
state doesn't contain "ACTIVE" as a word.
- ``createTime<1609459200`` to find Cloud resources that
were created before "2021-01-01 00:00:00 UTC". 1609459200
is the epoch timestamp of "2021-01-01 00:00:00 UTC" in
seconds.
- ``updateTime>1609459200`` to find Cloud resources that
were updated after "2021-01-01 00:00:00 UTC". 1609459200
is the epoch timestamp of "2021-01-01 00:00:00 UTC" in
seconds.
- ``Important`` to find Cloud resources that contain
"Important" as a word in any of the searchable fields.
- ``Impor*`` to find Cloud resources that contain "Impor"
as a prefix of any word in any of the searchable fields.
- ``Important location:(us-west1 OR global)`` to find Cloud
resources that contain "Important" as a word in any of
the searchable fields and are also located in the
"us-west1" region or the "global" location.
asset_types (Sequence[str]):
Optional. A list of asset types that this request searches
for. If empty, it will search all the `searchable asset
types <https://cloud.google.com/asset-inventory/docs/supported-asset-types#searchable_asset_types>`__.
Regular expressions are also supported. For example:
- "compute.googleapis.com.*" snapshots resources whose
asset type starts with "compute.googleapis.com".
- ".*Instance" snapshots resources whose asset type ends
with "Instance".
- ".*Instance.*" snapshots resources whose asset type
contains "Instance".
See `RE2 <https://github.com/google/re2/wiki/Syntax>`__ for
all supported regular expression syntax. If the regular
expression does not match any supported asset type, an
INVALID_ARGUMENT error will be returned.
page_size (int):
Optional. The page size for search result pagination. Page
size is capped at 500 even if a larger value is given. If
set to zero, server will pick an appropriate default.
Returned results may be fewer than requested. When this
happens, there could be more results as long as
``next_page_token`` is returned.
page_token (str):
Optional. If present, then retrieve the next batch of
results from the preceding call to this method.
``page_token`` must be the value of ``next_page_token`` from
the previous response. The values of all other method
            parameters must be identical to those in the previous call.
order_by (str):
Optional. A comma-separated list of fields specifying the
sorting order of the results. The default order is
ascending. Add " DESC" after the field name to indicate
descending order. Redundant space characters are ignored.
Example: "location DESC, name". Only singular primitive
fields in the response are sortable:
- name
- assetType
- project
- displayName
- description
- location
- kmsKey
- createTime
- updateTime
- state
- parentFullResourceName
- parentAssetType All the other fields such as repeated
fields (e.g., ``networkTags``), map fields (e.g.,
``labels``) and struct fields (e.g.,
``additionalAttributes``) are not supported.
"""
scope = proto.Field(proto.STRING, number=1,)
query = proto.Field(proto.STRING, number=2,)
asset_types = proto.RepeatedField(proto.STRING, number=3,)
page_size = proto.Field(proto.INT32, number=4,)
page_token = proto.Field(proto.STRING, number=5,)
order_by = proto.Field(proto.STRING, number=6,)
class SearchAllResourcesResponse(proto.Message):
r"""Search all resources response.
Attributes:
results (Sequence[google.cloud.asset_v1.types.ResourceSearchResult]):
A list of Resources that match the search
query. It contains the resource standard
metadata information.
next_page_token (str):
If there are more results than those appearing in this
response, then ``next_page_token`` is included. To get the
next set of results, call this method again using the value
of ``next_page_token`` as ``page_token``.
"""
@property
def raw_page(self):
return self
results = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_assets.ResourceSearchResult,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class SearchAllIamPoliciesRequest(proto.Message):
r"""Search all IAM policies request.
Attributes:
scope (str):
Required. A scope can be a project, a folder, or an
organization. The search is limited to the IAM policies
within the ``scope``. The caller must be granted the
```cloudasset.assets.searchAllIamPolicies`` <https://cloud.google.com/asset-inventory/docs/access-control#required_permissions>`__
permission on the desired scope.
The allowed values are:
- projects/{PROJECT_ID} (e.g., "projects/foo-bar")
- projects/{PROJECT_NUMBER} (e.g., "projects/12345678")
- folders/{FOLDER_NUMBER} (e.g., "folders/1234567")
- organizations/{ORGANIZATION_NUMBER} (e.g.,
"organizations/123456")
query (str):
Optional. The query statement. See `how to construct a
query <https://cloud.google.com/asset-inventory/docs/searching-iam-policies#how_to_construct_a_query>`__
for more information. If not specified or empty, it will
search all the IAM policies within the specified ``scope``.
Note that the query string is compared against each Cloud
IAM policy binding, including its members, roles, and Cloud
IAM conditions. The returned Cloud IAM policies will only
contain the bindings that match your query. To learn more
about the IAM policy structure, see `IAM policy
doc <https://cloud.google.com/iam/docs/policies#structure>`__.
Examples:
- ``policy:amy@gmail.com`` to find IAM policy bindings that
specify user "amy@gmail.com".
- ``policy:roles/compute.admin`` to find IAM policy
bindings that specify the Compute Admin role.
- ``policy:comp*`` to find IAM policy bindings that contain
"comp" as a prefix of any word in the binding.
- ``policy.role.permissions:storage.buckets.update`` to
find IAM policy bindings that specify a role containing
"storage.buckets.update" permission. Note that if callers
don't have ``iam.roles.get`` access to a role's included
permissions, policy bindings that specify this role will
be dropped from the search results.
- ``policy.role.permissions:upd*`` to find IAM policy
bindings that specify a role containing "upd" as a prefix
of any word in the role permission. Note that if callers
don't have ``iam.roles.get`` access to a role's included
permissions, policy bindings that specify this role will
be dropped from the search results.
- ``resource:organizations/123456`` to find IAM policy
bindings that are set on "organizations/123456".
- ``resource=//cloudresourcemanager.googleapis.com/projects/myproject``
to find IAM policy bindings that are set on the project
named "myproject".
- ``Important`` to find IAM policy bindings that contain
"Important" as a word in any of the searchable fields
(except for the included permissions).
- ``resource:(instance1 OR instance2) policy:amy`` to find
IAM policy bindings that are set on resources "instance1"
or "instance2" and also specify user "amy".
- ``roles:roles/compute.admin`` to find IAM policy bindings
that specify the Compute Admin role.
- ``memberTypes:user`` to find IAM policy bindings that
contain the "user" member type.
page_size (int):
Optional. The page size for search result pagination. Page
size is capped at 500 even if a larger value is given. If
set to zero, server will pick an appropriate default.
Returned results may be fewer than requested. When this
happens, there could be more results as long as
``next_page_token`` is returned.
page_token (str):
Optional. If present, retrieve the next batch of results
from the preceding call to this method. ``page_token`` must
be the value of ``next_page_token`` from the previous
response. The values of all other method parameters must be
identical to those in the previous call.
asset_types (Sequence[str]):
Optional. A list of asset types that the IAM policies are
attached to. If empty, it will search the IAM policies that
are attached to all the `searchable asset
types <https://cloud.google.com/asset-inventory/docs/supported-asset-types#searchable_asset_types>`__.
Regular expressions are also supported. For example:
- "compute.googleapis.com.*" snapshots IAM policies
attached to asset type starts with
"compute.googleapis.com".
- ".*Instance" snapshots IAM policies attached to asset
type ends with "Instance".
- ".*Instance.*" snapshots IAM policies attached to asset
type contains "Instance".
See `RE2 <https://github.com/google/re2/wiki/Syntax>`__ for
all supported regular expression syntax. If the regular
expression does not match any supported asset type, an
INVALID_ARGUMENT error will be returned.
order_by (str):
Optional. A comma-separated list of fields specifying the
sorting order of the results. The default order is
ascending. Add " DESC" after the field name to indicate
descending order. Redundant space characters are ignored.
Example: "assetType DESC, resource". Only singular primitive
fields in the response are sortable:
- resource
- assetType
- project All the other fields such as repeated fields
(e.g., ``folders``) and non-primitive fields (e.g.,
``policy``) are not supported.
"""
scope = proto.Field(proto.STRING, number=1,)
query = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
asset_types = proto.RepeatedField(proto.STRING, number=5,)
order_by = proto.Field(proto.STRING, number=7,)
class SearchAllIamPoliciesResponse(proto.Message):
r"""Search all IAM policies response.
Attributes:
results (Sequence[google.cloud.asset_v1.types.IamPolicySearchResult]):
A list of IamPolicy that match the search
query. Related information such as the
associated resource is returned along with the
policy.
next_page_token (str):
Set if there are more results than those appearing in this
response; to get the next set of results, call this method
again, using this value as the ``page_token``.
"""
@property
def raw_page(self):
return self
results = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_assets.IamPolicySearchResult,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class IamPolicyAnalysisQuery(proto.Message):
    r"""IAM policy analysis query message.
Attributes:
scope (str):
Required. The relative name of the root asset. Only
resources and IAM policies within the scope will be
analyzed.
This can only be an organization number (such as
"organizations/123"), a folder number (such as
"folders/123"), a project ID (such as
"projects/my-project-id"), or a project number (such as
"projects/12345").
To know how to get organization id, visit
`here <https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id>`__.
To know how to get folder or project id, visit
`here <https://cloud.google.com/resource-manager/docs/creating-managing-folders#viewing_or_listing_folders_and_projects>`__.
resource_selector (google.cloud.asset_v1.types.IamPolicyAnalysisQuery.ResourceSelector):
Optional. Specifies a resource for analysis.
identity_selector (google.cloud.asset_v1.types.IamPolicyAnalysisQuery.IdentitySelector):
Optional. Specifies an identity for analysis.
access_selector (google.cloud.asset_v1.types.IamPolicyAnalysisQuery.AccessSelector):
Optional. Specifies roles or permissions for
analysis. This is optional.
options (google.cloud.asset_v1.types.IamPolicyAnalysisQuery.Options):
Optional. The query options.
condition_context (google.cloud.asset_v1.types.IamPolicyAnalysisQuery.ConditionContext):
Optional. The hypothetical context for IAM
conditions evaluation.
"""
class ResourceSelector(proto.Message):
r"""Specifies the resource to analyze for access policies, which
may be set directly on the resource, or on ancestors such as
organizations, folders or projects.
Attributes:
full_resource_name (str):
Required. The [full resource name]
(https://cloud.google.com/asset-inventory/docs/resource-name-format)
of a resource of `supported resource
types <https://cloud.google.com/asset-inventory/docs/supported-asset-types#analyzable_asset_types>`__.
"""
full_resource_name = proto.Field(proto.STRING, number=1,)
class IdentitySelector(proto.Message):
r"""Specifies an identity for which to determine resource access,
based on roles assigned either directly to them or to the groups
they belong to, directly or indirectly.
Attributes:
identity (str):
Required. The identity appear in the form of members in `IAM
policy
binding <https://cloud.google.com/iam/reference/rest/v1/Binding>`__.
The examples of supported forms are:
"user:mike@example.com", "group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com".
Notice that wildcard characters (such as \* and ?) are not
supported. You must give a specific identity.
"""
identity = proto.Field(proto.STRING, number=1,)
class AccessSelector(proto.Message):
r"""Specifies roles and/or permissions to analyze, to determine
both the identities possessing them and the resources they
control. If multiple values are specified, results will include
roles or permissions matching any of them. The total number of
roles and permissions should be equal or less than 10.
Attributes:
roles (Sequence[str]):
Optional. The roles to appear in result.
permissions (Sequence[str]):
Optional. The permissions to appear in
result.
"""
roles = proto.RepeatedField(proto.STRING, number=1,)
permissions = proto.RepeatedField(proto.STRING, number=2,)
class Options(proto.Message):
r"""Contains query options.
Attributes:
expand_groups (bool):
Optional. If true, the identities section of the result will
expand any Google groups appearing in an IAM policy binding.
If
[IamPolicyAnalysisQuery.identity_selector][google.cloud.asset.v1.IamPolicyAnalysisQuery.identity_selector]
is specified, the identity in the result will be determined
                by the selector, and this flag is not allowed to be set.
Default is false.
expand_roles (bool):
Optional. If true, the access section of result will expand
any roles appearing in IAM policy bindings to include their
permissions.
If
[IamPolicyAnalysisQuery.access_selector][google.cloud.asset.v1.IamPolicyAnalysisQuery.access_selector]
is specified, the access section of the result will be
determined by the selector, and this flag is not allowed to
                be set.
Default is false.
expand_resources (bool):
Optional. If true and
[IamPolicyAnalysisQuery.resource_selector][google.cloud.asset.v1.IamPolicyAnalysisQuery.resource_selector]
is not specified, the resource section of the result will
expand any resource attached to an IAM policy to include
resources lower in the resource hierarchy.
For example, if the request analyzes for which resources
user A has permission P, and the results include an IAM
policy with P on a GCP folder, the results will also include
resources in that folder with permission P.
If true and
[IamPolicyAnalysisQuery.resource_selector][google.cloud.asset.v1.IamPolicyAnalysisQuery.resource_selector]
is specified, the resource section of the result will expand
the specified resource to include resources lower in the
resource hierarchy. Only project or lower resources are
supported. Folder and organization resource cannot be used
together with this option.
For example, if the request analyzes for which users have
permission P on a GCP project with this option enabled, the
results will include all users who have permission P on that
project or any lower resource.
Default is false.
output_resource_edges (bool):
Optional. If true, the result will output
resource edges, starting from the policy
attached resource, to any expanded resources.
Default is false.
output_group_edges (bool):
Optional. If true, the result will output
group identity edges, starting from the
binding's group members, to any expanded
identities. Default is false.
analyze_service_account_impersonation (bool):
Optional. If true, the response will include access analysis
from identities to resources via service account
impersonation. This is a very expensive operation, because
many derived queries will be executed. We highly recommend
you use
[AssetService.AnalyzeIamPolicyLongrunning][google.cloud.asset.v1.AssetService.AnalyzeIamPolicyLongrunning]
rpc instead.
For example, if the request analyzes for which resources
user A has permission P, and there's an IAM policy states
user A has iam.serviceAccounts.getAccessToken permission to
a service account SA, and there's another IAM policy states
service account SA has permission P to a GCP folder F, then
user A potentially has access to the GCP folder F. And those
advanced analysis results will be included in
[AnalyzeIamPolicyResponse.service_account_impersonation_analysis][google.cloud.asset.v1.AnalyzeIamPolicyResponse.service_account_impersonation_analysis].
Another example, if the request analyzes for who has
permission P to a GCP folder F, and there's an IAM policy
states user A has iam.serviceAccounts.actAs permission to a
service account SA, and there's another IAM policy states
service account SA has permission P to the GCP folder F,
then user A potentially has access to the GCP folder F. And
those advanced analysis results will be included in
[AnalyzeIamPolicyResponse.service_account_impersonation_analysis][google.cloud.asset.v1.AnalyzeIamPolicyResponse.service_account_impersonation_analysis].
Default is false.
"""
expand_groups = proto.Field(proto.BOOL, number=1,)
expand_roles = proto.Field(proto.BOOL, number=2,)
expand_resources = proto.Field(proto.BOOL, number=3,)
output_resource_edges = proto.Field(proto.BOOL, number=4,)
output_group_edges = proto.Field(proto.BOOL, number=5,)
analyze_service_account_impersonation = proto.Field(proto.BOOL, number=6,)
class ConditionContext(proto.Message):
r"""The IAM conditions context.
Attributes:
access_time (google.protobuf.timestamp_pb2.Timestamp):
The hypothetical access timestamp to evaluate IAM
conditions. Note that this value must not be earlier than
the current time; otherwise, an INVALID_ARGUMENT error will
be returned.
"""
access_time = proto.Field(
proto.MESSAGE,
number=1,
oneof="TimeContext",
message=timestamp_pb2.Timestamp,
)
scope = proto.Field(proto.STRING, number=1,)
resource_selector = proto.Field(proto.MESSAGE, number=2, message=ResourceSelector,)
identity_selector = proto.Field(proto.MESSAGE, number=3, message=IdentitySelector,)
access_selector = proto.Field(proto.MESSAGE, number=4, message=AccessSelector,)
options = proto.Field(proto.MESSAGE, number=5, message=Options,)
condition_context = proto.Field(proto.MESSAGE, number=6, message=ConditionContext,)
class AnalyzeIamPolicyRequest(proto.Message):
r"""A request message for
[AssetService.AnalyzeIamPolicy][google.cloud.asset.v1.AssetService.AnalyzeIamPolicy].
Attributes:
analysis_query (google.cloud.asset_v1.types.IamPolicyAnalysisQuery):
Required. The request query.
execution_timeout (google.protobuf.duration_pb2.Duration):
Optional. Amount of time executable has to complete. See
JSON representation of
`Duration <https://developers.google.com/protocol-buffers/docs/proto3#json>`__.
If this field is set with a value less than the RPC
deadline, and the execution of your query hasn't finished in
the specified execution timeout, you will get a response
with partial result. Otherwise, your query's execution will
continue until the RPC deadline. If it's not finished until
then, you will get a DEADLINE_EXCEEDED error.
Default is empty.
"""
analysis_query = proto.Field(
proto.MESSAGE, number=1, message="IamPolicyAnalysisQuery",
)
execution_timeout = proto.Field(
proto.MESSAGE, number=2, message=duration_pb2.Duration,
)
class AnalyzeIamPolicyResponse(proto.Message):
r"""A response message for
[AssetService.AnalyzeIamPolicy][google.cloud.asset.v1.AssetService.AnalyzeIamPolicy].
Attributes:
main_analysis (google.cloud.asset_v1.types.AnalyzeIamPolicyResponse.IamPolicyAnalysis):
The main analysis that matches the original
request.
service_account_impersonation_analysis (Sequence[google.cloud.asset_v1.types.AnalyzeIamPolicyResponse.IamPolicyAnalysis]):
The service account impersonation analysis if
[AnalyzeIamPolicyRequest.analyze_service_account_impersonation][]
is enabled.
fully_explored (bool):
Represents whether all entries in the
[main_analysis][google.cloud.asset.v1.AnalyzeIamPolicyResponse.main_analysis]
and
[service_account_impersonation_analysis][google.cloud.asset.v1.AnalyzeIamPolicyResponse.service_account_impersonation_analysis]
have been fully explored to answer the query in the request.
"""
class IamPolicyAnalysis(proto.Message):
r"""An analysis message to group the query and results.
Attributes:
analysis_query (google.cloud.asset_v1.types.IamPolicyAnalysisQuery):
The analysis query.
analysis_results (Sequence[google.cloud.asset_v1.types.IamPolicyAnalysisResult]):
A list of
[IamPolicyAnalysisResult][google.cloud.asset.v1.IamPolicyAnalysisResult]
that matches the analysis query, or empty if no result is
found.
fully_explored (bool):
Represents whether all entries in the
[analysis_results][google.cloud.asset.v1.AnalyzeIamPolicyResponse.IamPolicyAnalysis.analysis_results]
have been fully explored to answer the query.
non_critical_errors (Sequence[google.cloud.asset_v1.types.IamPolicyAnalysisState]):
A list of non-critical errors happened during
the query handling.
"""
analysis_query = proto.Field(
proto.MESSAGE, number=1, message="IamPolicyAnalysisQuery",
)
analysis_results = proto.RepeatedField(
proto.MESSAGE, number=2, message=gca_assets.IamPolicyAnalysisResult,
)
fully_explored = proto.Field(proto.BOOL, number=3,)
non_critical_errors = proto.RepeatedField(
proto.MESSAGE, number=5, message=gca_assets.IamPolicyAnalysisState,
)
main_analysis = proto.Field(proto.MESSAGE, number=1, message=IamPolicyAnalysis,)
service_account_impersonation_analysis = proto.RepeatedField(
proto.MESSAGE, number=2, message=IamPolicyAnalysis,
)
fully_explored = proto.Field(proto.BOOL, number=3,)
class IamPolicyAnalysisOutputConfig(proto.Message):
r"""Output configuration for export IAM policy analysis
destination.
Attributes:
gcs_destination (google.cloud.asset_v1.types.IamPolicyAnalysisOutputConfig.GcsDestination):
Destination on Cloud Storage.
bigquery_destination (google.cloud.asset_v1.types.IamPolicyAnalysisOutputConfig.BigQueryDestination):
Destination on BigQuery.
"""
class GcsDestination(proto.Message):
r"""A Cloud Storage location.
Attributes:
uri (str):
Required. The uri of the Cloud Storage object. It's the same
uri that is used by gsutil. Example:
"gs://bucket_name/object_name". See `Viewing and Editing
Object
Metadata <https://cloud.google.com/storage/docs/viewing-editing-metadata>`__
for more information.
If the specified Cloud Storage object already exists and
there is no
`hold <https://cloud.google.com/storage/docs/object-holds>`__,
it will be overwritten with the analysis result.
"""
uri = proto.Field(proto.STRING, number=1,)
class BigQueryDestination(proto.Message):
r"""A BigQuery destination.
Attributes:
dataset (str):
Required. The BigQuery dataset in format
"projects/projectId/datasets/datasetId", to which the
analysis results should be exported. If this dataset does
not exist, the export call will return an INVALID_ARGUMENT
error.
table_prefix (str):
Required. The prefix of the BigQuery tables to which the
analysis results will be written. Tables will be created
based on this table_prefix if not exist:
- <table_prefix>_analysis table will contain export
operation's metadata.
- <table_prefix>_analysis_result will contain all the
[IamPolicyAnalysisResult][google.cloud.asset.v1.IamPolicyAnalysisResult].
When [partition_key] is specified, both tables will be
partitioned based on the [partition_key].
partition_key (google.cloud.asset_v1.types.IamPolicyAnalysisOutputConfig.BigQueryDestination.PartitionKey):
The partition key for BigQuery partitioned
table.
write_disposition (str):
Optional. Specifies the action that occurs if the
destination table or partition already exists. The following
values are supported:
- WRITE_TRUNCATE: If the table or partition already exists,
BigQuery overwrites the entire table or all the
partitions data.
- WRITE_APPEND: If the table or partition already exists,
BigQuery appends the data to the table or the latest
partition.
- WRITE_EMPTY: If the table already exists and contains
data, an error is returned.
The default value is WRITE_APPEND. Each action is atomic and
only occurs if BigQuery is able to complete the job
successfully. Details are at
https://cloud.google.com/bigquery/docs/loading-data-local#appending_to_or_overwriting_a_table_using_a_local_file.
"""
class PartitionKey(proto.Enum):
r"""This enum determines the partition key column for the
bigquery tables. Partitioning can improve query performance and
reduce query cost by filtering partitions. Refer to
https://cloud.google.com/bigquery/docs/partitioned-tables for
details.
"""
PARTITION_KEY_UNSPECIFIED = 0
REQUEST_TIME = 1
dataset = proto.Field(proto.STRING, number=1,)
table_prefix = proto.Field(proto.STRING, number=2,)
partition_key = proto.Field(
proto.ENUM,
number=3,
enum="IamPolicyAnalysisOutputConfig.BigQueryDestination.PartitionKey",
)
write_disposition = proto.Field(proto.STRING, number=4,)
gcs_destination = proto.Field(
proto.MESSAGE, number=1, oneof="destination", message=GcsDestination,
)
bigquery_destination = proto.Field(
proto.MESSAGE, number=2, oneof="destination", message=BigQueryDestination,
)
class AnalyzeIamPolicyLongrunningRequest(proto.Message):
r"""A request message for
[AssetService.AnalyzeIamPolicyLongrunning][google.cloud.asset.v1.AssetService.AnalyzeIamPolicyLongrunning].
Attributes:
analysis_query (google.cloud.asset_v1.types.IamPolicyAnalysisQuery):
Required. The request query.
output_config (google.cloud.asset_v1.types.IamPolicyAnalysisOutputConfig):
Required. Output configuration indicating
where the results will be output to.
"""
analysis_query = proto.Field(
proto.MESSAGE, number=1, message="IamPolicyAnalysisQuery",
)
output_config = proto.Field(
proto.MESSAGE, number=2, message="IamPolicyAnalysisOutputConfig",
)
class AnalyzeIamPolicyLongrunningResponse(proto.Message):
r"""A response message for
[AssetService.AnalyzeIamPolicyLongrunning][google.cloud.asset.v1.AssetService.AnalyzeIamPolicyLongrunning].
"""
__all__ = tuple(sorted(__protobuf__.manifest))
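# Illustrative sketch (not part of the generated module): how these message
# classes compose when building an export request. The parent and the bucket
# uri below are placeholder values, not real resources.
#
# request = ExportAssetsRequest(
#     parent="projects/123",
#     content_type=ContentType.RESOURCE,
#     output_config=OutputConfig(
#         gcs_destination=GcsDestination(uri="gs://example-bucket/assets-export"),
#     ),
# )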
|
apache-2.0
| -8,524,429,891,240,930,000 | 44.617266 | 169 | 0.636623 | false |
Comunitea/CMNT_00098_2017_JIM_addons
|
partner_consolidate/models/res_partner.py
|
1
|
1064
|
# -*- coding: utf-8 -*-
# Copyright 2017 Omar Castiñeira, Comunitea Servicios Tecnológicos S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields
class ResPartner(models.Model):
_inherit = "res.partner"
consolidate = fields.Boolean("Consolidate", company_dependent=True,
help="Allow consolidation when invoicing")
def _find_accounting_partner(self, partner):
''' Find the partner for which the accounting entries will be created '''
if partner.consolidate and partner.parent_id:
return partner.parent_id.commercial_partner_id
else:
if not partner.parent_id:
return partner.commercial_partner_id
else:
if partner.parent_id.consolidate and \
partner.parent_id.parent_id:
return partner.parent_id.parent_id
else:
return partner.commercial_partner_id
return partner.commercial_partner_id
|
agpl-3.0
| 1,795,314,826,486,858,200 | 35.62069 | 81 | 0.613936 | false |
huiyiqun/check_mk
|
tests/pylint/test_pylint_check_plugins.py
|
1
|
1148
|
#!/usr/bin/python
# encoding: utf-8
import os
import sys
from testlib import repo_path, cmc_path
import testlib.pylint_cmk as pylint_cmk
def test_pylint_checks(pylint_test_dir):
f = file(pylint_test_dir + "/cmk-checks.py", "w")
# Fake data structures where checks register (See cmk_base/checks.py)
f.write("""
check_info = {}
check_includes = {}
precompile_params = {}
check_default_levels = {}
factory_settings = {}
check_config_variables = []
snmp_info = {}
snmp_scan_functions = {}
active_check_info = {}
special_agent_info = {}
""")
# add the modules
pylint_cmk.add_file(f, repo_path() + "/cmk_base/check_api.py")
# Now add the checks
for path in pylint_cmk.check_files(repo_path() + "/checks"):
pylint_cmk.add_file(f, path)
f.close()
exit_code = pylint_cmk.run_pylint(pylint_test_dir)
assert exit_code == 0, "PyLint found an error in checks, inventory " \
"or agent bakery plugins"
|
gpl-2.0
| -3,343,809,612,867,096,000 | 29.210526 | 74 | 0.543554 | false |
thinkle/gourmet
|
gourmet/plugins/import_export/mealmaster_plugin/mealmaster_importer.py
|
1
|
22802
|
from gourmet.importers import importer, plaintext_importer
import re, os.path, string, array
from gourmet import convert, check_encodings
from gourmet.gdebug import debug,TimeAction
#from gourmet.gglobals import gt
from gettext import gettext as _
class mmf_constants:
def __init__ (self):
self.committed = False
self.recattrs={'Title':'title',
'Name':'title',
'Categories':'category',
'Category':'category',
'Serves':'servings',
'Servings':'servings',
'Source':'source',
'Recipe by':'source',
'Yield':'yields',
'Preparation Time':'preptime',
}
self.unit_conv = {'ts':'tsp',
'tb':'Tbs',
'sm':'small',
'md':'medium',
'ea':'',
'lg':'large',
'c':'c',
'pn':'pinch',
'ds':'dash',
'T' : 'tbs',
't' : 'tsp',
'pk' : 'package',
'x' : '',
'ea' : '',
't' : 'tsp',
'pt' : 'pt',
'qt' : 'qt',
'oz' : 'oz'
}
self.unit_convr = {}
for k,v in list(self.unit_conv.items()):
self.unit_convr[v]=k
mmf=mmf_constants()
mm_start_pattern=r"^(?i)([m-][m-][m-][m-][m-])-*.*(recipe|meal-?master).*"
class mmf_importer (plaintext_importer.TextImporter):
"""Mealmaster(tm) importer class.
We read in a text file a line at a time and parse
attributes/ingredients/instructions as best we can.
We're following, more or less, the specs laid out here
<http://phprecipebook.sourceforge.net/docs/MM_SPEC.DOC>
The problem with Mealmaster(tm) files is that they rarely conform
to the above spec. So, we are a bit more flexible -- we can
handle one or two columns of ingredients for example. However,
it's hard to handle all cases. Also, mealmaster (as described in
the above spec) allows for essentially a continuous flow of text
with ingredient and text blocks interspersed. Gourmet separates
out the ingredients from the instructions, which means that we
have to change the presentation of mealmaster files when they
intersperse instructions and ingredients.
To allow for this flexibility also means we are less flexible
about both instructions and ingredients: instructions that look
like ingredients or ingredients that look like instructions will
be parsed the wrong way, regardless of their position in the file,
since the spec above does not specify that mealmaster files must
follow the normal pattern.
The result is that anyone importing large numbers of mealmaster
files from various internet sources should expect to tweak files
by hand with some frequency.
"""
committed = False
def __init__ (self,filename='Data/mealmaster.mmf',
prog=None, source=None,threaded=True,
two_col_minimum=38,conv=None):
"""filename is the file to parse (or filename). rd is the recData instance
to start with. prog is a function we tell about our
prog to (we hand it a single arg)."""
testtimer = TimeAction('mealmaster_importer.__init__',10)
debug("mmf_importer start __init__ ",5)
self.source=source
self.header=False
self.instr=""
self.ingrs=[]
self.ing_added=False
self.in_variation=False
self.fn = filename
self.prog = prog
self.unit_length = 2
self.two_col_minimum = two_col_minimum
self.last_line_was = None
plaintext_importer.TextImporter.__init__(self,filename)#prog=prog,
#threaded=threaded,conv=conv)
testtimer.end()
def compile_regexps (self):
testtimer = TimeAction('mealmaster_importer.compile_regexps',10)
debug("start compile_regexps",5)
plaintext_importer.TextImporter.compile_regexps(self)
self.start_matcher = re.compile(mm_start_pattern)
self.end_matcher = re.compile(r"^[M-][M-][M-][M-][M-]\s*$")
self.group_matcher = re.compile(r"^\s*([M-][M-][M-][M-][M-])-*\s*([^-]+)\s*-*|^\s*---\s*([^-]+)\s*---\s*$",re.IGNORECASE)
self.ing_cont_matcher = re.compile(r"^\s*[-;]")
self.ing_opt_matcher = re.compile(r"(.+?)\s*\(?\s*optional\)?\s*$",re.IGNORECASE)
self.ing_or_matcher = re.compile("^[- ]*[Oo][Rr][- ]*$",re.IGNORECASE)
self.variation_matcher = re.compile(r"^\s*(VARIATION|HINT|NOTES?)(:.*)?",re.IGNORECASE)
# a crude ingredient matcher -- we look for two numbers,
# intermingled with spaces followed by a space or more,
# followed by a two digit unit (or spaces)
c = convert.get_converter()
self.ing_num_matcher = re.compile(
r"^\s*%s+\s+([a-z ]{1,2}|%s)\s+.*\w+.*"%(
convert.NUMBER_REGEXP,
'('+'|'.join([x for x in list(c.unit_dict.keys()) if x])+')'
),
re.IGNORECASE)
self.amt_field_matcher = re.compile(r"^(\s*%s\s*)$"%convert.NUMBER_REGEXP)
# we build a regexp to match anything that looks like
# this: ^\s*ATTRIBUTE: Some entry of some kind...$
self.mmf = mmf
attrmatch=r"^\s*("
for k in list(self.mmf.recattrs.keys()):
attrmatch += "%s|"%re.escape(k)
attrmatch=r"%s):\s*(.*)\s*$"%attrmatch[0:-1]
self.attr_matcher = re.compile(attrmatch)
testtimer.end()
def handle_line (self,l):
"""Handle an individual line of a mealmaster file.
We're quite loose at handling mealmaster files. We look at
each line and determine what it is most likely to be:
ingredients and instructions can be intermingled: instructions
will simply be added to the instructions and ingredients to
the ingredient list. This may result in loss of information
(for instructions that specifically follow ingredients) or in
mis-parsing (for instructions that look like ingredients). But
we're following, more or less, the specs laid out here
<http://phprecipebook.sourceforge.net/docs/MM_SPEC.DOC>"""
testtimer =TimeAction('mealmaster_importer.handle_line',10)
debug("start handle_line",10)
#gt.gtk_update()
if self.start_matcher.match(l):
debug("recipe start %s"%l,4)
if 'Windows Gourmet' in l:
self.unit_length = 15
self.new_rec()
self.last_line_was = 'new_rec'
self.in_variation = False
return
if self.end_matcher.match(l):
debug("recipe end %s"%l,4)
self.commit_rec()
self.last_line_was = 'end_rec'
return
groupm = self.group_matcher.match(l)
if groupm:
debug("new group %s"%l,4)
self.handle_group(groupm)
self.last_line_was = 'group'
return
attrm = self.attr_matcher.match(l)
if attrm:
debug('Found attribute in %s'%l,4)
attr,val = attrm.groups()
debug("Writing attribute, %s=%s"%(attr,val),4)
self.rec[self.mmf.recattrs[attr]]=val.strip()
self.last_line_was = 'attr'
return
if not self.instr and self.blank_matcher.match(l):
debug('ignoring blank line before instructions',4)
self.last_line_was = 'blank'
return
if self.variation_matcher.match(l):
debug('in variation',4)
self.in_variation = True
if self.is_ingredient(l) and not self.in_variation:
debug('in ingredient',4)
contm = self.ing_cont_matcher.match(l)
if contm:
# only continuations after ingredients are ingredients
if self.ingrs and self.last_line_was == 'ingr':
debug('continuing %s'%self.ingrs[-1][0],4)
continuation = " %s"%l[contm.end():].strip()
self.ingrs[-1][0] += continuation
self.last_line_was = 'ingr'
else:
self.instr += l
self.last_line_was = 'instr'
else:
self.last_line_was = 'ingr'
self.ingrs.append([l,self.group])
else:
## otherwise, we assume a line of instructions
if self.last_line_was == 'blank': add_blank=True
else: add_blank = False
if self.in_variation:
debug('Adding to instructions: %s'%l,4)
self.last_line_was = 'mod'
add_to = 'mod'
else:
debug('Adding to modifications: %s'%l,4)
self.last_line_was = 'instr'
add_to = 'instr'
if getattr(self,add_to):
if add_blank: setattr(self,add_to,
getattr(self,add_to)+"\n")
setattr(self,add_to,
getattr(self,add_to) + l.strip() + "\n")
else:
setattr(self,add_to,
l.strip() + "\n")
testtimer.end()
def is_ingredient (self, l):
"""Return true if the line looks like an ingredient.
We're going to go with a somewhat hackish approach
here. Once we have the ingredient list, we can determine
columns more appropriately. For now, we'll assume that a
field that starts with at least 5 blanks (the specs suggest 7)
or a field that begins with a numeric value is an ingredient"""
testtimer = TimeAction('mealmaster_importer.is_ingredient',10)
if self.ing_num_matcher.match(l):
testtimer.end()
return True
if len(l) >= 7 and self.blank_matcher.match(l[0:5]):
testtimer.end()
return True
def new_rec (self):
"""Start a new recipe."""
testtimer = TimeAction('mealmaster_importer.new_rec',10)
debug("start new_rec",5)
if self.rec:
# this shouldn't happen if recipes are ended properly
# but we'll be graceful if a recipe starts before another
# has ended...
self.commit_rec()
self.committed=False
self.start_rec()
debug('resetting instructions',5)
self.instr=""
self.mod = ""
self.ingrs=[]
self.header=False
testtimer.end()
def commit_rec (self):
"""Commit our recipe to our database."""
testtimer = TimeAction('mealmaster_importer.commit_rec',10)
if self.committed: return
debug("start _commit_rec",5)
self.instr = self.unwrap_lines(self.instr)
self.mod = self.unwrap_lines(self.mod)
self.rec['instructions']=self.instr
if self.mod:
self.rec['modifications']=self.mod
self.parse_inglist()
if self.source:
self.rec['source']=self.source
importer.Importer.commit_rec(self)
# blank rec
self.committed = True
self.in_variation=False
testtimer.end()
def handle_group (self, groupm):
"""Start a new ingredient group."""
testtimer = TimeAction('mealmaster_importer.handle_group',10)
debug("start handle_group",10)
# the only group of the match will contain
# the name of the group. We'll put it into
# a more sane title case (MealMaster defaults
# to all caps
name = groupm.groups()[1]
if not name:
name = groupm.groups()[2]
if not name:
return
name = name.strip().title()
self.group=name
#if re.match('^[^A-Za-z]*$',self.group): self.group=None #WTF was this for?
testtimer.end()
# a blank line before a group could fool us into thinking
# we were in instructions. If we see a group heading,
# we know that's not the case!
def find_ing_fields (self):
"""Find fields in an ingredient line."""
testtimer = TimeAction('mealmaster_importer.find_ing_fields',10)
all_ings = [i[0] for i in self.ingrs]
fields = find_fields(all_ings)
fields_is_numfield = fields_match(all_ings,fields,self.amt_field_matcher)
#fields = [[r,field_match(all_ings,r,self.amt_field_matcher)] for r in find_fields(all_ings)]
aindex,afield = self.find_amt_field(fields,fields_is_numfield)
if aindex != None:
fields = fields[aindex+1:]
fields_is_numfield = fields_is_numfield[aindex+1:]
ufield = fields and self.find_unit_field(fields,fields_is_numfield)
if ufield:
fields = fields[1:]
fields_is_numfield = fields_is_numfield[1:]
if fields:
ifield = [fields[0][0],None]
else:
ifield = 0,None
retval = [[afield,ufield,ifield]]
sec_col_fields = [x for x in fields if x[0]>self.two_col_minimum]
if sec_col_fields:
ibase = fields.index(sec_col_fields[0])
while sec_col_fields and not fields_is_numfield[ibase]:
ibase += 1
sec_col_fields = sec_col_fields[1:]
# if we might have a 2nd column...
if sec_col_fields and len(sec_col_fields) > 2:
fields_is_numfield = fields_is_numfield[ibase:]
aindex2,afield2 = self.find_amt_field(sec_col_fields,fields_is_numfield)
if aindex2 != None and len(sec_col_fields[aindex2+1:]) >= 1:
# then it's a go! Shift our first ifield
retval[0][2]=[ifield[0],fields[ibase-1][1]]
sec_col_fields = sec_col_fields[aindex2 + 1:]
fields_is_numfield = fields_is_numfield[aindex2+1:]
ufield2 = self.find_unit_field(sec_col_fields,fields_is_numfield)
if ufield2:
sec_col_fields=sec_col_fields[1:]
fields_is_numfield = fields_is_numfield[1:]
ifield2 = sec_col_fields[0][0],None
retval.append([afield2,ufield2,ifield2])
testtimer.end()
return retval
def find_unit_field (self, fields, fields_is_numfield):
testtimer = TimeAction('mealmaster_importer.find_unit_field',10)
if 0 < fields[0][1]-fields[0][0] <= self.unit_length and len(fields)>1:
testtimer.end()
return fields[0]
testtimer.end()
def find_amt_field (self, fields, fields_is_numfield):
"""Return amount field and field index for the last amount field.
        In other words, if we have the following fields...
0 1 2 3 4 5 6 7
1 1/2 ts green onions chopped in 1/2
...we will return the index for our first two fields [1] and
we will return the field corresponding to the first two fields
(0,5)
"""
afield = None
aindex = None
for i,f in enumerate(fields):
# if our field is a numeric field...
if fields_is_numfield[i]:
if not afield:
afield = f
aindex = i
# if we our contiguous
elif i == aindex + 1:
afield = [afield[0],f[1]] # give it a new end
aindex = i
else:
return aindex,afield
return aindex, afield
def add_item (self, item):
testtimer = TimeAction('mealmaster_importer.add_item',10)
self.ing['item']=item.strip()
# fixing bug 1061363, potatoes; cut and mashed should become just potatoes
# for keying purposes
key_base = self.ing['item'].split(";")[0]
self.ing['ingkey']=self.km.get_key_fast(key_base)
testtimer.end()
def parse_inglist(self):
        testtimer = TimeAction('mealmaster_importer.parse_inglist',10)
debug("start parse_inglist",5)
"""We handle our ingredients after the fact."""
ingfields =self.find_ing_fields()
debug("ingredient fields are: %s"%ingfields,10)
for s,g in self.ingrs:
for afield,ufield,ifield in ingfields:
self.group = g
amt,u,i = get_fields(s,(afield,ufield,ifield))
debug("""amt:%(amt)s
u:%(u)s
i:%(i)s"""%locals(),0)
# sanity check...
if not amt.strip() and not u.strip():
if not i: continue
# if we have not amt or unit, let's do the right
# thing if this just looks misaligned -- in other words
# if the "item" column has 2 c. parsley, let's just parse
# the damned thing as 2 c. parsley
parsed = self.rd.parse_ingredient(i,conv=self.conv,get_key=False)
if parsed and parsed.get('amount','') and parsed.get('item',''):
amt = "%s"%parsed['amount']
u = parsed.get('unit','')
i = parsed['item']
debug("""After sanity check
amt:%(amt)s
u:%(u)s
i:%(i)s"""%locals(),0)
if amt.strip() or u.strip() or i.strip():
self.start_ing()
if amt:
self.add_amt(amt)
if u:
self.add_unit(u)
optm=self.ing_opt_matcher.match(i)
if optm:
item=optm.groups()[0]
self.ing['optional']=True
else:
item = i
self.add_item(item)
debug("committing ing: %s"%self.ing,6)
self.commit_ing()
testtimer.end()
def add_unit (self, unit):
testtimer = TimeAction('mealmaster_importer.add_unit',10)
unit = unit.strip()
if unit in self.mmf.unit_conv:
unit = self.mmf.unit_conv[unit]
importer.Importer.add_unit(self,unit)
testtimer.end()
def split_fields (strings, char=" "):
testtimer = TimeAction('mealmaster_importer.split_fields',10)
debug("start split_fields",10)
fields=find_fields(strings,char)
    testtimer.end()
    return fields
def fields_match (strings, fields, matcher):
testtimer = TimeAction('mealmaster_importer.fields_match',10)
"""Return an array of True or False values representing
whether matcher is a match for each of fields in string."""
#retarray = array.array('H',[1]*len(fields))
ret = []
for f in fields:
strs = [s[f[0]:f[1]] for s in strings]
matches = [matcher.match(s) and True or False for s in strs]
if True in matches: ret.append(1)
else: ret.append(0)
return ret
#return array.array('H',[True in [matcher.match(s[f[0]:f[1]]) and 1 or 0 for s in strings] for f in fields])
# cycle through each string broken into our fields
#for ff in [[s[f[0]:f[1]] for f in fields] for s in strings]:
# for i,fld in enumerate(ff):
# if fld and retarray[i] and not matcher.match(fld):
# retarray[i]=False
# if not True in retarray: return retarray
#testtimer.end()
#return retarray
def field_match (strings, tup, matcher):
testtimer = TimeAction('mealmaster_importer.field_match',10)
debug("start field_match",10)
if isinstance(matcher, str):
matcher=re.compile(matcher)
for f in [s[tup[0]:tup[1]] for s in strings]:
#f=s[tup[0]:tup[1]]
if f and not matcher.match(f):
testtimer.end()
return False
testtimer.end()
return True
def get_fields (string, tuples):
testtimer = TimeAction('mealmaster_importer.get_fields',10)
debug("start get_fields",10)
lst = []
for t in tuples:
if t:
lst.append(string[t[0]:t[1]])
else:
lst.append("")
testtimer.end()
return lst
def field_width (tuple):
testtimer = TimeAction('mealmaster_importer.field_width',10)
debug("start field_width",10)
if tuple[1]:
testtimer.end()
return tuple[1]-tuple[0]
else:
testtimer.end()
return None
def find_fields (strings, char=" "):
testtimer = TimeAction('mealmaster_importer.find_fields',10)
cols = find_columns(strings, char)
if not cols: return []
cols.reverse()
fields = []
lens = list(map(len,strings))
lens.sort()
end = lens[-1]
last_col = end
for col in cols:
if col == last_col - 1:
end = col
else:
fields.append([col+1,end])
end = col
last_col = col
if end != 0: fields.append([0,end])
fields.reverse()
testtimer.end()
return fields
def find_columns (strings, char=" "):
testtimer = TimeAction('mealmaster_importer.find_columns',10)
"""Return a list of character indices that match char for each string in strings."""
debug("start find_columns",10)
# we start with the columns in the first string
if not strings:
return None
strings=list(strings)
strings.sort(key=len, reverse=True)
columns = [match.start() for match in re.finditer(re.escape(char),strings[0])]
if len(strings)==1:
return columns
# we eliminate all columns that aren't blank for every string
for s in strings:
for c in columns[0:]: # we'll be modifying columns
if c < len(s) and s[c]!=char:
columns.remove(c)
columns.sort()
testtimer.end()
return columns
if __name__ == '__main__':
import gourmet.recipeManager as recipeManager
import tempfile, sys, profile, os.path
print('Testing MealMaster import')
tmpfile = tempfile.mktemp()
import backends.db
rd = backends.db.RecipeManager(tmpfile)
    args = sys.argv[1:]
    if not args: args = ['/home/tom/Projects/recipe/Data/200_Recipes.mmf']
for a in args:
profi = os.path.join(tempfile.tempdir,'MMI_PROFILE')
profile.run("mmf_importer(rd,a,prog=lambda *args: sys.stdout.write('|'),threaded=False)",
profi)
import pstats
p = pstats.Stats(profi)
p.strip_dirs().sort_stats('cumulative').print_stats()
|
gpl-2.0
| -7,832,865,645,741,468,000 | 38.724739 | 129 | 0.549776 | false |
analysiscenter/dataset
|
batchflow/models/tf/vgg.py
|
1
|
4561
|
""" Simonyan K., Zisserman A. "`Very Deep Convolutional Networks for Large-Scale Image Recognition
<https://arxiv.org/abs/1409.1556>`_"
"""
import tensorflow as tf
from . import TFModel
from .layers import conv_block
_VGG16_ARCH = [
(2, 0, 64, 1),
(2, 0, 128, 1),
(3, 0, 256, 1),
(3, 0, 512, 1),
(3, 0, 512, 1)
]
_VGG19_ARCH = [
(2, 0, 64, 1),
(2, 0, 128, 1),
(4, 0, 256, 1),
(4, 0, 512, 1),
(4, 0, 512, 1)
]
_VGG7_ARCH = [
(2, 0, 64, 1),
(2, 0, 128, 1),
(2, 1, 256, 1)
]
class VGG(TFModel):
""" Base VGG neural network
**Configuration**
inputs : dict
dict with keys 'images' and 'labels' (see :meth:`~.TFModel._make_inputs`)
body/arch : list of tuple of int
Each list item contains parameters for one network block as a tuple of 4 ints:
- number of convolution layers with 3x3 kernel
- number of convolution layers with 1x1 kernel
- number of filters in each layer
- whether to downscale the image at the end of the block with max_pooling (2x2, stride=2)
body/block : dict
:func:`.conv_block` parameters
"""
@classmethod
def default_config(cls):
""" Define model defaults. See :meth: `~.TFModel.default_config` """
config = TFModel.default_config()
config['common/conv/use_bias'] = False
config['body/block'] = dict(layout='cna', pool_size=2, pool_strides=2)
config['head'] += dict(layout='Vdf', dropout_rate=.2, units=2)
config['loss'] = 'ce'
return config
def build_config(self, names=None):
""" Define model's architecture configuration. See :meth: `~.TFModel.build_config` """
config = super().build_config(names)
if isinstance(config['head/units'], list):
config['head/units'][-1] = self.num_classes('targets')
else:
config['head/units'] = self.num_classes('targets')
return config
@classmethod
def body(cls, inputs, name='body', **kwargs):
""" Create base VGG layers
Parameters
----------
inputs : tf.Tensor
input tensor
arch : list of tuples
number of 3x3 conv, number of 1x1 conv, number of filters, whether to downscale
name : str
scope name
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body', **kwargs)
arch = kwargs.pop('arch')
if not isinstance(arch, (list, tuple)):
raise TypeError("arch must be list or tuple, but {} was given.".format(type(arch)))
block = kwargs.pop('block')
block = {**block, **kwargs}
x = inputs
with tf.variable_scope(name):
for i, block_cfg in enumerate(arch):
x = cls.block(x, *block_cfg, name='block-%d' % i, **block)
return x
@classmethod
def block(cls, inputs, depth3, depth1, filters, downscale, name='block', **kwargs):
""" A sequence of 3x3 and 1x1 convolutions followed by pooling
Parameters
----------
inputs : tf.Tensor
input tensor
depth3 : int
the number of convolution layers with 3x3 kernel
depth1 : int
the number of convolution layers with 1x1 kernel
filters : int
the number of filters in each convolution layer
downscale : bool
whether to decrease spatial dimension at the end of the block
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body/block', **kwargs)
layout = kwargs.pop('layout') * (depth3 + depth1) + 'p' * downscale
kernels = [3] * depth3 + [1] * depth1
with tf.variable_scope(name):
x = conv_block(inputs, layout=layout, filters=filters, kernel_size=kernels,
name='conv', **kwargs)
x = tf.identity(x, name='output')
return x
class VGG16(VGG):
""" VGG16 network """
@classmethod
def default_config(cls):
config = VGG.default_config()
config['body/arch'] = _VGG16_ARCH
return config
class VGG19(VGG):
""" VGG19 network """
@classmethod
def default_config(cls):
config = VGG.default_config()
config['body/arch'] = _VGG19_ARCH
return config
class VGG7(VGG):
""" VGG7 network """
@classmethod
def default_config(cls):
config = VGG.default_config()
config['body/arch'] = _VGG7_ARCH
return config
|
apache-2.0
| 4,739,125,673,084,060,000 | 27.867089 | 98 | 0.560842 | false |
ojii/django-shop
|
tests/testapp/settings.py
|
1
|
4005
|
# Django settings for example project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Christopher Glass', 'tribaal@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test.sqlite', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Zurich'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'h2%uf!luks79rw^4!5%q#v2znc87g_)@^jf1og!04@&&tsf7*9'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
import django
if django.VERSION[0] < 1 or django.VERSION[1] <3:
MIDDLEWARE_CLASSES.append('cbv.middleware.DeferredRenderingMiddleware')
ROOT_URLCONF = 'testapp.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'polymorphic', # We need polymorphic installed for the shop
'shop', # The django SHOP application
'shop.addressmodel',
'project', # the test project application
)
# The shop settings:
SHOP_CART_MODIFIERS= ['shop.cart.modifiers.rebate_modifiers.BulkRebateModifier']
SHOP_SHIPPING_BACKENDS=['shop.shipping.backends.flat_rate.FlatRateShipping']
# Shop module settings
SHOP_SHIPPING_FLAT_RATE = '10' # That's just for the flat rate shipping backend
|
bsd-3-clause
| -8,624,996,170,369,027,000 | 34.758929 | 122 | 0.698127 | false |
cgqyh/pyalgotrade-mod
|
pyalgotrade/tools/quandl.py
|
1
|
5711
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import datetime
import os
from pyalgotrade import bar
from pyalgotrade.barfeed import quandlfeed
from pyalgotrade.utils import dt
from pyalgotrade.utils import csvutils
import pyalgotrade.logger
# http://www.quandl.com/help/api
def download_csv(sourceCode, tableCode, begin, end, frequency, authToken):
url = "http://www.quandl.com/api/v1/datasets/%s/%s.csv" % (sourceCode, tableCode)
params = {
"trim_start": begin.strftime("%Y-%m-%d"),
"trim_end": end.strftime("%Y-%m-%d"),
"collapse": frequency
}
if authToken is not None:
params["auth_token"] = authToken
return csvutils.download_csv(url, params)
def download_daily_bars(sourceCode, tableCode, year, csvFile, authToken=None):
"""Download daily bars from Quandl for a given year.
:param sourceCode: The dataset's source code.
:type sourceCode: string.
:param tableCode: The dataset's table code.
:type tableCode: string.
:param year: The year.
:type year: int.
:param csvFile: The path to the CSV file to write.
:type csvFile: string.
:param authToken: Optional. An authentication token needed if you're doing more than 50 calls per day.
:type authToken: string.
"""
bars = download_csv(sourceCode, tableCode, datetime.date(year, 1, 1), datetime.date(year, 12, 31), "daily", authToken)
f = open(csvFile, "w")
f.write(bars)
f.close()
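# Usage sketch (assumed dataset codes; "WIKI" and "AAPL" are placeholders):
#
#   download_daily_bars("WIKI", "AAPL", 2013, "WIKI-AAPL-2013.csv")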
def download_weekly_bars(sourceCode, tableCode, year, csvFile, authToken=None):
"""Download weekly bars from Quandl for a given year.
:param sourceCode: The dataset's source code.
:type sourceCode: string.
:param tableCode: The dataset's table code.
:type tableCode: string.
:param year: The year.
:type year: int.
:param csvFile: The path to the CSV file to write.
:type csvFile: string.
:param authToken: Optional. An authentication token needed if you're doing more than 50 calls per day.
:type authToken: string.
"""
begin = dt.get_first_monday(year) - datetime.timedelta(days=1) # Start on a sunday
    end = dt.get_last_monday(year) - datetime.timedelta(days=1)  # End on a Sunday
bars = download_csv(sourceCode, tableCode, begin, end, "weekly", authToken)
f = open(csvFile, "w")
f.write(bars)
f.close()
def build_feed(sourceCode, tableCodes, fromYear, toYear, storage, frequency=bar.Frequency.DAY, timezone=None, skipErrors=False, noAdjClose=False, authToken=None):
"""Build and load a :class:`pyalgotrade.barfeed.quandlfeed.Feed` using CSV files downloaded from Quandl.
CSV files are downloaded if they haven't been downloaded before.
:param sourceCode: The dataset source code.
:type sourceCode: string.
:param tableCodes: The dataset table codes.
:type tableCodes: list.
:param fromYear: The first year.
:type fromYear: int.
:param toYear: The last year.
:type toYear: int.
:param storage: The path were the files will be loaded from, or downloaded to.
:type storage: string.
:param frequency: The frequency of the bars. Only **pyalgotrade.bar.Frequency.DAY** or **pyalgotrade.bar.Frequency.WEEK**
are supported.
:param timezone: The default timezone to use to localize bars. Check :mod:`pyalgotrade.marketsession`.
:type timezone: A pytz timezone.
:param skipErrors: True to keep on loading/downloading files in case of errors.
:type skipErrors: boolean.
:param noAdjClose: True if the instruments don't have adjusted close values.
:type noAdjClose: boolean.
:param authToken: Optional. An authentication token needed if you're doing more than 50 calls per day.
:type authToken: string.
:rtype: :class:`pyalgotrade.barfeed.quandlfeed.Feed`.
"""
logger = pyalgotrade.logger.getLogger("quandl")
ret = quandlfeed.Feed(frequency, timezone)
if noAdjClose:
ret.setNoAdjClose()
if not os.path.exists(storage):
logger.info("Creating %s directory" % (storage))
os.mkdir(storage)
for year in range(fromYear, toYear+1):
for tableCode in tableCodes:
fileName = os.path.join(storage, "%s-%s-%d-quandl.csv" % (sourceCode, tableCode, year))
if not os.path.exists(fileName):
logger.info("Downloading %s %d to %s" % (tableCode, year, fileName))
try:
if frequency == bar.Frequency.DAY:
download_daily_bars(sourceCode, tableCode, year, fileName, authToken)
elif frequency == bar.Frequency.WEEK:
download_weekly_bars(sourceCode, tableCode, year, fileName, authToken)
else:
raise Exception("Invalid frequency")
except Exception, e:
if skipErrors:
logger.error(str(e))
continue
else:
raise e
ret.addBarsFromCSV(tableCode, fileName)
return ret
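# Usage sketch (dataset codes and storage path are placeholders):
#
#   feed = build_feed("WIKI", ["AAPL", "MSFT"], 2011, 2012, "./quandl-data")
#   # 'feed' is a quandlfeed.Feed ready to be plugged into a strategy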
|
apache-2.0
| -7,145,280,726,221,592,000 | 38.386207 | 162 | 0.66941 | false |
evancasey/demeter
|
demeter/unsup/common/image_pool.py
|
1
|
1090
|
import tensorflow as tf
import copy
class ImagePool:
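    """History buffer of generated images: query() stores incoming images until
    the pool is full, then with probability 0.5 swaps a new image for a randomly
    chosen stored one and returns the stored image instead. This is the image
    pool trick used to stabilise discriminator updates in CycleGAN-style
    training."""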
def __init__(self, pool_size):
self.pool_size = pool_size
if self.pool_size > 0:
self.num_imgs = 0
self.images = []
def query(self, images):
if self.pool_size == 0:
return images
ret_imgs = []
for i in range(images.shape[0]):
image = tf.expand_dims(images[i], axis=0)
if self.num_imgs < self.pool_size:
self.num_imgs = self.num_imgs + 1
self.images.append(image)
ret_imgs.append(image)
else:
p = tf.random_uniform((1,), 0, 1).numpy()[0]
if p > 0.5:
random_id = tf.random_uniform((1,), 0, self.pool_size - 1).numpy()[0].astype(int)
tmp = copy.copy(self.images[random_id])
self.images[random_id] = image
ret_imgs.append(tmp)
else:
ret_imgs.append(image)
ret_imgs = tf.concat(ret_imgs, 0)
return ret_imgs
|
mit
| 6,230,843,943,123,238,000 | 33.0625 | 101 | 0.480734 | false |
mvaled/sentry
|
src/sentry/south_migrations/0277_auto__add_commitfilechange__add_unique_commitfilechange_commit_filenam.py
|
1
|
92625
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CommitFileChange'
db.create_table(
'sentry_commitfilechange', (
(
'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
primary_key=True
)
), (
'organization_id',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(
db_index=True
)
), (
'commit', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.Commit']
)
), ('filename', self.gf('django.db.models.fields.CharField')(max_length=255)),
('type', self.gf('django.db.models.fields.CharField')(max_length=1)),
)
)
db.send_create_signal('sentry', ['CommitFileChange'])
# Adding unique constraint on 'CommitFileChange', fields ['commit', 'filename']
db.create_unique('sentry_commitfilechange', ['commit_id', 'filename'])
# Adding field 'Repository.url'
db.add_column(
'sentry_repository',
'url',
self.gf('django.db.models.fields.URLField')(max_length=200, null=True),
keep_default=False
)
# Adding field 'Repository.provider'
db.add_column(
'sentry_repository',
'provider',
self.gf('django.db.models.fields.CharField')(max_length=64, null=True),
keep_default=False
)
# Adding field 'Repository.external_id'
db.add_column(
'sentry_repository',
'external_id',
self.gf('django.db.models.fields.CharField')(max_length=64, null=True),
keep_default=False
)
# Adding field 'Repository.config'
db.add_column(
'sentry_repository',
'config',
self.gf('sentry.db.models.fields.jsonfield.JSONField')(default={}),
keep_default=False
)
# Adding field 'Repository.status'
db.add_column(
'sentry_repository',
'status',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(
default=0, db_index=True
),
keep_default=False
)
# Adding unique constraint on 'Repository', fields ['organization_id',
# 'provider', 'external_id']
db.create_unique('sentry_repository', ['organization_id', 'provider', 'external_id'])
def backwards(self, orm):
# Removing unique constraint on 'Repository', fields ['organization_id',
# 'provider', 'external_id']
db.delete_unique('sentry_repository', ['organization_id', 'provider', 'external_id'])
# Removing unique constraint on 'CommitFileChange', fields ['commit', 'filename']
db.delete_unique('sentry_commitfilechange', ['commit_id', 'filename'])
# Deleting model 'CommitFileChange'
db.delete_table('sentry_commitfilechange')
# Deleting field 'Repository.url'
db.delete_column('sentry_repository', 'url')
# Deleting field 'Repository.provider'
db.delete_column('sentry_repository', 'provider')
# Deleting field 'Repository.external_id'
db.delete_column('sentry_repository', 'external_id')
# Deleting field 'Repository.config'
db.delete_column('sentry_repository', 'config')
# Deleting field 'Repository.status'
db.delete_column('sentry_repository', 'status')
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.apitoken': {
'Meta': {
'object_name': 'ApiToken'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True'
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'token':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authenticator': {
'Meta': {
'unique_together': "(('user', 'type'),)",
'object_name': 'Authenticator',
'db_table': "'auth_authenticator'"
},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2016, 11, 29, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.commit': {
'Meta': {
'unique_together': "(('repository_id', 'key'),)",
'object_name': 'Commit',
'index_together': "(('repository_id', 'date_added'),)"
},
'author': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.CommitAuthor']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'message': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {
'unique_together': "(('organization_id', 'email'),)",
'object_name': 'CommitAuthor'
},
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.commitfilechange': {
'Meta': {
'unique_together': "(('commit', 'filename'),)",
'object_name': 'CommitFileChange'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'filename': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '1'
})
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {
'unique_together': "(('project_id', 'name'),)",
'object_name': 'Environment'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {
'unique_together':
"(('event_id', 'key_id', 'value_id'),)",
'object_name':
'EventTag',
'index_together':
"(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.grouprelease': {
'Meta': {
'unique_together': "(('group_id', 'release_id', 'environment'),)",
'object_name': 'GroupRelease'
},
'environment':
('django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64'
}),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {
'unique_together': "(('group', 'user'),)",
'object_name': 'GroupSubscription'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Project']"
}
),
'reason':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('group', 'key', 'value'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'",
'index_together': "(('project', 'key', 'value', 'last_seen'),)"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'token': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True',
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasecommit': {
'Meta': {
'unique_together': "(('release', 'commit'), ('release', 'order'))",
'object_name': 'ReleaseCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseenvironment': {
'Meta': {
'unique_together': "(('project_id', 'release_id', 'environment_id'),)",
'object_name': 'ReleaseEnvironment',
'db_table': "'sentry_environmentrelease'"
},
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.repository': {
'Meta': {
'unique_together':
"(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))",
'object_name':
'Repository'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'provider':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'url': ('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_password_expired':
('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'session_nonce':
('django.db.models.fields.CharField', [], {
'max_length': '12',
'null': 'True'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useremail': {
'Meta': {
'unique_together': "(('user', 'email'),)",
'object_name': 'UserEmail'
},
'date_hash_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_verified': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'emails'",
'to': "orm['sentry.User']"
}
),
'validation_hash': (
'django.db.models.fields.CharField', [], {
'default': "u'nWSQmbINKkiwvRzlFaq4iWFfAr22O7g3'",
'max_length': '32'
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
|
bsd-3-clause
| -4,519,465,274,450,707,000 | 35.799762 | 97 | 0.40068 | false |
paeschli/scons-builder
|
modules/axtor_check.py
|
1
|
2706
|
from builder.btools import RegisterCustomTest
from builder.btools import AddConfigKey
from builder.bconfig import getAutoconfPrefix
from builder.bconfig import filterOut
from builder.bconfig import Version
def CheckAxtor(ctx, write_config_h=False, add_to_compiler_env=False,
min_version=None, max_version=None):
ctx.Message('Checking for Axtor Backends (OpenCL, GLSL) ... ')
confprefix = getAutoconfPrefix(ctx.env)
key = confprefix +'HAVE_AXTOR'
# if min_version is not None:
# min_version = Version(min_version)
# if max_version is not None:
# max_version = Version(max_version)
# LLVM is required for axtor
if not ctx.env.GetPackage('llvm'):
ctx.Message('LLVM not detected')
if write_config_h:
AddConfigKey(ctx, key, 0)
ctx.Result(0)
return 0
savedVars = ctx.env.RequirePackage('llvm')
axtorCoreLibs = ctx.env.Split("""
axtorMetainfo
axtorWriter
axtorIntrinsics
axtorGenericC
axtorInterface
axtorConsole
axtorPass
axtorParsers
axtorSolvers
axtorCNS
axtorAST
axtorUtil
""")
axtorBackendLibs = ['Axtor_OCL','Axtor_GLSL']
axtorLibs = axtorBackendLibs + axtorCoreLibs
ctx.env.Prepend(LIBS = axtorLibs)
ret, outputStr = ctx.TryRun("""
#include <axtor_ocl/OCLBackend.h>
#include <axtor_glsl/GLSLBackend.h>
int main(int argc, char** argv)
{
axtor::OCLBackend oclBackend;
axtor::GLSLBackend glslBackend;
printf("%d",1);
return 0;
}
""", extension='.cpp')
ctx.env.RestoreVars(savedVars)
if ret:
ctx.env.DeclarePackage('axtor',
vars={'LIBS' : axtorLibs},
dependencies='llvm',
trigger_libs=['Axtor', 'Axtor'],
trigger_frameworks=['Axtor', 'Axtor'])
# if ctx.env.GetPackage('llvm_shared'):
# ctx.env.DeclarePackage('axtor_shared',
# vars={'LIBS' : axtorSharedLibs},
# trigger_libs=['Axtor_shared'],
# trigger_frameworks=['Axtor_shared'])
# define
# if ret:
# ctx.env.DeclarePackage('axtor',
# vars={'LIBS' : axtorLibs,
# 'CPPDEFINES' : key},
# dependencies='llvm',
# trigger_libs=['axtor', 'Axtor'],
# trigger_frameworks=['Axtor'])
ctx.Result(ret)
return ret
RegisterCustomTest('CheckAxtor', CheckAxtor)
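# Illustrative usage sketch (not part of the original module). Once registered
# above, the check is normally exposed on a configure context, much like the
# stock SCons custom-test pattern below; the exact builder.btools context API
# is an assumption here.
#
#   conf = env.Configure(custom_tests={'CheckAxtor': CheckAxtor})
#   if conf.CheckAxtor(write_config_h=True):
#       env.RequirePackage('axtor')
#   env = conf.Finish()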
|
gpl-2.0
| -8,409,912,608,106,378,000 | 30.103448 | 72 | 0.553954 | false |
sassoftware/mint
|
mint/buildtypes.py
|
1
|
14373
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pyflakes=ignore-file
import sys
from conary.deps import deps
validBuildTypes = {
'BOOTABLE_IMAGE' : 0,
'INSTALLABLE_ISO' : 1,
'STUB_IMAGE' : 2,
'RAW_FS_IMAGE' : 3,
'NETBOOT_IMAGE' : 4,
'TARBALL' : 5,
'LIVE_ISO' : 6,
'RAW_HD_IMAGE' : 7,
'VMWARE_IMAGE' : 8,
'VMWARE_ESX_IMAGE' : 9,
'VIRTUAL_PC_IMAGE' : 10,
'XEN_OVA' : 11,
'VIRTUAL_IRON' : 12,
'PARALLELS' : 13,
'AMI' : 14,
'UPDATE_ISO' : 15,
'APPLIANCE_ISO' : 16,
'IMAGELESS' : 17,
'VMWARE_OVF_IMAGE' : 18,
'WINDOWS_ISO' : 19,
'WINDOWS_WIM' : 20,
'DEFERRED_IMAGE' : 21,
'DOCKER_IMAGE' : 22,
}
TYPES = validBuildTypes.values()
# add all the defined image types directly to the module so that the standard
# approach of "buildtypes.IMAGE_TYPE" will result in the expected enum
sys.modules[__name__].__dict__.update(validBuildTypes)
deprecatedBuildTypes = {
'QEMU_IMAGE' : RAW_HD_IMAGE
}
windowsBuildTypes = set([
WINDOWS_ISO,
WINDOWS_WIM,
])
#
# These are identifying pieces of information that we can extract from the
# flavor of a build, but not necessarily tied to any particular build type.
#
# These can sometimes be used as a buildType, indexes starting at 100.
#
flavorFlags = {
'XEN_DOMU': 100,
'APPLIANCE': 101,
}
FLAG_TYPES = flavorFlags.values()
flavorFlagsFromId = dict((x[1], x[0]) for x in flavorFlags.items())
sys.modules[__name__].__dict__.update(flavorFlags)
flavorFlagFlavors = {
XEN_DOMU: "use: xen, domU",
APPLIANCE: "use: appliance",
}
flavorFlagNames = {
XEN_DOMU: "DomU",
APPLIANCE: "Appliance",
}
#BOOTABLE_IMAGE Should never get stored in the DB and therefore doesn't need a name
# NOTA BENE: Using Latin-1 here is harmful to XML-RPC which expects UTF-8
# Until we figure out the root cause, use "(R)" for registered trademark here.
typeNames = {
NETBOOT_IMAGE: "Netboot Image",
INSTALLABLE_ISO: "Installable CD/DVD",
RAW_FS_IMAGE: "Raw Filesystem Image",
STUB_IMAGE: "Stub Image",
RAW_HD_IMAGE: "Raw Hard Disk Image",
VMWARE_IMAGE: "VMware (R) Virtual Appliance",
VMWARE_ESX_IMAGE: "VMware (R) ESX Server Virtual Appliance",
VMWARE_OVF_IMAGE: "VMware (R) Virtual Appliance OVF",
LIVE_ISO: "Demo CD/DVD (Live CD/DVD)",
TARBALL: "Compressed Tar File",
VIRTUAL_PC_IMAGE: "VHD for Microsoft (R) Hyper-V",
XEN_OVA: "Citrix XenServer (TM) Appliance",
VIRTUAL_IRON: "Virtual Iron Virtual Appliance",
PARALLELS: "Parallels Virtual Appliance",
AMI: "Amazon Machine Image (EC2)",
UPDATE_ISO: "Update CD/DVD",
APPLIANCE_ISO: "Appliance Installable ISO",
DEFERRED_IMAGE: "Layered Image",
WINDOWS_ISO: "Windows Installable ISO",
WINDOWS_WIM: "Windows Imaging Format (WIM)",
IMAGELESS: "Online Update",
DOCKER_IMAGE: "Docker Image",
}
typeNamesShort = {
NETBOOT_IMAGE: "Netboot",
INSTALLABLE_ISO: "Inst CD/DVD",
RAW_FS_IMAGE: "Raw FS",
STUB_IMAGE: "Stub",
RAW_HD_IMAGE: "HDD",
VMWARE_IMAGE: "VMware (R)",
VMWARE_ESX_IMAGE: "VMware (R) ESX",
LIVE_ISO: "Demo CD/DVD",
TARBALL: "Tar",
VIRTUAL_PC_IMAGE: "Microsoft (R) Hyper-V",
XEN_OVA: "Citrix XenServer (TM)",
VIRTUAL_IRON: "Virtual Iron",
PARALLELS: "Parallels",
AMI: "AMI",
UPDATE_ISO: "Update CD/DVD",
APPLIANCE_ISO: "Appliance Inst",
DEFERRED_IMAGE: "Layered",
WINDOWS_ISO: "Windows Inst",
WINDOWS_WIM: "Windows WIM",
IMAGELESS: "Online Update",
VMWARE_OVF_IMAGE: "VMware (R) OVF",
DOCKER_IMAGE: "Docker",
}
# To be used to map image types ids from XML tag names
# used the build definition contained within the
# product definition.
#
# Note: Only supported image types are contained here.
# Thus you will not see XML tags for the following:
# - STUB_IMAGE
# - PARALLELS
#
# Furthermore, we don't support IMAGELESS builds
# in the context of a product definition.
#
xmlTagNameImageTypeMap = {
'amiImage': AMI,
'applianceIsoImage': APPLIANCE_ISO,
'deferredImage': DEFERRED_IMAGE,
'dockerImage': DOCKER_IMAGE,
'installableIsoImage': INSTALLABLE_ISO,
'liveIsoImage': LIVE_ISO,
'netbootImage': NETBOOT_IMAGE,
'rawFsImage': RAW_FS_IMAGE,
'rawHdImage': RAW_HD_IMAGE,
'tarballImage': TARBALL,
'updateIsoImage': UPDATE_ISO,
'vhdImage': VIRTUAL_PC_IMAGE,
'virtualIronImage': VIRTUAL_IRON,
'vmwareImage': VMWARE_IMAGE,
'vmwareEsxImage': VMWARE_ESX_IMAGE,
'vmwareOvfImage': VMWARE_OVF_IMAGE,
'xenOvaImage': XEN_OVA,
'imageless': IMAGELESS,
'windowsIsoImage': WINDOWS_ISO,
'wimImage': WINDOWS_WIM,
}
imageTypeXmlTagNameMap = dict([(v,k) for k,v in xmlTagNameImageTypeMap.iteritems()])
typeNamesMarketing = {
NETBOOT_IMAGE: "Netboot Image",
INSTALLABLE_ISO: "Legacy Installable CD/DVD",
RAW_FS_IMAGE: "Eucalyptus/Mountable Filesystem",
STUB_IMAGE: "Stub Image",
RAW_HD_IMAGE: "OpenStack/KVM/QEMU/Raw Hard Disk",
VMWARE_IMAGE: "VMware(R) Workstation/Fusion / Parallels(R) Virtual Appliance",
VMWARE_ESX_IMAGE: "VMware(R) ESX/VCD / Oracle(R) VirtualBox Virtual Appliance",
VMWARE_OVF_IMAGE: "VMware(R) Virtual Appliance OVF",
LIVE_ISO: "Demo CD/DVD (Live CD/DVD)",
TARBALL: "TAR File",
VIRTUAL_PC_IMAGE: "VHD for Microsoft(R) Hyper-V(R)",
XEN_OVA: "Citrix(R) XenServer(TM) Appliance",
VIRTUAL_IRON: "Virtual Iron Virtual Appliance",
PARALLELS: "Parallels(R) Virtual Appliance",
AMI: "Amazon Machine Image (EC2)",
UPDATE_ISO: "Update CD/DVD",
APPLIANCE_ISO: "Appliance Installable ISO",
DEFERRED_IMAGE: "Layered Image",
WINDOWS_ISO: "Installable CD/DVD (ISO)",
WINDOWS_WIM: "Windows Imaging Format (WIM)",
IMAGELESS: "Online Update",
DOCKER_IMAGE: "Docker Image",
# flavor flags here
XEN_DOMU: "DomU",
APPLIANCE: "Appliance",
}
buildTypeExtra = {
APPLIANCE_ISO: "This image type will not work without using "
"a version of anaconda-templates based on "
"rPath Linux 2.",
IMAGELESS: "Select this image type to mark a group for "
"later publishing to an Update Service."
}
buildTypeIcons = {
VMWARE_IMAGE: dict(
icon="get-vmware-player.png",
href="http://www.vmware.com/download/player/",
text="Download VMware Player"),
RAW_HD_IMAGE: dict(
icon="get-parallels.png",
href="http://www.parallels.com/",
text="Try Parallels Workstation 2.2"),
VIRTUAL_IRON: dict(
icon="get-virtual-iron.png",
href="http://www.virtualiron.com/free",
text="Virtual Iron: Download Now"),
XEN_OVA: dict(
icon="get-xen-express.gif",
href="http://www.citrix.com/xenserver/getexpress",
text="Citrix XenServer Express Edition: Download Now",
),
VIRTUAL_PC_IMAGE: dict(
icon="get-hyper-v.png",
href="http://www.microsoft.com/Hyper-V",
text="Learn more about Microsoft Hyper-V",
),
}
typeFlavorOverride = {
(RAW_HD_IMAGE, XEN_DOMU): dict(
marketingName="Raw Hard Disk Image",
icon=False,
),
}
# sizes are listed in bytes...
discSizes = {
'CD: 650 MB' : '681574400',
'CD: 700 MB' : '734003200',
'DVD: 4.7 GB' : '4700000000',
'DVD: 8.5 GB' : '8500000000',
}
buildDefinitionFlavorTypes = {
'BD_GENERIC_X86' : 0,
'BD_GENERIC_X86_64' : 1,
'BD_DOM0_X86' : 2,
'BD_DOM0_X86_64' : 3,
'BD_DOMU_X86' : 4,
'BD_DOMU_X86_64' : 5,
'BD_VMWARE_X86' : 6,
'BD_VMWARE_X86_64' : 7,
}
sys.modules[__name__].__dict__.update(buildDefinitionFlavorTypes)
buildDefinitionFlavorMap = {
BD_GENERIC_X86 : '!dom0, !domU, !xen, !vmware is: x86',
BD_GENERIC_X86_64 : '!dom0, !domU, !xen, !vmware is: x86_64',
BD_DOM0_X86 : 'dom0, !domU, xen, !vmware is: x86',
BD_DOM0_X86_64 : 'dom0, !domU, xen, !vmware is: x86_64',
BD_DOMU_X86 : '!dom0, domU, xen, !vmware is: x86',
BD_DOMU_X86_64 : '!dom0, domU, xen, !vmware is: x86_64',
BD_VMWARE_X86 : '!dom0, !domU, !xen, vmware is: x86',
BD_VMWARE_X86_64 : '!dom0, !domU, !xen, vmware is: x86_64',
}
def alphabatizeBuildTypes(visibleBuildTypes):
sortedList = sorted([x for x in visibleBuildTypes if x != IMAGELESS],
key = lambda x: typeNames.get(x))
if IMAGELESS in visibleBuildTypes:
sortedList.insert(0, IMAGELESS)
return sortedList
def makeBuildFlavorMap(prd):
baseFlavor = prd.getBaseFlavor() or prd.getPlatformBaseFlavor() or ''
baseFlavor = deps.parseFlavor(baseFlavor)
flavorSets = prd.getFlavorSets()
architectures = prd.getArchitectures()
if prd.platform:
flavorSets += prd.platform.getFlavorSets()
architectures = prd.platform.getArchitectures()
res = {}
for flavorSet in flavorSets:
for architecture in architectures:
flv = deps.parseFlavor(flavorSet.flavor)
arch = deps.parseFlavor(architecture.flavor)
flavor = deps.overrideFlavor(baseFlavor, flv)
flavor = deps.overrideFlavor(flavor, arch)
res[str(flavor)] = \
"%s %s" % (flavorSet.displayName, architecture.displayName)
return res
def makeFlavorMap(prd):
flavorSets = prd.getFlavorSets()
architectures = prd.getArchitectures()
if prd.platform:
flavorSets += prd.platform.getFlavorSets()
architectures += prd.platform.getArchitectures()
return dict([("%s %s" % (x.displayName, y.displayName),
"%s,%s" % (x.name, y.name)) \
for x in flavorSets for y in architectures])
def makeFlavorsForBuild(prd, key):
# compose a flavor map much like above but filter illegal types
flavorSets = prd.getFlavorSets()
architectures = prd.getArchitectures()
buildTemplates = prd.getBuildTemplates()
if prd.platform:
flavorSets += prd.platform.getFlavorSets()
architectures += prd.platform.getArchitectures()
buildTemplates += prd.platform.getBuildTemplates()
containerTemplateRef = imageTypeXmlTagNameMap.get(key)
if not containerTemplateRef:
return makeFlavorMap(prd)
# for arch and flavorSet, if None is encountered, all available types
# are legal
arches = set([x.architectureRef for x in buildTemplates \
if x.containerTemplateRef == containerTemplateRef])
arches = [x for x in architectures if None in arches or x.name in arches]
flavors = set([x.flavorSetRef for x in buildTemplates \
if x.containerTemplateRef == containerTemplateRef])
flavors = [x for x in flavorSets if None in flavors or x.name in flavors]
return dict([("%s %s" % (x.displayName, y.displayName),
"%s,%s" % (x.name, y.name)) \
for x in flavors for y in arches])
# generate mapping of flavors to flavor names
buildDefinitionFlavorToFlavorMapRev = \
dict((x[1], x[0]) for x in buildDefinitionFlavorMap.iteritems())
buildDefinitionFlavorNameMap = {
BD_GENERIC_X86 : 'Generic x86 (32-bit)',
BD_GENERIC_X86_64 : 'Generic x86 (64-bit)',
BD_DOM0_X86 : 'dom0 x86 (32-bit)',
BD_DOM0_X86_64 : 'dom0 x86 (64-bit)',
BD_DOMU_X86 : 'domU x86 (32-bit)',
BD_DOMU_X86_64 : 'domU x86 (64-bit)',
BD_VMWARE_X86 : 'VMware x86 (32-bit)',
BD_VMWARE_X86_64 : 'VMware x86 (64-bit)',
}
# a mapping of build types to supported flavors. If a build type does not
# exist in this map, it is assumed it supports all flavors. The first flavor
# is assumed to be the default.
buildDefinitionSupportedFlavorsMap = {
VMWARE_IMAGE : [BD_VMWARE_X86, BD_VMWARE_X86_64],
VMWARE_ESX_IMAGE : [BD_VMWARE_X86, BD_VMWARE_X86_64],
XEN_OVA : [BD_DOMU_X86, BD_DOMU_X86_64],
AMI : [BD_DOMU_X86, BD_DOMU_X86_64],
}
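# Illustrative helpers (not part of the original module): they apply the
# fallback described in the comment above -- build types missing from the map
# support every defined flavor, and the first listed flavor acts as the default.
def _exampleSupportedFlavors(buildType):
    return buildDefinitionSupportedFlavorsMap.get(
        buildType, sorted(buildDefinitionFlavorTypes.values()))
def _exampleDefaultFlavor(buildType):
    # e.g. VMWARE_IMAGE -> BD_VMWARE_X86, TARBALL -> BD_GENERIC_X86
    return _exampleSupportedFlavors(buildType)[0]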
# code generator run by make to generate javascript constants
# should only be run by the makefile in mint/web/content/javascript
def codegen():
s = "// this Javascript was generated by mint/buildtypes.py\n"
s += "// do not edit or check into source control\n"
s += "var maxBuildType = %d;" % max(validBuildTypes.values())
s += "var buildTypeNames = {"
i = []
for k, v in typeNames.items():
i.append(" '%d': '%s'" % (k, v,))
s += ", ".join(i)
s += "};"
s += "var buildTypeNamesShort = {"
i = []
for k, v in typeNamesShort.items():
i.append(" '%d': '%s'" % (k, v,))
s += ", ".join(i)
s += "};"
s += "var buildTypeNamesMarketing = {"
i = []
for k, v in typeNamesMarketing.items():
i.append(" '%d': '%s'" % (k, v,))
s += ", ".join(i)
s += "};"
for k, v in validBuildTypes.items():
s += "%s = %d;\n" % (k, v)
return s
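# For reference (illustrative, shape only): the string returned above looks
# roughly like
#   // this Javascript was generated by mint/buildtypes.py
#   // do not edit or check into source control
#   var maxBuildType = 22;var buildTypeNames = { '<id>': '<name>', ... };...
#   BOOTABLE_IMAGE = 0;
#   ...
#   DOCKER_IMAGE = 22;
# with one "NAME = id;" line per entry in validBuildTypes (dict order).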
if __name__ == "__main__": #pragma: no cover
if len(sys.argv) > 1 and sys.argv[1] == "--genjs":
print codegen()
sys.exit(0)
else:
sys.exit(1)
|
apache-2.0
| -8,518,906,341,440,424,000 | 33.970803 | 88 | 0.598066 | false |
KIOS-Research/effinet-smart-water-game
|
test.py
|
1
|
6635
|
# -*- coding: cp1253 -*-
from tkinter import *
from time import sleep
def create(w, x1, y1):
w.place(x=x1, y=y1)
def erase(w):
w.destroy()
def reset(w):
w.destroy()
start()
def exit(w):
w.destroy()
def e_q1(root, counter, step):
TL = Toplevel()
w, h = TL.winfo_screenwidth(), TL.winfo_screenheight()
TL.overrideredirect(1)
TL.geometry("%dx%d+0+0" % (w, h))
a01 = 0
a02 = 0
a03 = 0
if step == 1:
question = "Question 1: How much of Earth's water is salty and undrinkable?"
a1 = "37%"
a2 = "97%"
a3 = "67%"
backfile = "1.gif" # effinet
solution = "1a.gif"
a02 = 1
elif step == 2:
question = "Question 2: How much water do Europeans use per day on average?"
a1 = "50 Liters"
a2 = "150 Liters"
a3 = "10 Liters"
solution = ""
backfile = "2.gif" # William Newman
a02 = 1
elif step == 3:
question = "Question 3: Which substance do water companies use to kill bacteria in water?"
a1 = "Soap"
a2 = "Citric Acid"
a3 = "Chlorine"
solution = ""
backfile = "3.gif" # Jacob Vanderheyden
a03 = 1
elif step == 4:
question = "Question 4: How much water is lost due to leakages in Cyprus?"
a1 = "Around 20%"
a2 = "Around 50%"
a3 = "Around 12%"
solution = ""
backfile = "4.gif" # Pete
a01 = 1
elif step == 5:
question = "Question 5: What is the energy cost to deliver water to consumers in Barcelona, Spain?"
a1 = "7 Million Euros"
a2 = "700,000 Euros"
a3 = "70 Million Euros"
solution = ""
backfile = "5.gif" #
a01 = 1
elif step == 6:
question = "Question 6: How water utilities detect leakages?"
a1 = "Using many sensors"
a2 = "Monitoring night flow increase"
a3 = "Consumer complaints"
solution = ""
backfile = "6.gif" #
a02 = 1
elif step == 7:
question = "Question 7: A water tank is equivalent to:"
a1 = "A battery"
a2 = "A lamp"
a3 = "A switch"
backfile = "7.gif" #
solution = ""
a01 = 1
elif step == 8:
question = "Question 8: The most energy consumption in a water network goes for"
a1 = "Disinfection System"
a2 = "ICT Functions"
a3 = "Pump operations"
solution = ""
backfile = "8.gif" #
a03 = 1
elif step == 9:
question = "Question 9: How can we reduce energy usage in water networks?"
a1 = "Use pumps during off-peak hours"
a2 = "Use ground water"
a3 = "Increase water prices"
solution = ""
backfile = "9.gif" #
a01 = 1
elif step == 10:
question = "Question 10: In the future, water utilities will"
a1 = "Communicate information to the consumers"
a2 = "Get information directly from the consumers"
a3 = "Both of the above"
solution = ""
backfile = "10.gif" #
a03 = 1
photo = PhotoImage(file=backfile)
wback = Label(TL, image=photo)
wback.photo = photo
wback.place(x=-5, y=-5)
photo = PhotoImage(file="logo2.gif")
wlogo = Label(TL, image=photo)
wlogo.photo = photo
wlogo.place(x=1050, y=100)
l = Label(TL, text=question, font="Verdana 20", bg="Plum", pady=10)
l.pack(side=TOP)
b2 = Button(TL, text=a1, bd=10, width=35, font="Verdana 11 bold", bg="Darkred", fg="White",
command=lambda: e_correct1(root, TL, a01, counter, step,solution))
b2.pack()
b2.place(x=500, y=250)
b3 = Button(TL, text=a2, bd=10, width=35, font="Verdana 11 bold", bg="Darkred", fg="White",
command=lambda: e_correct1(root, TL, a02, counter, step,solution))
b3.pack()
b3.place(x=500, y=340)
b2 = Button(TL, text=a3, bd=10, width=35, font="Verdana 11 bold", bg="Darkred", fg="White",
command=lambda: e_correct1(root, TL, a03, counter, step, solution))
b2.pack()
b2.place(x=500, y=430)
# ex = Button(window2, text="EXIT", bd=1, width=6, font="Verdana 10 bold", bg="red", fg="White",
# command=lambda: exit2(window1))
#ex.pack()
#ex.place(x=1168, y=725)
ex1 = Button(TL, text="RESET", bd=1, width=8, font="Verdana 10 bold", bg="red", fg="White",
command=lambda: TL.destroy())
ex1.pack()
ex1.place(x=1048, y=725)
def e_correct1(root, TL, a, counter, step, solution):
#t = Text(TL, text=solution, font="Verdana 20", bg="Plum")
#t.place(100,20)
#l = Label(TL, text=solution, font="Verdana 20", bg="Plum", pady=10)
#l.pack(side=BOTTOM)
photo = PhotoImage(file=solution)
wsol = Label(TL, image=photo)
wsol.photo = photo
wsol.place(x=100, y=100)
if a == 1:
counter += 1
photo = PhotoImage(file="cr.gif")
w = Label(TL, image=photo)
w.photo = photo
w.place(x=570, y=60)
else:
photo = PhotoImage(file="wr.gif")
w = Label(TL, image=photo)
w.photo = photo
w.place(x=570, y=60)
if step < 10:
TL.update()
sleep(3)
e_q1(root, counter, step + 1)
TL.destroy()
else:
sleep(0.5)
backfile = '0.gif'
photo = PhotoImage(file=backfile)
w = Label(TL, image=photo)
w.photo = photo
w.place(x=-5, y=-5)
ex = Button(TL, text="EXIT", bd=1, width=6, font="Verdana 10 bold", bg="red", fg="White",
command=lambda: root.destroy())
ex.pack()
ex.place(x=1168, y=725)
# t= lambda: reset(w)
#window2.after(1500, t)
def start():
root = Tk()
w, h = root.winfo_screenwidth(), root.winfo_screenheight()
root.overrideredirect(1)
root.geometry("%dx%d+0+0" % (w, h))
photo = PhotoImage(file="0.gif")
w = Label(root, image=photo)
w.photo = photo
w.place(x=-5, y=-5)
photo = PhotoImage(file="logo2.gif")
w = Label(root, image=photo)
w.photo = photo
w.place(x=1050, y=100)
counter = 0
step = 1
b2 = Button(root, text='Begin Smart Water Challenge!', bd=10, height=1, font="Verdana 14 bold", bg="Black",
fg="White", command=lambda: e_q1(root, counter, step), compound=CENTER)
b2.pack()
b2.place(x=500, y=350)
ex = Button(root, text="EXIT", bd=1, width=6, font="Verdana 10 bold", bg="red", fg="White",
command=lambda: root.destroy())
ex.pack()
ex.place(x=1168, y=725)
root.mainloop()
start()
|
bsd-2-clause
| 6,492,672,037,668,850,000 | 30.009346 | 111 | 0.547099 | false |
bbengfort/inigo
|
inigo/image.py
|
1
|
7931
|
# inigo.image
# Handles data dealing with images, particularly EXIF for JPEG
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Sun Jun 14 22:32:17 2015 -0400
#
# Copyright (C) 2015 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: image.py [] benjamin@bengfort.com $
"""
Handles data dealing with images, particularly EXIF for JPEG
"""
##########################################################################
## Imports
##########################################################################
from inigo.fs import FileMeta
from PIL import Image, ExifTags
from datetime import datetime
from dateutil.tz import tzutc
from inigo.config import settings
from inigo.utils.timez import epochptime
from inigo.utils.decorators import memoized
from inigo.exceptions import PictureNotFound
from inigo.models import STYPE, create_session
from inigo.models import Picture, Storage
from inigo.utils.timez import tzaware_now
from sqlalchemy.sql import exists
from geopy.geocoders import GoogleV3
##########################################################################
## Module Constants
##########################################################################
EXIF_DATE_FORMAT = "%Y:%m:%d %H:%M:%S"
##########################################################################
## Helper functions
##########################################################################
def convert_to_degrees(value):
"""
Helper function to convert GPS coordinates stored in EXIF degrees to a
decimal float format, though this function does not take into account
N/S or E/W cardinality of the degree vector.
"""
deg = float(value[0][0]) / float(value[0][1])
mns = float(value[1][0]) / float(value[1][1])
sec = float(value[2][0]) / float(value[2][1])
return deg + (mns / 60.0) + (sec / 3600.0)
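# Worked example (illustrative only): an EXIF rational triple such as
# ((40, 1), (26, 1), (4614, 100)) encodes 40 deg 26 min 46.14 sec, i.e.
# 40 + 26/60.0 + 46.14/3600.0 ~= 40.4462 decimal degrees.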
##########################################################################
## Image Node
##########################################################################
class ImageMeta(FileMeta):
"""
Wraps a path and provides image meta data.
"""
@property
def exif(self):
"""
Uses Pillow to extract the EXIF data
"""
if not hasattr(self, '_exif'):
self.read_image_data()
return self._exif
@property
def dimensions(self):
"""
Returns a tuple of the width and height of the image.
"""
if not hasattr(self, '_dimensions'):
self.read_image_data()
return self._dimensions
@memoized
def date_taken(self):
"""
Attempts to find the date taken. Returns any timestamp, even if it is
just the date created on the file meta. Current logic for the method:
1. Attempt to parse DateTimeOriginal from EXIF
2. Return st_ctime from os.stat
"""
dtorig = self.exif.get('DateTimeOriginal', None)
if dtorig:
return datetime.strptime(dtorig, EXIF_DATE_FORMAT).replace(tzinfo=tzutc())
return epochptime(self.stat().st_ctime)
@memoized
def coordinates(self):
"""
Returns the latitude and longitude as a tuple.
"""
lat = lon = None
# Decode the GPSInfo tags
if "GPSInfo" in self.exif:
self.exif["GPSInfo"] = {
ExifTags.GPSTAGS[k]: v
for k,v in self.exif["GPSInfo"].iteritems()
if k in ExifTags.GPSTAGS
}
# Gather GPS data points
gps_info = self.exif["GPSInfo"]
gps_lat = gps_info.get("GPSLatitude", None)
gps_lon = gps_info.get("GPSLongitude", None)
gps_lat_ref = gps_info.get("GPSLatitudeRef", None)
gps_lon_ref = gps_info.get("GPSLongitudeRef", None)
# Perform GPS conversions
if gps_lat and gps_lon and gps_lat_ref and gps_lon_ref:
lat = convert_to_degrees(gps_lat)
if gps_lat_ref != "N":
lat = 0 - lat
lon = convert_to_degrees(gps_lon)
if gps_lon_ref != "E":
lon = 0 - lon
return (lat, lon)
@memoized
def address(self):
"""
        Reverse-geocodes a human-readable address from the coordinates.
"""
if not self.coordinates:
return
geocoder = GoogleV3(api_key=settings.geocode.apikey)
query = "{},{}".format(*self.coordinates)
result = geocoder.reverse(query, exactly_one=True, sensor=False)
if result:
return result.address
def read_image_data(self):
"""
Reads the image data and returns specific information.
"""
with Image.open(self.path) as img:
# Read size data
self._dimensions = img.size
# Read EXIF data
exifdata = img._getexif() if hasattr(img, "_getexif") else {}
self._exif = {
ExifTags.TAGS[k]: v
for k,v in exifdata.iteritems()
if k in ExifTags.TAGS
} if exifdata else {}
def save(self, session=None, commit=False):
"""
Stores the image information in the database along with the current
file path. Pass a session object in to use the same session for
multiple saves.
This method returns the session object. Will commit if required.
"""
session = session or create_session()
if not session.query(exists().where(
Picture.signature == self.signature
)).scalar():
session.add(Picture(
signature = self.signature,
date_taken = self.date_taken,
latitude = self.coordinates[0] if self.coordinates else None,
longitude = self.coordinates[1] if self.coordinates else None,
width = self.dimensions[0],
height = self.dimensions[1],
mimetype = unicode(self.mimetype),
bytes = self.filesize,
))
if commit:
session.commit()
return session
def save_storage(self, session=None, commit=False, **skwargs):
"""
Saves the storage associated with this image and file meta.
"""
session = session or create_session()
# Fetch the picture from the database
picture = session.query(Picture)
picture = picture.filter(Picture.signature == self.signature).first()
if not picture:
raise PictureNotFound(
"Must save the picture before assigning storages."
)
# Create the storage object
sdata = {
"stype": STYPE.ORIGINAL,
"hostname": unicode(self.hostname),
"filepath": unicode(self.path),
"memo": None,
"picture": picture,
"modified": tzaware_now(),
}
sdata.update(skwargs)
# Attempt to fetch the storage on the dependent keys
storage = session.query(Storage)
storage = storage.filter(Storage.stype == sdata['stype'])
storage = storage.filter(Storage.hostname == sdata['hostname'])
storage = storage.filter(Storage.filepath == sdata['filepath'])
storage = storage.filter(Storage.picture == sdata['picture'])
storage = storage.first() or Storage()
# Set the new values on the storage object
for key, val in sdata.iteritems():
setattr(storage, key, val)
session.add(storage)
if commit:
session.commit()
return session
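    # Illustrative usage sketch (not part of the original class): reuse a single
    # session for several images and let each call commit. `paths` is a
    # hypothetical iterable of image file paths.
    #
    #   session = None
    #   for path in paths:
    #       meta = ImageMeta(path)
    #       session = meta.save(session=session, commit=True)
    #       session = meta.save_storage(session=session, commit=True)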
if __name__ == '__main__':
import os
from inigo.config import PROJECT
img = ImageMeta(os.path.join(PROJECT, "fixtures/animals/land/cats/cat.jpg"))
print img.date_taken
print img.dimensions
|
mit
| 2,563,230,144,697,206,000 | 30.724 | 86 | 0.536628 | false |
macarthur-lab/xbrowse
|
seqr/views/apis/locus_list_api_tests.py
|
1
|
8486
|
import json
import mock
from django.test import TransactionTestCase
from django.urls.base import reverse
from seqr.models import LocusList, Project
from seqr.views.apis.locus_list_api import locus_lists, locus_list_info, create_locus_list_handler, \
update_locus_list_handler, delete_locus_list_handler, add_project_locus_lists, delete_project_locus_lists
from seqr.views.utils.orm_to_json_utils import get_project_locus_list_models
from seqr.views.utils.test_utils import _check_login
LOCUS_LIST_GUID = 'LL00049_pid_genes_autosomal_do'
PROJECT_GUID = 'R0001_1kg'
class LocusListAPITest(TransactionTestCase):
fixtures = ['users', '1kg_project', 'reference_data']
def test_locus_lists(self):
url = reverse(locus_lists)
_check_login(self, url)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
locus_lists_dict = response.json()['locusListsByGuid']
self.assertSetEqual(set(locus_lists_dict.keys()), {'LL00049_pid_genes_autosomal_do', 'LL00005_retina_proteome'})
locus_list = locus_lists_dict[LOCUS_LIST_GUID]
self.assertSetEqual(
set(locus_list.keys()),
{'locusListGuid', 'description', 'lastModifiedDate', 'numEntries', 'isPublic', 'createdBy', 'createdDate',
'canEdit', 'name'}
)
def test_locus_list_info(self):
url = reverse(locus_list_info, args=[LOCUS_LIST_GUID])
_check_login(self, url)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response_json = response.json()
locus_lists_dict = response_json['locusListsByGuid']
self.assertListEqual(locus_lists_dict.keys(), [LOCUS_LIST_GUID])
locus_list = locus_lists_dict[LOCUS_LIST_GUID]
self.assertSetEqual(
set(locus_list.keys()),
{'locusListGuid', 'description', 'lastModifiedDate', 'numEntries', 'isPublic', 'createdBy', 'createdDate',
'canEdit', 'name', 'items', 'intervalGenomeVersion'}
)
self.assertSetEqual(
{item['geneId'] for item in locus_list['items'] if item.get('geneId')},
set(response_json['genesById'].keys())
)
def test_create_update_and_delete_locus_list(self):
create_locus_list_url = reverse(create_locus_list_handler)
_check_login(self, create_locus_list_url)
# send invalid requests to create locus_list
response = self.client.post(create_locus_list_url, content_type='application/json', data=json.dumps({}))
self.assertEqual(response.status_code, 400)
self.assertEqual(response.reason_phrase, '"Name" is required')
response = self.client.post(create_locus_list_url, content_type='application/json', data=json.dumps({
'name': 'new_locus_list', 'isPublic': True, 'rawItems': 'DDX11L1, foo 10:10-1 chr100:1-10 \n2:1234-5678',
}))
self.assertEqual(response.status_code, 400)
self.assertEqual(response.reason_phrase, 'This list contains invalid genes/ intervals. Update them, or select the "Ignore invalid genes and intervals" checkbox to ignore.')
self.assertListEqual(response.json()['invalidLocusListItems'], ['chr10:10-1', 'chr100:1-10', 'foo'])
# send valid request to create locus_list
response = self.client.post(create_locus_list_url, content_type='application/json', data=json.dumps({
'name': 'new_locus_list', 'isPublic': True, 'ignoreInvalidItems': True,
'rawItems': 'DDX11L1, foo chr100:1-1 \nchr2:1234-5678',
}))
self.assertEqual(response.status_code, 200)
new_locus_list_response = response.json()
self.assertEqual(len(new_locus_list_response['locusListsByGuid']), 1)
new_locus_list = new_locus_list_response['locusListsByGuid'].values()[0]
self.assertEqual(new_locus_list['name'], 'new_locus_list')
self.assertEqual(new_locus_list['isPublic'], True)
self.assertSetEqual(
{item['geneId'] for item in new_locus_list['items'] if item.get('geneId')},
set(new_locus_list_response['genesById'].keys())
)
self.assertListEqual(
new_locus_list['items'],
[
{'geneId': 'ENSG00000223972'},
{'chrom': '2', 'start': 1234, 'end': 5678, 'genomeVersion': '37', 'locusListIntervalGuid': mock.ANY}
]
)
guid = new_locus_list['locusListGuid']
gene_id = new_locus_list['items'][0]['geneId']
new_locus_list_model = LocusList.objects.filter(guid=guid).first()
self.assertIsNotNone(new_locus_list_model)
self.assertEqual(new_locus_list_model.name, new_locus_list['name'])
self.assertEqual(new_locus_list_model.is_public, new_locus_list['isPublic'])
self.assertEqual(new_locus_list_model.locuslistgene_set.count(), 1)
self.assertEqual(new_locus_list_model.locuslistgene_set.first().gene_id, gene_id)
self.assertEqual(new_locus_list_model.locuslistinterval_set.count(), 1)
new_interval = new_locus_list_model.locuslistinterval_set.first()
self.assertEqual(new_interval.chrom, '2')
self.assertEqual(new_interval.start, 1234)
# update the locus_list
update_locus_list_url = reverse(update_locus_list_handler, args=[guid])
response = self.client.post(update_locus_list_url, content_type='application/json', data=json.dumps(
{'name': 'updated_locus_list', 'isPublic': False, 'rawItems': 'DDX11L1 FAM138A'}))
self.assertEqual(response.status_code, 200)
updated_locus_list_response = response.json()
self.assertEqual(len(updated_locus_list_response['locusListsByGuid']), 1)
updated_locus_list = updated_locus_list_response['locusListsByGuid'].values()[0]
self.assertEqual(updated_locus_list['name'], 'updated_locus_list')
self.assertEqual(updated_locus_list['isPublic'], False)
self.assertEqual(len(updated_locus_list_response['genesById']), 2)
self.assertTrue(gene_id in updated_locus_list_response['genesById'])
new_gene_id = next(gid for gid in updated_locus_list_response['genesById'] if gid != gene_id)
self.assertSetEqual({item['geneId'] for item in updated_locus_list['items']}, {new_gene_id, gene_id})
updated_locus_list_model = LocusList.objects.filter(guid=guid).first()
self.assertIsNotNone(updated_locus_list_model)
self.assertEqual(updated_locus_list_model.name, updated_locus_list['name'])
self.assertEqual(updated_locus_list_model.is_public, updated_locus_list['isPublic'])
self.assertEqual(updated_locus_list_model.locuslistgene_set.count(), 2)
self.assertEqual(updated_locus_list_model.locuslistgene_set.last().gene_id, new_gene_id)
self.assertEqual(updated_locus_list_model.locuslistinterval_set.count(), 0)
# delete the locus_list
delete_locus_list_url = reverse(delete_locus_list_handler, args=[guid])
response = self.client.post(delete_locus_list_url, content_type='application/json')
self.assertEqual(response.status_code, 200)
# check that locus_list was deleted
new_locus_list = LocusList.objects.filter(guid=guid)
self.assertEqual(len(new_locus_list), 0)
def test_add_and_remove_project_locus_lists(self):
project = Project.objects.get(guid=PROJECT_GUID)
self.assertListEqual(list(get_project_locus_list_models(project)), [])
# add a locus list
url = reverse(add_project_locus_lists, args=[PROJECT_GUID])
_check_login(self, url)
response = self.client.post(url, content_type='application/json', data=json.dumps({'locusListGuids': [LOCUS_LIST_GUID]}))
self.assertEqual(response.status_code, 200)
self.assertListEqual(response.json()['locusListGuids'], [LOCUS_LIST_GUID])
self.assertListEqual(list(get_project_locus_list_models(project)), [LocusList.objects.get(guid=LOCUS_LIST_GUID)])
# remove a locus list
url = reverse(delete_project_locus_lists, args=[PROJECT_GUID])
response = self.client.post(url, content_type='application/json', data=json.dumps({'locusListGuids': [LOCUS_LIST_GUID]}))
self.assertEqual(response.status_code, 200)
self.assertListEqual(response.json()['locusListGuids'], [])
self.assertListEqual(list(get_project_locus_list_models(project)), [])
|
agpl-3.0
| 3,673,026,669,628,493,000 | 49.511905 | 180 | 0.665213 | false |
bjtox/ec2ssh-manager
|
ec2ssh/ec2ssh.py
|
1
|
11122
|
import subprocess
import boto3
import sys
import configparser
from codecs import open
from os.path import expanduser
import os
import glob
import inquirer
import argparse
import libtmux
import time
class Connector:
def __init__(self, connection_name, profile):
self.hosts_folder = expanduser("~")
print(self.hosts_folder)
self.profile = profile
self.directory_to_save = self.hosts_folder+'/.ec2ssh/hosts/'
if not os.path.exists(self.directory_to_save):
os.makedirs(self.directory_to_save)
if connection_name != None:
self.connection_name = connection_name
self.config = self.read_config(connection_name)
if self.config != False:
self.port = self.config['Connection']['connection_port']
self.region_name = self.config['Connection']['region']
def open_tmux(self,selects,connection_name, region, profile, port):
server = libtmux.Server()
session = server.list_sessions()[0]
print(session)
window = session.new_window(attach=True, window_name=connection_name+str(round(time.time() * 1000)))
instances = len(selects)
print(instances)
print(instances % 2 == 0)
if instances % 2 == 0:
count = 1
else:
count = 0
while (count < instances):
window.split_window()
window.select_layout('tiled')
count += 1
selection = 1
for pane in window.list_panes():
pane.send_keys('ec2ssh connect -n {} -p {}'.format(connection_name,profile))
pane.send_keys(str(selection))
selection += 1
window.set_window_option('synchronize-panes', True)
def printMenu(self):
print (30 * '-')
print (" M A I N - M E N U")
print (30 * '-')
print ("1. Direct Connect")
print ("2. Pass from Bastion Host")
print ("3. Autoscaling")
print (30 * '-')
def read_config(self,host):
if os.path.isfile(self.directory_to_save+host+'.ini'):
config = configparser.ConfigParser()
config.sections()
config.read(self.directory_to_save+host+'.ini')
return(config);
else:
return False
def query_yes_no(self,question, default="yes"):
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def addConfig(self):
config = configparser.ConfigParser()
self.printMenu()
valid_choise=0
usr_input = ''
while usr_input not in ['1', '2', '3']:
if valid_choise :
print("Not Valid Choise")
valid_choise=1
usr_input = input("Input: ")
config['Connection']= {}
config['Connection']['region'] = input('Specify a Region:\n-> ')
config['Connection']['connection_port'] = input('Specify a connection port (for direct or for Bastion):\n-> ')
config['Connection']['profile'] = input('Specify which AWS profile use:\n-> ')
if not config['Connection']['profile']:
config['Connection']['profile'] = 'default'
if usr_input == "1":
config['Connection']['type'] = "direct"
config['EC2INSTANCE'] = {}
config['EC2INSTANCE']['pem_path'] = input('Enter a keyPair EC2 file path (absolute path):\n-> ')
config['EC2INSTANCE']['user'] = input('Enter a EC2 user (default "ec2-user"):\n-> ')
config['EC2INSTANCE']['ec2_instance_id'] = input('Enter a EC2 Instance ID:\n-> ')
if not config['EC2INSTANCE']['user']:
config['EC2INSTANCE']['user'] = 'ec2-user'
elif usr_input == "2":
config['Connection']['type'] = "bastion"
config['EC2INSTANCE'] = {}
config['EC2INSTANCE']['pem_path'] = input('Enter a keyPair EC2 file path (absolute path):\n-> ')
config['EC2INSTANCE']['user'] = input('Enter a EC2 user (default "ec2-user"):\n-> ')
config['EC2INSTANCE']['ec2_instance_id'] = input('Enter a EC2 Instance ID:\n-> ')
config['BASTIONHOST'] = {}
config['BASTIONHOST']['b_pem_path'] = input('Enter a Bastion pem file path (absolute path):\n-> ')
config['BASTIONHOST']['b_user'] = input('Enter a Bastion user:\n-> ')
config['BASTIONHOST']['b_ec2_instance_id'] = input('Enter a Bastion Instance ID:\n-> ')
if not config['EC2INSTANCE']['user']:
config['EC2INSTANCE']['user'] = 'ec2-user'
elif usr_input == "3":
config['Connection']['type'] = "asg"
config['ASG'] = {}
config['ASG']['pem_path'] = input('Enter a pem file path (absolute path):\n-> ')
config['ASG']['user'] = input('Enter a user (default "ec2-user"):\n-> ')
config['ASG']['name'] = input('Enter a ASG Name ID:\n-> ')
if not config['ASG']['user']:
config['ASG']['user'] = 'ec2-user'
questions = self.query_yes_no("ASG allow ssh only from Bastion Host?")
if questions == True:
config['BASTIONHOST'] = {}
config['BASTIONHOST']['b_pem_path'] = input('Enter a Bastion pem file path (absolute path):\n-> ')
config['BASTIONHOST']['b_user'] = input('Enter a Bastion user:\n-> ')
config['BASTIONHOST']['b_ec2_instance_id'] = input('Enter a Bastion Instance ID:\n-> ')
with open(self.directory_to_save+self.connection_name+'.ini', 'w') as configfile:
config.write(configfile)
print("File Config "+self.connection_name+" created")
def direct_connect(self,ec2_instance_config):
target = {'key': ec2_instance_config['pem_path'], 'user': ec2_instance_config['user'], 'host': ec2_instance_config['ec2_instance_id']}
target_ec2 = self.client
target_response = target_ec2.describe_instances(InstanceIds=[target['host']])
target_ip = target_response['Reservations'][0]['Instances'][0]['PublicIpAddress']
subprocess.call("ssh-add {}".format(target['key']), shell=True)
subprocess.call("ssh {}@{} -p {}".format(target['user'], target_ip, self.port), shell=True)
def bastion_connect(self,ec2_instance_config,bastion_config):
target = {'key': ec2_instance_config['pem_path'], 'user': ec2_instance_config['user'], 'host': ec2_instance_config['ec2_instance_id']}
target_ec2 = self.client
target_response = target_ec2.describe_instances(InstanceIds=[target['host']])
bastion = {'key': bastion_config['b_pem_path'], 'user': bastion_config['b_user'], 'host': bastion_config['b_ec2_instance_id']}
bastion_ec2 = self.client
bastion_response = bastion_ec2.describe_instances(InstanceIds=[bastion['host']])
bastion_ip = bastion_response['Reservations'][0]['Instances'][0]['PublicIpAddress']
target_ip = target_response['Reservations'][0]['Instances'][0]['NetworkInterfaces'][0]['PrivateIpAddress']
subprocess.call("ssh-add {} {}".format(bastion['key'], target['key']), shell=True)
subprocess.call("ssh -t -A {}@{} -p {} ssh {}@{}".format(bastion['user'], bastion_ip,self.port, target['user'], target_ip), shell=True)
def ec2ssh(self):
self.session = boto3.Session(profile_name=self.profile)
self.client = self.session.client('ec2',region_name=self.config['Connection']['region'])
config = self.read_config(self.connection_name)
if config['Connection']['type'] == "direct":
self.direct_connect(config['EC2INSTANCE'])
elif config['Connection']['type'] == "bastion":
self.bastion_connect(config['EC2INSTANCE'], config['BASTIONHOST'])
elif config['Connection']['type'] == "asg":
print ('Please select an option:')
print (" 0. All")
i=1
selects = {}
for instance in self.list_instance_in_asg(config['ASG']['name']):
print (" "+str(i)+". "+instance['InstanceId']+" - "+instance['LifecycleState'])
selects[i]=instance['InstanceId']
i+=1
config_asg = {}
choise = input('Enter Value: ')
if choise != "0":
config_asg['pem_path']=config['ASG']['pem_path']
config_asg['user']=config['ASG']['user']
config_asg['ec2_instance_id']=selects[int(choise)]
if config.has_section('BASTIONHOST'):
config_asg_bastion = {}
config_asg_bastion['b_pem_path']=config['BASTIONHOST']['b_pem_path']
config_asg_bastion['b_user']=config['BASTIONHOST']['b_user']
config_asg_bastion['b_ec2_instance_id']=config['BASTIONHOST']['b_ec2_instance_id']
self.bastion_connect(config_asg, config_asg_bastion)
else:
self.direct_connect(config_asg)
else:
self.open_tmux(selects, self.connection_name, self.region_name, self.profile, self.port)
def list_avaible_connection(self):
print (30 * '-')
for file in os.listdir(self.directory_to_save):
if file.endswith(".ini"):
name_file = file.replace('.ini','')
print(" Connection Name: "+name_file)
config = self.read_config(name_file)
print(" Type: "+config['Connection']['type'])
print(" Region Name: "+config['Connection']['region'])
print(" Connection Port: "+config['Connection']['connection_port'])
if config['Connection']['type'] == "direct":
print(" Key Pair: "+config['EC2INSTANCE']['pem_path'])
print(" User Pair: "+config['EC2INSTANCE']['user'])
print(" Instance Id Pair: "+config['EC2INSTANCE']['ec2_instance_id'])
elif config['Connection']['type'] == "bastion":
print(" Key Pair: "+config['EC2INSTANCE']['pem_path'])
print(" User Pair: "+config['EC2INSTANCE']['user'])
print(" Instance Id Pair: "+config['EC2INSTANCE']['ec2_instance_id'])
print(" Bastion Id: "+config['BASTIONHOST']['b_ec2_instance_id'])
elif config['Connection']['type'] == "asg":
print(" Key Pair: "+config['ASG']['pem_path'])
print(" User Pair: "+config['ASG']['user'])
print(" ASG Name: "+config['ASG']['name'])
print(" Bastion Id: "+config['BASTIONHOST']['b_ec2_instance_id'])
print (30 * '-')
def list_instance_in_asg(self, asg_name):
if self.profile!=None:
asg_client = self.session.client('autoscaling',region_name=self.region_name)
else:
asg_client = boto3.client('autoscaling',region_name=self.region_name)
response = asg_client.describe_auto_scaling_groups(
AutoScalingGroupNames=[
asg_name,
]
)
return response['AutoScalingGroups'][0]['Instances']
def rm_connecition(self):
try:
os.remove(self.directory_to_save+self.connection_name+'.ini')
print(self.connection_name+" connection was removed!")
except OSError:
print(self.connection_name+" connection doesn't exist!")
pass
|
mit
| -2,762,748,396,079,273,000 | 39.155235 | 139 | 0.605107 | false |
mmahut/openshift-ansible
|
roles/openshift_health_checker/action_plugins/openshift_health_check.py
|
1
|
5501
|
"""
Ansible action plugin to execute health checks in OpenShift clusters.
"""
# pylint: disable=wrong-import-position,missing-docstring,invalid-name
import sys
import os
from collections import defaultdict
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
from ansible.plugins.action import ActionBase
from ansible.module_utils.six import string_types
# Augment sys.path so that we can import checks from a directory relative to
# this callback plugin.
sys.path.insert(1, os.path.dirname(os.path.dirname(__file__)))
from openshift_checks import OpenShiftCheck, OpenShiftCheckException, load_checks # noqa: E402
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
task_vars = task_vars or {}
        # vars are not reliably available in the callback plugin,
        # so record anything it will need in the result.
result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
if "openshift" not in task_vars:
result["failed"] = True
result["msg"] = "'openshift' is undefined, did 'openshift_facts' run?"
return result
try:
known_checks = self.load_known_checks(tmp, task_vars)
args = self._task.args
requested_checks = normalize(args.get('checks', []))
resolved_checks = resolve_checks(requested_checks, known_checks.values())
except OpenShiftCheckException as e:
result["failed"] = True
result["msg"] = str(e)
return result
result["checks"] = check_results = {}
user_disabled_checks = normalize(task_vars.get('openshift_disable_check', []))
for check_name in resolved_checks:
display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"]))
check = known_checks[check_name]
if not check.is_active():
r = dict(skipped=True, skipped_reason="Not active for this host")
elif check_name in user_disabled_checks:
r = dict(skipped=True, skipped_reason="Disabled by user request")
else:
try:
r = check.run()
except OpenShiftCheckException as e:
r = dict(
failed=True,
msg=str(e),
)
if check.changed:
r["changed"] = True
check_results[check_name] = r
result["changed"] = any(r.get("changed") for r in check_results.values())
if any(r.get("failed") for r in check_results.values()):
result["failed"] = True
result["msg"] = "One or more checks failed"
return result
def load_known_checks(self, tmp, task_vars):
load_checks()
known_checks = {}
for cls in OpenShiftCheck.subclasses():
check_name = cls.name
if check_name in known_checks:
other_cls = known_checks[check_name].__class__
raise OpenShiftCheckException(
"non-unique check name '{}' in: '{}.{}' and '{}.{}'".format(
check_name,
cls.__module__, cls.__name__,
other_cls.__module__, other_cls.__name__))
known_checks[check_name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
return known_checks
def resolve_checks(names, all_checks):
"""Returns a set of resolved check names.
Resolving a check name expands tag references (e.g., "@tag") to all the
checks that contain the given tag. OpenShiftCheckException is raised if
names contains an unknown check or tag name.
names should be a sequence of strings.
all_checks should be a sequence of check classes/instances.
"""
known_check_names = set(check.name for check in all_checks)
known_tag_names = set(name for check in all_checks for name in check.tags)
check_names = set(name for name in names if not name.startswith('@'))
tag_names = set(name[1:] for name in names if name.startswith('@'))
unknown_check_names = check_names - known_check_names
unknown_tag_names = tag_names - known_tag_names
if unknown_check_names or unknown_tag_names:
msg = []
if unknown_check_names:
msg.append('Unknown check names: {}.'.format(', '.join(sorted(unknown_check_names))))
if unknown_tag_names:
msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names))))
msg.append('Make sure there is no typo in the playbook and no files are missing.')
raise OpenShiftCheckException('\n'.join(msg))
tag_to_checks = defaultdict(set)
for check in all_checks:
for tag in check.tags:
tag_to_checks[tag].add(check.name)
resolved = check_names.copy()
for tag in tag_names:
resolved.update(tag_to_checks[tag])
return resolved
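# Illustrative example (not part of the original plugin): if two checks named
# "memory_availability" and "disk_availability" both carry the tag "preflight"
# (names used here only for illustration), then
# resolve_checks(["@preflight"], all_checks) returns
# {"memory_availability", "disk_availability"}, while an unknown name such as
# "no_such_check" raises OpenShiftCheckException.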
def normalize(checks):
"""Return a clean list of check names.
The input may be a comma-separated string or a sequence. Leading and
trailing whitespace characters are removed. Empty items are discarded.
"""
if isinstance(checks, string_types):
checks = checks.split(',')
return [name.strip() for name in checks if name.strip()]
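# Illustrative examples (not part of the original plugin): `normalize` accepts
# either a comma-separated string or a sequence and drops empty entries.
#
#   >>> normalize("docker_storage , @preflight,")
#   ['docker_storage', '@preflight']
#   >>> normalize(["memory_availability", "  "])
#   ['memory_availability']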
|
apache-2.0
| 1,259,086,927,939,239,200 | 36.168919 | 109 | 0.612979 | false |
jakub-m/phantomcurl
|
phantomcurl/test/test_post_data.py
|
1
|
1059
|
from nose.tools import *
from phantomcurl.utils import split_post_items
def test_post_data_good():
expected_given = [
([('foo', 'bar')], ['foo=bar']),
([('foo', '')], ['foo=']),
([('foo', '=')], ['foo==']),
([('', '')], ['=']),
([('', '=')], ['==']),
([('', 'bar')], ['=bar'])
]
for expected, given in expected_given:
yield check_post_data_good, expected, given
def check_post_data_good(expected_dict, post_items):
post_dict = split_post_items(post_items)
assert_equals(expected_dict, post_dict)
def test_post_data_bad():
bad_input = ['foo', '']
for input_item in bad_input:
yield check_post_data_bad, input_item
def check_post_data_bad(post_item):
assert_raises(ValueError, split_post_items, [post_item])
#def test_dict_to_post_string():
# assert_in(
# dict_to_post_string({'foo', 'bar'}),
# ['foo=bar'])
# assert_in(
# dict_to_post_string({'foo': '', 'ham': 'spam '}),
# ['foo=&ham=spam+', 'ham=spam+&foo=']
# )
|
gpl-2.0
| -7,154,057,688,203,821,000 | 24.214286 | 60 | 0.525024 | false |
richardkiss/pycoinnet
|
pycoinnet/peer/Fetcher.py
|
1
|
4318
|
import asyncio
import logging
import weakref
from pycoin.serialize import b2h_rev
from pycoinnet.InvItem import InvItem, ITEM_TYPE_TX, ITEM_TYPE_BLOCK, ITEM_TYPE_MERKLEBLOCK
class Fetcher:
"""
Fetching a merkleblock also fetches the transactions that follow, and
includes them in the message as the "tx" key.
"""
def __init__(self, peer):
self.peer = peer
self.request_q = asyncio.Queue()
self.futures = weakref.WeakValueDictionary()
getdata_loop_future = asyncio.Task(self._getdata_loop())
next_message = peer.new_get_next_message_f(
filter_f=lambda name, data: name in ["tx", "block", "merkleblock", "notfound"])
peer.add_task(self._fetch_loop(next_message, getdata_loop_future))
def fetch(self, inv_item, timeout=None):
"""
Return the fetched object or None if the remote says it doesn't have it, or
times out by exceeding `timeout` seconds.
"""
future = self.futures.get(inv_item)
if not future:
future = asyncio.Future()
self.futures[inv_item] = future
self.request_q.put_nowait(inv_item)
try:
return (yield from asyncio.wait_for(future, timeout=timeout))
except asyncio.TimeoutError:
return None
def queue_size(self):
pass
# ## TODO: finish
@asyncio.coroutine
def _getdata_loop(self):
while True:
so_far = []
inv_item = yield from self.request_q.get()
while True:
so_far.append(inv_item)
if self.request_q.qsize() == 0 or len(so_far) >= 50000:
break
inv_item = yield from self.request_q.get()
self.peer.send_msg("getdata", items=so_far)
@asyncio.coroutine
def _fetch_loop(self, next_message, getdata_loop_future):
try:
while True:
name, data = yield from next_message()
ITEM_LOOKUP = dict(tx="tx", block="block", merkleblock="header")
if name in ITEM_LOOKUP:
item = data[ITEM_LOOKUP[name]]
the_hash = item.hash()
TYPE_DB = {"tx": ITEM_TYPE_TX,
"block": ITEM_TYPE_BLOCK,
"merkleblock": ITEM_TYPE_MERKLEBLOCK}
the_type = TYPE_DB[name]
inv_item = InvItem(the_type, the_hash)
future = self.futures.get(inv_item)
if name == "merkleblock":
txs = []
for h in data["tx_hashes"]:
name, data = yield from next_message()
if name != "tx":
logging.error(
"insufficient tx messages after merkleblock message: missing %s",
b2h_rev(h))
del self.futures[inv_item]
future.set_result(None)
break
tx = data["tx"]
if tx.hash() != h:
logging.error(
"missing tx message after merkleblock message: missing %s", b2h_rev(h))
del self.futures[inv_item]
future.set_result(None)
break
txs.append(tx)
item.txs = txs
if future is not None:
del self.futures[inv_item]
if not future.done():
future.set_result(item)
else:
logging.info("got %s unsolicited", item.id())
if name == "notfound":
for inv_item in data["items"]:
the_hash = inv_item.data
future = self.futures.get(inv_item)
if future:
del self.futures[inv_item]
future.set_result(None)
except EOFError:
getdata_loop_future.cancel()
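# Illustrative usage sketch (not part of the original module), run from inside a
# coroutine that already holds a connected `peer`; `block_hash` is a
# hypothetical 32-byte block hash.
#
#   fetcher = Fetcher(peer)
#   item = InvItem(ITEM_TYPE_BLOCK, block_hash)
#   block = yield from fetcher.fetch(item, timeout=30)
#   if block is None:
#       pass  # the remote answered "notfound" or the request timed out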
|
mit
| -5,082,640,224,147,455,000 | 40.12381 | 107 | 0.46943 | false |
GNOME/orca
|
src/orca/scripts/apps/Instantbird/chat.py
|
1
|
6860
|
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom chat module for Instantbird."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
import pyatspi
import orca.chat as chat
########################################################################
# #
# The Instantbird chat class. #
# #
########################################################################
class Chat(chat.Chat):
def __init__(self, script, buddyListAncestries):
chat.Chat.__init__(self, script, buddyListAncestries)
########################################################################
# #
# InputEvent handlers and supporting utilities #
# #
########################################################################
def getMessageFromEvent(self, event):
"""Get the actual displayed message. This will almost always be the
unaltered any_data from an event of type object:text-changed:insert.
Arguments:
- event: the Event from which to take the text.
Returns the string which should be presented as the newly-inserted
text. (Things like chatroom name prefacing get handled elsewhere.)
"""
string = ""
# IMs are written in areas that look like bubbles. When a new bubble
# is inserted, we see an embedded object character inserted into the
# document frame. The first paragraph is the bubble title; the
# rest (usually just one) are the message itself.
#
if self._script.utilities.isDocument(event.source):
bubble = event.source[event.detail1]
hasRole = lambda x: x and x.getRole() == pyatspi.ROLE_PARAGRAPH
paragraphs = pyatspi.findAllDescendants(bubble, hasRole)
# If the user opted the non-default, "simple" appearance, then this
# might not be a bubble at all, but a paragraph.
#
if not paragraphs and bubble.getRole() == pyatspi.ROLE_PARAGRAPH:
paragraphs.append(bubble)
for paragraph in paragraphs:
msg = self._script.utilities.substring(paragraph, 0, -1)
if msg == self._script.EMBEDDED_OBJECT_CHARACTER:
# This seems to occur for non-focused conversations.
#
msg = self._script.utilities.substring(paragraph[0], 0, -1)
string = self._script.utilities.appendString(string, msg)
return string
# If we instead have a section, we are writing another message into
# the existing bubble. In this case, we get three separate items
# inserted: a separator, a paragraph with the desired text, and an
# empty section.
#
if event.source.getRole() == pyatspi.ROLE_SECTION:
obj = event.source[event.detail1]
if obj and obj.getRole() == pyatspi.ROLE_PARAGRAPH:
try:
text = obj.queryText()
except:
pass
else:
string = text.getText(0, -1)
return string
########################################################################
# #
# Convenience methods for identifying, locating different accessibles #
# #
########################################################################
def isChatRoomMsg(self, obj):
"""Returns True if the given accessible is the text object for
associated with a chat room conversation.
Arguments:
- obj: the accessible object to examine.
"""
if not obj:
return False
if self._script.utilities.isDocument(obj):
return True
return obj.getRole() in [pyatspi.ROLE_SECTION, pyatspi.ROLE_PARAGRAPH]
def getChatRoomName(self, obj):
"""Attempts to find the name of the current chat room.
Arguments:
- obj: The accessible of interest
Returns a string containing what we think is the chat room name.
"""
name = ""
ancestor = self._script.utilities.ancestorWithRole(
obj,
[pyatspi.ROLE_SCROLL_PANE, pyatspi.ROLE_FRAME],
[pyatspi.ROLE_APPLICATION])
if ancestor and ancestor.getRole() == pyatspi.ROLE_SCROLL_PANE:
# The scroll pane has a proper labelled by relationship set.
#
name = self._script.utilities.displayedLabel(ancestor)
if not name:
try:
text = self._script.utilities.displayedText(ancestor)
if text.lower().strip() != self._script.name.lower().strip():
name = text
except:
pass
return name
def isFocusedChat(self, obj):
"""Returns True if we plan to treat this chat as focused for
the purpose of deciding whether or not a message should be
presented to the user.
Arguments:
- obj: the accessible object to examine.
"""
# Normally, we'd see if the top level window associated
# with this object had STATE_ACTIVE. That doesn't work
# here. So see if the script for the locusOfFocus is
# this script. If so, the only other possibility is that
# we're in the buddy list instead.
#
if obj and obj.getState().contains(pyatspi.STATE_SHOWING) \
and self._script.utilities.isInActiveApp(obj) \
and not self.isInBuddyList(obj):
return True
return False
|
lgpl-2.1
| -3,200,580,494,293,113,300 | 37.757062 | 79 | 0.530029 | false |
wfhio/tramcar
|
job_board/models/site_config.py
|
1
|
2504
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.sites.models import Site
class SiteConfig(models.Model):
expire_after = models.SmallIntegerField(default=30)
# NOTE: We set a default here, but we will override this with a more
# suitable default when we create the SiteConfig instance
admin_email = models.EmailField(default='admin@site')
site = models.OneToOneField(Site, on_delete=models.CASCADE)
remote = models.BooleanField(
default=False,
help_text="Select if this job board is for remote jobs only"
)
protocol = models.CharField(
default='http',
choices=(('http', 'http'), ('https', 'https')),
max_length=5,
help_text="The protocol to use when building links in "
"e-mail templates, etc."
)
google_analytics = models.CharField(
max_length=20,
blank=True,
help_text="Google Analytics Tracking ID"
)
twitter_user = models.CharField(
max_length=15,
blank=True,
help_text="Your site's Twitter username, fill in to "
"have a Follow icon appear on select pages"
)
twitter_consumer_key = models.CharField(max_length=100, blank=True)
twitter_consumer_secret = models.CharField(max_length=100, blank=True)
twitter_access_token = models.CharField(max_length=100, blank=True)
twitter_access_token_secret = models.CharField(max_length=100, blank=True)
stripe_secret_key = models.CharField(max_length=100, blank=True)
stripe_publishable_key = models.CharField(max_length=100, blank=True)
price = models.DecimalField(
max_digits=5,
decimal_places=2,
default=0,
help_text="Price to charge for posting a job, "
"set to 0 to disable charging"
)
mailchimp_username = models.CharField(max_length=20, blank=True)
mailchimp_api_key = models.CharField(max_length=50, blank=True)
mailchimp_list_id = models.CharField(max_length=20, blank=True)
def price_in_cents(self):
# Stripe expects an integer
return int(self.price * 100)
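    # Illustrative example (not part of the original model): with `price` set to
    # Decimal('99.99'), price_in_cents() returns 9999, the integer amount in
    # cents that Stripe's charge API expects.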
def __str__(self):
return self.site.name
|
mit
| -7,891,469,627,665,054,000 | 42.929825 | 78 | 0.580272 | false |
furthz/colegio
|
src/discounts/forms.py
|
1
|
4824
|
from django import forms
from enrollment.models import Servicio
from enrollment.models import TipoServicio
from enrollment.models import Matricula
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.forms import ModelForm, Form
from utils.models import TiposNivel
from django.utils.translation import ugettext_lazy as _
from discounts.models import Descuento
from discounts.models import TipoDescuento
from utils.middleware import get_current_colegio, get_current_userID
##############################################################
# Request discounts (Solicitar Descuentos)
##############################################################
class SolicitarDescuentoForm(ModelForm):
"""
    Form for the Descuento (discount) model.
    Note:
        Only the fields defined by the users are added as form fields.
"""
class Meta:
model = Descuento
fields = [
'matricula',
'tipo_descuento',
'numero_expediente',
'comentario',
]
labels = {
'matricula':_('Solicitante'),
'tipo_descuento':_('Descuento'),
'numero_expediente':_('Nro. Expediente'),
'comentario':_('Comentario'),
}
def ChoiceNiveles(self):
MY_CHOICES = (
('1', 'Inicial'),
('2', 'Primaria'),
('3', 'Secundaria'),
)
return MY_CHOICES
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
#self.fields['nivel'] = forms.ChoiceField(choices=self.ChoiceNiveles())
#self.fields['grado'] = forms.ChoiceField(choices=self.ChoiceGrados())
self.fields['matricula'].widget.attrs.update({'class': 'form-control'})
self.fields['tipo_descuento'].widget.attrs.update({'class': 'form-control'})
self.fields['numero_expediente'].widget.attrs.update({'class': 'form-control'})
self.fields['comentario'].widget.attrs.update({'class': 'form-control'})
self.fields['matricula'].widget.attrs['editable'] = False
class TipoDescuentForm(ModelForm):
"""
    Form for the TipoDescuento (discount type) model.
    Note:
        Only the fields defined by the users are added as form fields.
"""
servicio = forms.ModelChoiceField(queryset=Servicio.objects.filter(activo=True))
class Meta:
model = TipoDescuento
fields = [
'servicio',
'descripcion',
'porcentaje',
]
labels = {
'servicio': _('Servicio'),
'descripcion': _('Descripción'),
'porcentaje': _('Porcentaje'),
}
def __init__(self, *args, **kwargs):
colegio = kwargs.pop('colegio', None)
super(TipoDescuentForm, self).__init__(*args, **kwargs)
# self.fields['nivel'] = forms.ChoiceField(choices=self.ChoiceNiveles())
# self.fields['grado'] = forms.ChoiceField(choices=self.ChoiceGrados())
self.fields['servicio'].widget.attrs.update({'class': 'form-control'})
self.fields['descripcion'].widget.attrs.update({'class': 'form-control'})
self.fields['porcentaje'].widget.attrs.update({'class': 'form-control'})
if colegio:
self.fields['servicio'].queryset = Servicio.objects.filter(activo=True,tipo_servicio__colegio__id_colegio=colegio)
##############################################################
# Approve discounts (Aprobar Descuentos)
##############################################################
class DetalleDescuentosForm(forms.Form):
"""
    Form for filtering the income-control ("Control de ingresos") detail records.
    Note:
        Only the fields defined by the users are added as form fields.
"""
alumno = forms.CharField(required=False)
anio = forms.CharField()
numero_expediente = forms.CharField(required=False)
estado = forms.CharField()
def ChoiceAnio(self):
MY_CHOICES = (
('2017', '2017'),
('2016', '2016'),
)
return MY_CHOICES
def ChoiceEstado(self):
MY_CHOICES = (
('Todos', 'Todos'),
('Aprobado', 'Aprobado'),
('No_aprobado', 'No aprobado'),
('Pendiente', 'Pendiente'),
)
return MY_CHOICES
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['anio'] = forms.ChoiceField(choices=self.ChoiceAnio())
self.fields['estado'] = forms.ChoiceField(choices=self.ChoiceEstado())
self.fields['alumno'].widget.attrs.update({'class': 'form-control'})
self.fields['anio'].widget.attrs.update({'class': 'form-control'})
self.fields['numero_expediente'].widget.attrs.update({'class': 'form-control'})
self.fields['estado'].widget.attrs.update({'class': 'form-control'})
|
mit
| 4,555,708,221,566,041,000 | 35.793893 | 126 | 0.578423 | false |
daviddeng/azrael
|
demos/ctrl_swarm.py
|
1
|
4132
|
# Copyright 2014, Oliver Nagy <olitheolix@gmail.com>
#
# This file is part of Azrael (https://github.com/olitheolix/azrael)
#
# Azrael is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Azrael is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Azrael. If not, see <http://www.gnu.org/licenses/>.
"""
Manoeuvre the swarm of cubes in an orchestrated fashion.
Due to the lack of any feedback control the cubes may not move too orderly but
it suffices to demonstrate the principle.
"""
import os
import sys
import time
import setproctitle
import multiprocessing
# Augment the Python path so that we can include the main project.
p = os.path.dirname(os.path.abspath(__file__))
p = os.path.join(p, '..')
sys.path.insert(0, p)
del p
import azrael.client
import azrael.config as config
# NOTE: the original demo uses `types.CmdBooster` below without ever importing
# `types`; the import here assumes it comes from azrael's `types` module.
import azrael.types as types
class ControllerCubeLeft(multiprocessing.Process):
def __init__(self, objID, ip=config.addr_clerk, port=config.port_clerk):
super().__init__()
self.left = 0
self.right = 1
self.ip = ip
self.port = port
self.objID = objID
def run(self):
client = azrael.client.Client(ip=self.ip, port_clerk=self.port)
# ---------------------------------------------------------------------
# Edit here to change the force of boosters.
# ---------------------------------------------------------------------
# Turn both boosters on after 2s.
left = types.CmdBooster(self.left, force=0.1)
right = types.CmdBooster(self.right, force=0.1)
client.controlParts(self.objID, [right, left], [])
print('{0:02d}: Manoeuvre 1'.format(self.objID))
time.sleep(2)
# Fire the booster asymmetrically to make the cube turn.
left = types.CmdBooster(self.left, force=0)
right = types.CmdBooster(self.right, force=1)
client.controlParts(self.objID, [right, left], [])
print('{0:02d}: Manoeuvre 2'.format(self.objID))
time.sleep(2)
# Reverse the force settings to stop the spinning.
left = types.CmdBooster(self.left, force=1)
right = types.CmdBooster(self.right, force=0)
client.controlParts(self.objID, [right, left], [])
print('{0:02d}: Manoeuvre 3'.format(self.objID))
time.sleep(2)
# Use the same force on both boosters to just move forward without
# inducing any more spinning.
left = types.CmdBooster(self.left, force=0.1)
right = types.CmdBooster(self.right, force=0.1)
client.controlParts(self.objID, [right, left], [])
time.sleep(4)
# Done.
print('{0:02d}: Manoeuvre 4'.format(self.objID))
class ControllerCubeRight(ControllerCubeLeft):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Swap the index for left/right compared to the base class.
self.left = 1
self.right = 0
def main():
addr = config.addr_clerk
# Controllers for columns 1, 2, 3, 4.
CCL, CCR = ControllerCubeLeft, ControllerCubeRight
group_1 = [CCL(4 * _ + 0, addr) for _ in range(1, 5)]
group_2 = [CCL(4 * _ + 1, addr) for _ in range(1, 5)]
group_3 = [CCR(4 * _ + 2, addr) for _ in range(1, 5)]
group_4 = [CCR(4 * _ + 3, addr) for _ in range(1, 5)]
# Start the cubes in the two outer columns.
time.sleep(0.5)
for p0, p1 in zip(group_1, group_4):
p0.start()
p1.start()
time.sleep(0.5)
# Start the cubes in the two inner columns.
time.sleep(1)
for p0, p1 in zip(group_2, group_3):
p0.start()
p1.start()
time.sleep(0.5)
print('done')
if __name__ == '__main__':
main()
|
agpl-3.0
| 8,619,977,157,674,257,000 | 32.056 | 79 | 0.616167 | false |
fmfn/UnbalancedDataset
|
examples/applications/plot_outlier_rejections.py
|
2
|
4354
|
"""
===============================================================
Customized sampler to implement an outlier rejections estimator
===============================================================
This example illustrates the use of a custom sampler to implement an outlier
rejections estimator. It can be used easily within a pipeline in which the
number of samples can vary during training, which usually is a limitation of
the current scikit-learn pipeline.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons, make_blobs
from sklearn.ensemble import IsolationForest
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from imblearn import FunctionSampler
from imblearn.pipeline import make_pipeline
print(__doc__)
rng = np.random.RandomState(42)
def plot_scatter(X, y, title):
"""Function to plot some data as a scatter plot."""
plt.figure()
plt.scatter(X[y == 1, 0], X[y == 1, 1], label="Class #1")
plt.scatter(X[y == 0, 0], X[y == 0, 1], label="Class #0")
plt.legend()
plt.title(title)
##############################################################################
# Toy data generation
##############################################################################
##############################################################################
# We are generating a non-Gaussian data set contaminated with some uniform
# noise.
moons, _ = make_moons(n_samples=500, noise=0.05)
blobs, _ = make_blobs(
n_samples=500, centers=[(-0.75, 2.25), (1.0, 2.0)], cluster_std=0.25
)
outliers = rng.uniform(low=-3, high=3, size=(500, 2))
X_train = np.vstack([moons, blobs, outliers])
y_train = np.hstack(
[
np.ones(moons.shape[0], dtype=np.int8),
np.zeros(blobs.shape[0], dtype=np.int8),
rng.randint(0, 2, size=outliers.shape[0], dtype=np.int8),
]
)
plot_scatter(X_train, y_train, "Training dataset")
##############################################################################
# We will generate some cleaned test data without outliers.
moons, _ = make_moons(n_samples=50, noise=0.05)
blobs, _ = make_blobs(
n_samples=50, centers=[(-0.75, 2.25), (1.0, 2.0)], cluster_std=0.25
)
X_test = np.vstack([moons, blobs])
y_test = np.hstack(
[np.ones(moons.shape[0], dtype=np.int8), np.zeros(blobs.shape[0], dtype=np.int8)]
)
plot_scatter(X_test, y_test, "Testing dataset")
##############################################################################
# How to use the :class:`~imblearn.FunctionSampler`
##############################################################################
##############################################################################
# We first define a function which will use
# :class:`~sklearn.ensemble.IsolationForest` to eliminate some outliers from
# our dataset during training. The function passed to the
# :class:`~imblearn.FunctionSampler` will be called when using the method
# ``fit_resample``.
def outlier_rejection(X, y):
"""This will be our function used to resample our dataset."""
model = IsolationForest(max_samples=100, contamination=0.4, random_state=rng)
model.fit(X)
y_pred = model.predict(X)
return X[y_pred == 1], y[y_pred == 1]
reject_sampler = FunctionSampler(func=outlier_rejection)
X_inliers, y_inliers = reject_sampler.fit_resample(X_train, y_train)
plot_scatter(X_inliers, y_inliers, "Training data without outliers")
##############################################################################
# Integrate it within a pipeline
##############################################################################
##############################################################################
# By eliminating outliers before the training, the classifier will be less
# affected during the prediction.
pipe = make_pipeline(
FunctionSampler(func=outlier_rejection),
LogisticRegression(solver="lbfgs", multi_class="auto", random_state=rng),
)
y_pred = pipe.fit(X_train, y_train).predict(X_test)
print(classification_report(y_test, y_pred))
clf = LogisticRegression(solver="lbfgs", multi_class="auto", random_state=rng)
y_pred = clf.fit(X_train, y_train).predict(X_test)
print(classification_report(y_test, y_pred))
plt.show()
|
mit
| 2,748,291,866,553,691,000 | 34.688525 | 85 | 0.562701 | false |
scott-maddox/simplepl
|
src/simplepl/dialogs/lockin_config_dialog.py
|
1
|
4145
|
#
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of SimplePL.
#
# SimplePL is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# SimplePL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with SimplePL. If not, see
# <http://www.gnu.org/licenses/>.
#
#######################################################################
# third party imports
from PySide import QtGui, QtCore
class LockinConfigDialog(QtGui.QDialog):
def __init__(self, lockin, parent=None):
super(LockinConfigDialog, self).__init__(parent)
self.setModal(True)
settings = QtCore.QSettings()
timeConstantIndex = int(settings.value('lockin/time_constant_index',
9)) # 300 ms default
reserveModeIndex = int(settings.value('lockin/reserve_mode_index',
0)) # High reserve default
inputLineFilterIndex = int(settings.value('lockin/input_line_filter_index',
3)) # both filters default
self.timeConstantComboBox = QtGui.QComboBox()
for text in lockin.getTimeConstantLabelsList():
self.timeConstantComboBox.addItem(text)
self.timeConstantComboBox.setCurrentIndex(timeConstantIndex)
self.reserveModeComboBox = QtGui.QComboBox()
self.reserveModeComboBox.addItem('High Reserve')
self.reserveModeComboBox.addItem('Normal')
self.reserveModeComboBox.addItem('Low Noise (minimum)')
self.reserveModeComboBox.setCurrentIndex(reserveModeIndex)
self.inputLineFilterComboBox = QtGui.QComboBox()
self.inputLineFilterComboBox.addItem('no filters')
self.inputLineFilterComboBox.addItem('line notch filter')
self.inputLineFilterComboBox.addItem('2x line notch filter')
self.inputLineFilterComboBox.addItem('both notch filters')
self.inputLineFilterComboBox.setCurrentIndex(inputLineFilterIndex)
layout = QtGui.QVBoxLayout(self)
form = QtGui.QFormLayout()
form.addRow('Time Constant', self.timeConstantComboBox)
form.addRow('Reserve Mode', self.reserveModeComboBox)
form.addRow('Input Line Filter', self.inputLineFilterComboBox)
layout.addLayout(form)
# OK and Cancel buttons
self.buttons = QtGui.QDialogButtonBox(
QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel,
QtCore.Qt.Horizontal, self)
layout.addWidget(self.buttons)
# Connect buttons
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
@staticmethod
def getLockinConfig(lockin, parent=None):
'''
Returns (timeConstantIndex, reserveModeIndex, inputLineFilterIndex,
accepted), and changes the corresponding values in the settings.
'''
dialog = LockinConfigDialog(lockin, parent)
result = dialog.exec_()
accepted = (result == QtGui.QDialog.Accepted)
timeConstantIndex = dialog.timeConstantComboBox.currentIndex()
reserveModeIndex = dialog.reserveModeComboBox.currentIndex()
inputLineFilterIndex = dialog.inputLineFilterComboBox.currentIndex()
settings = QtCore.QSettings()
settings.setValue('lockin/time_constant_index', timeConstantIndex)
settings.setValue('lockin/reserve_mode_index', reserveModeIndex)
settings.setValue('lockin/input_line_filter_index',
inputLineFilterIndex)
settings.sync()
return timeConstantIndex, reserveModeIndex, \
inputLineFilterIndex, accepted
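    # Illustrative usage sketch (not part of the original module); `lockin` is a
    # hypothetical instrument wrapper exposing getTimeConstantLabelsList(), and
    # `main_window` is a hypothetical parent widget.
    #
    #   tc_idx, reserve_idx, filter_idx, ok = \
    #       LockinConfigDialog.getLockinConfig(lockin, parent=main_window)
    #   if ok:
    #       pass  # apply the three indices to the lock-in amplifier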
|
agpl-3.0
| -6,380,274,891,226,863,000 | 42.177083 | 83 | 0.666104 | false |
eawag-rdm/xlsxtocsv
|
xlsxtocsv/xlsxtocsv.py
|
1
|
3605
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime as dt
from Tkinter import Tk
import tkFileDialog
import openpyxl as op
import argparse
import os.path
import sys
import re
import csv
__metaclass__ = type
class RFC4180(csv.Dialect):
    # Dialect options must be class attributes for csv.register_dialect to pick
    # them up; in the original they were assigned to throwaway locals inside
    # __init__ (including a misspelled `stric`) and therefore never took effect.
    delimiter = b','
    doublequote = True
    escapechar = None
    lineterminator = b'\r\n'
    quotechar = b'"'
    quoting = csv.QUOTE_MINIMAL
    skipinitialspace = False
    strict = True
def parseargs():
pa = argparse.ArgumentParser(description=
'Exports multiple CSV files from an Excel *.xlsx Workbook')
pa.add_argument('-f', metavar='EXCELFILE',
help='The Excel file to export. ' +
'If omitted, a graphical file chooser will be used.')
pa.add_argument('-o', metavar='OUTPUTDIRECTORY',
help='The output directory. Default is the current ' +
'directory if EXCELFILE was given, otherwise a ' +
'file chooser will be used as well.')
args = pa.parse_args(sys.argv[1:])
return vars(args)
def _stringify(dat):
if not isinstance(dat, basestring):
return str(dat).encode('utf-8')
else:
return dat.encode('utf-8')
def _transmap(dat):
transmap = {
# empty cells are going to be empty strings
None: '',
# workaround for bug in openpyxl
# https://bitbucket.org/openpyxl/openpyxl/issues/674/
dt.datetime(1899, 12, 30, 0, 0): dt.time(0, 0),
dt.datetime(1899, 12, 31, 0, 0): dt.datetime(1900, 1, 1, 0, 0),
}
return transmap[dat] if dat in transmap else dat
def _datefix(dat):
    # if the value is a datetime.datetime whose time part is 0:0:0,
    # convert it to datetime.date (assume the xlsx cell type is "Date").
if (type(dat) == dt.datetime and
(dat.hour, dat.minute, dat.second) == (0, 0, 0)):
dat = dat.date()
return dat
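# Illustrative examples (not part of the original module):
#
#   >>> _datefix(dt.datetime(2017, 5, 1, 0, 0))    # midnight -> treat as a Date cell
#   datetime.date(2017, 5, 1)
#   >>> _datefix(dt.datetime(2017, 5, 1, 13, 30))  # a real timestamp is left alone
#   datetime.datetime(2017, 5, 1, 13, 30)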
def transform(l):
l = [_transmap(f) for f in l]
l = [_datefix(f) for f in l]
l = [_stringify(f) for f in l]
return l
def write_csv(data, outfile):
with open(outfile, 'wb') as fout:
writer = csv.writer(fout, dialect='RFC4180')
writer.writerows(data)
def main():
csv.register_dialect(u'RFC4180', RFC4180)
home = os.path.expanduser('~')
xlsxfile = parseargs()['f']
out_dir = parseargs()['o']
if xlsxfile is None:
root = Tk()
root.withdraw()
f = tkFileDialog.askopenfile(title='Choose file to convert',
filetypes=[('xlsx', '*.xlsx')],
initialdir=home)
if f:
xlsxfile = f.name
f.close()
else:
sys.exit()
if out_dir is None:
out_dir = tkFileDialog.askdirectory(title='Choose output directory',
initialdir=home)
if not out_dir:
sys.exit()
root.destroy()
if not out_dir:
out_dir = os.getcwd()
out_prefix = os.path.splitext(os.path.basename(xlsxfile))[0]
wb = op.load_workbook(xlsxfile, data_only=True)
for sn in wb.sheetnames:
outfile = os.path.join(out_dir, out_prefix + '_' +
re.sub(r'\s+', '_', sn) + '.csv')
data = []
sheet = wb.get_sheet_by_name(sn)
for l in sheet.values:
data.append(transform(l))
write_csv(data, outfile)
if __name__ == '__main__':
main()
|
agpl-3.0
| 8,526,891,086,674,498,000 | 30.347826 | 82 | 0.561165 | false |
dtnaylor/web-profiler
|
webloader/docs/source/conf.py
|
1
|
9104
|
# -*- coding: utf-8 -*-
#
# Web Profiler documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 8 17:09:00 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../..'))
# Mock out modules that ReadTheDocs doesn't have
class Mock(object):
__all__ = []
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
MOCK_MODULES = ['numpy', 'selenium', 'requests',
'selenium.common',
'selenium.common.exceptions',
'selenium.webdriver',
'selenium.webdriver.support',
'selenium.webdriver.support.ui',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['sphinx_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Web Profiler'
copyright = u'2014, David Naylor'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['sphinx_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'WebProfilerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'WebProfiler.tex', u'Web Profiler Documentation',
u'David Naylor', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'webprofiler', u'Web Profiler Documentation',
[u'David Naylor'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'WebProfiler', u'Web Profiler Documentation',
u'David Naylor', 'WebProfiler', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mit
| -4,861,983,116,973,850,000 | 30.178082 | 79 | 0.692443 | false |
nedbat/unittest-mixins
|
setup.py
|
1
|
1288
|
#!/usr/bin/env python
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/unittest-mixins/blob/master/NOTICE.txt
from setuptools import setup
classifiers = """\
Environment :: Console
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Operating System :: OS Independent
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Topic :: Software Development :: Quality Assurance
Topic :: Software Development :: Testing
Development Status :: 5 - Production/Stable
"""
setup(
name='unittest-mixins',
version='1.6',
description='Helpful mixins for unittest classes',
author='Ned Batchelder',
author_email='ned@nedbatchelder.com',
url='https://github.com/nedbat/unittest-mixins',
packages=['unittest_mixins'],
install_requires=[
'six >= 1.4.0',
],
license='Apache 2.0',
classifiers=classifiers.splitlines(),
)
|
apache-2.0
| 8,195,750,661,228,339,000 | 32.025641 | 79 | 0.714286 | false |
rboman/progs
|
bin/powergrep.py
|
1
|
3561
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# try to replace "old_div(a,b)" with "a/b"
# with a and b being complex expressions involving brackets, etc.
# processes all the python files recursively from the current folder
#
# you must use the script several times
# (it processes 1 "old_div" per line at a time)
# Does not process old_divs spanning several lines such as
# old_div(a,
# b)
import sys, os
import fnmatch, re
import subprocess
def all_files(root,
patterns='*',
skips='*.svn*;*.git*;*build*',
single_level=False,
yield_folders=False):
#self.checkPath(root)
patterns = patterns.split(';')
skips = skips.split(';')
for path, subdirs, files in os.walk(root):
# print('processing folder', path)
if yield_folders:
files.extend(subdirs)
files.sort()
for name in files:
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
fullname = os.path.join(path, name)
ok = True
for skip in skips:
if fnmatch.fnmatch(fullname, skip):
ok = False
if ok:
yield fullname
break
if single_level:
break
def paren_matcher (n):
# poor man's matched paren scanning, gives up
# after n+1 levels. Matches any string with balanced
# parens inside; add the outer parens yourself if needed.
# Nongreedy.
# https://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex
return r"[^()]*?(?:\("*n+r"[^()]*?"+r"\)[^()]*?)*?"*n
if __name__ == '__main__':
# the regexp
    reg = re.compile(r"old_div\s*\((" + paren_matcher(5) + r'),(' + paren_matcher(5) + r')\)')
# loop recursively on all files with a given extension
for f in all_files(os.getcwd(), patterns='*.py;*.pyw'):
#print('f=',f)
        # read the whole file
file = open(f, mode='r', encoding='utf-8')
try:
alllines = file.readlines()
except:
print(f'\nERROR: file {f} contains non-unicode characters!\n')
raise
file.close()
newlines = []
modified = False
for l in alllines:
m = reg.search(l)
if m:
print(f"match found in {f}")
g = m.groups()
if len(g)!=2:
                    raise Exception(f"=> ERROR: {len(g)} arguments found instead of 2!")
else:
#print(f'\t{m.group(0)} => {g[0].strip()}/{g[1].strip()}')
newl = l.replace(m.group(0), f'{g[0].strip()}/{g[1].strip()}')
print("\told string:", l.rstrip())
print("\tnew string:", newl.rstrip())
newlines.append(newl)
modified = True
else:
newlines.append(l)
if modified:
file = open(f, mode='w', encoding='utf-8')
for l in newlines:
file.write(l)
file.close()
"""
with open(f, "rb") as source:
m = reg.search(s1)
# print(m)
if m:
g = m.groups()
if len(g)!=2:
print ("error:")
print (g)
else:
print(f'{m.group(0)} => {g[0].strip()}/{g[1].strip()}')
print("old string:", s1)
print("new string:", s1.replace(m.group(0), f'{g[0].strip()}/{g[1].strip()}'))
"""
|
apache-2.0
| 5,097,906,595,243,001,000 | 30.236842 | 99 | 0.495366 | false |
soupmonkey/pushcoin
|
PoS/payment-processor/settings.py
|
1
|
1339
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Slawomir Lisznianski <sl@minta.com>
#
# GNU General Public Licence (GPL)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
#
# PushCoin Error Codes from
# https://pushcoin.com/Pub/SDK/ErrorCodes
#
ERR_ACCOUNT_NOT_FOUND=201
ERR_INVALID_CURRENCY=202
ERR_PAYMENT_SIGNATURE_CHECK_FAILED=203
ERR_CRYPTO_FAILURE=204
ERR_INVALID_GRATUITY_TYPE=205
ERR_VALUE_OUT_OF_RANGE=206
ERR_INVALID_RECIPIENT=207
ERR_EXPIRED_PTA=208
ERR_DUPLICATE_PTA=209
ERR_INSUFFICIENT_FUNDS=300
MAX_SCALE_VAL = 6
MERCHANT_MAT = '5bf54dd118bc866567061a2be41860f7b5389f7c'
CURRENCY_CODE = 'USD'
PUSHCOIN_SERVER_URL = 'https://api.pushcoin.com:20001/pcos/'
|
gpl-3.0
| -4,831,188,054,566,369,000 | 33.333333 | 79 | 0.765497 | false |
PuzzleboxIO/brainstorms-python
|
setup.py2app.py
|
1
|
3818
|
"""
This is a setup.py script generated by py2applet
Usage:
python2.7 setup.py py2app
"""
from setuptools import setup
APP = ['brainstorms-local.py']
data_files=[ \
(".", \
#("Content/Resources", \
["puzzlebox_brainstorms_configuration.ini"]),
("images", \
["images/puzzlebox.ico", \
"images/puzzlebox.icns", \
"images/puzzlebox_logo.png", \
"images/1-upper_left-orange.png", \
"images/1-upper_left-white.png", \
"images/2-up-orange.png", \
"images/2-up-white.png", \
"images/3-upper_right-orange.png", \
"images/3-upper_right-white.png", \
"images/7-lower_left-orange.png", \
"images/7-lower_left-white.png", \
"images/8-down-orange.png", \
"images/8-down-white.png", \
"images/9-lower_right-orange.png", \
"images/9-lower_right-white.png", \
"images/brainstorms-aileron_left.svg", \
"images/brainstorms-aileron_right.svg", \
"images/brainstorms-elevator_forward.svg", \
"images/brainstorms-elevator_reverse.svg", \
"images/brainstorms-fly_forward.svg", \
"images/brainstorms-hover.svg", \
"images/brainstorms-land_arrow.svg", \
"images/brainstorms-rudder-left.svg", \
"images/brainstorms-rudder-right.svg", \
"images/brainstorms_stop.svg", \
"images/brainstorms_wheelchair_forward.svg", \
"images/brainstorms_wheelchair_left.svg", \
"images/brainstorms_wheelchair_reverse.svg", \
"images/brainstorms_wheelchair_right.svg", \
"images/braintorms-throttle_up.svg", \
"images/puzzlebox_helicopter.svg", \
]), \
#("qt_menu.nib", \
#["/opt/local/lib/Resources/qt_menu.nib/classes.nib", \
#"/opt/local/lib/Resources/qt_menu.nib/info.nib", \
#"/opt/local/lib/Resources/qt_menu.nib/keyedobjects.nib", \
#]), \
]
data_files=[]
OPTIONS = { \
#'argv_emulation': True, \
'argv_emulation': False, \
'iconfile': 'images/puzzlebox.icns', \
'strip': True, \
# Semi-standalone is an option you can enable with py2app that makes
# your code reliant on the version of Python that is installed with the OS.
# You also need to enable site-packages, as well (which apparently encourages
# py2app to create the links to Python necessary for getting the bundle up
# and running, although it's only supposed to tell it to include the
# system and user site-packages in the system path)
# http://beckism.com/2009/03/pyobjc_tips/
#'semi_standalone': True, \
#'site_packages': True, \
'includes': [ \
'PySide.QtSvg', \
], \
'excludes': ['PyQt4', 'sip'], \
'frameworks': [ \
"/opt/local/share/qt4/plugins/imageformats/libqjpeg.dylib", \
"/opt/local/share/qt4/plugins/imageformats/libqgif.dylib", \
"/opt/local/share/qt4/plugins/imageformats/libqico.dylib", \
"/opt/local/share/qt4/plugins/imageformats/libqmng.dylib", \
"/opt/local/share/qt4/plugins/imageformats/libqsvg.dylib", \
"/opt/local/share/qt4/plugins/imageformats/libqtiff.dylib", \
], \
"resources": [ \
"puzzlebox_brainstorms_configuration.ini", \
#"images/puzzlebox.ico", \
#"/opt/local/lib/Resources/qt_menu.nib/classes.nib", \
#"/opt/local/lib/Resources/qt_menu.nib/info.nib", \
#"/opt/local/lib/Resources/qt_menu.nib/keyedobjects.nib", \
], \
}
setup(
name='Puzzlebox Brainstorms',
version='0.8.0',
description='Puzzlebox Brainstorms provides Brain-Computer Interface (BCI) controls for robots and devices',
author='Steve Castellotti',
author_email='sc@puzzlebox.info',
url='http://brainstorms.puzzlebox.info',
classifiers=[ \
'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'Programming Language :: Python',
'Operating System :: OS Independent',
'License :: Commercial',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
],
app=APP,
data_files=data_files,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
|
agpl-3.0
| -7,486,098,163,901,852,000 | 30.04065 | 109 | 0.680723 | false |
nmih/ssbio
|
ssbio/databases/pdb.py
|
1
|
34959
|
"""
PDBProp
=======
"""
import gzip
import json
import logging
import os.path as op
import mmtf
import os
from cobra.core import DictList
import pandas as pd
import requests
import deprecation
from Bio.PDB import PDBList
from lxml import etree
from six.moves.urllib_error import URLError
from six.moves.urllib.request import urlopen, urlretrieve
import ssbio.databases.pisa as pisa
import ssbio.utils
from ssbio.protein.structure.structprop import StructProp
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
log = logging.getLogger(__name__)
class PDBProp(StructProp):
"""Store information about a protein structure from the Protein Data Bank.
Extends the :class:`~ssbio.protein.structure.structprop.StructProp` class to allow initialization of the structure
by its PDB ID, and then enabling downloads of the structure file as well as parsing its metadata.
Args:
ident (str):
description (str):
chains (str):
mapped_chains (str):
structure_path (str):
file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
"""
def __init__(self, ident, description=None, chains=None, mapped_chains=None, structure_path=None, file_type=None):
StructProp.__init__(self, ident, description=description, chains=chains, mapped_chains=mapped_chains,
is_experimental=True, structure_path=structure_path, file_type=file_type)
self.experimental_method = None
self.resolution = None
self.date = None
self.taxonomy_name = None
self.biological_assemblies = DictList()
"""DictList: A list for storing Bioassembly objects related to this PDB ID"""
def download_structure_file(self, outdir, file_type=None, load_header_metadata=True, force_rerun=False):
"""Download a structure file from the PDB, specifying an output directory and a file type. Optionally download
the mmCIF header file and parse data from it to store within this object.
Args:
outdir (str): Path to output directory
file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
load_header_metadata (bool): If header metadata should be loaded into this object, fastest with mmtf files
force_rerun (bool): If structure file should be downloaded even if it already exists
"""
ssbio.utils.double_check_attribute(object=self, setter=file_type, backup_attribute='file_type',
custom_error_text='Please set file type to be downloaded from the PDB: '
'pdb, mmCif, xml, or mmtf')
# XTODO: check if outfile exists using ssbio.utils.force_rerun, pdblist seems to take long if it exists
# I know why - it's because we're renaming the ent to pdb. need to have mapping from file type to final extension
# Then check if file exists, if not then download again
p = PDBList()
with ssbio.utils.suppress_stdout():
structure_file = p.retrieve_pdb_file(pdb_code=self.id, pdir=outdir, file_format=file_type, overwrite=force_rerun)
if not op.exists(structure_file):
log.debug('{}: {} file not available'.format(self.id, file_type))
raise URLError('{}.{}: file not available to download'.format(self.id, file_type))
else:
log.debug('{}: {} file saved'.format(self.id, file_type))
# Rename .ent files to .pdb
if file_type == 'pdb':
new_name = structure_file.replace('pdb', '').replace('ent', 'pdb')
os.rename(structure_file, new_name)
structure_file = new_name
self.load_structure_path(structure_file, file_type)
if load_header_metadata and file_type == 'mmtf':
self.update(parse_mmtf_header(structure_file))
if load_header_metadata and file_type != 'mmtf':
self.update(parse_mmcif_header(download_mmcif_header(pdb_id=self.id, outdir=outdir, force_rerun=force_rerun)))
def get_pisa_complex_predictions(self, outdir, existing_pisa_multimer_xml=None):
if not existing_pisa_multimer_xml:
pisa_xmls = pisa.download_pisa_multimers_xml(pdb_ids=self.id, outdir=outdir,
save_single_xml_files=True)
else:
pisa_xmls = {}
pisa_xmls[self.id] = existing_pisa_multimer_xml
pisa_dict = pisa.parse_pisa_multimers_xml(pisa_xmls[self.id], download_structures=True,
outdir=outdir)
def __json_encode__(self):
# TODO: investigate why saving with # does not work!
to_return = {}
for x in self.__dict__.keys():
            if x == 'pdb_title' or x == 'description':
                sanitized = ssbio.utils.force_string(getattr(self, x)).replace('#', '-')
                to_return.update({x: sanitized})
else:
to_return.update({x: getattr(self, x)})
return to_return
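# Illustrative sketch (not part of the original module): constructing a PDBProp
# and downloading its structure file plus header metadata. The PDB ID and the
# output directory below are placeholders, not values used by this module.
def _example_pdbprop_download(outdir='/tmp'):
    prop = PDBProp('1kf6', description='example entry')
    prop.download_structure_file(outdir=outdir, file_type='mmtf',
                                 load_header_metadata=True)
    return prop.resolution, prop.experimental_method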
def parse_mmtf_header(infile):
"""Parse an MMTF file and return basic header-like information.
Args:
infile (str): Path to MMTF file
Returns:
dict: Dictionary of parsed header
Todo:
- Can this be sped up by not parsing the 3D coordinate info somehow?
- OR just store the sequences when this happens since it is already being parsed.
"""
infodict = {}
mmtf_decoder = mmtf.parse(infile)
infodict['date'] = mmtf_decoder.deposition_date
infodict['release_date'] = mmtf_decoder.release_date
try:
infodict['experimental_method'] = [x.decode() for x in mmtf_decoder.experimental_methods]
except AttributeError:
infodict['experimental_method'] = [x for x in mmtf_decoder.experimental_methods]
infodict['resolution'] = mmtf_decoder.resolution
infodict['description'] = mmtf_decoder.title
group_name_exclude = ['HOH']
chem_comp_type_exclude = ['l-peptide linking', 'peptide linking']
chemicals = list(set([mmtf_decoder.group_list[idx]['groupName'] for idx in mmtf_decoder.group_type_list if mmtf_decoder.group_list[idx]['chemCompType'].lower() not in chem_comp_type_exclude and mmtf_decoder.group_list[idx]['groupName'] not in group_name_exclude]))
infodict['chemicals'] = chemicals
return infodict
def download_mmcif_header(pdb_id, outdir='', force_rerun=False):
"""Download a mmCIF header file from the RCSB PDB by ID.
Args:
pdb_id: PDB ID
outdir: Optional output directory, default is current working directory
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
"""
# TODO: keep an eye on https://github.com/biopython/biopython/pull/943 Biopython PR#493 for functionality of this
# method in biopython. extra file types have not been added to biopython download yet
pdb_id = pdb_id.lower()
file_type = 'cif'
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)
urlretrieve(download_link, outfile)
log.debug('{}: saved header file'.format(outfile))
else:
log.debug('{}: header file already saved'.format(outfile))
return outfile
def parse_mmcif_header(infile):
"""Parse a couple important fields from the mmCIF file format with some manual curation of ligands.
If you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython.
Args:
infile: Path to mmCIF file
Returns:
dict: Dictionary of parsed header
"""
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
newdict = {}
try:
mmdict = MMCIF2Dict(infile)
except ValueError as e:
log.exception(e)
return newdict
chemical_ids_exclude = ['HOH']
chemical_types_exclude = ['l-peptide linking','peptide linking']
if '_struct.title' in mmdict:
newdict['pdb_title'] = mmdict['_struct.title']
else:
log.debug('{}: No title field'.format(infile))
if '_struct.pdbx_descriptor' in mmdict:
newdict['description'] = mmdict['_struct.pdbx_descriptor']
else:
log.debug('{}: no description field'.format(infile))
if '_pdbx_database_status.recvd_initial_deposition_date' in mmdict:
newdict['date'] = mmdict['_pdbx_database_status.recvd_initial_deposition_date']
elif '_database_PDB_rev.date' in mmdict:
newdict['date'] = mmdict['_database_PDB_rev.date']
else:
log.debug('{}: no date field'.format(infile))
if '_exptl.method' in mmdict:
newdict['experimental_method'] = mmdict['_exptl.method']
else:
log.debug('{}: no experimental method field'.format(infile))
# TODO: refactor how to get resolutions based on experimental method
if '_refine.ls_d_res_high' in mmdict:
try:
if isinstance(mmdict['_refine.ls_d_res_high'], list):
newdict['resolution'] = [float(x) for x in mmdict['_refine.ls_d_res_high']]
else:
newdict['resolution'] = float(mmdict['_refine.ls_d_res_high'])
except:
try:
newdict['resolution'] = float(mmdict['_em_3d_reconstruction.resolution'])
except:
log.debug('{}: no resolution field'.format(infile))
else:
log.debug('{}: no resolution field'.format(infile))
if '_chem_comp.id' in mmdict:
chemicals_filtered = ssbio.utils.filter_list_by_indices(mmdict['_chem_comp.id'],
ssbio.utils.not_find(mmdict['_chem_comp.type'],
chemical_types_exclude,
case_sensitive=False))
        chemicals_filtered = ssbio.utils.filter_list(chemicals_filtered, chemical_ids_exclude, case_sensitive=True)
        newdict['chemicals'] = chemicals_filtered
else:
log.debug('{}: no chemical composition field'.format(infile))
if '_entity_src_gen.pdbx_gene_src_scientific_name' in mmdict:
newdict['taxonomy_name'] = mmdict['_entity_src_gen.pdbx_gene_src_scientific_name']
else:
log.debug('{}: no organism field'.format(infile))
return newdict
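# Illustrative sketch (not part of the original module): downloading a header
# file and reading a couple of the curated fields out of it. The PDB ID and
# output directory are placeholders.
def _example_mmcif_header(outdir='/tmp'):
    header_file = download_mmcif_header('1kf6', outdir=outdir)
    info = parse_mmcif_header(header_file)
    return info.get('resolution'), info.get('chemicals')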
def download_sifts_xml(pdb_id, outdir='', force_rerun=False):
"""Download the SIFTS file for a PDB ID.
Args:
pdb_id (str): PDB ID
outdir (str): Output directory, current working directory if not specified.
force_rerun (bool): If the file should be downloaded again even if it exists
Returns:
str: Path to downloaded file
"""
baseURL = 'ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/'
filename = '{}.xml.gz'.format(pdb_id.lower())
outfile = op.join(outdir, filename.split('.')[0] + '.sifts.xml')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
response = urlopen(baseURL + filename)
with open(outfile, 'wb') as f:
f.write(gzip.decompress(response.read()))
return outfile
def map_uniprot_resnum_to_pdb(uniprot_resnum, chain_id, sifts_file):
"""Map a UniProt residue number to its corresponding PDB residue number.
This function requires that the SIFTS file be downloaded,
and also a chain ID (as different chains may have different mappings).
Args:
uniprot_resnum (int): integer of the residue number you'd like to map
chain_id (str): string of the PDB chain to map to
sifts_file (str): Path to the SIFTS XML file
Returns:
(tuple): tuple containing:
mapped_resnum (int): Mapped residue number
is_observed (bool): Indicates if the 3D structure actually shows the residue
"""
# Load the xml with lxml
parser = etree.XMLParser(ns_clean=True)
tree = etree.parse(sifts_file, parser)
root = tree.getroot()
my_pdb_resnum = None
# TODO: "Engineered_Mutation is also a possible annotation, need to figure out what to do with that
my_pdb_annotation = False
# Find the right chain (entities in the xml doc)
ent = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}entity'
for chain in root.findall(ent):
# TODO: IMPORTANT - entityId is not the chain ID!!! it is just in alphabetical order!
if chain.attrib['entityId'] == chain_id:
# Find the "crossRefDb" tag that has the attributes dbSource="UniProt" and dbResNum="your_resnum_here"
# Then match it to the crossRefDb dbResNum that has the attribute dbSource="PDBresnum"
# Check if uniprot + resnum even exists in the sifts file (it won't if the pdb doesn't contain the residue)
ures = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="UniProt"][@dbResNum="%s"]' % uniprot_resnum
my_uniprot_residue = chain.findall(ures)
if len(my_uniprot_residue) == 1:
# Get crossRefDb dbSource="PDB"
parent = my_uniprot_residue[0].getparent()
pres = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="PDB"]'
my_pdb_residue = parent.findall(pres)
my_pdb_resnum = int(my_pdb_residue[0].attrib['dbResNum'])
# Get <residueDetail dbSource="PDBe" property="Annotation">
# Will be Not_Observed if it is not seen in the PDB
anno = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}residueDetail[@dbSource="PDBe"][@property="Annotation"]'
my_pdb_annotation = parent.findall(anno)
if len(my_pdb_annotation) == 1:
my_pdb_annotation = my_pdb_annotation[0].text
if my_pdb_annotation == 'Not_Observed':
my_pdb_annotation = False
else:
my_pdb_annotation = True
else:
return None, False
return my_pdb_resnum, my_pdb_annotation
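# Illustrative sketch (not part of the original module): the intended two-step
# workflow of downloading the SIFTS file and then mapping a UniProt residue
# number to a PDB residue number. The PDB ID, chain and residue number are
# placeholders.
def _example_uniprot_to_pdb_resnum(outdir='/tmp'):
    sifts_xml = download_sifts_xml('1kf6', outdir=outdir)
    pdb_resnum, observed = map_uniprot_resnum_to_pdb(42, 'A', sifts_xml)
    return pdb_resnum, observed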
def best_structures(uniprot_id, outname=None, outdir=None, seq_ident_cutoff=0.0, force_rerun=False):
"""Use the PDBe REST service to query for the best PDB structures for a UniProt ID.
More information found here: https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
Link used to retrieve results: https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/:accession
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and, if the same, resolution.
Here is the ranking algorithm described by the PDB paper:
https://nar.oxfordjournals.org/content/44/D1/D385.full
"Finally, a single quality indicator is also calculated for each entry by taking the harmonic average
of all the percentile scores representing model and model-data-fit quality measures and then subtracting
10 times the numerical value of the resolution (in Angstrom) of the entry to ensure that resolution plays
a role in characterising the quality of a structure. This single empirical 'quality measure' value is used
by the PDBe query system to sort results and identify the 'best' structure in a given context. At present,
entries determined by methods other than X-ray crystallography do not have similar data quality information
available and are not considered as 'best structures'."
Args:
uniprot_id (str): UniProt Accession ID
outname (str): Basename of the output file of JSON results
outdir (str): Path to output directory of JSON results
seq_ident_cutoff (float): Cutoff results based on percent coverage (in decimal form)
force_rerun (bool): Obtain best structures mapping ignoring previously downloaded results
Returns:
list: Rank-ordered list of dictionaries representing chain-specific PDB entries. Keys are:
* pdb_id: the PDB ID which maps to the UniProt ID
* chain_id: the specific chain of the PDB which maps to the UniProt ID
* coverage: the percent coverage of the entire UniProt sequence
* resolution: the resolution of the structure
* start: the structure residue number which maps to the start of the mapped sequence
* end: the structure residue number which maps to the end of the mapped sequence
* unp_start: the sequence residue number which maps to the structure start
* unp_end: the sequence residue number which maps to the structure end
* experimental_method: type of experiment used to determine structure
* tax_id: taxonomic ID of the protein's original organism
"""
outfile = ''
if not outdir:
outdir = ''
# if output dir is specified but not outname, use the uniprot
if not outname and outdir:
outname = uniprot_id
if outname:
outname = op.join(outdir, outname)
outfile = '{}.json'.format(outname)
# Load a possibly existing json file
if not ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
with open(outfile, 'r') as f:
raw_data = json.load(f)
log.debug('{}: loaded existing json file'.format(uniprot_id))
# Otherwise run the web request
else:
# TODO: add a checker for a cached file of uniprot -> PDBs - can be generated within gempro pipeline and stored
response = requests.get('https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/{}'.format(uniprot_id),
data={'key': 'value'})
if response.status_code == 404:
log.debug('{}: 404 returned, probably no structures available.'.format(uniprot_id))
raw_data = {uniprot_id: {}}
else:
log.debug('{}: Obtained best structures'.format(uniprot_id))
raw_data = response.json()
# Write the json file if specified
if outfile:
with open(outfile, 'w') as f:
json.dump(raw_data, f)
log.debug('{}: Saved json file of best structures'.format(uniprot_id))
data = dict(raw_data)[uniprot_id]
# Filter for sequence identity percentage
if seq_ident_cutoff != 0:
for result in data:
if result['coverage'] < seq_ident_cutoff:
data.remove(result)
return data
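# Illustrative sketch (not part of the original module): keeping only the
# top-ranked PDB chain for a UniProt accession. The accession and cutoff are
# arbitrary example values.
def _example_best_structure(uniprot_id='P0ABP8'):
    hits = best_structures(uniprot_id, seq_ident_cutoff=0.5)
    if not hits:
        return None
    top = hits[0]  # results are rank-ordered by coverage, then resolution
    return top['pdb_id'], top['chain_id'], top['coverage']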
def blast_pdb(seq, outfile='', outdir='', evalue=0.0001, seq_ident_cutoff=0.0, link=False, force_rerun=False):
"""Returns a list of BLAST hits of a sequence to available structures in the PDB.
Args:
seq (str): Your sequence, in string format
outfile (str): Name of output file
outdir (str, optional): Path to output directory. Default is the current directory.
evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal, 0.0001 is stringent (default).
seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form)
link (bool, optional): Set to True if a link to the HTML results should be displayed
force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False
Returns:
list: Rank ordered list of BLAST hits in dictionaries.
"""
if len(seq) < 12:
raise ValueError('Sequence must be at least 12 residues long.')
if link:
page = 'PDB results page: http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence={}&eCutOff={}&maskLowComplexity=yes&matrix=BLOSUM62&outputFormat=HTML'.format(seq, evalue)
print(page)
parser = etree.XMLParser(ns_clean=True)
outfile = op.join(outdir, outfile)
if ssbio.utils.force_rerun(force_rerun, outfile):
        # Query the PDB BLAST REST service when a rerun is forced or no saved results exist
page = 'http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence={}&eCutOff={}&maskLowComplexity=yes&matrix=BLOSUM62&outputFormat=XML'.format(
seq, evalue)
req = requests.get(page)
if req.status_code == 200:
response = req.text
# Save the XML file
if outfile:
with open(outfile, 'w') as f:
f.write(response)
# Parse the XML string
tree = etree.ElementTree(etree.fromstring(response, parser))
log.debug('Loaded BLAST results from REST server')
else:
log.error('BLAST request timed out')
return []
else:
tree = etree.parse(outfile, parser)
log.debug('{}: Loaded existing BLAST XML results'.format(outfile))
# Get length of original sequence to calculate percentages
len_orig = float(len(seq))
root = tree.getroot()
hit_list = []
for hit in root.findall('BlastOutput_iterations/Iteration/Iteration_hits/Hit'):
info = {}
hitdef = hit.find('Hit_def')
if hitdef is not None:
info['hit_pdb'] = hitdef.text.split('|')[0].split(':')[0].lower()
info['hit_pdb_chains'] = hitdef.text.split('|')[0].split(':')[2].split(',')
# One PDB can align to different parts of the sequence
# Will just choose the top hit for this single PDB
hsp = hit.findall('Hit_hsps/Hsp')[0]
# Number of identical residues
hspi = hsp.find('Hsp_identity')
if hspi is not None:
info['hit_num_ident'] = int(hspi.text)
info['hit_percent_ident'] = int(hspi.text)/len_orig
if int(hspi.text)/len_orig < seq_ident_cutoff:
log.debug('{}: does not meet sequence identity cutoff'.format(hitdef.text.split('|')[0].split(':')[0]))
continue
# Number of similar residues (positive hits)
hspp = hsp.find('Hsp_positive')
if hspp is not None:
info['hit_num_similar'] = int(hspp.text)
info['hit_percent_similar'] = int(hspp.text) / len_orig
# Total number of gaps (unable to align in either query or subject)
hspg = hsp.find('Hsp_gaps')
if hspg is not None:
info['hit_num_gaps'] = int(hspg.text)
info['hit_percent_gaps'] = int(hspg.text) / len_orig
# E-value of BLAST
hspe = hsp.find('Hsp_evalue')
if hspe is not None:
info['hit_evalue'] = float(hspe.text)
# Score of BLAST
hsps = hsp.find('Hsp_score')
if hsps is not None:
info['hit_score'] = float(hsps.text)
hit_list.append(info)
log.debug("{}: Number of BLAST hits".format(len(hit_list)))
return hit_list
def blast_pdb_df(blast_results):
"""Make a dataframe of BLAST results"""
cols = ['hit_pdb', 'hit_pdb_chains', 'hit_evalue', 'hit_score', 'hit_num_ident', 'hit_percent_ident',
'hit_num_similar', 'hit_percent_similar', 'hit_num_gaps', 'hit_percent_gaps']
return pd.DataFrame.from_records(blast_results, columns=cols)
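# Illustrative sketch (not part of the original module): BLASTing a short
# sequence against the PDB and tabulating the hits. The sequence is a made-up
# placeholder that satisfies the 12-residue minimum.
def _example_blast_to_df(seq='MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ'):
    hits = blast_pdb(seq, evalue=0.001, seq_ident_cutoff=0.5)
    return blast_pdb_df(hits)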
def _property_table():
"""Download the PDB -> resolution table directly from the RCSB PDB REST service.
See the other fields that you can get here: http://www.rcsb.org/pdb/results/reportField.do
Returns:
        Pandas DataFrame: table of structureId as the index; resolution, experimentalTechnique, and releaseDate as the columns
"""
url = 'http://www.rcsb.org/pdb/rest/customReport.csv?pdbids=*&customReportColumns=structureId,resolution,experimentalTechnique,releaseDate&service=wsfile&format=csv'
r = requests.get(url)
p = pd.read_csv(StringIO(r.text)).set_index('structureId')
return p
def get_resolution(pdb_id):
"""Quick way to get the resolution of a PDB ID using the table of results from the REST service
Returns infinity if the resolution is not available.
Returns:
float: resolution of a PDB ID in Angstroms
TODO:
- Unit test
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table')
else:
resolution = _property_table().ix[pdb_id, 'resolution']
if pd.isnull(resolution):
            log.debug('{}: no resolution available, probably not an X-ray crystal structure'.format(pdb_id))
resolution = float('inf')
return resolution
def get_release_date(pdb_id):
"""Quick way to get the release date of a PDB ID using the table of results from the REST service
Returns None if the release date is not available.
Returns:
        str: Release date of a PDB ID
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table')
else:
release_date = _property_table().ix[pdb_id, 'releaseDate']
if pd.isnull(release_date):
            log.debug('{}: no release date available'.format(pdb_id))
release_date = None
return release_date
def get_num_bioassemblies(pdb_id, cache=False, outdir=None, force_rerun=False):
"""Check if there are bioassemblies using the PDB REST API, and if there are, get the number of bioassemblies
available.
See: https://www.rcsb.org/pages/webservices/rest, section 'List biological assemblies'
Not all PDB entries have biological assemblies available and some have multiple. Details that are necessary to
recreate a biological assembly from the asymmetric unit can be accessed from the following requests.
- Number of biological assemblies associated with a PDB entry
- Access the transformation information needed to generate a biological assembly (nr=0 will return information
for the asymmetric unit, nr=1 will return information for the first assembly, etc.)
A query of https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId=1hv4 returns this::
<nrBioAssemblies structureId="1HV4" hasAssemblies="true" count="2"/>
Args:
pdb_id (str): PDB ID
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again
"""
parser = etree.XMLParser(ns_clean=True)
if not outdir:
outdir = os.getcwd()
outfile = op.join(outdir, '{}_nrbiomols.xml'.format(pdb_id))
if ssbio.utils.force_rerun(force_rerun, outfile):
page = 'https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId={}'.format(pdb_id)
req = requests.get(page)
if req.status_code == 200:
response = req.text
# Save the XML file
if cache:
with open(outfile, 'w') as f:
f.write(response)
# Parse the XML string
tree = etree.ElementTree(etree.fromstring(response, parser))
log.debug('Loaded bioassembly information from REST server')
else:
log.error('Request timed out')
req.raise_for_status()
else:
tree = etree.parse(outfile, parser)
log.debug('{}: Loaded existing XML results'.format(outfile))
r = tree.getroot()
has_biomols = r.get('hasAssemblies')
if has_biomols == 'true':
has_biomols = True
else:
has_biomols = False
if has_biomols:
num_biomols = r.get('count')
else:
num_biomols = 0
num_biomols = int(num_biomols)
return num_biomols
def get_bioassembly_info(pdb_id, biomol_num, cache=False, outdir=None, force_rerun=False):
"""Get metadata about a bioassembly from the RCSB PDB's REST API.
See: https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId=1hv4&nr=1
The API returns an XML file containing the information on a biological assembly that looks like this::
<bioassembly structureId="1HV4" assemblyNr="1" method="PISA" desc="author_and_software_defined_assembly">
<transformations operator="1" chainIds="A,B,C,D">
<transformation index="1">
<matrix m11="1.00000000" m12="0.00000000" m13="0.00000000" m21="0.00000000" m22="1.00000000" m23="0.00000000" m31="0.00000000" m32="0.00000000" m33="1.00000000"/>
<shift v1="0.00000000" v2="0.00000000" v3="0.00000000"/>
</transformation>
</transformations>
</bioassembly>
Args:
pdb_id (str): PDB ID
biomol_num (int): Biological assembly number you are interested in
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again
"""
parser = etree.XMLParser(ns_clean=True)
#
# if not outdir:
# outdir = os.getcwd()
# outfile = op.join(outdir, '{}.xml'.format(self.id))
#
# if ssbio.utils.force_rerun(force_rerun, outfile):
# page = 'https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId={}&nr={}'.format(
# self.original_pdb_id, biomol_num)
# req = requests.get(page)
#
# if req.status_code == 200:
# response = req.text
#
# # Save the XML file
# if cache:
# with open(outfile, 'w') as f:
# f.write(response)
#
# # Parse the XML string
# r = xmltodict.parse(response)
# log.debug('Loaded bioassembly information from REST server')
# else:
# log.error('Request timed out')
# req.raise_for_status()
# else:
# with open(outfile, 'r') as f:
# r = xmltodict.parse(f.read())
# log.debug('{}: Loaded existing XML results'.format(outfile))
#
# self.biomol_to_chain_dict[biomol_num] = {'chains': r['bioassembly']['transformations']['@chainIds'],
# 'multiplier': len(r['bioassembly']['transformations']['transformation'])}
# # TODO: figure out how to store matrices etc.
#
# log.info('{}_{}: ')
def download_biomol(pdb_id, biomol_num, outdir, file_type='pdb', force_rerun=False):
import zlib
from six.moves.urllib_error import URLError
from six.moves.urllib.request import urlopen, urlretrieve
import contextlib
ssbio.utils.make_dir(outdir)
server_folder = pdb_id[1:3]
if file_type == 'pdb':
# server = 'ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/divided/{}/'.format(server_folder)
server = 'https://files.rcsb.org/download/'
server_filename = pdb_id + '.pdb%i.gz' % biomol_num
local_filename = pdb_id + '_bio%i.pdb' % biomol_num
outfile = op.join(outdir, local_filename)
elif file_type.lower() == 'mmcif' or file_type.lower() == 'cif':
server = 'ftp://ftp.wwpdb.org/pub/pdb/data/biounit/mmCIF/divided/{}/'.format(server_folder)
server_filename = pdb_id + '-assembly%i.cif.gz' % biomol_num
local_filename = pdb_id + '_bio%i.cif' % biomol_num
outfile = op.join(outdir, local_filename)
else:
raise ValueError('Biological assembly only available in PDB or mmCIF file types.')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
download_link = op.join(server, server_filename)
try:
with contextlib.closing(urlopen(download_link)) as f:
decompressed_data = zlib.decompress(f.read(), 16 + zlib.MAX_WBITS)
with open(op.join(outdir, local_filename), 'wb') as f:
f.write(decompressed_data)
except URLError as e:
print(e)
return None
return outfile
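# Illustrative sketch (not part of the original module): downloading every
# biological assembly reported for an entry. '1hv4' is the example ID used in
# the docstrings above; the output directory is a placeholder.
def _example_download_all_biomols(pdb_id='1hv4', outdir='/tmp'):
    outfiles = []
    for nr in range(1, get_num_bioassemblies(pdb_id) + 1):
        outfiles.append(download_biomol(pdb_id, nr, outdir, file_type='pdb'))
    return outfiles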
########################################################################################################################
########################################################################################################################
# DEPRECATED FUNCTIONS
########################################################################################################################
########################################################################################################################
@deprecation.deprecated(deprecated_in="1.0", removed_in="2.0",
details="Use Biopython's PDBList.retrieve_pdb_file function instead")
def download_structure(pdb_id, file_type, outdir='', only_header=False, force_rerun=False):
"""Download a structure from the RCSB PDB by ID. Specify the file type desired.
Args:
pdb_id: PDB ID
file_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz
outdir: Optional output directory
only_header: If only the header file should be downloaded
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
"""
    # TODO: keep an eye on https://github.com/biopython/biopython/pull/943 Biopython PR#493 for functionality of this
    # method in biopython. extra file types have not been added to biopython download yet
pdb_id = pdb_id.lower()
file_type = file_type.lower()
file_types = ['pdb', 'pdb.gz', 'mmcif', 'cif', 'cif.gz', 'xml.gz', 'mmtf', 'mmtf.gz']
if file_type not in file_types:
raise ValueError('Invalid file type, must be either: pdb, pdb.gz, cif, cif.gz, xml.gz, mmtf, mmtf.gz')
if file_type == 'mmtf':
file_type = 'mmtf.gz'
if file_type.endswith('.gz'):
gzipped = True
else:
gzipped = False
if file_type == 'mmcif':
file_type = 'cif'
if only_header:
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
else:
folder = 'download'
outfile = op.join(outdir, '{}.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
if file_type == 'mmtf.gz' or file_type == 'mmtf':
mmtf_api = '1.0'
download_link = 'http://mmtf.rcsb.org/v{}/full/{}.mmtf.gz'.format(mmtf_api, pdb_id)
else:
download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)
urlretrieve(download_link, outfile)
if gzipped:
outfile = ssbio.utils.gunzip_file(infile=outfile,
outfile=outfile.strip('.gz'),
outdir=outdir,
delete_original=False,
force_rerun_flag=force_rerun)
log.debug('{}: saved structure file'.format(outfile))
else:
if file_type == 'mmtf.gz':
outfile = op.join(outdir, '{}.{}'.format(pdb_id, 'mmtf'))
log.debug('{}: structure file already saved'.format(outfile))
return outfile
|
mit
| 1,119,046,539,905,341,600 | 40.225236 | 268 | 0.615778 | false |
tensorflow/probability
|
tensorflow_probability/python/optimizer/linesearch/hager_zhang.py
|
1
|
30378
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implements the Hager-Zhang inexact line search algorithm.
Line searches are a central component for many optimization algorithms (e.g.
BFGS, conjugate gradient etc). Most of the sophisticated line search methods
aim to find a step length in a given search direction so that the step length
satisfies the
[Wolfe conditions](https://en.wikipedia.org/wiki/Wolfe_conditions).
[Hager-Zhang 2006](https://epubs.siam.org/doi/abs/10.1137/030601880)
algorithm is a refinement of the commonly used
[More-Thuente](https://dl.acm.org/citation.cfm?id=192132) algorithm.
This module implements the Hager-Zhang algorithm.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.optimizer.linesearch.internal import hager_zhang_lib as hzl
__all__ = [
'hager_zhang',
]
def _machine_eps(dtype):
"""Returns the machine epsilon for the supplied dtype."""
dtype = dtype_util.as_numpy_dtype(tf.as_dtype(dtype))
return np.finfo(dtype).eps
HagerZhangLineSearchResult = collections.namedtuple(
'HagerZhangLineSearchResults', [
'converged', # Whether a point satisfying Wolfe/Approx wolfe was found.
'failed', # Whether the line search failed. It can fail if either the
# objective function or the gradient are not finite at
# an evaluation point.
'func_evals', # Number of function evaluations made.
'iterations', # Number of line search iterations made.
'left', # The left end point of the final bracketing interval.
# If converged is True, it is equal to `right`.
# Otherwise, it corresponds to the last interval computed.
'right' # The right end point of the final bracketing interval.
# If converged is True, it is equal to `left`.
# Otherwise, it corresponds to the last interval computed.
])
def hager_zhang(value_and_gradients_function,
initial_step_size=None,
value_at_initial_step=None,
value_at_zero=None,
converged=None,
threshold_use_approximate_wolfe_condition=1e-6,
shrinkage_param=0.66,
expansion_param=5.0,
sufficient_decrease_param=0.1,
curvature_param=0.9,
max_iterations=50,
name=None):
"""The Hager Zhang line search algorithm.
Performs an inexact line search based on the algorithm of
[Hager and Zhang (2006)][2].
The univariate objective function `value_and_gradients_function` is typically
generated by projecting a multivariate objective function along a search
direction. Suppose the multivariate function to be minimized is
`g(x1,x2, .. xn)`. Let (d1, d2, ..., dn) be the direction along which we wish
to perform a line search. Then the projected univariate function to be used
for line search is
```None
f(a) = g(x1 + d1 * a, x2 + d2 * a, ..., xn + dn * a)
```
The directional derivative along (d1, d2, ..., dn) is needed for this
procedure. This also corresponds to the derivative of the projected function
`f(a)` with respect to `a`. Note that this derivative must be negative for
`a = 0` if the direction is a descent direction.
The usual stopping criteria for the line search is the satisfaction of the
(weak) Wolfe conditions. For details of the Wolfe conditions, see
ref. [3]. On a finite precision machine, the exact Wolfe conditions can
be difficult to satisfy when one is very close to the minimum and as argued
by [Hager and Zhang (2005)][1], one can only expect the minimum to be
determined within square root of machine precision. To improve the situation,
they propose to replace the Wolfe conditions with an approximate version
depending on the derivative of the function which is applied only when one
is very close to the minimum. The following algorithm implements this
enhanced scheme.
### Usage:
Primary use of line search methods is as an internal component of a class of
optimization algorithms (called line search based methods as opposed to
trust region methods). Hence, the end user will typically not want to access
line search directly. In particular, inexact line search should not be
confused with a univariate minimization method. The stopping criteria of line
search is the satisfaction of Wolfe conditions and not the discovery of the
minimum of the function.
With this caveat in mind, the following example illustrates the standalone
usage of the line search.
```python
# Define value and gradient namedtuple
ValueAndGradient = namedtuple('ValueAndGradient', ['x', 'f', 'df'])
# Define a quadratic target with minimum at 1.3.
def value_and_gradients_function(x):
return ValueAndGradient(x=x, f=(x - 1.3) ** 2, df=2 * (x-1.3))
# Set initial step size.
step_size = tf.constant(0.1)
ls_result = tfp.optimizer.linesearch.hager_zhang(
value_and_gradients_function, initial_step_size=step_size)
# Evaluate the results.
with tf.Session() as session:
results = session.run(ls_result)
# Ensure convergence.
assert results.converged
# If the line search converged, the left and the right ends of the
# bracketing interval are identical.
  assert results.left.x == results.right.x
# Print the number of evaluations and the final step size.
print ("Final Step Size: %f, Evaluations: %d" % (results.left.x,
results.func_evals))
```
### References:
[1]: William Hager, Hongchao Zhang. A new conjugate gradient method with
guaranteed descent and an efficient line search. SIAM J. Optim., Vol 16. 1,
pp. 170-172. 2005.
https://www.math.lsu.edu/~hozhang/papers/cg_descent.pdf
[2]: William Hager, Hongchao Zhang. Algorithm 851: CG_DESCENT, a conjugate
gradient method with guaranteed descent. ACM Transactions on Mathematical
Software, Vol 32., 1, pp. 113-137. 2006.
http://users.clas.ufl.edu/hager/papers/CG/cg_compare.pdf
[3]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in
Operations Research. pp 33-36. 2006
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that
correspond to scalar tensors of real dtype containing the point at which
the function was evaluated, the value of the function, and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
input points, function values, and derivatives at those input points.
initial_step_size: (Optional) Scalar positive `Tensor` of real dtype, or
a tensor of shape [n] in batching mode. The initial value (or values) to
try to bracket the minimum. Default is `1.` as a float32.
Note that this point need not necessarily bracket the minimum for the line
search to work correctly but the supplied value must be greater than 0.
A good initial value will make the search converge faster.
value_at_initial_step: (Optional) The full return value of evaluating
value_and_gradients_function at initial_step_size, i.e. a namedtuple with
'x', 'f', 'df', if already known by the caller. If supplied the value of
`initial_step_size` will be ignored, otherwise the tuple will be computed
by evaluating value_and_gradients_function.
value_at_zero: (Optional) The full return value of
value_and_gradients_function at `0.`, i.e. a namedtuple with
'x', 'f', 'df', if already known by the caller. If not supplied the tuple
will be computed by evaluating value_and_gradients_function.
converged: (Optional) In batching mode a tensor of shape [n], indicating
batch members which have already converged and no further search should
be performed. These batch members are also reported as converged in the
output, and both their `left` and `right` are set to the
`value_at_initial_step`.
threshold_use_approximate_wolfe_condition: Scalar positive `Tensor`
of real dtype. Corresponds to the parameter 'epsilon' in
[Hager and Zhang (2006)][2]. Used to estimate the
threshold at which the line search switches to approximate Wolfe
conditions.
shrinkage_param: Scalar positive Tensor of real dtype. Must be less than
`1.`. Corresponds to the parameter `gamma` in
[Hager and Zhang (2006)][2].
If the secant**2 step does not shrink the bracketing interval by this
proportion, a bisection step is performed to reduce the interval width.
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum. Corresponds to `rho` in [Hager and Zhang (2006)][2].
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to `delta` in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
max_iterations: Positive scalar `Tensor` of integral dtype or None. The
maximum number of iterations to perform in the line search. The number of
iterations used to bracket the minimum are also counted against this
parameter.
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'hager_zhang' is used.
Returns:
results: A namedtuple containing the following attributes.
converged: Boolean `Tensor` of shape [n]. Whether a point satisfying
Wolfe/Approx wolfe was found.
failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.
if either the objective function or the gradient are not finite at
an evaluation point.
iterations: Scalar int32 `Tensor`. Number of line search iterations made.
func_evals: Scalar int32 `Tensor`. Number of function evaluations made.
left: A namedtuple, as returned by value_and_gradients_function,
of the left end point of the final bracketing interval. Values are
equal to those of `right` on batch members where converged is True.
Otherwise, it corresponds to the last interval computed.
right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the final bracketing interval. Values are
equal to those of `left` on batch members where converged is True.
Otherwise, it corresponds to the last interval computed.
"""
with tf.name_scope(name or 'hager_zhang'):
val_0, val_initial, f_lim, prepare_evals = _prepare_args(
value_and_gradients_function,
initial_step_size,
value_at_initial_step,
value_at_zero,
threshold_use_approximate_wolfe_condition)
valid_inputs = (hzl.is_finite(val_0) & (val_0.df < 0) &
tf.math.is_finite(val_initial.x) & (val_initial.x > 0))
if converged is None:
init_converged = tf.zeros_like(valid_inputs) # i.e. all false.
else:
init_converged = tf.convert_to_tensor(converged)
failed = ~init_converged & ~valid_inputs
init_interval = HagerZhangLineSearchResult(
converged=init_converged,
failed=failed,
func_evals=prepare_evals,
iterations=tf.convert_to_tensor(0),
left=val_0,
right=hzl.val_where(init_converged, val_0, val_initial))
def _apply_bracket_and_search():
"""Bracketing and searching to do for valid inputs."""
return _bracket_and_search(
value_and_gradients_function, init_interval, f_lim, max_iterations,
shrinkage_param, expansion_param, sufficient_decrease_param,
curvature_param)
init_active = ~init_interval.failed & ~init_interval.converged
return prefer_static.cond(
tf.reduce_any(init_active),
_apply_bracket_and_search,
lambda: init_interval)
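# Illustrative usage sketch: drives `hager_zhang` above with a simple
# quadratic objective projected onto a line. `_ExampleValueAndGradient` is a
# hypothetical stand-in for the 'x'/'f'/'df' namedtuple contract described in
# the docstring; it is not a type exported by this module.
_ExampleValueAndGradient = collections.namedtuple(
    '_ExampleValueAndGradient', ['x', 'f', 'df'])
def _example_quadratic_line_search():
  """Minimal sketch: f(t) = (t - 1.3)**2, minimized at t = 1.3."""
  def fn(t):
    t = tf.convert_to_tensor(t)
    return _ExampleValueAndGradient(x=t, f=(t - 1.3)**2, df=2. * (t - 1.3))
  # The value and derivative at t = 0 satisfy the stated requirements
  # (finite value, negative derivative), so the search should converge.
  return hager_zhang(fn, initial_step_size=tf.constant(0.1))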
_LineSearchInnerResult = collections.namedtuple('_LineSearchInnerResult', [
'iteration',
'found_wolfe',
'failed',
'num_evals',
'left',
'right'])
def _bracket_and_search(
value_and_gradients_function,
init_interval,
f_lim,
max_iterations,
shrinkage_param,
expansion_param,
sufficient_decrease_param,
curvature_param):
"""Brackets the minimum and performs a line search.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that
correspond to scalar tensors of real dtype containing the point at which
the function was evaluated, the value of the function, and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
input points, function values, and derivatives at those input points.
init_interval: Instance of `HagerZhangLineSearchResults` containing
the initial line search interval. The gradient of init_interval.left must
be negative (i.e. must be a descent direction), while init_interval.right
must be positive and finite.
f_lim: Scalar `Tensor` of float dtype.
max_iterations: Positive scalar `Tensor` of integral dtype. The maximum
number of iterations to perform in the line search. The number of
iterations used to bracket the minimum are also counted against this
parameter.
shrinkage_param: Scalar positive Tensor of real dtype. Must be less than
`1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2].
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum. Corresponds to `rho` in [Hager and Zhang (2006)][2].
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to `delta` in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
Returns:
A namedtuple containing the following fields.
converged: Boolean `Tensor` of shape [n]. Whether a point satisfying
Wolfe/Approx wolfe was found.
failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.
if either the objective function or the gradient are not finite at
an evaluation point.
iterations: Scalar int32 `Tensor`. Number of line search iterations made.
func_evals: Scalar int32 `Tensor`. Number of function evaluations made.
left: A namedtuple, as returned by value_and_gradients_function,
of the left end point of the updated bracketing interval.
right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the updated bracketing interval.
"""
bracket_result = hzl.bracket(value_and_gradients_function, init_interval,
f_lim, max_iterations, expansion_param)
converged = init_interval.converged | _very_close(
bracket_result.left.x, bracket_result.right.x)
# We fail if we have not yet converged but already exhausted all iterations.
exhausted_iterations = ~converged & (
bracket_result.iteration >= max_iterations)
line_search_args = HagerZhangLineSearchResult(
converged=converged,
failed=bracket_result.failed | exhausted_iterations,
iterations=bracket_result.iteration,
func_evals=bracket_result.num_evals,
left=bracket_result.left,
right=bracket_result.right)
return _line_search_after_bracketing(
value_and_gradients_function, line_search_args, init_interval.left,
f_lim, max_iterations, sufficient_decrease_param, curvature_param,
shrinkage_param)
def _line_search_after_bracketing(
value_and_gradients_function,
search_interval,
val_0,
f_lim,
max_iterations,
sufficient_decrease_param,
curvature_param,
shrinkage_param):
"""The main loop of line search after the minimum has been bracketed.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that
correspond to scalar tensors of real dtype containing the point at which
the function was evaluated, the value of the function, and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
input points, function values, and derivatives at those input points.
search_interval: Instance of `HagerZhangLineSearchResults` containing
the current line search interval.
val_0: A namedtuple as returned by value_and_gradients_function evaluated
at `0.`. The gradient must be negative (i.e. must be a descent direction).
f_lim: Scalar `Tensor` of float dtype.
max_iterations: Positive scalar `Tensor` of integral dtype. The maximum
number of iterations to perform in the line search. The number of
iterations used to bracket the minimum are also counted against this
parameter.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to `delta` in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
shrinkage_param: Scalar positive Tensor of real dtype. Must be less than
`1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2].
Returns:
A namedtuple containing the following fields.
converged: Boolean `Tensor` of shape [n]. Whether a point satisfying
Wolfe/Approx wolfe was found.
failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.
if either the objective function or the gradient are not finite at
an evaluation point.
iterations: Scalar int32 `Tensor`. Number of line search iterations made.
func_evals: Scalar int32 `Tensor`. Number of function evaluations made.
left: A namedtuple, as returned by value_and_gradients_function,
of the left end point of the updated bracketing interval.
right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the updated bracketing interval.
"""
def _loop_cond(curr_interval):
"""Loop condition."""
active = ~(curr_interval.converged | curr_interval.failed)
return (curr_interval.iterations <
max_iterations) & tf.reduce_any(active)
def _loop_body(curr_interval):
"""The loop body."""
secant2_raw_result = hzl.secant2(
value_and_gradients_function, val_0, curr_interval, f_lim,
sufficient_decrease_param, curvature_param)
secant2_result = HagerZhangLineSearchResult(
converged=secant2_raw_result.converged,
failed=secant2_raw_result.failed,
iterations=curr_interval.iterations + 1,
func_evals=secant2_raw_result.num_evals,
left=secant2_raw_result.left,
right=secant2_raw_result.right)
should_check_shrinkage = ~(secant2_result.converged | secant2_result.failed)
def _do_check_shrinkage():
"""Check if interval has shrinked enough."""
old_width = curr_interval.right.x - curr_interval.left.x
new_width = secant2_result.right.x - secant2_result.left.x
sufficient_shrinkage = new_width < old_width * shrinkage_param
func_is_flat = (
_very_close(curr_interval.left.f, curr_interval.right.f) &
_very_close(secant2_result.left.f, secant2_result.right.f))
new_converged = (
should_check_shrinkage & sufficient_shrinkage & func_is_flat)
needs_inner_bisect = should_check_shrinkage & ~sufficient_shrinkage
inner_bisect_args = secant2_result._replace(
converged=secant2_result.converged | new_converged)
def _apply_inner_bisect():
return _line_search_inner_bisection(
value_and_gradients_function, inner_bisect_args,
needs_inner_bisect, f_lim)
return prefer_static.cond(
tf.reduce_any(needs_inner_bisect),
_apply_inner_bisect,
lambda: inner_bisect_args)
next_args = prefer_static.cond(
tf.reduce_any(should_check_shrinkage),
_do_check_shrinkage,
lambda: secant2_result)
interval_shrunk = (
~next_args.failed & _very_close(next_args.left.x, next_args.right.x))
return [next_args._replace(converged=next_args.converged | interval_shrunk)]
return tf.while_loop(
cond=_loop_cond,
body=_loop_body,
loop_vars=[search_interval],
parallel_iterations=1)[0]
def _line_search_inner_bisection(
value_and_gradients_function,
search_interval,
active,
f_lim):
"""Performs bisection and updates the interval."""
midpoint = (search_interval.left.x + search_interval.right.x) / 2
val_mid = value_and_gradients_function(midpoint)
is_valid_mid = hzl.is_finite(val_mid)
still_active = active & is_valid_mid
new_failed = active & ~is_valid_mid
  next_interval = search_interval._replace(
      failed=search_interval.failed | new_failed,
      func_evals=search_interval.func_evals + 1)
  def _apply_update():
    update_result = hzl.update(
        value_and_gradients_function, next_interval.left, next_interval.right,
        val_mid, f_lim, active=still_active)
    return HagerZhangLineSearchResult(
        converged=next_interval.converged,
        failed=next_interval.failed | update_result.failed,
        iterations=next_interval.iterations + update_result.iteration,
        func_evals=next_interval.func_evals + update_result.num_evals,
        left=update_result.left,
        right=update_result.right)
  return prefer_static.cond(
      tf.reduce_any(still_active), _apply_update, lambda: next_interval)
def _prepare_args(value_and_gradients_function,
initial_step_size,
val_initial,
val_0,
approximate_wolfe_threshold):
"""Prepares the arguments for the line search initialization.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that
correspond to scalar tensors of real dtype containing the point at which
the function was evaluated, the value of the function, and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
input points, function values, and derivatives at those input points.
initial_step_size: Scalar positive `Tensor` of real dtype, or a tensor of
shape [n] in batching mode. The initial value (or values) to try to
bracket the minimum. Default is `1.` as a float32.
Note that this point need not necessarily bracket the minimum for the line
search to work correctly but the supplied value must be greater than 0.
A good initial value will make the search converge faster.
val_initial: The full return value of evaluating
value_and_gradients_function at initial_step_size, i.e. a namedtuple with
'x', 'f', 'df', if already known by the caller. If not None the value of
`initial_step_size` will be ignored, otherwise the tuple will be computed
by evaluating value_and_gradients_function.
val_0: The full return value of value_and_gradients_function at `0.`, i.e.
a namedtuple with 'x', 'f', 'df', if already known by the caller. If None
the tuple will be computed by evaluating value_and_gradients_function.
approximate_wolfe_threshold: Scalar positive `Tensor` of
real dtype. Corresponds to the parameter 'epsilon' in
[Hager and Zhang (2006)][2]. Used to estimate the
threshold at which the line search switches to approximate Wolfe
conditions.
Returns:
left: A namedtuple, as returned by value_and_gradients_function,
containing the value and derivative of the function at `0.`.
val_initial: A namedtuple, as returned by value_and_gradients_function,
containing the value and derivative of the function at
`initial_step_size`.
f_lim: Real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked.
eval_count: Scalar int32 `Tensor`. The number of target function
evaluations made by this function.
"""
eval_count = 0
if val_initial is None:
if initial_step_size is not None:
initial_step_size = tf.convert_to_tensor(initial_step_size)
else:
initial_step_size = np.float32(1.)
val_initial = value_and_gradients_function(initial_step_size)
eval_count += 1
if val_0 is None:
x_0 = tf.zeros_like(val_initial.x)
val_0 = value_and_gradients_function(x_0)
eval_count += 1
f_lim = val_0.f + (approximate_wolfe_threshold * tf.math.abs(val_0.f))
return val_0, val_initial, f_lim, tf.convert_to_tensor(eval_count)
def _very_close(x, y):
return tf.math.nextafter(x, y) >= y
def _to_str(x):
"""Converts a bool tensor to a string with True/False values."""
x = tf.convert_to_tensor(x)
if x.dtype == tf.bool:
return tf.where(x, 'True', 'False')
return x
# A convenience function useful while debugging in the graph mode.
def _print(pass_through_tensor, values):
"""Wrapper for tf.Print which supports lists and namedtuples for printing."""
flat_values = []
for value in values:
# Checks if it is a namedtuple.
if hasattr(value, '_fields'):
for field in value._fields:
flat_values.extend([field, _to_str(getattr(value, field))])
continue
if isinstance(value, (list, tuple)):
for v in value:
flat_values.append(_to_str(v))
continue
flat_values.append(_to_str(value))
return tf.Print(pass_through_tensor, flat_values)
|
apache-2.0
| 6,853,210,817,681,248,000 | 46.539906 | 94 | 0.697511 | false |
tonyduckles/svn2svn
|
svn2svn/run/parse.py
|
1
|
2731
|
""" optparser helper functions """
import optparse
import textwrap
class HelpFormatter(optparse.IndentedHelpFormatter):
"""
Modified version of certain optparse.IndentedHelpFormatter methods:
        * Respect line-breaks in parser.description and option.help_text
* Vertically-align long_opts
Inspired by: http://groups.google.com/group/comp.lang.python/browse_thread/thread/6df6e6b541a15bc2/09f28e26af0699b1?pli=1
"""
def format_description(self, description):
if not description: return ""
desc_width = self.width - self.current_indent
indent = " "*self.current_indent
bits = description.split('\n')
formatted_bits = [
textwrap.fill(bit,
desc_width,
initial_indent=indent,
subsequent_indent=indent)
for bit in bits]
result = "\n".join(formatted_bits) + "\n"
return result
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [("%s" % (sopt)) if option._long_opts else \
(self._short_opt_fmt % (sopt, metavar))
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
return (" " if not short_opts else "")+(", ".join(short_opts + long_opts))
def format_option(self, option):
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = []
for para in help_text.split("\n"):
help_lines.extend(textwrap.wrap(para, self.help_width))
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def format_usage(self, usage):
return usage
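# Illustrative usage sketch: how the formatter above could be attached to an
# OptionParser. The option and description below are hypothetical examples,
# not options defined by svn2svn.
if __name__ == "__main__":
    _example_parser = optparse.OptionParser(
        formatter=HelpFormatter(),
        description="First line of the description.\nSecond line is kept on its own line.")
    _example_parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                               help="Verbose output.\nLine-breaks in help text are preserved.")
    _example_parser.print_help()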
|
gpl-3.0
| -6,258,201,666,401,967,000 | 40.378788 | 125 | 0.556573 | false |
tensorflow/tpu
|
models/official/detection/utils/config_utils.py
|
1
|
2218
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config utils."""
import os
import tensorflow.compat.v1 as tf
from hyperparameters import params_dict
_PARSERS = [
'classification_parser',
'retinanet_parser',
'maskrcnn_parser',
'segmentation_parser',
'shapemask_parser',
]
_BACKBONES = [
'resnet',
'spinenet',
'spinenet_mbconv',
]
_MULTILEVEL_FEATURES = [
'fpn',
'nasfpn',
]
def filter_unused_blocks(params):
"""Filters unused architecture params blocks."""
filtered_params = params_dict.ParamsDict(params)
if 'parser' in params.architecture.as_dict().keys():
for parser in _PARSERS:
if (parser in params.as_dict().keys() and
parser != params.architecture.parser):
delattr(filtered_params, parser)
if 'backbone' in params.architecture.as_dict().keys():
for backbone in _BACKBONES:
if (backbone in params.as_dict().keys() and
backbone != params.architecture.backbone):
delattr(filtered_params, backbone)
if 'multilevel_features' in params.architecture.as_dict().keys():
for features in _MULTILEVEL_FEATURES:
if (features in params.as_dict().keys() and
features != params.architecture.multilevel_features):
delattr(filtered_params, features)
return filtered_params
def save_config(params, model_dir):
if model_dir:
params = filter_unused_blocks(params)
if not tf.gfile.Exists(model_dir):
tf.gfile.MakeDirs(model_dir)
params_dict.save_params_dict_to_yaml(
params, os.path.join(model_dir, 'params.yaml'))
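# Illustrative usage sketch: builds a minimal params object and saves the
# filtered config. The architecture values and the output directory below are
# hypothetical examples, not defaults shipped with the detection models.
if __name__ == '__main__':
  _example_params = params_dict.ParamsDict({
      'architecture': {'backbone': 'resnet', 'multilevel_features': 'fpn'},
      'resnet': {'resnet_depth': 50},
      'spinenet': {'model_id': '49'},
      'fpn': {'fpn_feat_dims': 256},
      'nasfpn': {'fpn_feat_dims': 256},
  })
  # 'spinenet' and 'nasfpn' are dropped by filter_unused_blocks before saving.
  save_config(_example_params, '/tmp/detection_example_model_dir')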
|
apache-2.0
| -6,733,611,204,582,500,000 | 30.239437 | 80 | 0.670875 | false |
WillsB3/glue
|
glue/formats/jsonformat.py
|
1
|
2704
|
import os
import json
import codecs
from base import BaseJSONFormat
class JSONFormat(BaseJSONFormat):
extension = 'json'
build_per_ratio = True
@classmethod
def populate_argument_parser(cls, parser):
group = parser.add_argument_group("JSON format options")
group.add_argument("--json",
dest="json_dir",
nargs='?',
const=True,
default=os.environ.get('GLUE_JSON', False),
metavar='DIR',
help="Generate JSON files and optionally where")
group.add_argument("--json-format",
dest="json_format",
metavar='NAME',
type=unicode,
default=os.environ.get('GLUE_JSON_FORMAT', 'array'),
choices=['array', 'hash'],
help=("JSON structure format (array, hash)"))
def get_context(self, *args, **kwargs):
context = super(JSONFormat, self).get_context(*args, **kwargs)
frames = dict([[i['filename'], {'filename': i['filename'],
'frame': {'x': i['x'],
'y': i['y'],
'w': i['width'],
'h': i['height']},
'rotated': False,
'trimmed': False,
'spriteSourceSize': {'x': i['x'],
'y': i['y'],
'w': i['width'],
'h': i['height']},
'sourceSize': {'w': i['original_width'],
'h': i['original_height']}}] for i in context['images']])
data = dict(frames=None, meta={'version': context['version'],
'hash': context['hash'],
'name': context['name'],
'sprite_path': context['sprite_path'],
'sprite_filename': context['sprite_filename'],
'width': context['width'],
'height': context['height']})
if self.sprite.config['json_format'] == 'array':
data['frames'] = frames.values()
else:
data['frames'] = frames
return data
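# Illustrative note: for a single 10x10 image "icon.png" placed at (0, 0),
# get_context() above returns roughly the following structure (values are
# hypothetical; with --json-format=hash, "frames" is a dict keyed by filename):
#   {"frames": [{"filename": "icon.png",
#                "frame": {"x": 0, "y": 0, "w": 10, "h": 10},
#                "rotated": False, "trimmed": False,
#                "spriteSourceSize": {"x": 0, "y": 0, "w": 10, "h": 10},
#                "sourceSize": {"w": 10, "h": 10}}],
#    "meta": {"version": ..., "hash": ..., "name": ..., "sprite_path": ...,
#             "sprite_filename": ..., "width": ..., "height": ...}}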
|
bsd-3-clause
| 3,899,344,654,583,642,000 | 41.920635 | 112 | 0.366864 | false |
LennonChin/Django-Practices
|
MxShop/apps/utils/alipay.py
|
1
|
6122
|
# _*_ coding: utf-8 _*_
__author__ = 'LennonChin'
__date__ = '2017/10/23 21:37'
# pip install pycryptodome
__author__ = 'bobby'
from datetime import datetime
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA256
from base64 import b64encode, b64decode
from urllib.parse import quote_plus
from urllib.parse import urlparse, parse_qs
from urllib.request import urlopen
from base64 import decodebytes, encodebytes
import json
class AliPay(object):
"""
    Alipay payment interface.
"""
def __init__(self, appid, app_notify_url, app_private_key_path,
alipay_public_key_path, return_url, debug=False):
self.appid = appid
self.app_notify_url = app_notify_url
self.app_private_key_path = app_private_key_path
self.app_private_key = None
self.return_url = return_url
with open(self.app_private_key_path) as fp:
self.app_private_key = RSA.importKey(fp.read())
self.alipay_public_key_path = alipay_public_key_path
with open(self.alipay_public_key_path) as fp:
self.alipay_public_key = RSA.import_key(fp.read())
if debug is True:
self.__gateway = "https://openapi.alipaydev.com/gateway.do"
else:
self.__gateway = "https://openapi.alipay.com/gateway.do"
def direct_pay(self, subject, out_trade_no, total_amount, return_url=None, **kwargs):
biz_content = {
"subject": subject,
"out_trade_no": out_trade_no,
"total_amount": total_amount,
"product_code": "FAST_INSTANT_TRADE_PAY",
# "qr_pay_mode":4
}
biz_content.update(kwargs)
data = self.build_body("alipay.trade.page.pay", biz_content, self.return_url)
return self.sign_data(data)
def build_body(self, method, biz_content, return_url=None):
data = {
"app_id": self.appid,
"method": method,
"charset": "utf-8",
"sign_type": "RSA2",
"timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"version": "1.0",
"biz_content": biz_content
}
if return_url is not None:
data["notify_url"] = self.app_notify_url
data["return_url"] = self.return_url
return data
def sign_data(self, data):
data.pop("sign", None)
        # Build the sorted parameter string to be signed
unsigned_items = self.ordered_data(data)
unsigned_string = "&".join("{0}={1}".format(k, v) for k, v in unsigned_items)
sign = self.sign(unsigned_string.encode("utf-8"))
ordered_items = self.ordered_data(data)
quoted_string = "&".join("{0}={1}".format(k, quote_plus(v)) for k, v in ordered_items)
        # Build the final order-information string with the signature appended
signed_string = quoted_string + "&sign=" + quote_plus(sign)
return signed_string
def ordered_data(self, data):
complex_keys = []
for key, value in data.items():
if isinstance(value, dict):
complex_keys.append(key)
        # Dump dict-typed values to JSON strings
for key in complex_keys:
data[key] = json.dumps(data[key], separators=(',', ':'))
return sorted([(k, v) for k, v in data.items()])
def sign(self, unsigned_string):
        # Compute the RSA signature
key = self.app_private_key
signer = PKCS1_v1_5.new(key)
signature = signer.sign(SHA256.new(unsigned_string))
        # Base64-encode, convert to a unicode string and strip the newline characters
sign = encodebytes(signature).decode("utf8").replace("\n", "")
return sign
def _verify(self, raw_content, signature):
        # Verify the signature with Alipay's public key
key = self.alipay_public_key
signer = PKCS1_v1_5.new(key)
digest = SHA256.new()
digest.update(raw_content.encode("utf8"))
if signer.verify(digest, decodebytes(signature.encode("utf8"))):
return True
return False
def verify(self, data, signature):
if "sign_type" in data:
sign_type = data.pop("sign_type")
        # Build the sorted parameter string
unsigned_items = self.ordered_data(data)
message = "&".join(u"{}={}".format(k, v) for k, v in unsigned_items)
return self._verify(message, signature)
if __name__ == "__main__":
return_url = 'http://47.92.87.172:8000/?total_amount=0.01×tamp=2017-08-15+17%3A15%3A13&sign=jnnA1dGO2iu2ltMpxrF4MBKE20Akyn%2FLdYrFDkQ6ckY3Qz24P3DTxIvt%2BBTnR6nRk%2BPAiLjdS4sa%2BC9JomsdNGlrc2Flg6v6qtNzTWI%2FEM5WL0Ver9OqIJSTwamxT6dW9uYF5sc2Ivk1fHYvPuMfysd90lOAP%2FdwnCA12VoiHnflsLBAsdhJazbvquFP%2Bs1QWts29C2%2BXEtIlHxNgIgt3gHXpnYgsidHqfUYwZkasiDGAJt0EgkJ17Dzcljhzccb1oYPSbt%2FS5lnf9IMi%2BN0ZYo9%2FDa2HfvR6HG3WW1K%2FlJfdbLMBk4owomyu0sMY1l%2Fj0iTJniW%2BH4ftIfMOtADHA%3D%3D&trade_no=2017081521001004340200204114&sign_type=RSA2&auth_app_id=2016080600180695&charset=utf-8&seller_id=2088102170208070&method=alipay.trade.page.pay.return&app_id=2016080600180695&out_trade_no=201702021222&version=1.0'
alipay = AliPay(
appid="2016080600180695",
app_notify_url="http://projectsedus.com/",
app_private_key_path=u"../trade/keys/private_2048.txt",
        alipay_public_key_path="../trade/keys/alipay_pub_key.txt",  # Alipay's public key, used to verify replies from Alipay; not your own public key
        debug=True,  # defaults to False
return_url="http://47.92.87.172:8000/"
)
o = urlparse(return_url)
query = parse_qs(o.query)
processed_query = {}
ali_sign = query.pop("sign")[0]
for key, value in query.items():
processed_query[key] = value[0]
print (alipay.verify(processed_query, ali_sign))
url = alipay.direct_pay(
subject="测试订单",
out_trade_no="201702021222",
total_amount=0.01
)
re_url = "https://openapi.alipaydev.com/gateway.do?{data}".format(data=url)
print(re_url)
|
apache-2.0
| 472,593,492,612,720,300 | 36.454545 | 699 | 0.607264 | false |
marcoconstancio/yanta
|
libs/python/NoteViewer.py
|
1
|
8163
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path
from PyQt5.QtWidgets import QWidget, QLabel, QVBoxLayout
from libs.python.NoteFunctions import NoteFunctions
class NoteViewer(QWidget):
def __init__(self, param):
QWidget.__init__(self)
        # Functions object used for several note operations
if 'NoteFunctions' in param:
self.functions = param['NoteFunctions']
else:
self.functions = NoteFunctions()
        # All available note viewers (e.g. html viewer, text viewer) and their configs
self.available_note_viewers = {} #= self.functions.available_note_viewers()
self.available_note_viewers_config = {}
self.current_note_viewer_name = ""
# Layout of this widget
self.layout = QVBoxLayout()
# self.layout.addWidget(self.get_current_note_viewer())
self.setLayout(self.layout)
        # Toolbar buttons that are going to be disabled/enabled depending on the active note viewer
# Active viewer config
self.set_current_note_viewer(self.functions.config('Default viewer'))
#self.current_note_viewer_name = self.functions.config('Default viewer')
#self.current_note_viewer = self.functions.get_viewer(self.current_note_viewer_name)
#self.current_note_viewer_config = self.functions.get_viewer_config(self.current_note_viewer_name)
self.toolbar_buttons = None
def set_function(self, functions):
if functions:
self.functions = functions
def set_toolbar_buttons(self, toolbar_buttons=None):
if toolbar_buttons:
self.toolbar_buttons = toolbar_buttons
def get_current_note_viewer(self):
return self.available_note_viewers[self.current_note_viewer_name]
def get_current_note_viewer_config(self):
return self.available_note_viewers_config[self.current_note_viewer_name]
# def current_note_viewer(self):
# return self.available_note_viewers[self.current_note_viewer_name]
#
# def current_note_viewer_config(self):
# return self.available_note_viewers_config[self.current_note_viewer_name]
def set_current_note_viewer(self, viewer_name=''):
self.current_note_viewer_name = viewer_name
if viewer_name not in list(self.available_note_viewers.keys()):
self.available_note_viewers[self.current_note_viewer_name] = self.functions.get_viewer(viewer_name)
self.available_note_viewers_config[self.current_note_viewer_name] = self.functions.get_viewer_config(viewer_name)
self.layout.addWidget(self.available_note_viewers[self.current_note_viewer_name])
for anv_viewer_name in self.available_note_viewers:
if anv_viewer_name == viewer_name:
self.available_note_viewers[anv_viewer_name].show()
else:
self.available_note_viewers[anv_viewer_name].set_content(' ')
self.available_note_viewers[anv_viewer_name].hide()
#viewer.hide()
#self.current_note_viewer = self.functions.get_viewer(viewer_name)
def get_content(self):
return self.get_current_note_viewer().get_content()
def set_readonly(self, param=True):
if param == True:
self.get_current_note_viewer().set_readonly(True)
elif param == False:
self.get_current_note_viewer().set_readonly(False)
def set_writeable(self):
self.get_current_note_viewer().set_readonly(False)
def open_file (self, file_name, viewer_name=None):
# CHANGE THE VIEWER TYPE IF REQUESTED
if viewer_name:
if viewer_name != self.current_note_viewer_name:
self.set_current_note_viewer(viewer_name)
        # Enable or disable toolbar buttons depending on the active viewer's supported actions
viewer_config = self.get_current_note_viewer_config()
viewer_supported_actions = []
if 'supported_actions' in viewer_config:
viewer_supported_actions = viewer_config['supported_actions']
for button_name in self.toolbar_buttons:
if button_name in viewer_supported_actions:
self.toolbar_buttons[button_name].setEnabled(True)
else:
self.toolbar_buttons[button_name].setEnabled(False)
## APPLY INITIAL SETTINGS
# Context menu extra options
if 'context_menu_actions' in viewer_config:
viewer_context_menu_actions = []
for button_name in viewer_config['context_menu_actions']:
viewer_context_menu_actions.append(self.toolbar_buttons[button_name])
self.call_function('set_context_menu_append_actions', viewer_context_menu_actions)
if file_name:
self.get_current_note_viewer().open_file(file_name)
def set_content(self, content=None, viewer_name=None):
# CHANGE THE VIEWER TYPE IF REQUESTED
if viewer_name:
if viewer_name != self.current_note_viewer_name:
#self.current_note_viewer_name = viewer_name
#self.get_current_note_viewer()
self.set_current_note_viewer(viewer_name)
# if self.current_note_viewer_name in self.available_note_viewers:
# self.available_note_viewers[self.current_note_viewer_name] = self.functions.get_viewer(viewer_name)
# self.available_note_viewers_config[self.current_note_viewer_name] = self.functions.functions(viewer_name)
# http://stackoverflow.com/questions/4528347/clear-all-widgets-in-a-layout-in-pyqt
# for i in reversed(range(self.layout.count())):
# widgetToRemove = self.layout.itemAt(i).widget()
# # remove it from the layout list
# self.layout.removeWidget(widgetToRemove)
# # remove it from the gui
# widgetToRemove.setParent(None)
#
# self.layout.addWidget(self.get_current_note_viewer())
        # Enable or disable toolbar buttons depending on the active viewer's supported actions
viewer_config = self.get_current_note_viewer_config()
viewer_supported_actions = []
if 'supported_actions' in viewer_config:
viewer_supported_actions = viewer_config['supported_actions']
for button_name in self.toolbar_buttons:
if button_name in viewer_supported_actions:
self.toolbar_buttons[button_name].setEnabled(True)
else:
self.toolbar_buttons[button_name].setEnabled(False)
## APPLY INITIAL SETTINGS
# Context menu extra options
if 'context_menu_actions' in viewer_config:
viewer_context_menu_actions = []
for button_name in viewer_config['context_menu_actions']:
viewer_context_menu_actions.append(self.toolbar_buttons[button_name])
self.call_function('set_context_menu_append_actions', viewer_context_menu_actions)
if content:
self.get_current_note_viewer().set_content(content)
def get_config(self):
return self.get_current_note_viewer().get_config()
def get_save_extensions(self):
save_extensions = []
viewer_config = self.get_current_note_viewer_config()
if 'save_extensions' in viewer_config:
save_extensions = viewer_config['save_extensions']
return save_extensions
def get_open_extensions(self):
open_extensions = []
viewer_config = self.get_current_note_viewer_config()
if 'open_extensions' in viewer_config:
open_extensions = viewer_config['open_extensions']
return open_extensions
    def print_(self, param=None):
        # Delegate printing to the active viewer when it provides a print_ method.
        if hasattr(self.get_current_note_viewer(), 'print_'):
            return self.get_current_note_viewer().print_(param)
        return None
def call_function(self, function_name=None, *args):
if function_name:
if callable(getattr(self.get_current_note_viewer(), function_name, None)):
return getattr(self.get_current_note_viewer(), function_name)(*args)
|
gpl-2.0
| 7,265,503,332,393,626,000 | 38.631068 | 127 | 0.635918 | false |
trevor/calendarserver
|
txdav/who/opendirectory.py
|
1
|
1226
|
# -*- test-case-name: txdav.who.test.test_xml -*-
##
# Copyright (c) 2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
from __future__ import absolute_import
"""
Calendar and contacts directory extensions to L{twext.who.opendirectory}.
"""
__all__ = [
"DirectoryService",
]
from twext.who.opendirectory import DirectoryService
DirectoryService # Something has to use the import
# Hoorj OMG haxx
from twext.who.opendirectory._constants import ODRecordType as _ODRecordType
from .idirectory import RecordType as _CSRecordType
_ODRecordType.location.recordType = _CSRecordType.location
_ODRecordType.resource.recordType = _CSRecordType.resource
|
apache-2.0
| -4,844,171,048,777,520,000 | 31.263158 | 76 | 0.764274 | false |
tonysyu/scrappyr-app
|
config/settings/production.py
|
1
|
7727
|
# -*- coding: utf-8 -*-
"""
Production Configurations
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis for cache
- Use sentry for error logging
"""
from __future__ import absolute_import, unicode_literals
import logging
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .base import * # noqa
# Explicit import reused variables to prevent flake8 errors:
from .base import DATABASES, env, INSTALLED_APPS, MIDDLEWARE, TEMPLATES
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# raven sentry client
# See https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat', ]
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
WHITENOISE_MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware', ]
MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE
RAVEN_MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware']
MIDDLEWARE = RAVEN_MIDDLEWARE + MIDDLEWARE
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com', ])
# END SITE CONFIGURATION
INSTALLED_APPS += ['gunicorn', ]
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += ['storages', ]
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='scrappyr <noreply@example.com>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[scrappyr]')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# Anymail with Mailgun
INSTALLED_APPS += ['anymail', ]
ANYMAIL = {
'MAILGUN_API_KEY': env('DJANGO_MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_SENDER_DOMAIN')
}
EMAIL_BACKEND = 'anymail.backends.mailgun.MailgunBackend'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
(
'django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior # noqa
}
}
}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT',
default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry', ],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console', ],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry', ],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
|
mit
| -3,475,508,035,025,968,000 | 33.495536 | 125 | 0.599845 | false |
edermartioli/ExoplanetLight
|
src/spectrum.py
|
1
|
2235
|
# -*- coding: utf-8 -*-
"""
Created on Nov 25 2016
@author: Eder Martioli
Laboratorio Nacional de Astrofisica, Brazil
spectrum.py is a library of classes and functions to
handle spectral data.
"""
import numpy as np
from scipy import constants
########## SPECTRUM CLASS ############
class Spectrum :
'Common base class for a spectrum'
def __init__(self, Filename):
"""
Create a Spectrum object.
Parameters
----------
filename : string
File to read the spectrum from.
Examples
--------
>>> sp = Spectrum("spectrumfile.1d.spc")
"""
self.filename = Filename
self.load_spectrum(self.filename)
def load_spectrum(self,Filename):
try:
self.wl,self.flux,self.var = np.loadtxt(Filename, unpack=True, comments='#', usecols=(0,1,2), delimiter=' ')
except:
print "Error: could not open file:",Filename
exit()
def getdata(self, wl0=0., wlf=0.) :
"""
Retrieve data for a given wavelength range
Parameters
----------
wl0 : initial wavelength [nm]
wlf : final wavelength [nm]
Return : wl[], flux[], variance[]
"""
if (wl0 == 0.) :
wl0 = self.wl[0]
if (wlf == 0.) :
wlf = self.wl[-1]
mask = np.where((self.wl > wl0) & (self.wl < wlf))
return self.wl[mask],self.flux[mask],self.var[mask]
def applyRVShift(self, RVshift, interp=False) :
"""
Apply radial velocity shift to the wavelength data.
Parameters
----------
RVshift : radial velocity shift [m/s]
interp : interpolate shifted data to keep original wavelength sampling? [boolean]
"""
        self.rvshift = RVshift
        if interp == True :
            wl_tmp = self.wl*(1.0 + self.rvshift/constants.c)
            flux_tmp = np.interp(self.wl, wl_tmp, self.flux)
            self.flux = flux_tmp
        else :
            self.wl *= (1.0 + self.rvshift/constants.c)
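# Illustrative usage sketch, assuming "spectrumfile.1d.spc" is a plain-text
# file with wavelength, flux and variance columns as expected by
# load_spectrum above; the wavelength range and RV shift are example values.
if __name__ == "__main__" :
    sp = Spectrum("spectrumfile.1d.spc")
    wl, flux, var = sp.getdata(wl0=500., wlf=600.)
    sp.applyRVShift(1000., interp=True) # shift by +1 km/s, keep original sampling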
|
mit
| 5,748,290,067,744,352,000 | 26.256098 | 120 | 0.499329 | false |
dpgaspar/Flask-AppBuilder
|
examples/quickactions/config.py
|
1
|
1945
|
import os
from flask_appbuilder.security.manager import (
AUTH_OID,
AUTH_REMOTE_USER,
AUTH_DB,
AUTH_LDAP,
AUTH_OAUTH,
)
basedir = os.path.abspath(os.path.dirname(__file__))
CSRF_ENABLED = True
SECRET_KEY = "\2\1thisismyscretkey\1\2\e\y\y\h"
OPENID_PROVIDERS = [
{"name": "Yahoo", "url": "https://me.yahoo.com"},
{"name": "AOL", "url": "http://openid.aol.com/<username>"},
{"name": "Flickr", "url": "http://www.flickr.com/<username>"},
{"name": "MyOpenID", "url": "https://www.myopenid.com"},
]
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(basedir, "app.db")
# SQLALCHEMY_DATABASE_URI = 'mysql://root:password@localhost/quickhowto'
# SQLALCHEMY_DATABASE_URI = 'postgresql://fab:password@localhost:5432/quickhowto2'
# SQLALCHEMY_ECHO = True
BABEL_DEFAULT_LOCALE = "en"
BABEL_DEFAULT_FOLDER = "translations"
LANGUAGES = {
"en": {"flag": "gb", "name": "English"},
"pt": {"flag": "pt", "name": "Portuguese"},
"es": {"flag": "es", "name": "Spanish"},
"de": {"flag": "de", "name": "German"},
"zh": {"flag": "cn", "name": "Chinese"},
"ru": {"flag": "ru", "name": "Russian"},
}
# ------------------------------
# GLOBALS FOR GENERAL APP's
# ------------------------------
UPLOAD_FOLDER = basedir + "/app/static/uploads/"
IMG_UPLOAD_FOLDER = basedir + "/app/static/uploads/"
IMG_UPLOAD_URL = "/static/uploads/"
AUTH_TYPE = AUTH_DB
AUTH_ROLE_ADMIN = "Admin"
AUTH_ROLE_PUBLIC = "Public"
APP_NAME = "F.A.B. Example"
APP_ICON = "/static/img/brand.jpg"
# APP_THEME = "bootstrap-theme.css" # default
# APP_THEME = "cerulean.css" # COOL
# APP_THEME = "amelia.css"
# APP_THEME = "cosmo.css"
# APP_THEME = "cyborg.css" # COOL
# APP_THEME = "flatly.css"
# APP_THEME = "journal.css"
# APP_THEME = "readable.css"
# APP_THEME = "simplex.css"
# APP_THEME = "slate.css" # COOL
# APP_THEME = "spacelab.css" # NICE
# APP_THEME = "united.css"
# APP_THEME = "yeti.css"
|
bsd-3-clause
| -8,113,568,028,232,835,000 | 28.923077 | 82 | 0.594859 | false |
belokop/indico_bare
|
indico/web/handlers/tracker.py
|
1
|
1352
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import pkg_resources
import sys
from flask import jsonify
from MaKaC.common import info
from MaKaC.webinterface.rh.base import RH
class RHSystemInfo(RH):
def _process(self):
try:
indico_version = pkg_resources.get_distribution('indico').version
except pkg_resources.DistributionNotFound:
indico_version = 'dev'
minfo = info.HelperMaKaCInfo.getMaKaCInfoInstance()
stats = {'python_version': '.'.join(map(str, sys.version_info[:3])),
'indico_version': indico_version,
'language': minfo.getLang()}
return jsonify(stats)
|
gpl-3.0
| -6,417,300,197,965,362,000 | 35.540541 | 78 | 0.707101 | false |
chipaca/snapcraft
|
snapcraft/project/_project_options.py
|
1
|
12984
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2019 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import multiprocessing
import os
import platform
import sys
from typing import Set
from snapcraft import file_utils
from snapcraft.internal import common, errors, os_release
logger = logging.getLogger(__name__)
_ARCH_TRANSLATIONS = {
"aarch64": {
"kernel": "arm64",
"deb": "arm64",
"uts_machine": "aarch64",
"cross-compiler-prefix": "aarch64-linux-gnu-",
"cross-build-packages": ["gcc-aarch64-linux-gnu", "libc6-dev-arm64-cross"],
"triplet": "aarch64-linux-gnu",
"core-dynamic-linker": "lib/ld-linux-aarch64.so.1",
},
"armv7l": {
"kernel": "arm",
"deb": "armhf",
"uts_machine": "arm",
"cross-compiler-prefix": "arm-linux-gnueabihf-",
"cross-build-packages": ["gcc-arm-linux-gnueabihf", "libc6-dev-armhf-cross"],
"triplet": "arm-linux-gnueabihf",
"core-dynamic-linker": "lib/ld-linux-armhf.so.3",
},
"i686": {
"kernel": "x86",
"deb": "i386",
"uts_machine": "i686",
"triplet": "i386-linux-gnu",
},
"ppc": {
"kernel": "powerpc",
"deb": "powerpc",
"uts_machine": "powerpc",
"cross-compiler-prefix": "powerpc-linux-gnu-",
"cross-build-packages": ["gcc-powerpc-linux-gnu", "libc6-dev-powerpc-cross"],
"triplet": "powerpc-linux-gnu",
},
"ppc64le": {
"kernel": "powerpc",
"deb": "ppc64el",
"uts_machine": "ppc64el",
"cross-compiler-prefix": "powerpc64le-linux-gnu-",
"cross-build-packages": [
"gcc-powerpc64le-linux-gnu",
"libc6-dev-ppc64el-cross",
],
"triplet": "powerpc64le-linux-gnu",
"core-dynamic-linker": "lib64/ld64.so.2",
},
"riscv64": {
"kernel": "riscv64",
"deb": "riscv64",
"uts_machine": "riscv64",
"cross-compiler-prefix": "riscv64-linux-gnu-",
"cross-build-packages": ["gcc-riscv64-linux-gnu", "libc6-dev-riscv64-cross"],
"triplet": "riscv64-linux-gnu",
"core-dynamic-linker": "lib/ld-linux-riscv64-lp64d.so.1",
},
"s390x": {
"kernel": "s390",
"deb": "s390x",
"uts_machine": "s390x",
"cross-compiler-prefix": "s390x-linux-gnu-",
"cross-build-packages": ["gcc-s390x-linux-gnu", "libc6-dev-s390x-cross"],
"triplet": "s390x-linux-gnu",
"core-dynamic-linker": "lib/ld64.so.1",
},
"x86_64": {
"kernel": "x86",
"deb": "amd64",
"uts_machine": "x86_64",
"triplet": "x86_64-linux-gnu",
"core-dynamic-linker": "lib64/ld-linux-x86-64.so.2",
},
}
_32BIT_USERSPACE_ARCHITECTURE = {
"aarch64": "armv7l",
"armv8l": "armv7l",
"ppc64le": "ppc",
"x86_64": "i686",
}
_WINDOWS_TRANSLATIONS = {"AMD64": "x86_64"}
_HOST_CODENAME_FOR_BASE = {"core18": "bionic", "core": "xenial"}
_HOST_COMPATIBILITY = {
"xenial": ["trusty", "xenial"],
"bionic": ["trusty", "xenial", "bionic"],
}
_STATIC_BASES = ["bare"]
# TODO: just check the base.
_LINKER_VERSION_FOR_BASE = {"core20": "2.31", "core18": "2.27", "core": "2.23"}
def _get_platform_architecture():
architecture = platform.machine()
# Translate the windows architectures we know of to architectures
# we can work with.
if sys.platform == "win32":
architecture = _WINDOWS_TRANSLATIONS.get(architecture)
if platform.architecture()[0] == "32bit":
userspace = _32BIT_USERSPACE_ARCHITECTURE.get(architecture)
if userspace:
architecture = userspace
return architecture
class ProjectOptions:
@property
def parallel_build_count(self) -> int:
try:
build_count = len(os.sched_getaffinity(0))
except AttributeError:
# Fall back to multiprocessing.cpu_count()...
try:
build_count = multiprocessing.cpu_count()
except NotImplementedError:
logger.warning(
"Unable to determine CPU count; disabling parallel builds"
)
build_count = 1
return build_count
@property
def is_cross_compiling(self):
return self.__target_machine != self.__platform_arch
@property
def target_arch(self):
return self.__target_arch
@property
def cross_compiler_prefix(self):
try:
# cross-compilation of x86 32bit binaries on a x86_64 host is
# possible by reusing the native toolchain - let Kbuild figure
# it out by itself and pass down an empty cross-compiler-prefix
# to start the build
if self.__platform_arch == "x86_64" and self.__target_machine == "i686":
return ""
return self.__machine_info["cross-compiler-prefix"]
except KeyError:
raise errors.SnapcraftEnvironmentError(
"Cross compilation not supported for target arch {!r}".format(
self.__target_machine
)
)
@property
def additional_build_packages(self):
packages = []
if self.is_cross_compiling:
packages.extend(self.__machine_info.get("cross-build-packages", []))
return packages
@property
def arch_triplet(self):
return self.__machine_info["triplet"]
@property
def deb_arch(self):
return self.__machine_info["deb"]
@property
def kernel_arch(self):
return self.__machine_info["kernel"]
@property
def parts_dir(self) -> str:
return self._parts_dir
@property
def stage_dir(self) -> str:
return self._stage_dir
@property
def prime_dir(self) -> str:
return self._prime_dir
@property
def debug(self):
return self._debug
def __init__(
self, target_deb_arch=None, debug=False, *, work_dir: str = None
) -> None:
# Here for backwards compatibility.
project_dir = os.getcwd()
if work_dir is None:
work_dir = project_dir
self._debug = debug
self._parts_dir = os.path.join(work_dir, "parts")
self._stage_dir = os.path.join(work_dir, "stage")
self._prime_dir = os.path.join(work_dir, "prime")
logger.debug("Parts dir {}".format(self._parts_dir))
logger.debug("Stage dir {}".format(self._stage_dir))
logger.debug("Prime dir {}".format(self._prime_dir))
self._set_machine(target_deb_arch)
def _get_content_snaps(self) -> Set[str]:
"""Temporary shim for unit tests using ProjectOptions
where Project is really required. Will be removed in
future convergence work.
"""
return set()
def _get_provider_content_dirs(self) -> Set[str]:
"""Temporary shim for unit tests using ProjectOptions
where Project is really required. Will be removed in
future convergence work.
"""
return set()
def _get_stage_packages_target_arch(self) -> str:
"""Stub for 'Project' interface for tests using ProjectOptions()."""
return self.deb_arch
def is_static_base(self, base: str) -> bool:
"""Return True if a base that is intended to be static is used.
Static bases require all their necessary components to live within
the snap.
"""
return base in _STATIC_BASES
def is_host_compatible_with_base(self, base: str) -> bool:
"""Determines if the host is compatible with the GLIBC of the base.
The system should warn early on when building using a host that does
not match the intended base, this mechanism here enables additional
logic when that is ignored to determine built projects will actually
run.
:param str base: the base core snap to search for linker.
:returns: True if there are no GLIBC incompatibilities with the chosen
build host, else it returns False.
:rtype: bool
"""
try:
codename = os_release.OsRelease().version_codename()
except errors.OsReleaseCodenameError:
return False
logger.debug("Running on {!r}".format(codename))
build_host_for_base = _HOST_CODENAME_FOR_BASE.get(base)
if build_host_for_base is None:
return False
compatible_hosts = _HOST_COMPATIBILITY.get(build_host_for_base, [])
return codename in compatible_hosts
# This is private to not make the API public given that base
# will be part of the new Project.
def _get_linker_version_for_base(self, base: str) -> str:
"""Returns the linker version for base."""
try:
return _LINKER_VERSION_FOR_BASE[base]
except KeyError:
linker_file = os.path.basename(self.get_core_dynamic_linker(base))
return file_utils.get_linker_version_from_file(linker_file)
def get_core_dynamic_linker(self, base: str, expand: bool = True) -> str:
"""Returns the dynamic linker used for the targeted core.
:param str base: the base core snap to search for linker.
:param bool expand: expand the linker to the actual linker if True,
else the main entry point to the linker for the
projects architecture.
:return: the absolute path to the linker
:rtype: str
:raises snapcraft.internal.errors.SnapcraftMissingLinkerInBaseError:
if the linker cannot be found in the base.
:raises snapcraft.internal.errors.SnapcraftEnvironmentError:
if a loop is found while resolving the real path to the linker.
"""
core_path = common.get_installed_snap_path(base)
dynamic_linker_path = os.path.join(
core_path,
self.__machine_info.get("core-dynamic-linker", "lib/ld-linux.so.2"),
)
# return immediately if we do not need to expand
if not expand:
return dynamic_linker_path
# We can't use os.path.realpath because any absolute symlinks
# have to be interpreted relative to core_path, not the real
# root.
seen_paths = set() # type: Set[str]
while True:
if dynamic_linker_path in seen_paths:
raise errors.SnapcraftEnvironmentError(
"found symlink loop resolving dynamic linker path"
)
seen_paths.add(dynamic_linker_path)
if not os.path.lexists(dynamic_linker_path):
raise errors.SnapcraftMissingLinkerInBaseError(
base=base, linker_path=dynamic_linker_path
)
if not os.path.islink(dynamic_linker_path):
return dynamic_linker_path
link_contents = os.readlink(dynamic_linker_path)
if os.path.isabs(link_contents):
dynamic_linker_path = os.path.join(core_path, link_contents.lstrip("/"))
else:
dynamic_linker_path = os.path.join(
os.path.dirname(dynamic_linker_path), link_contents
)
def _set_machine(self, target_deb_arch):
self.__platform_arch = _get_platform_architecture()
if not target_deb_arch:
self.__target_machine = self.__platform_arch
else:
self.__target_machine = _find_machine(target_deb_arch)
logger.info("Setting target machine to {!r}".format(target_deb_arch))
self.__machine_info = _ARCH_TRANSLATIONS[self.__target_machine]
# Set target arch to match the host if unspecified.
if target_deb_arch is None:
self.__target_arch = self.__machine_info.get("deb")
else:
self.__target_arch = target_deb_arch
def _get_deb_arch(machine):
return _ARCH_TRANSLATIONS[machine].get("deb", None)
def _find_machine(deb_arch):
for machine in _ARCH_TRANSLATIONS:
if _ARCH_TRANSLATIONS[machine].get("deb", "") == deb_arch:
return machine
elif _ARCH_TRANSLATIONS[machine].get("uts_machine", "") == deb_arch:
return machine
raise errors.SnapcraftEnvironmentError(
"Cannot set machine from deb_arch {!r}".format(deb_arch)
)
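# A minimal usage sketch. Assumptions for illustration only: the class defined
# above is snapcraft's ProjectOptions (as the unit-test shims suggest), "arm64"
# is a valid deb architecture in _ARCH_TRANSLATIONS, and the work_dir path is
# hypothetical.
#
#     options = ProjectOptions(target_deb_arch="arm64", work_dir="/tmp/build")
#     options.deb_arch            # "arm64"
#     options.arch_triplet        # e.g. "aarch64-linux-gnu"
#     options.is_cross_compiling  # True when the host machine differs
#     options.parts_dir           # "/tmp/build/parts"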
|
gpl-3.0
| -6,421,764,754,426,104,000 | 32.900783 | 88 | 0.59658 | false |
eroicaleo/LearningPython
|
interview/leet/146_LRU_Cache.py
|
1
|
2568
|
#!/usr/bin/env python
class LRUCache:
class Node:
def __init__(self, key, val):
self.val, self.key = val, key
self.prev = None
self.next = None
def __init__(self, capacity):
"""
:type capacity: int
"""
self.capacity = capacity
self.dict = dict()
self.head, self.tail = None, None
def get(self, key):
"""
:type key: int
:rtype: int
"""
if key in self.dict:
node = self.dict[key]
if node == self.head:
return node.val
node.prev.next = node.next
if node == self.tail:
self.tail = node.prev
else:
node.next.prev = node.prev
print('In get node: %d' % node.key)
print('In get self.head: %d' % self.head.key)
self.head.prev = node
            node.next, self.head = self.head, node
print('In get after swapping node: %d' % node.key)
print('In get after swapping self.head: %d' % self.head.key)
print('In get after swapping self.head.next.prev: %d' % self.head.next.prev.key)
return node.val
return -1
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: void
"""
if self.get(key) != -1:
self.head.val = value
elif len(self.dict) < self.capacity:
print("I am inserting new node: %d" % (key))
node = self.Node(key, value)
if len(self.dict) == 0:
self.tail = node
else:
self.head.prev = node
node.next, self.head = self.head, node
print("new head: %d" % self.head.key)
self.dict[key] = node
else:
self.get(self.tail.key)
node = self.head
node.val = value
print('Prepare to delete key %d' % node.key)
del self.dict[node.key]
node.key = key
self.dict[key] = node
cache = LRUCache(2)
print(cache.get(1))
cache.put(2, 6)
print(cache.get(1))
cache.put(1, 5)
cache.put(1, 2)
print(cache.get(1))
print(cache.get(2))
quit()
cache = LRUCache(2)
cache.put(1, 1)
print(cache.get(1))
print("now head: ", cache.head.key)
print(cache.get(2))
cache.put(2, 2)
print("now head: ", cache.head.key)
print(cache.get(1))
print("now head: ", cache.head.key)
print(cache.get(2))
cache.put(3, 3)
print(cache.get(2))
print(cache.get(1))
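# For comparison, a compact sketch of the same LRU behaviour built on
# collections.OrderedDict from the standard library. Illustrative only; the
# class name LRUCacheOD is not part of the exercise above.
#
#     from collections import OrderedDict
#
#     class LRUCacheOD:
#         def __init__(self, capacity):
#             self.capacity = capacity
#             self.data = OrderedDict()
#
#         def get(self, key):
#             if key not in self.data:
#                 return -1
#             self.data.move_to_end(key)         # mark as most recently used
#             return self.data[key]
#
#         def put(self, key, value):
#             if key in self.data:
#                 self.data.move_to_end(key)
#             self.data[key] = value
#             if len(self.data) > self.capacity:
#                 self.data.popitem(last=False)  # evict least recently used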
|
mit
| 2,415,132,396,107,739,600 | 27.21978 | 92 | 0.503505 | false |
jmalonzo/pywebkitgtk
|
tests/test_webbackforwardlist.py
|
1
|
2703
|
from webkit import WebHistoryItem, WebBackForwardList, WebView
import gobject
import unittest
class TestWebBackForwardList (unittest.TestCase):
"""Test WebBackForwardList functions"""
def setUp(self):
gobject.threads_init()
self.view = WebView()
self.bf_list = self.view.get_back_forward_list()
def testadditem(self):
item = WebHistoryItem(uri="http://example.com/", title="Example1")
self.bf_list.add_item(item)
self.assertEqual(item, self.bf_list.get_current_item())
currentItem = self.bf_list.get_current_item()
self.assertEqual("Example1", currentItem.get_title())
self.assertEqual("http://example.com/", currentItem.get_uri())
def testbackforwardlistwithlimit(self):
item1 = WebHistoryItem(uri="http://example.com/1/", title="Example1")
item2 = WebHistoryItem(uri="http://example.com/2/", title="Example2")
item3 = WebHistoryItem(uri="http://example.com/3/", title="Example3")
item4 = WebHistoryItem(uri="http://example.com/4/", title="Example4")
self.bf_list.add_item(item1)
self.bf_list.add_item(item2)
self.bf_list.add_item(item3)
self.bf_list.add_item(item4)
backList = list()
backList = self.bf_list.get_back_list_with_limit(10)
self.assertTrue(backList is not None)
self.assertEqual("Example4", self.bf_list.get_current_item().get_title())
self.assertEqual("Example3", backList[0].props.title)
self.assertEqual("Example2", backList[1].props.title)
self.assertEqual("Example1", backList[2].props.title)
self.bf_list.go_to_item(item1)
self.assertEqual(self.bf_list.get_current_item().get_title(), item1.get_title())
forwardList = list()
forwardList = self.bf_list.get_forward_list_with_limit(10)
self.assertTrue(forwardList is not None)
self.assertEqual("Example4", forwardList[0].props.title)
self.assertEqual("Example3", forwardList[1].props.title)
self.assertEqual("Example2", forwardList[2].props.title)
def testclearbackforwardlist(self):
item1 = WebHistoryItem(uri="http://example.com/1/", title="Example1")
item2 = WebHistoryItem(uri="http://example.com/2/", title="Example2")
self.bf_list.set_limit(0)
self.bf_list.set_limit(1)
self.bf_list.add_item(item2)
self.assertEqual(self.bf_list.get_back_length(), 0)
self.assertEqual(self.bf_list.get_forward_length(), 0)
self.assertEqual(self.bf_list.get_current_item(), item2)
def tearDown(self):
# nothing here.
pass
if __name__ == '__main__':
unittest.main()
|
lgpl-2.1
| -4,415,265,464,655,258,600 | 41.234375 | 88 | 0.650388 | false |
martinsch/vigra
|
vigranumpy/lib/pyqt/imagewindow.py
|
1
|
23939
|
#######################################################################
#
# Copyright 2009-2010 by Ullrich Koethe
#
# This file is part of the VIGRA computer vision library.
# The VIGRA Website is
# http://hci.iwr.uni-heidelberg.de/vigra/
# Please direct questions, bug reports, and contributions to
# ullrich.koethe@iwr.uni-heidelberg.de or
# vigra@informatik.uni-hamburg.de
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#######################################################################
import math, os, numpy, PyQt4
import PyQt4.QtCore as qcore
import PyQt4.QtGui as qt
from PyQt4.QtCore import SIGNAL
import vigra
import vigra.ufunc
try:
from VigraQt import OverlayViewer, ImageCursor
except Exception, e:
vigra._fallbackModule('VigraQt',
'''
%s
If VigraQt is missing on your system, you can download it from
http://kogs-www.informatik.uni-hamburg.de/~meine/software/vigraqt/.''' % str(e))
from VigraQt import OverlayViewer, ImageCursor
import quickdialog
import weakref
import viewer2svg
class Crosshair(ImageCursor):
def __init__(self, *args):
ImageCursor.__init__(self, *args)
self.visible = False
self.position = qcore.QPoint(-1, -1)
def setVisible(self, what=True):
self.visible = what
if what:
ImageCursor.setPosition(self, self.position)
else:
ImageCursor.setPosition(self, qcore.QPoint(-1, -1))
def setPosition(self, pos):
self.position = pos
if self.visible:
ImageCursor.setPosition(self, self.position)
class ImageViewer(OverlayViewer):
activeViewers = weakref.WeakValueDictionary()
def __init__(self, image, normalize=True, title=None, parent=None):
OverlayViewer.__init__(self, parent)
self.setImage(image, normalize)
self._savedExpression = "x"
self._lastSaveType = 2
self.overlays = []
if title is not None:
self.setWindowTitle(title)
elif hasattr(image, "name"):
self.setWindowTitle(image.name)
else:
for k in xrange(1, 10000):
if not ImageViewer.activeViewers.has_key(k):
break
ImageViewer.activeViewers[k] = self
self.setWindowTitle("Image %d" % k)
#self.imageCursor = ImageCursor(self) # doesn't work anymore - setVisible() is gone
self.imageCursor = Crosshair(self)
self.imageCursor.setVisible(False)
self.imageCursor.setPosition(qcore.QPoint(self.image.width // 2, self.image.height // 2))
OverlayViewer.addOverlay(self, self.imageCursor)
self.zoomInAction = qt.QAction("Zoom in", self)
self.zoomInAction.setShortcut("+")
self.connect(self.zoomInAction, SIGNAL("triggered()"), self.zoomInPopup)
self.zoomOutAction = qt.QAction("Zoom out", self)
self.zoomOutAction.setShortcut("-")
self.connect(self.zoomOutAction, SIGNAL("triggered()"), self.zoomOutPopup)
self.saveAction = qt.QAction("Save image...", self)
self.saveAction.setShortcut("S")
self.connect(self.saveAction, SIGNAL("triggered()"), self.writeImage)
self.svgAction = qt.QAction("Save as SVG...", self)
self.svgAction.setShortcut("V")
self.connect(self.svgAction, SIGNAL("triggered()"), self.writeSVG)
self.expressionAction = qt.QAction("Apply expression...", self)
self.expressionAction.setShortcut("E")
self.connect(self.expressionAction, SIGNAL("triggered()"), self.applyExpression)
self.cursorAction = qt.QAction("Line cursor", self)
self.cursorAction.setShortcut("L")
self.cursorAction.setCheckable(True)
self.cursorAction.setChecked(False)
self.connect(self.cursorAction, SIGNAL("triggered()"), self._toggleImageCursor)
self.popup = qt.QMenu(self)
self.popup.addAction(self.zoomInAction)
self.popup.addAction(self.zoomOutAction)
self.popup.addAction(self.saveAction)
self.popup.addAction(self.svgAction)
self.popup.addAction(self.expressionAction)
self.popup.addAction(self.cursorAction)
self.overlayMenu = self.popup.addMenu("Overlays")
self.connect(self.overlayMenu, SIGNAL("aboutToShow()"), self.overlayPopup)
def setImage(self, image, normalize=True):
if not hasattr(image, "qimage"):
image = image.view(vigra.Image)
self.image = image
self._normalized = normalize
OverlayViewer.setImage(self, image.qimage(normalize))
def showImageCursor(self, yesOrNo=True):
if yesOrNo != self.cursorAction.isChecked():
self.cursorAction.trigger()
def _toggleImageCursor(self):
self.imageCursor.activateTool(self.cursorAction.isChecked())
self.imageCursor.setVisible(self.cursorAction.isChecked())
def addOverlay(self, overlay):
if not hasattr(overlay, "draw"):
raise TypeError("addOverlay: " + str(overlay) +
"is no valid overlay with 'draw' method!")
if overlay.parent() is None:
overlay.setParent(self)
overlay.visible = True
if not hasattr(overlay, "name") or not overlay.name:
overlay.name = self._defaultOverlayName(overlay)
self.overlays.append(overlay)
OverlayViewer.addOverlay(self, overlay)
self.update()
return len(self.overlays) - 1
def removeOverlay(self, overlay):
if type(overlay) == int:
try:
OverlayViewer.removeOverlay(self, self.overlays[overlay])
self.overlays.pop(overlay)
self.update()
except IndexError, e:
print "No such overlay."
else:
try:
self.overlays.remove(overlay)
OverlayViewer.removeOverlay(self, overlay)
self.update()
except ValueError, e:
print "No such overlay."
def _slideAfterZoom(self, shift):
if self.zoomLevel() > 0:
shift *= 1 + self.zoomLevel()
elif self.zoomLevel() < 0:
shift /= 1 - self.zoomLevel()
self.slideBy(shift)
def zoomInPopup(self):
beforePos = self.imageCoordinate(self.mousepos)
self.zoomUp()
afterPos = self.imageCoordinate(self.mousepos)
self._slideAfterZoom(afterPos - beforePos)
def zoomOutPopup(self):
beforePos = self.imageCoordinate(self.mousepos)
self.zoomDown()
afterPos = self.imageCoordinate(self.mousepos)
self._slideAfterZoom(afterPos - beforePos)
def _defaultOverlayName(self, o):
name = str(o.__class__)
if name[:8] == "<class '":
name = name[8:-2]
try:
name = name[name.rindex(".") + 1:]
except ValueError:
pass
return name
def overlayPopup(self):
self.overlayMenu.clear()
index = 0
hideable = False
showable = False
for o in self.overlays:
overlayName = o.name
text = "[%d] %s" % (index, overlayName)
color = None
if hasattr(o, "color") and isinstance(o.color, qt.QColor):
color = o.color
pmHeight = 5
elif hasattr(o, "fillColor") and isinstance(o.fillColor, qt.QColor):
color = o.fillColor
pmHeight = 16
if color:
colorPM = qt.QPixmap(16, pmHeight)
colorPM.fill(color)
icon = qt.QIcon(colorPM)
id = qt.QAction(icon, text, self)
else:
id = qt.QAction(text, self)
self.overlayMenu.addAction(id)
id.setCheckable(True)
self.connect(id, SIGNAL('triggered()'), self.toggleOverlayVisibilityWithParam(o))
id.setChecked(o.isVisible())
if o.isVisible():
hideable = True
else:
showable = True
index += 1
id = qt.QAction("&Hide all", self)
self.overlayMenu.addAction(id)
self.connect(id, SIGNAL('triggered()'), self.toggleOverlayVisibilityWithParam(False))
id.setEnabled(hideable)
id = qt.QAction("&Show all", self)
self.overlayMenu.addAction(id)
self.connect(id, SIGNAL('triggered()'), self.toggleOverlayVisibilityWithParam(True))
id.setEnabled(showable)
def toggleOverlayVisibilityWithParam(self, o):
return lambda: self.toggleOverlayVisibility(o)
def toggleOverlayVisibility(self, o=None):
'''Toggle or set visibility of given overlay and update view.
The parameter can be a boolean - which sets the visibility of
all overlays accordingly - an overlay object or the index
of the overlay to be hidden/re-shown. If it is omitted, all
overlays will be toggled.
'''
if o is None:
for k in self.overlays:
k.setVisible(not k.isVisible())
elif type(o) is bool:
for k in self.overlays:
k.setVisible(o)
else:
if type(o) is int:
o = self.overlays[o]
o.setVisible(not o.isVisible())
self.update()
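    # A short illustration of the parameter forms described in the docstring
    # above (the viewer and overlay names are hypothetical):
    #
    #     viewer.toggleOverlayVisibility()            # toggle every overlay
    #     viewer.toggleOverlayVisibility(False)       # hide all overlays
    #     viewer.toggleOverlayVisibility(0)           # toggle overlay at index 0
    #     viewer.toggleOverlayVisibility(my_overlay)  # toggle a specific overlay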
def applyExpression(self, expr=None, normalized=None):
if expr is not None:
self._savedExpression = expr
else:
d = quickdialog.QuickDialog(self, "Enter Expression")
d.expression = quickdialog.OptionalStringInput(d, "Execute 'lambda x: ")
d.expression.setText(self._savedExpression)
d.expression.setFocus()
d.addSpacing(10)
d.norm = quickdialog.CheckBox(d, "Normalize intensity to range 0...255")
d.norm.setChecked(self._normalized)
if d.exec_() == 0:
return
self._savedExpression = d.expression.text()
self._normalized = True if d.norm.selection() else False
if normalized is not None:
self._normalized = normalized
try:
image, normalized = self.getDisplayedImage()
except Exception, e:
qt.QMessageBox.critical(self, "Error Applying Expression", str(e))
return
OverlayViewer.setImage(self, image.qimage(normalized))
def getDisplayedImage(self):
"""Returns the displayed image and the normalize flag
(BYTE or NBYTE) as tuple/pair.
Note that the returned image is the original image if no
expression is applied, i.e. you should not change the returned
object. If active, the expression is applied via
eval() on every call of getDisplayedImage()."""
if not self._savedExpression or self._savedExpression == "x":
self._savedExpression = "x"
image = self.image
else:
for f in vigra.ufunc.__all__:
exec 'from vigra.ufunc import %s' % f
for f in dir(vigra.colors):
if not f.startswith('__'):
exec 'from vigra.colors import %s' % f
x = self.image
image = eval(self._savedExpression)
return image, self._normalized
def writeImage(self):
d = quickdialog.QuickDialog(self, "Write Image")
imageFileExtensions = '*.' + ' *.'.join(vigra.impex.listExtensions().split(' '))
d.filedialog = quickdialog.OutputFile(
d, "Output filename:", "Image Files (" + imageFileExtensions + ")")
d.filedialog.setFocus()
d.choices = quickdialog.HDialogGroup(d)
d.type = quickdialog.VChoice(d.choices, "Output Pixel Type")
d.type.addButton("Byte", "UINT8")
d.type.addButton("Normalized to byte", "NBYTE")
d.type.addButton("Keep type", "NATIVE")
d.type.selectButton(1 if self._normalized else 0)
d.type.buttonBox.setEnabled(self._lastSaveType)
d.choices.addStretch(1)
d.which = quickdialog.VChoice(d.choices, "Save ...")
d.which.addButton("displayed image (zoomed, overlays)", 0)
d.which.addButton("displayed image (1:1)", 1)
d.which.addButton("original image", 2)
d.connect(d.which.buttonBox, SIGNAL("clicked(int)"), \
d.type.buttonBox.setEnabled)
d.which.selectButton(self._lastSaveType)
while True:
if d.exec_() == 0:
return
filename = d.filedialog.text()
pixelType = d.type.selection()
self._lastSaveType = d.which.selection()
if d.which.selection():
if d.which.selection() == 2:
image = self.image
else:
                    image = self.getDisplayedImage()[0]
try:
image.writeImage(filename, pixelType)
except RuntimeError, e:
qt.QMessageBox.critical(self, "Error", str(e))
else:
return
else:
                formats = {"png": "PNG", \
                           "bmp": "BMP", \
                           "xbm": "XBM", \
                           "xpm": "XPM", \
                           "pnm": "PPM", \
                           "ppm": "PPM", \
                           "jpg": "JPEG", \
                           "jpeg": "JPEG", \
                           "tif": "TIF"}
_, ext = os.path.splitext(filename)
if not formats.has_key(ext[1:]):
f = " ".join(formats.keys())
qt.QMessageBox.critical(self, "Error", \
"Displayed image with overlays can only be stored as\n" + f)
else:
pixmap = self.getContentsPixmap()
pixmap.save(filename, formats[ext[1:]])
return
def writeSVG(self):
d = quickdialog.QuickDialog(self, "Write Viewer Contents to SVG")
d.filedialog = quickdialog.OutputFile(
d, "Output filename:", "SVG Files (*.svg)")
d.filedialog.setFocus()
d.choices = quickdialog.HDialogGroup(d)
d.which = quickdialog.VChoice(d.choices, "Save ...")
d.which.addButton("all overlays", 0)
d.which.addButton("only displayed overlays", 1)
d.which.selectButton(self._lastSaveType)
while True:
if d.exec_() == 0:
return
self._lastSaveType = d.which.selection()
allOVs = (d.which.selection() == 0)
filename = d.filedialog.text()
basename, ext = os.path.splitext(filename)
try:
if ext == ".SVG" or ext == ".svg":
viewer2svg.viewer2svg(self, basename, not allOVs)
else:
viewer2svg.viewer2svg(self, filename, not allOVs)
except RuntimeError, e:
qt.QMessageBox.critical(self, "Error", str(e))
return
def contextMenuEvent(self, e):
"handles pop-up menu"
self.overlayMenu.setEnabled(len(self.overlays) > 0)
self.mousepos = e.pos()
self.popup.exec_(e.globalPos())
def keyPressEvent(self, e):
"handles keys [S], [E], and possibly [Q] (for toplevel-windows)"
if e.key() == qcore.Qt.Key_Q and not self.parent():
self.close()
elif e.key() == qcore.Qt.Key_S:
self.writeImage()
elif e.key() == qcore.Qt.Key_E:
self.applyExpression()
elif e.key() == qcore.Qt.Key_L:
self.cursorAction.trigger()
elif e.key() == qcore.Qt.Key_Right or e.key() == qcore.Qt.Key_Left or \
e.key() == qcore.Qt.Key_Up or e.key() == qcore.Qt.Key_Down:
OverlayViewer.keyPressEvent(self, e)
elif e.key() == qcore.Qt.Key_Plus or e.key() == qcore.Qt.Key_Greater:
OverlayViewer.zoomUp(self)
elif e.key() == qcore.Qt.Key_Minus or e.key() == qcore.Qt.Key_Less:
OverlayViewer.zoomDown(self)
else:
self.emit(qcore.SIGNAL("keyPressed"), (e.key()))
e.ignore()
def keyReleaseEvent(self, e):
self.emit(qcore.SIGNAL("keyReleased"), (e.key()))
e.ignore()
def mousePressEvent(self, e):
imagePos = OverlayViewer.imageCoordinateF(self, qcore.QPoint(e.x(), e.y()))
self.emit(qcore.SIGNAL("mousePressed"), (imagePos.x(), imagePos.y(), e.button()))
OverlayViewer.mousePressEvent(self, e)
e.ignore()
class CaptionImageViewer(qt.QFrame):
def __init__(self, image, normalize=True, title=None, parent=None):
qt.QFrame.__init__(self, parent)
self.viewer = ImageViewer(image, normalize, title, parent=self)
self.setWindowTitle(self.viewer.windowTitle())
self._captionCoords = 0, 0
self._xplaces = int(math.log10(self.viewer.image.width) + 1.0)
self._yplaces = int(math.log10(self.viewer.image.height) + 1.0)
self._valueplaces = self.viewer.image.channels * 5
self.label = qt.QLabel(self)
font = qt.QFont()
font.setPointSize(10)
font.setStyleHint(qt.QFont.TypeWriter)
self.label.setFont(font)
self._layout = qt.QVBoxLayout(self)
self._layout.setSpacing(5)
self._layout.addWidget(self.viewer, 1)
self._layout.addWidget(self.label)
self.connect(self.viewer, SIGNAL('mouseOver(int, int)'), self.updateCaption)
self.connect(self.viewer.cursorAction, SIGNAL('triggered()'), self._toggleCaptionSignals)
self.updateCaption()
def updateCaption(self, x=None, y=None):
x = int(round(x)) if x is not None else self._captionCoords[0]
y = int(round(y)) if y is not None else self._captionCoords[1]
if x < 0 or x >= self.viewer.image.width or \
y < 0 or y >= self.viewer.image.height:
return
self._captionCoords = x, y
label = str(x).rjust(self._xplaces) + " x " + str(y).rjust(self._yplaces) +\
" = " + str(self.viewer.image[x, y]).ljust(self._valueplaces)
self.label.setText(label)
self.emit(SIGNAL('captionChanged'), self.label.text())
def updateCaptionP(self, point):
self.updateCaption(point.x(), point.y())
def _toggleCaptionSignals(self):
if self.viewer.cursorAction.isChecked():
self.disconnect(self.viewer,
SIGNAL('mouseOver(int, int)'), self.updateCaption)
self.connect(self.viewer.imageCursor,
SIGNAL('positionChanged(QPoint)'), self.updateCaptionP)
else:
self.connect(self.viewer,
SIGNAL('mouseOver(int, int)'), self.updateCaption)
self.disconnect(self.viewer.imageCursor,
SIGNAL('positionChanged(QPoint)'), self.updateCaptionP)
def setImage(self, image, normalize=None):
"""imageWindow.setImage(image, normalize = None)
Replace the current image with the given one. If normalized
is not given (or None), the normalized state is not changed."""
self.viewer.setImage(image, normalize)
self.updateCaption()
class CursorAction(qt.QAction):
def __init__(self, name, parent):
qt.QAction.__init__(self, name, parent)
self.x, self.y = -1, -1
self.zoomLevel = 0
def trigger(self):
qt.QAction.trigger(self)
for v in self.viewers:
v.viewer.cursorAction.setChecked(self.isChecked())
v.viewer._toggleImageCursor()
v._toggleCaptionSignals()
def broadcastPosition(self, pos):
if self.x == pos.x() and self.y == pos.y():
return
self.x, self.y = pos.x(), pos.y()
for v in self.viewers:
v.viewer.imageCursor.setPosition(pos)
def broadcastZoom(self, level):
if self.zoomLevel == level:
return
self.zoomLevel = level
for v in self.viewers:
v.viewer.setZoomLevel(level)
class ImageWindow(qt.QFrame):
'''Display one or more images in a grid-like layout.
'''
def __init__(self, parent=None):
qt.QFrame.__init__(self, parent)
self.cursorAction = CursorAction("Connected line cursors", self)
self.cursorAction.setCheckable(True)
self.cursorAction.setChecked(False)
self.addAction(self.cursorAction)
self.cursorAction.viewers = []
self.layout = qt.QGridLayout(self)
def setImage(self, image, x=0, y=0, normalize=True, title=None):
"""Place the given image at the given position of this window's grid layout.
If an image already exists at this position, it is replaced.
"""
if self.layout.itemAtPosition(y, x):
self.layout.itemAtPosition(y, x).widget().setImage(image, normalize)
else:
CIviewer = CaptionImageViewer(image, normalize, title, parent=self)
self.layout.addWidget(CIviewer, y, x)
self.cursorAction.viewers.append(CIviewer)
if len(self.cursorAction.viewers) == 1:
self.setWindowTitle(CIviewer.windowTitle())
if self.cursorAction.x != -1:
CIviewer.viewer.imageCursor.setPosition(
qcore.QPoint(self.cursorAction.x, self.cursorAction.y))
CIviewer.viewer.setZoomLevel(self.cursorAction.zoomLevel)
if self.cursorAction.isChecked():
CIviewer.viewer.cursorAction.trigger()
self.disconnect(CIviewer.viewer.cursorAction, SIGNAL("triggered()"),
CIviewer.viewer._toggleImageCursor)
self.connect(CIviewer.viewer.cursorAction, SIGNAL("triggered()"),
self.cursorAction.trigger)
self.connect(CIviewer.viewer.imageCursor, SIGNAL("positionChanged(QPoint)"),
self.cursorAction.broadcastPosition)
self.connect(CIviewer.viewer, SIGNAL("zoomLevelChanged(int)"),
self.cursorAction.broadcastZoom)
self.updateGeometry()
# this call is necessary to update the sizeHint() before adjustSize() is called
qcore.QCoreApplication.processEvents()
self.adjustSize()
def viewer(self, x=0, y=0):
if self.layout.itemAtPosition(y, x):
return self.layout.itemAtPosition(y, x).widget().viewer
raise ValueError("ImageWindow.viewer(): viewer at (%d, %d) is undefined." % (x, y))
def showImage(image, normalize=True, title=None):
if isinstance(image, str):
image = vigra.impex.readImage(image)
v = ImageWindow()
v.setImage(image, normalize=normalize, title=title)
v.show()
return v
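# A minimal usage sketch for this module. The image filename is hypothetical,
# and a working PyQt4/VigraQt installation is assumed:
#
#     app = qt.QApplication([])
#     win = showImage("lenna.png", normalize=True, title="demo")
#     win.setImage(vigra.impex.readImage("lenna.png"), x=1, y=0)  # second grid cell
#     app.exec_()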
|
mit
| 4,147,204,187,256,811,500 | 37.799028 | 97 | 0.588329 | false |
eagleamon/home-assistant
|
homeassistant/components/fan/__init__.py
|
1
|
8794
|
"""
Provides functionality to interact with fans.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/fan/
"""
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.components import group
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (SERVICE_TURN_ON, SERVICE_TOGGLE,
SERVICE_TURN_OFF, ATTR_ENTITY_ID,
STATE_UNKNOWN)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
DOMAIN = 'fan'
SCAN_INTERVAL = timedelta(seconds=30)
GROUP_NAME_ALL_FANS = 'all fans'
ENTITY_ID_ALL_FANS = group.ENTITY_ID_FORMAT.format(GROUP_NAME_ALL_FANS)
ENTITY_ID_FORMAT = DOMAIN + '.{}'
# Bitfield of features supported by the fan entity
ATTR_SUPPORTED_FEATURES = 'supported_features'
SUPPORT_SET_SPEED = 1
SUPPORT_OSCILLATE = 2
SUPPORT_DIRECTION = 4
SERVICE_SET_SPEED = 'set_speed'
SERVICE_OSCILLATE = 'oscillate'
SERVICE_SET_DIRECTION = 'set_direction'
SPEED_OFF = 'off'
SPEED_LOW = 'low'
SPEED_MEDIUM = 'medium'
SPEED_HIGH = 'high'
DIRECTION_FORWARD = 'forward'
DIRECTION_REVERSE = 'reverse'
ATTR_SPEED = 'speed'
ATTR_SPEED_LIST = 'speed_list'
ATTR_OSCILLATING = 'oscillating'
ATTR_DIRECTION = 'direction'
PROP_TO_ATTR = {
'speed': ATTR_SPEED,
'speed_list': ATTR_SPEED_LIST,
'oscillating': ATTR_OSCILLATING,
'supported_features': ATTR_SUPPORTED_FEATURES,
'direction': ATTR_DIRECTION,
} # type: dict
FAN_SET_SPEED_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_SPEED): cv.string
}) # type: dict
FAN_TURN_ON_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_SPEED): cv.string
}) # type: dict
FAN_TURN_OFF_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids
}) # type: dict
FAN_OSCILLATE_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_OSCILLATING): cv.boolean
}) # type: dict
FAN_TOGGLE_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids
})
FAN_SET_DIRECTION_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_DIRECTION): cv.string
}) # type: dict
_LOGGER = logging.getLogger(__name__)
def is_on(hass, entity_id: str=None) -> bool:
"""Return if the fans are on based on the statemachine."""
entity_id = entity_id or ENTITY_ID_ALL_FANS
state = hass.states.get(entity_id)
return state.attributes[ATTR_SPEED] not in [SPEED_OFF, STATE_UNKNOWN]
def turn_on(hass, entity_id: str=None, speed: str=None) -> None:
"""Turn all or specified fan on."""
data = {
key: value for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_SPEED, speed),
] if value is not None
}
hass.services.call(DOMAIN, SERVICE_TURN_ON, data)
def turn_off(hass, entity_id: str=None) -> None:
"""Turn all or specified fan off."""
data = {
ATTR_ENTITY_ID: entity_id,
}
hass.services.call(DOMAIN, SERVICE_TURN_OFF, data)
def toggle(hass, entity_id: str=None) -> None:
"""Toggle all or specified fans."""
data = {
ATTR_ENTITY_ID: entity_id
}
hass.services.call(DOMAIN, SERVICE_TOGGLE, data)
def oscillate(hass, entity_id: str=None, should_oscillate: bool=True) -> None:
"""Set oscillation on all or specified fan."""
data = {
key: value for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_OSCILLATING, should_oscillate),
] if value is not None
}
hass.services.call(DOMAIN, SERVICE_OSCILLATE, data)
def set_speed(hass, entity_id: str=None, speed: str=None) -> None:
"""Set speed for all or specified fan."""
data = {
key: value for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_SPEED, speed),
] if value is not None
}
hass.services.call(DOMAIN, SERVICE_SET_SPEED, data)
def set_direction(hass, entity_id: str=None, direction: str=None) -> None:
"""Set direction for all or specified fan."""
data = {
key: value for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_DIRECTION, direction),
] if value is not None
}
hass.services.call(DOMAIN, SERVICE_SET_DIRECTION, data)
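# Illustrative calls to the convenience helpers above. The entity id is
# hypothetical and `hass` is assumed to be a running Home Assistant instance:
#
#     turn_on(hass, 'fan.living_room', speed=SPEED_HIGH)
#     oscillate(hass, 'fan.living_room', should_oscillate=True)
#     set_direction(hass, 'fan.living_room', direction=DIRECTION_REVERSE)
#     turn_off(hass, 'fan.living_room')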
def setup(hass, config: dict) -> bool:
"""Expose fan control via statemachine and services."""
component = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_FANS)
component.setup(config)
    def handle_fan_service(service: str) -> None:
        """Handle service calls for fans."""
# Get the validated data
params = service.data.copy()
# Convert the entity ids to valid fan ids
target_fans = component.extract_from_service(service)
params.pop(ATTR_ENTITY_ID, None)
service_fun = None
for service_def in [SERVICE_TURN_ON, SERVICE_TURN_OFF,
SERVICE_SET_SPEED, SERVICE_OSCILLATE,
SERVICE_SET_DIRECTION]:
if service_def == service.service:
service_fun = service_def
break
if service_fun:
for fan in target_fans:
getattr(fan, service_fun)(**params)
for fan in target_fans:
if fan.should_poll:
fan.update_ha_state(True)
return
# Listen for fan service calls.
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
hass.services.register(DOMAIN, SERVICE_TURN_ON, handle_fan_service,
descriptions.get(SERVICE_TURN_ON),
schema=FAN_TURN_ON_SCHEMA)
hass.services.register(DOMAIN, SERVICE_TURN_OFF, handle_fan_service,
descriptions.get(SERVICE_TURN_OFF),
schema=FAN_TURN_OFF_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_SPEED, handle_fan_service,
descriptions.get(SERVICE_SET_SPEED),
schema=FAN_SET_SPEED_SCHEMA)
hass.services.register(DOMAIN, SERVICE_OSCILLATE, handle_fan_service,
descriptions.get(SERVICE_OSCILLATE),
schema=FAN_OSCILLATE_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_DIRECTION, handle_fan_service,
descriptions.get(SERVICE_SET_DIRECTION),
schema=FAN_SET_DIRECTION_SCHEMA)
return True
class FanEntity(ToggleEntity):
"""Representation of a fan."""
# pylint: disable=no-self-use
def set_speed(self: ToggleEntity, speed: str) -> None:
"""Set the speed of the fan."""
        if speed == SPEED_OFF:
self.turn_off()
return
raise NotImplementedError()
def set_direction(self: ToggleEntity, direction: str) -> None:
"""Set the direction of the fan."""
raise NotImplementedError()
def turn_on(self: ToggleEntity, speed: str=None, **kwargs) -> None:
"""Turn on the fan."""
        if speed == SPEED_OFF:
self.turn_off()
return
raise NotImplementedError()
def turn_off(self: ToggleEntity, **kwargs) -> None:
"""Turn off the fan."""
raise NotImplementedError()
def oscillate(self: ToggleEntity, oscillating: bool) -> None:
"""Oscillate the fan."""
pass
@property
def is_on(self):
"""Return true if the entity is on."""
return self.speed not in [SPEED_OFF, STATE_UNKNOWN]
@property
def speed(self) -> str:
"""Return the current speed."""
return None
@property
def speed_list(self: ToggleEntity) -> list:
"""Get the list of available speeds."""
return []
@property
def current_direction(self) -> str:
"""Return the current direction of the fan."""
return None
@property
def state_attributes(self: ToggleEntity) -> dict:
"""Return optional state attributes."""
data = {} # type: dict
for prop, attr in PROP_TO_ATTR.items():
if not hasattr(self, prop):
continue
value = getattr(self, prop)
if value is not None:
data[attr] = value
return data
@property
def supported_features(self: ToggleEntity) -> int:
"""Flag supported features."""
return 0
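# A minimal sketch of a platform entity built on FanEntity. The class and
# attribute names below are hypothetical and not part of Home Assistant:
#
#     class DemoFan(FanEntity):
#         def __init__(self):
#             self._speed = SPEED_OFF
#
#         @property
#         def speed(self) -> str:
#             return self._speed
#
#         @property
#         def speed_list(self) -> list:
#             return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
#
#         @property
#         def supported_features(self) -> int:
#             return SUPPORT_SET_SPEED
#
#         def set_speed(self, speed: str) -> None:
#             self._speed = speed
#
#         def turn_on(self, speed: str = None, **kwargs) -> None:
#             self.set_speed(speed or SPEED_MEDIUM)
#
#         def turn_off(self, **kwargs) -> None:
#             self._speed = SPEED_OFF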
|
apache-2.0
| -1,414,916,165,946,151,400 | 28.911565 | 78 | 0.617921 | false |
mathiasertl/django-ca
|
ca/django_ca/apps.py
|
1
|
1049
|
# This file is part of django-ca (https://github.com/mathiasertl/django-ca).
#
# django-ca is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# django-ca is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with django-ca. If not,
# see <http://www.gnu.org/licenses/>.
"""Default Django app configuration.
.. seealso:: https://docs.djangoproject.com/en/dev/ref/applications/
"""
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class DjangoCAConfig(AppConfig): # pylint: disable=missing-class-docstring
name = "django_ca"
verbose_name = _("Certificate Authority")
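# To activate this app config a project would typically list it (or simply
# "django_ca") in its settings; an illustrative snippet, not part of this module:
#
#     INSTALLED_APPS = [
#         # ...
#         "django_ca",
#     ]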
|
gpl-3.0
| -834,903,598,605,759,000 | 40.96 | 98 | 0.756911 | false |
wereallfeds/webshag
|
webshag/gui/gui_images.py
|
1
|
223034
|
## ################################################################# ##
## (C) SCRT - Information Security, 2007 - 2008 // author: ~SaD~ ##
## ################################################################# ##
## This program is free software: you can redistribute it and/or ##
## modify it under the terms of the GNU General Public License as ##
## published by the Free Software Foundation, either version 3 of ##
## the License, or (at your option) any later version. ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see http://www.gnu.org/licenses. ##
## ################################################################# ##
## last mod: 2008-11-03
## This file contains images generated with img2py.py script (wx.tools.img2py).
import wx
import cStringIO, zlib
from wx.lib.embeddedimage import PyEmbeddedImage
splash = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAbAAAAEhCAIAAACRFmKbAAAAA3NCSVQICAjb4U/gAAAgAElE"
"QVR4nOy9d7yV1ZU3vtbmUi5NFJSOiAhYsGFJNIpimxGNJRkHNRlMr5Y4ySSvJrFMEnQyToom"
"v4wymk8m0XlNIhqj0eiMGksUEBBRgUsR8NKkXbiXcuGyf3/s1XY55x6K83n/uM9M5J5znmfv"
"tdf6ru8qTwPo2Dq2jq1j69g6to6tY+vYOraOrWPr2Dq2jq1j69g6to6tY+vYOraOrWPr2Dq2"
"jq1j69g6to6tY+vYOraOrWPr2Dq2jq1j69g6to6tY+vY9mlDRMy/BPC1Ha07yp9huPaOj45E"
"n+5fswTVh25nh/AHIvg9nqx2AfdlKXt6cMXd4x/sp30Sr+o0lT/UcnS7X++5VDULVOuMyX6F"
"j/tTu+WxajR5+/uX9oT9CY7aJNkrjdV2UE17YSfnfG2IYQWVWLDC4DHVEP/QMIWx8+8LYwIC"
"gCesFQWgL6tQdH6YyFZ9t2pfGw8o7VEj75cPraaXPfm1ZNT2Jdh/bo0I3icwM2ohwOwZexbp"
"vQjpfRN9/5FDOlQ7VmE8J7rbUyos7JWmBkWd7vHK20l2KsoqfmqPqhhWwk6px9ae3FRy7k6d"
"OpmfEdH7asapPn7t+0I1Jqu8KoNzSGivJGtOcMC6TiexkKg4TPmg+GN7OqjI/bljF+GUhZV2"
"8j0ZJOeIivyfxr4qoSxelnEEyFRlli2aKuE9VmNODogcESuL0458ZqLSrKyzygGyoqebYYqR"
"BVJrkQbCZ29+K01QDJdBHWXMVA7eNTFVqiEVURKTdJaCotvLm/IolkC2+haJGv2jKrGrLVGp"
"TI11dXVV5C4F2spALPGFJSH+CouaLHl24i8ZHybObTKMas6eWBqRDeJjgxR4gEARfVGKYPFa"
"yuArMWCZjwuxsDRq7NOx1KkKKxJjthh7cGTgnLj4d7sfsros3NGLdEGBCOD18LC0TBtpwInl"
"DgkUekPFVqZMzREhpSkJemQBq6urYK32nDidLV2awg7Bp3EDY1XaSil1/Mp4qCJuDKsUZDUF"
"ITEfjR3HVEFL2NGXia/AialHE67sqlm8CjYgGQypJ0wMANi5c2emEQQP5ZqlxL5GKpIo86tI"
"gIxbrXIr0HXZ55VjVcultRVUnHIzUHyFSrKUuA6NGUu8W45IqTQKY2tMyFSsjJIPYDQSkwSL"
"UIgxBRhU0r79OQ8OaNQPOdFYbeQmySYhmJCVKtqwpKB8CUB2DSpIbAGF+oMlDfv6xPGzJWj1"
"mruEkSanExJIjkz4PJJGYMa0qEuHKGJHk9KxJchkrpgdY/LVaAKLAIzCTU4KBTAV4igYV1ZV"
"a+jXHyuzb15J6nCWGdrP5Xk89B67dunCJBv20uUanWjoJ8QAIoKP0wFTbxvEIABAFAcKpK/W"
"IJfg4w0OiHvVCKg/pEqX0RF0rCRKUdKPrDjjCLILoo/kqKBdglCW9/CIhiBVWxFrsJwFdrXq"
"j+kjdsnS7OX2Q2ExxtoQmSkxv9qgOGQkOkDmPonbJFYxpUMcFNWEYaYoAWVE5mEgARqykGnC"
"FUG3gs4yLlUlhjFZhQgAiN5DygLAKY3RiSGoKHExusxUFqRA9RCTH7HP5b4Z4TFh7xJt27lT"
"3oUkFMiYeYVWglrsAWiGFwCCskZsjsS0miYyeoDxIgLLtAIUowjdhXSJXbt2JSSF/JUiq3iX"
"KreAemRu9OBj8jLObT4gog+hK/YUoPVAhCE08VSgZkOcOJDRcQCl/sBAj42a9fbJIcPesTEB"
"EmCgoWANZ6xg9XrPXh7bM2MqzShJP1n4F6Chj+BHX9sMSFZuaIZYPewQRGP+tGICYcC6IERK"
"MuIKxydBjY0AuspsGA07wGkhEDAM9ZOCLQtjLk1Ke2wbGieiQTu2YSJL/BgpNC7cbCUfN+7A"
"7GKSTB+t34BODWkMa0lTEkRZEjLcVCLgJkGmWyx6hY5ZjhrCL2B1EQjY+lSMIdEkrxFEzaV2"
"vSavCduqWu0Kpd+SBCyVUg+IBuMwZY1jYidzXmjU2D3ru9WDTmbtxcMG9XkJvsAAFQOIlwLL"
"g6RFpUNkYwFNZ0okm8fZUcTtI3aN9akf4oNZc7p8XjwQP0exTahEfCRhxUjVPuU6xbbhN/E0"
"lSkmRKYDXS7apdkmjBgTrTB2bjYFWPipzi2gNGO1tRUTp4Yh48MZkxuvymglsY/BU1bBGjdj"
"UIWZRXdotRPFIFkLcPdQ03BaStC9txQQJ3ox+RuMqxY9h3Ew3p4ebkW05yUlXoJ2u0xck0gI"
"ao7IbRPCYEwggPVaWyFHACEuZR2HFCPt3yuHxmWGXa7mfhynjAkVdGzetPMhAUX+T+OGdn2D"
"TyF4E5rB+pgOLARk/cI4OeV05q8kvlin4xjqvQfsXl8PHAtI0yEeBRakyA1GGB1eYk4cNkya"
"YWkkLDSNKRAbLw0nwqZeEoUweUy0+gdbJW7YKrHbhoxNCAxVooKbrGrOFhi9BjGA7QHIFKKs"
"GVvTRCpxMNEtmD9ttwEYWZwjIMcsk/pajrBdCsUjoCzGNjkAIv/NaIxNFHluVFQJtM1hUuWq"
"tgkPXkuBON/lLDFA2NAsrw2kGkY1na5YP5hVRDHPeJcGWUYC4QOYOyCKZMQNYhebfHIUMchW"
"q9vgAWaTdYVFUZQ3SWjSlte5heTtUNoet3mcWZiES4t51WVcfRt2VgqOcmUGFOfvyihMIHy8"
"8p3muWCwZJhSom2U0FqqlhBoI2qSk4gncQlneJatxjmV+DVyqAePPbr3ANEh/U9FDuzPbqsB"
"KczNVjAG1HBhoOZRp2BiB+Z/gzLL26nNolqOB4pJlasri235FHEvKJXbUyQMPVkBqm3V51l7"
"UYYk6TeTFjD+kuJc6za7PkWThEuNAsjq4ZUznaBVrARIa8ikxpK4AsaUUTFpOMpGedOKALOr"
"/Mrwy1w/XPdvGxEaFcQkkf4ZeVEZQQhX2pdsK55VdMbDkYyS/hu3ChRBvxm1eVGeJmuYyhOn"
"JCDpn61TZQu4YIeMqVYoW1aouoOoIrMAM79HDsX0aiKzpGOSzQAAyEkDOQQ0sLEbUXAyzmpr"
"ICFhUZsJfYaXvegIFECSISkomSvNyELXqU6ipoiBZUQgTJA2eMTepXZQ0/fo0ZPNKXLb5bGt"
"OBqC8ToTbiAyDtJEJorT2iJeUQgatjQLFd1rUGLsxs0OMJFE+RnAmsK6tCURWqcnwTkHUxSC"
"jXhRbETmTi9IMH9FziGOaPqtxlSS7KNRsEZGLZRYbIGpqVcMVxiHldNCnOXwTmpNMR5XAWgd"
"yNQppH/KwMWWZjFcA5lmrLG8uLdZvPCXfp2VgLJMkAGU0b03ONcDjD692hs49FAFlFbIJt8x"
"KNcECFgnCggGpelbmTVptAR2XqN2UwQYUUwFJXzMcwj6aM1xvcdEoEyDRjYLTbWRxkTxZPVF"
"G6zEcTWbk2rNC/PIvuKXwPkvH4sso2hBcm7Dd54LVd3Hkr2hgLgXYXIcCV9SRWvwkSQL2dfB"
"IwD26tnLuoiIZUHsLZZEFbxu24+KuweMf4y+Ih15s5Pq0No4oAHjMA3qG6x6tJ4tY6lFJAIC"
"mS22lEWmwFBti+rrBncsIKOSdMqxj3JsTfCMygWeag+ekAMq8M/qMkkdKl4chTBLvUL4hi+S"
"roQQLOOav5M8gxlB0nGmK27ymIRF1aehsWAjE9xBqBI45yCCAfF/b0KIziNL5bAABBZb9EmL"
"R6YVnpDdLL0J2wONqWcZDf+CyQfi1RqWNBGOQAeaq0nyKeqXdQhIo6aABNmERuyqTGBgLNsS"
"lYcIvEzfM0tLuWVyD1Bt2mxMQA+8fhOLNO6yydgAkosgu7L3yeFofUQ0KavXrgmYyQBN4suc"
"aGKhKpjdS5xUsWZqbo/Yq1dvk+GRwF5QCTq4OIgYjzdNwjh8mwBCazZCaMjnryWAVoifSqUs"
"QJgE1Y/YaqazF44X9+b5tKrVgZUjTFIvUT7MSUmT9Hu46gs4pzUaxkSDcVa3IVcwKvLqfFoh"
"g+F/jdzaw1SnDuIJx0nLmoSWUAoSe3lFAkDT6bayCJsAiE6NHswfxrkE9UwejG5JRjjrFa/Q"
"T5yY2PEZh2QOgbE4PogJxYfFN2UVOiXpy4u5uS4QGHmFAjCqeD1Mv2b6qHIRoo3qHT2hIeFQ"
"Ypisk7NW9jCvHYekPpHMOIKroW3GGtM8SiLC3RY2OSOLeVz/5pWRmJJDRyVuzIcQFTnqOZyk"
"gSBX0RvlpuIEnFIYDuRET92AHcpEJB5TLS2/qMcbkICxCQBg7969NeEBI0o0MLD/iL8xZlQa"
"DZRe7SamN8lSUKpWRmhgxNFFjkIFPpqiADjbi4hLoxAvXkO04ABMLJHLLyVgshNwUcjmZKcW"
"06BXVaBZhWUmFlRxysBlrxQ60lHFdDqq9CY01qgNJcoISSGHNIlGGnXkIKZRBb1SlC7PlDcR"
"hRXwZL1WmtiWOzTkcIbExJaUa2zAJM6wm3OSBbJOmsNbmzOVIgc9LcgUJVKXKYDNSqTVoJFO"
"s9Ls9Bon92iWBKx9/lOVRr4lq+UPSkfA3EhCETsaeBp343SBgRzV0DbjR7RyAMcLMynD14Ry"
"GcVgOkrGuGbxli1B1Uzey7GeyVOyZ1tvqG/JJiZnL5esQ5NYy0TKI5rTA+fLSu6GUoFXf0Cf"
"PqwwQbBxADYAiAbsaWVjFZuDGo8Dju7G1MZd2cycR4t366iaVUWnh1hQCchiNm8cAWyexnJ4"
"Nrv16GhT6maYaVovKQ+fBQHxE1N32tAAujRWm6SFEUOaEstqUWznGRx6KYFEPKnT2Mul7BPa"
"M3GZcGR0J1SiNpN8XSCEHDElOpgILq4ICkPmAnEuk8NrwqMsxLymqYtEF0KXosnLsUZIDn1W"
"gVFir50jie2cFCuk1SG12pQ0mHWnBpGl0AdJyiUoMWGAiQTGS7ywneY9QVqezeY8Yn1dpDdz"
"mfwAVHQWnsOaAQtrmKdV39XrTTWYCzA4htqMQRMG24oQbBovYpIHLSg4eHo2msZW4GBnEKe5"
"iCRz7AnCydxGoHSW3d60h6R9QDbHA/scaBYq/Gf1bajPy9DspCB4Yn9Sd1Qfs3W4gJ69B4yj"
"gGQ2ZlyTh/MPyome3EY6jELS1v4MJBWYRTBxVw5jU0eDq9gAkTcrBOJ5JCIqQ6LwnwYnHYMd"
"NvJ+0HgvsEUN1wQeUrLamb0zCloaOEEv+NCYzstVCKHBl2IOZFjpeRjdmihNAAYQpccxNk6z"
"rAWQF6M1N0azkXdEIUBGN9mnNmRAIy+YhoagUGODtmiB1YoWTYY+lCxkBylIwNiedSbm4TKe"
"Fsf8B2o34U8JHyjSGvUaUmYqB25WmnoCjZNIzOZUg9ULjF7NRzUsSsxhDUpq47m8DfKohgX4"
"jAjTLlF2BFPDgZZywJk5sNbUFWQXIbJisSPwA6MuUaxN3sUmeOCBBwFoK9TQIhGWMh3TtxFJ"
"KcHUQd4Mpayq3g0US5TlTCNQ2FxijEZDhjoljAbHEtQk1mbRVzIcEIWIodnPBSTGnb1dmzKW"
"rEI6AvwrcABjr/FqAMZsvCLD7UZfbDmGLeNYAeLFA8AWYKZUMgmB4X2T0ihl2IIAmAFRRTap"
"kipOGECVZxbNmxCbHqCYj3JiE8jF1Tii2WjKAxv9mcSMlWxAqv6n6QWLK8mgIVGWWpTH44n5"
"2PJgXFwZmS1o5Y3baKoSgbDiQAtGztvMwIpk/s3EIaM0gxxWodFcFivA5FY0Q5KKy8KEJqXc"
"Ss+JkcOki/PeABWYjDWOcUXNA2nuwNAzYyjLm3xE9RTtrLWR9HElTjNIg7UO6ts3CqvGKWNO"
"Y6cUO4hXR+szkQUiicSFpUq2fCVNmJjfSE1e8WtqLVY7+6PSEkV3zZlV896sCLxRCgum2bBR"
"K0j8ZvBxnq0GZZnZWNr9lGQ4ymrFUnwQmD4LQyj1Ei5zZU0A5rOomZvoGmk5a4hSNeDZTf4h"
"LEEDolo9xoSWdwwQrazUdryoOAhE8LHeAKJQaQIIG9uiQ9DI6kP1nbzhg5FMFl8KVflkcwpD"
"vnZhEi1YGrWimVPNqsu02IrEjAobiVmadWuXKXYN2lmg721OZZSYLj+yU5zfc2LKC4gihnU5"
"RqtRUFSkGmrXVq6RlhcO1qXAiGjLSnMOyPCNUXHGXGbZ9gMPIOlDwAx5P/br10+TDhNIbJ/U"
"5Gwms5IcTqKlNa6pBZGdJWVeSGK8DYdR2iH+o3EqpszI/BCpi9YCloVVyhhg1lyRjyaeZ2Ia"
"kPEF4iZSyT5SZxu0Zq6p1xgw78pabViiwBERo6bB4ohRRFdZFFvMSDa5IWnVpFkEEZgkbAbx"
"VxpsGDtZfompaIoEQ4oKPdnNC1t4C/JIoQmGbA0cTyue7tn7sjBno76PtC4jaFLFuGN/lPxS"
"LW0TXuI/07QINKRZPZtEkmZQaYApzKoiCjW2GNFumTKZ+LE5l2bCn1JkpltQ6xjbqGtGwUKb"
"tYItHUthaUKkshXNzYNykKC9Yq8Wn1WhTGhXTJkDQfcEQPR4cL+DhZG1thNvYcMnwLMTpk4H"
"luLALodRb5NGCzq7XkwVJpgserosmU3nibysFHG4jgk4qEQVnHAbk1+EiEimTDd2M47OrT9b"
"v0Tjx9PEP5ioG6FQ1hD1ByLKUnazS1PEc73jM1Wp/oT+OcwIo0ajB/RwEQcgApcZWolYgrHO"
"aJozEC/KjqZBmDXKk1vbeMzQairYiLQMZcj/0E4EYAN2QkOSgFiGUDrweqxaH+xgrDMWBW00"
"IhLloMIHFUhchTEZq3qGYXv7ky3vknzZjCrldcEexsNTXzK1SNopUhDbBpBAIaLjjAMBuH4w"
"P8R7664QrdpYEBAPOeSQVHLemZRuyym2XwYtXV9qZW9Y1TJ9XG4DpIeWeCVRR17oRfwWBks0"
"KJmrxT4TaT5n6leFUg8MD0FiDbABw9iWlp6mdRBLH+WwJknWUXjhVqZkehoxpyPZM2ET5iX2"
"R4kT8QhxfiCFZxwf0jSfh4wXGWslsh/oatWgUbeKJwLMVweGR1LoSNPfJz9D7AjRYHH/xxsJ"
"SpEk/qRxJ7hvwtaqBe092PlsoMhdMvJ/iDwoihsJulLIRmYVVpA6CeNjVSkyWpr7snYiCayS"
"gflA2Nyc0EADA46JHvT8WYVN6F15VaY2qI2cR3oL/fv314Hi1C0aNyGpApaLDBbbI2qkRXMa"
"eUFJPLGyjgjGOjE4QLqG9KVYSjzIjKp0Q8wAYEJizDMRCo3h05UmvTAbGfL8xoDN5jc5x5sF"
"pHxjlRAtLi9p8viYkVbEC9EyiwKWzK8lL1iGjBVQVAcYMxjjGeKK5lIZxHkTNkurDwAzJyRW"
"Aql/459iIc00EGFXFJgAzHTdZJS0wQBKKWm8zcKLlZ67YCB0CaZ9irJX6kSaBFq9q7Uib0/E"
"iIk/HqAErbTPENsqxa5dZ4zJSLa4I2f3i0WkHaKMRcNIbFnwADhgwIBEzsQYYHk0HyM1U2a1"
"aP+EHQsyETiAdZhxYmwHiP8s/p7sZO1jkFhx80nDIRqZjBOmxDgdNH+q0SNrqI7TPEnmtsdV"
"kJJpMz4Vy9wiA2cTR0UJlJP2FM7qzMacyZ9FrlT3jPrkzHwxlCuDSF03aC1Oc1LP5YI4kT8G"
"Yj5DFkji6c1oNprbA6zuiwDLQCQHRo5WUkTKZbzSmAKy8il3bvNnppIYs5ksyak58yWkirWI"
"yBQRZZc2jJgGeixJJmU7nytQDNr9rTZw4ICBVegkppHEToYRIR9ZGy9Rsp2EGZ21aK6kQxzR"
"YnHpCSqhHDqK7qCeryxXyXGKJVWlOFV9a3eviGNYwGrJeSJBZstICVnASdYXL0r5CpLRSsxR"
"IClKxfNzq7ExS9qtRJIxGancufcFMNkVVVBVO1sFrsx+zz4VSt1qureHV5wrSqfKe+laLQPY"
"FSeuVdvs7WmrAh3lU6QFeAy19ORZpn8lzmgVScadskEUhUCiFg4aNNBGlBogUcHcuc9kKi6E"
"tBwOxsCV9inOSwdD6cxj4biEOcusVwnX5VEr05K5EbDSCNVkrrB70Yfbc4lanb7mnWtcyp6t"
"uGiVIqayofJTz7kUBfBU12PNNqnld1/qt7S7JQ5vJilDuILuPQAmpFMk5YqGK3tjcgKkPcOn"
"pFQOgJCn/9kY1WFaW8iyGw4ePBiKMpWCRrUkoPLWvkrDqspl5d5vRfGqVGO1wKP4udKXteml"
"Nkqzv9bCae0kFoV929+1Ni71AJC2hdpdVZUaee9kiSDUvkr3aO692qrOwtJWC2dll6wydi1K"
"/d9Z+wewVY0A1feu6ql1FKuyi2ciVWLyr35MU7LSlv6cc2oQASsesXdbUeoqEKmwylyXxSEK"
"X2IVDOsR1dRTnqYW9ZQHL4MGsz+qjlnDzPmI7axqjy3e3gElTe/ZUPuZK6qOZaGXz1sCZg3r"
"a1+pexyG2hlt/0TUmnbLd6h2SHva0q/qisk7pp98bXJV27O8T5Fi2zuslmmSPbNDah8jOWov"
"D9i7+eKR9k5gq22sMkZmyrD5fRfdDovR5RiZBDVMxPcDfNBbzbZmsY34+6KymsPO3sxQ1fv2"
"aUM911VxSjT/rSJ9kez2g8VrGaUOnThJvr/lkxpH1GhWAbYY/ZnvUcFIITHz5n9GszRQ0dsM"
"JWHs3lFGWhC1ndXypMlehaNITmyHD6rPoyuo6QBFXQWiqyJyJCVWwXrloSpHoupJCba3Qlpc"
"Ta5cWdv7ibvscvlz+tUeT9E+RgR5Jey1O1lF1SU5B1jXSsav6DDFwbhoSGWtKkH7YifEGi8+"
"8k5+9llZkmir4wsiK4iYsEbuGlW4JIdtbslYh+h1TdG/5oO9RR0QwDvJsZFzIMaUSXPscgx8"
"k/ZZqtBKgUDVDYX18InMYjjeoyJFqRRiEVUhCPLEGD3OGX0BePUxfmhMvI4sWkRKTpaYHpr8"
"bbWb/JFPmvtvceeiIgsj7gn7Ot23kpULX6uC5JIRMrmJ/wWvc4WxjUPHRgWMhfO5ohxYwMW7"
"JviuvDZdRAJoHZBvE1LIR9wfLzqBTFaOuCTQlsi7YNz0qPIB6eKtWQEACz3ADFsIUIfojDGs"
"KXM2AboeTxXBQ6YieuCHwqhLs0fSCAiQPSqKOJQb8iwZejmQhohNDWg8nvd2XiHDD+0wjKwL"
"yEBQUDWAkwPkwl8besxdOqhSKAFX4pJYrkyJ4evEPdSc/CViNJGwcqQiXQqpKCghJQYe1hm0"
"k+c48BDBoaS2lLyKhQJaUQv4iT46vsGwPEMwvQMA1k1Zmxmr+nTiilwbLZS1EBxHkWUTC4aG"
"nVrghKLGIKwgJZod9UC9VyHx3nytCT0I8AOzWU8nk4bry4w2+Dp2dTfxVkCwJ38RwDvzvNdc"
"Ipda3wRaHYSdJY1pRnRrjUiPqsvwTbiHKSY6u+BUZWgXgwAeDzvssHT61Hslb2OmA8k/0dv0"
"w1oiXUAhEkQMVpUyBBe+crhg5kWzOm9/APnGHh6RccShEMBqr9UqR1ym+AqdrdwZi1owTJ18"
"SzdtFEQWM7OOrUvJ1/IxFrwkUypPwtj6rTylKfpF8tUKw5t5FLkVfy6BINJGJHammngUzoYS"
"EIjc3qolV0NRSIAqpiW8Rs4GVGPlzhBlD/Ivi5OAXn7jtcnBmvKVxbHmtJaNjchS5iqtqAkz"
"BLAXWqLyPh3crjsayPgCRKa26BMKYrNWaysjAEjRXPBdKwmOGDGiQAU+Oowf8uftoiECZgS7"
"eKacsiNZo2ST1wjGbjxkxd6KQqRE6gnA9QYCS/AQ+YMRjB6VUAJYhVlLDCukr4uRDxGQUmVm"
"rp/JYHGsciGvSJoHqryYZ4BiTWxLMm7MH5GFDI1EZG7nQgQwBb3OwsGGN3Y9Hg/BpCiWThQd"
"kcKs4Lz0GE926jLXGV3qTDGdilUz9zNAlSBk1Z3wru7qC4jMWTpLA6pQK01txqU1JCSQOmbB"
"vRhKYL0a9ImFancDE7mYWaMBT14osTNzRO5qVqSwtlE3lZeXahcnrqfP9EnCBs0JCN7j4SMP"
"F1sYQ0YUrwwIYshI/Zy8gbylxKdatyuLPNO0Jc3Q7Dwiq5Smsa58NFJwszyegs1qDdiQr340"
"HcdCGGG0WGMp1GLKSWXSCKaaipyVlm7IR2wTgVge+ZK6Me2L6vAmf8sCQqRke1CiMUCeK9V3"
"Aln1GmMiNXnshpb4tN2gLzdI6Fq/M24dZ5eGW20ekmQeDIs48wHzQPCsC2SEEX3a3wzvJ6tk"
"yoxpTJ6Erm6alTyRns0fdioIBgYN4lHpZKKREEBMPYbTA5cV2SUxEuMsykrZOY3u7XKMXnPc"
"IQjLgX3wXYTaSEkR+gicumoE8Hn8SjVpaS0LckHSkSNHRnkdCyr8zlgHlUeYUfSuXkCya6sf"
"MutmdMoK5PWY46LAkcAihY98BGJTFg+imaVpwN0TAKNJC9tofCbPoHm1WOx3sjSf5CrMd2jk"
"iwnA9B2lX4/AdgZu/Jl8WbjY0riiynJ3XMJYI4qZDR/boKDEHiFAHzHIXaVEc2YY3juAKzIo"
"GAVGdBMbNCb3VEmmeonGSsAR+kUhMtlUhYfiHyH/kZHjc81EokaOq6whgrDOkmVDonJ+eJiP"
"zYhGcpEgtoyFv/Ur6436jw2LcQzgtrz1fHNzMQ3hLfUQvk1KaXI5G1okYkBanhUMF3fUZHrJ"
"0s3DzG3LG2zazWOxIInfRikZjjriCGDf85YxmQ0jHzEZDe+Hdi4ZVmhRnc50bb2JaLI7w8XE"
"NZuYJZFGn+VHBwFPGcISEttxbecDBow8AlkVi/HDgSmhbvUOM4o+psZbEzCxJ3YTeorQzPuy"
"Iq2B+X0e4gCMNywpkqtOk/Qwo0k+xj9FpK8pjRoVTHiweDJvU4jcQtAitC0mZUgz70Qno0wE"
"ZUkkyZJXimTG4D8lx4s0p65jinZD55COJ+YEAxozDHUQKEQxNwu7FLI9axFJiphubFYvOwM7"
"M7tBYHBOUKxTmsFRtCFiW3sr5oGFZACKbmQYAVNUrjOowUP0q9Wh+m6OJQIAACAASURBVB6Y"
"atm6WaJxjqasSYtxcVkdTgex/UBRh2Zp4u2G6UIyoYGBlKIrVHCOGj0a+fAoh1VeRRMQYggA"
"w5tJyOYZkjMYoeWppOqpJggIxjkOqnKAO1rMGGxpZZCgETCRWHsG/BOLTao3wUsQJQFStcIO"
"I2QDbCTTNhIp9Zon9nwdNEwgzyQXHQiCBdxRD1gITFzHoFidgDGCoqUE06zRFNZIZhbvELSZ"
"YGbLgNhYqjJb+6j/M91yfCrwFRgjaRgDu9TYqKBTChkxWAPY0fpN3oZj3JkgYMppCfNCzcaG"
"Ph4iZ0OBitiIY4iQgW3BRCkMpGph8cBkAExyahxuWIE8KI5BaB8c50GbN6ZJYKlahxYHUMlU"
"4YwYHszmXWgkVJPa/1omj8nSGEj/I7gFO5yaFi23hTVI3ilPKgYAMyVkc4fhcfSY0VG1o84N"
"xHJC+EZaMO8WEYdHjiPAyg8jcn0vjRxp3SjXcLygMZAHKEVBO6W6rGoR1KW9IF0MZdOpjGQN"
"NVpOA6+liiEBC2bJZwSFrAkiP8NnAJLoARrFMi2Kou2YSdIutkB5XQVEnsvSUbogwRM4AROn"
"hIjBw1T6Fg3bZePAyJUOw0BlIEdRsyolpPW5OFnMmiKPkV+pxXpgYn/NUOjfNMEzcRKIP4DF"
"5dJNJeVEWnBtigwtuaNsGFU2SXNBmU97HhmRGa6MQw4gB12Bn7abkcf1kU5M+iHjCVzUMeLA"
"xhoULzS9MAnKyCDNMjT6wA8WjkpNxpgc4Q1dMsWo1rgikrzMq5dK80WiE5jXBhEe1YVNoLVJ"
"Ay/PcGbAtUfAI8ccCXFZQiqQpN7CwnKs6RdozaSCqwGUT6QGUPImW2pHzEoq03LnjZXCNuKp"
"be7FVCQpoPEHXoSJe3GXyORzogNQ9zDlHE8u/ivMqK9zAlGghCKNYTKt+gKXwRpGuVIDkzzq"
"EgQv4jlS1ssCaUCQMkTYTqmAtCQG16xGeJFHYTjbIILKGRL7Na9i7mEa9iBRiTM0iR1mBQQV"
"G4k11zc5nQggFKNOx1LLZpjdR2wuCa84LoOS/ZK7MjKSRFlakHQSQOKU4RP5LwhzkohIvsgk"
"ibJCtoSaWdJsTQzQRlsA/UVLXRMFVQwtEPhg056EOHHxDKUovQSjaa0/2csYxPzSFwmQMgiB"
"wZxbYyaVLpHaQfNjOkaRzebQlJmZ3g4CIN96/VmMIt3No446kneg0JcEPxsiTDXF8ZPdCL2G"
"WcaLPS+KSo5iYQ6+BhvibaZ6YQ5lH5ArTKWM0CAVcR6TERj8scWZpjlV8iohxciIKoXfGdIm"
"etn4odJLcFJ3YK6jCA46hKLaVHBiQVCXTSEan1HQWkaMwDMbsLOeqMK3UUOCnzZlNeIBaDdH"
"a1jDAKCdqTi62ETbUC5zKE9CdXVcroj6MfclYLo2MZi5STQMwJmnuJvYOIrLTIgRXbD1SJUM"
"M53U27CIYmtSLYsvrz8xkSQ6w2tONxBWDa0A8oEmLaRxAfTtK2xmdTYDLtG1qewktWfpOCWJ"
"QlI0v4k//DN3gJhgFI2MD4yRJjFUSEnGEI+xZwlsKarXdjOkROPSipIUl03PPGiNINWTZoLo"
"EY8++mgCisRfphlg3hCqkujJRjE0GCNKHU+CZ8TcqEd568YoKYiejWLla+hEIyY5iPT86AQd"
"MESAWzwSSM1FEczzwk2eT1WAYkzySzYIqDKE/EFok+/RkRJQIC4RRkCu/K9VKhOhcQp+zYTV"
"itG4VHeqLEGwKRbEvl7qMABxQvpJgMTHp1yn5mEUagBS1hcSlJxUSh35LnEWRpvp+TB6QEZk"
"7IMxIZE7J8CgXsGJOC9DkaHUbZ6HbowErCvmZAY1kyyYZWtIZpdUIuAsA/RiDIIjETuwsxi9"
"xVWILEJZhM8BE7UYVmCEs2k0YtsYJ17MeqGB0NCLrQCBMSlMIJpIf2ScijRERsb1QQIlr5eN"
"La7hTeIo3KuhQBsIEisFAUggZF0zW0qYoZ0960EF9QB49DFHSyKu8nD08QIkxij7vEEN85XQ"
"QNiF1RW7kwyDSXAUntPIzcA1gUhsEqYS1iboAHuVzMRg8lJFCrmS3hgwghOJzyY5Z4bwdhUm"
"ydJ6UZIILg65DGauZ3bl/ZjKWVQ0biSRUYM1Z5BaPskHRq/20b3yLzEqsa9xdWADS9bDeDGF"
"iWQ4OqUSnSEJq0QbGdjjFNag0iUxgbEpmhZOBbt0PkEHunCwcYoUGeVNDDgmdH0FgepbWg6S"
"sipUtBQ1xCC5o/xlSM9yOLBfSXtTcwEOvZqaemCtsw+IPxrzaBEj9CCpkCoMmIgB1JQAnL1w"
"xsCOKQ7EGDdGE5dM0+0o7+T4rKWSxDFGo+miGWJRlo2Qq/OySk3B4XUnyVYkVZJYwJ+jPF5L"
"LwbBMceMFbuJGRgd4sUKZw6XqSLAiBBmYK6SnYDHU341UV1TFclSWP8oqiE60EQuckOJ9rJ8"
"AI1mUnMKY2p6wW7IfiqWBvkGlY+tE2r5zvmgOFycUHHoY6E9lzqmTwi0mqg6kMtRbPxhs3oO"
"nXyoAh0jLgem86haMTGV4SOsBQBKeGBTc4WhUhbKD4pbVSnTDM+qTmavjxJXlKawgZFaV9yK"
"uRxFdKIJXimZzxpVBUPrs8rn0kAADdMazk181pQURBpmUSY7mZPWz4QJUnaQLaTVxjoUS+mc"
"3h7E05jaHSNRRDlK8l6wRtzFeZsWT4oxKbZJ/3Z3wZKP4qrEEOB5hRHVzXktklILfMQ7jc+y"
"cU1AsPtphDAginN4xpp6M/MIOY1GPADwOPbYY42fRdJpjgk6ktgl8gVNMiRLZjsLk4G6i9qM"
"CYSWJJWgia2GhcTbJXGTTfxGJmLoCZjMCgQkYFJH66kShrSsMZSta5IVKRlzZDOpk3FMpidx"
"d6+9AvGmYEDJLRgi/I9xLQsQkzjHShfbWjsbMgfGtEfZXUSWQo4pSajQmpyLUdveV0pSavJc"
"GmhEk+RE4KfEzooHSV4lQEJsU8P8siTNW5X1TXSPYSEfaRmc8tNPEUHbzpzozipMIqGGTf1H"
"UiBQLZimDZC+ueCQs3QCvCgWoyCTwQA2wZY6ToFgVMLsb7K9oGIDTW0z6KEmcWZZuFi3MrJa"
"JAmQbMarbQSrqWE9S5LGUeZW0/UQopDcWQgKoqzclD1JeCU7H3vcsbw/rTdpwkX6U8ehg4B5"
"i/PEqIUgjQlhTa2R9EiWSslAOB2EdFAWqW5pw6jxhChrtyllvAtbCnUGMARv8CdkQ2JpxRMG"
"SdiOYa9k63VRxtXJ+DbkFiAGin52AVB1KJ4UVWDqzYgJUFUuAcjQpVpXPFbgC4by+RcaUKKB"
"Pb+g+YjWfSotF/YACXiFDmzaLo7PyxP+1aDGcNZYJv6oEDO2NJWJhaPqkvnV8xqB3dsYS0KI"
"pXEV1YQaVH15IR77t+FhQxGWMECdmznDVoK2YgCQnh0YN0CjkOjMQxYftA6Xme0pCg34AlfF"
"rql3zMliSyI+8u04WgDXTlwnAljPABEMo4kVGhInfKo5GgHEKaMSggbB448/nhYa+Y9oNowg"
"mhQtW+lsjQRmVapKS0hglO1FV6IWMIjVrMmax1Zs4sWmmw32IObPKEUwABWhGf9xySJJB5qp"
"jRtxFqfML2BkOPKE6tBpsFZF6IjKaJ6rZYVPFOxMlIoIknleY5WNidoTAfXqqGMGVAlYVWkF"
"LSYTvVFs4PUovrVJYJEqKzY8ZblEqyyzi08pw0QLbRypW4vujMsW3FHcTtMAs7MFFdOr5p5a"
"9ptyWic0PisuoXQGNqDy5/C3ej4KUqJcyGbAcTCzU7LuAADRqkH9W2OE+V90dHTynyIEZ7Ck"
"RwN3Xrbph5jBIvpRftRegPEHY+WUO0EXIA0BwTVq4c6at+yofzHP6TluPOGE48XBrcwpem2J"
"HH1hoCInnb1VPpGEhMsIMLHuTQwycVRxaDZJA+13UYOD91KTKOAS9ceHVxIwVmXkLkKSpDYF"
"SVLaRyc/pRawKDQaFi9TZOpqrvj7K2688cYuXbpA1e3ZZ5/9p3/6p0xzMeK5yWAbTsJ0aSzO"
"3DDBmxTiGH9NGJF+EGkKpFFp9K7cYdozJoIxjI194vPNbJErrrjiH/+xfS09/fTT3/rW/9Hs"
"xQt/S7hNp7ThI4KcBlWwoUz0pLEQIn8wMSHDhMG2pqu62Fh1iacodD3vk+wgAEi69RpOzDC8"
"eslNkfkb0HAWL07DsE0/gYUwAYmtzqCA9ECbELBFDPMbrEYMJNNFAVBADUJ/AAA+PCCWdndh"
"ZEcczJ4byJ/WS8+TdV4iDnhwtAOJTqoiACH5iDklqAKKTUiNLqScCE5+U5tEYMBoEDUIIDhm"
"0ESlzNfgHaAyE6OV8Yg8rwDA8wNIFXe0YNIELQnF0nKIgpfNROtxPI334JBJinZB8BjMgYii"
"JpqgR8+et3znu+ecew7UsL311jxBeVCEqoQZG1WZ+hfl1Y41EQbxKnhApWgdmWqdRubgwKEC"
"ptnQRG5uqRhiCV+iM34ufxHcmPcUbqxndWD00LNXz+9+97vnnHNuLVqaN2+ec6hzBaOIYXlo"
"+Qge0BnLQixuEE/zcVKQntDix9l6pineOTx+FhlFwFMQHEC7Ccgymn6Dw1KCaAJaGEYyaS0f"
"GMHANOsRHFeGaBI9WT0HvejIIBd4xxYiNYHpYdkiwjtw6lxMdcI8QXfOFA9BDeRoURMGVA7g"
"xDMQAfOUYgMhPL5bmMoJFuvEamReMQgw2gzJgVAcKQYRwgOd0UgDYnN2HUQH6m68D4qVxdVY"
"H0nzhDkUFZ3cPBINmJSOaFUW49kLFdAqKxfUjn3dYokky0IquTqy0ygcZAcBkiwixB5eLArU"
"gw7ZicK+bEktwYD9z//XQw8NGjQolajC9uabbyG6cKI6uI9HRjhbWMK8gp6MwGiQfzS8Blxw"
"HLOEYchMWgWIIL7oHbkGRnjgZQq1cZZnkC81EThQ6Uk0GRcAER566L9q19K8t97C2MFE26Af"
"kesXi9uUfkDQL4whi1GbC8mLhmRUmpnVF/aKCMyxi3L0pDkZqgI7MWfwBR92YUtJHEOzZjW/"
"Jfq46eyRpw/CKSkAevBOUnRQixgwSDYcntaPuk/ItySDQYYDWAUSN0hojn3Mi9gGjJ7/kHyE"
"1O1ZSzJLHWcgHMjDw/KjmA2qYo0SJqUk0UzfzhwiP4I28pRyeUFiMI0V0UpNBiyKIgPLHhRr"
"WPtC1gIqE0dytLAeFUtiJQ3sDCcxjtVTlKYTbjSvNNHPcxLOUDbkzLhSZQuaiMg81u7nbW1t"
"CxbMB0RNwpSg1H01UkQIs9rS6izyJRJa03nan5TgHS3KWBrRgbfKz08r8SiKbcZSyFviTgnn"
"Hay3QB6udi3t2rVr4YL56DAVggjGZggRgggNXAmLTKJqO4stP8S9UczPfqr2lp05sjvJgC2t"
"iWcai0b60V3YGkxkEoojUgmHmPMCYgbCoBxg3MGulTzDhWmMC0jfwSkHWVoxEQDTVWhQEe1y"
"UidSUtFjOjAmA7Z5ctClEc4TFYOv4/EpoQtox4jYbWZnTsfGInpKAzXdjPwZzVpMII72lROn"
"FOqtKknkGGLWkmhLAFV3skeEN7WB+LfNTFHyGaIvwx26HOPBKLGBWY/I3rtIJHWqWFDUvAIS"
"9+AZPXpbu7e3LV68ePuOHU7RmvOInYLEjRzZSqfdPD09I3q1bb+IEngllL3YcSkUGTr2QkMy"
"rbUISvrFs6ueU8qpeWtoaNi5c1dURfBiHGvKSCwuyN1QkxGZ/9rz1yCJQab+uIqItGN52aue"
"QHlDWu0xoaVsaPM0wWAkHfKuMid3wx3oTzHPqm3NToDSTbOr5N2Z54Sc7El+0RAqQaULMR/Y"
"IaOUAktiSrfRBDSzCyViATySISYQMo181GZK+MVqNsI+hwTNgBOHBnNk+i0xuVBklB8CWgua"
"8a1voE+UQQRVmNn0ablM8ZBoMlahIVjE2E8B4u5gpB47agz9eC7Td0vBCwoWBARYvXr1gAED"
"oIbtrbfecpisSbNbn0nGirfh2WhPyxAuISk6EAyTPEOZEBA01TILK9vTpgY5sYnvOvMx7k4B"
"wJ5o6e233ybhnerHwsjoAq3oSJHB0Jf5b0R1hjMTqHnNvYwqyPeklEu8My1BUiyZ7xIoCVsl"
"ms0oKElJ4r0tg2ZLEhVFJVx0pKTdlg2NiA7Bp5OiBQrXOflpbCVbDa9JK8KSvKFhBPD01r2y"
"zDx1RDFxIl3SkuI8yXlLm64y5az00CyeKWn6CJMWVKhBJOLoRNJM2GhTY8mhGT4KJ/fyrdoO"
"1TUVfXXNNddMmTLlhBNOaG8+ePvtt9Pmh1F0hSnl/J4mXN6GuWwE7fDRCDbTEPBCFs+y2VMI"
"VNlyU5svPADugZbeemseOuXrlClih8P4p0K+rt02E60j1BjUSUAurT0znfkp25kGlM5XhWOL"
"XwsraPyqyQyZ92a/l0S0ejTtwsSWGO8fTVmqtsW/ne4R/mPiQ6zefJUI+KEPfaiCyDVsNe+4"
"t8dX2CHJHiscWzoZspdy+WqEuZ/m2IfNOffss8/W19cn31999dVLlizZX7OEbX/KnQ/rK3rC"
"vm+VtDRp0qRly5btz5mSrdJCLP18AMP/vzDgXg2Vem6VQRIm2HfJ6/IMouYQ7fdZbTXkVMh5"
"boXjqgTR7Kd8rOLohUHlVcbF3fdq258YHjFiRO7nLS0t7777bsm++7R9EGyow2L+1Z4N4it/"
"UdRSc3Pz8uXL97uWUikqfl9LWdEO5io4SS2Hl3/dj7rYq6FSY1QZxDLBfvHOOrlaKp8p/TL9"
"qsik2UjIl0ZW2fQ4M4L8WQVPlQeuSHX8g/5bC+TkHFJJmD2xhJRf+3M75phj8i/nL5iPULXu"
"SrZoGcjX5lc/xBi3Bi3EM0T7p2Ypj5hgwic/VTAObUUt2a5CkpXUsIbCb/lCil6xJxgoWhCz"
"5fMfCLHHVQeA4iMyfqj8/9/b8mXXvtRatjon7S9rzWx0+qE6V9MeBqzCJJUPoL9R0INgoVQx"
"RfQAgPJ8FLT38mXyV1AbFnatJCJ3JiqgpJCNoq9sOhctscgM7W7IBYOHo446Kv/97bfe1jPh"
"sWUr+miUejuP3rXrFyaghrNNGbEZVaideUqzsVKcIEkeemPGwHjxsSjliErb0UUt2TYrmpnT"
"WbzugYVF2iXJcKWYbrWVYzbd4uVAbMjKmUKyoNo2TD61zy5VItaeHFQaB+XpgNmOqdWT33ME"
"tCNU5Kp10aVRCNZXI/ksGg3XmY/87C4rp4beSrymM8idOzodn1z28Q/80MHgMnJVoBzBa/FG"
"3mhZcfTOs42yiI5GrMAmEHmTB0Dm68JY+Qcjk/BKORyiucMruNXRRx+dS/7OO+8g8rKZK0gr"
"aM2kEMqgZgaolHHrP0wijgwZsQAmR5QHlCtolDOMcKiGBWpjJARsSDQI4wntCOiPqqilmK8i"
"KcPKwARnZmQyQ7RIHxkcGam5HWPfTqoUZl1fKEgs4eYjy5cRwo044LNIbY5TFpKjqjMJeIwD"
"RixdLjo5I63NpDGiXiONi3HpjXXl4OQY848gwg6fsqVQJkutF2YDXfWSB14aKspqEMATJPlO"
"Fbp6MF2YWicdNjZa+J+P1gRA98EgC82kzY+CMbQbuVxEx+FqCm9hZ3I9NG5UZCDzJXOiasxH"
"Oo4q8kAO6KN1xgATCLIPGAQg/z/IA0Up6nDPOVycUN+9+/DhwyHb3nnnHTRXw3DyBXI5reFX"
"jRfEACaWxPcDpIgym0NzKweRmSxOiIn+I4yZjoJGCtWRjQYgtkeLGAOD2AyICL6+vqyl+e/M"
"jzPExL2NSwVui/CBCQtGF16zjNGyvDI6CcdfCcXyGX7g+xxL+BGylYX7eN6cFD2LI9eMsRWM"
"STQ4oBrdjFGR/dAcVGi4RItmTUWUkQIsQNzrdEjjirVoFrorJGKZOJygi56cIYvXO64kqNah"
"c3QNktyMbrVPTaIQ9lkk45TsssITRJIoB6odVNxIn8hJi15TpJED6EeBCi8M7V5xFI59FniQ"
"8LcGJzTiRSaNaJMlsJYNUEW9+z126m5dux53/PGjRh1x+MiRhw4b1qtXr549e3bp0qW5uXnL"
"li2bNm1qaGiYN2/em3Pnrl33PoBzQhvGnvzEOLlunP+Pb0gO97Q5D0ceeaRz6eUfa9eu3bBh"
"g+OrP5xzY4486sgjx4wZM2b48OG9e/fu1atX586dN2/evGnTpjVr1syZM2fO7NkLGxrCtI6p"
"TMJEqgHAPPzJJapO2I+jLIZf0QPg4MGDx5140hFHjBw2bNiAAQN69OhRX1/f1ta2devWDRs2"
"Llv2bkNDw4zp0xsWLbIYAXlmkY/VA/mW/YBY1NKqVas2NW0KbfTu3bufdNLJY8aMPnzkyMGD"
"B/fq1atHjx47d+7cunVrU1PT0qVLFy1a9NJLLy9fvgwSgyOgXmHD8jrtEiS7WgIX3wDATp06"
"DR06dOTIkYceeuiQIUP69+9/0EEH9e7du2vXrt77lpaWlpaW9957r6Gh4Y25b8ycMXP37t0S"
"PDyYW07UdsKb8tlkZChUqHSUKPOA3r3GjTtp9OjRo0ePPuSQQ4JOWltbm5ub33uvceHCBX/9"
"61/nzJkTLYYygIi6AOz86uIhaNqM1Wk5KMAxBSMAoPV2c2+pg3Dvr7fzsSQeUZcbrBQSPMl0"
"RGnjx4+HVKp4M5SN/MhtyyVJYhURtaCAM/EYJpzAAIQMDgDA42WXXvKlL3+53SeUhG3OnDm3"
"3HJLU1OT0bNMSx8uvfSyL3/5S3sy4K1NmzcpuJjPvRgJQZmZLOs71dWNHz/+rLPOPvnkk7p1"
"69buRN77GTNmPPL737/62musJn5UdvgzqmVNdsUjhL+vvPLKL3zhC8ngL7zwwq233gIejjv+"
"+LMnnH3mGWceeOCB7Yq0YsWKBx988M9/fqatbZdOQMuUNYNCIc55bCSSsl6kHdC//9/87d9e"
"cMEFAwcObFcSAFizZs0jjzzywgsvXHLJJeedd17Xrl1ffPGln/3snpaWlmxfpQIQChfJAbzH"
"q66c9PlMS//zP/9zxx13nH322eeff/5xxx1XV1fXrlTz58//xS9+QSyQCID6gFA7tcmWogAM"
"6Du5TmOPHXvyySePPWbs6NFjunatCaIAsG7dut/97ne//e1v29p2SxaZpu5iFgEteChYK/kC"
"O3XqdM45E84555xx48a1q5O1a99/ZNojjz36aM+ePX/yk59Uv2MyeOvmzU2iEPSg9zhlp+i0"
"4QFgH7wfr1O3aCUpIZT2lE/B4QDw7LPO8onqCjmWzfU85+o+FykVmTndm2FQqkgpPzx9271H"
"j2984+uBo2vf1qxZM2nSpGitQBVAj549v/H1fRvQZCOczxpqQATvD+jd+6OXXnrpJZccdNBB"
"ezRR2BYsWPCDH0xZvmIZaNVMa1BOEc0yGwsr33777WeccUYy5tSpU8MqDj/88D2VZ+XKlf98"
"++0LFizwSmj2vKMJgpajUS7plgIfgo0HDRr46U9/+uyzz85ztD3dnn/++dtuu40+sC0UpPyD"
"NMOQy5zbSlpaunRp3759e/fuvadiTJs27Z577tm9e3emGdAn44L6R3ypBQL6g/ocdOVVV15w"
"wQW9evXa09lla2houPnmm99//30xR+S/UZUGSYOn1MOAzl06T7xw4qRJk/r3779HkjQ1NW3b"
"tq2WW4PWrFlz5aRJXoURdyXKto//9swfMUVBtE5dksI1jMl/22d0m19kT7ov1nsEnDDhbHY4"
"IFOCHsb0hWDfdBNHQAUdmKaseASNJI9JB8qBpKQy0es3D/6mxtutku2cCRNUKBMTHnzwwb0b"
"cMKEc4BbLFr+8bME+U0J4Jy79PLLJv/D5J49e+7FLLK1trbe++///sijj1Kc8MD1p+fsQhDE"
"RQQnQg8//HC/fv3yAWvMiIvbzp07f/GLXzz66DSSxluMMfOZLg0CRFGSUeKcmzRp0uTJkzt3"
"7rzXwtht27ZtEy+aqO4gLExloZepTZcFPJS1tC/bk08+ede/3iVvKYK4cjKcBPzwB8kTYdCg"
"Qffcc0+fPn32XYyVK1def/3169evDx9Ncgg6o0qGfOtQFDCCkY8//vgbb7xxyJAh+y5V9W3C"
"hAn8J3KIJ7uZrrNdDgB3iQCU/tjTfeL1slIhNgBpsST7mk8I4LEuPCtLn/+OTpJYOpcRzI3h"
"2Vx0AoUbvz60IMMTBxxJqWRnMkAH4OUBRQjg6aEnnst3BOf3jrxA2CPM6lhnHvZhQEBqxYbn"
"diCxOiBwu2bkqFE33fR/hg0btndT2K1Lly5fvfbafv0Ovu8/7gM9u0XmoKAY5KGTq9T0PfiQ"
"fkU/3xc2BIDOnTtfe+21vXv3/tWvfkVPzNTGVDAWAKBelINAT98SGndY37X+29/5dulWqL3f"
"tm7d6sDZE9kESeedR0qoXegshV4rAEK/vgfvXzYEgAsvvPDVV1995eVXwsP/qMHOER+0mkB2"
"0AAjDx4+97nP7xc2BIBBgwZ94+tfv+mmmyUU0GNaPJg7iSlyISI/4k3PKiJAXefOX/3KtRdf"
"fNF+EandzSEaNkZ9QA360M1DvuALuYNOj9ICVmzIR1Bf94KyQOAgJQ+hAQ/gnHSgAfRkFY3I"
"KbIDh4joANA559ARD7jwP0SHgIAOEAGdQwRAdPI0Kf6vc+CQTrmFPx2iQwfOAbowNrgwKDoH"
"DFTEsA6Hzmlneo+3VatWgUOHzqE87hYRXP5cg9oH5NOF6HhAF86kOHQOAOGiiy/+6U9/sl/Y"
"ULZJV066aOJFdPLEOeecA1IgsvJJe4hBa0ceWbi2bn9tkydPPu/888hSDhEdIjhHuGApAxzo"
"DxfwhdipU91tt9+2f9kQABYvXixgRIcBOBiesBDQ5NAF/IFDRyA76qgj968YYbviiivYGgRl"
"gnX4GCCI4BywgKSyceNO3I9inHLqqSeOOzH4LiJP5tApaskhH1ZqsQAAIABJREFUCTjBRo7s"
"duBBB971r3f9r7HhqlWrgBxJxA1CARFGEB+IQ5AKSV4GEPiCIwbzu6BiecYv7x1GQxeeT0NW"
"cYxiGYkxHLQUlKeDgMrhIEiFQjQ0c0jEAiiD4lVeJ27BkvFzWnQRimVnLIW4du3aPdXviy++"
"+KUvfYkeFYzodHkAbu8HRCV+WiJHCER011771RtuuGEfs7Di9qUvfWnQoIHokM6RIdMPMFRI"
"MgonRx75gbi6bNdfd33//v0DKhyCU0J2IgaFSqBwEdQ2efI/jBs3br/LM2/ePAEjhgzIhYAc"
"1BLk49OXrLIPSEtHH3107969GXlBI44dHckLyYcd/cZQ37+STJhwTkg2iDDEoYEiBzgHEtkc"
"ObJzrl/fvnf/9O7iPTwfxEbOFeiI/JUZLGiMeYadjeMtL4/WBOydzjl6uqKloUACYbnMrWIi"
"1hOlGmFHGt7VOXQe6VGelLAhP4yc20fgzM28/GTe0KExj/dG1MuYnPSXTC0bSnknpQVN6kLt"
"jQjwla9+9bvf+fbYscfWotzW1tZ777338T88HvoCqHOGQhPR+69+5avf/s63jz12Dwb8w+OP"
"o+dnWAKfENKE2H/5K1+55JJLqozT0NAwa9as+fPfWbF8xcZNm1patnaq69Sje/cBAweOHHH4"
"qR86tcrJu27dul1zzafuuPNObldKL4yqQaAHo1NHesyYMbUsbevWrbNnz541a9aC+Qs2btrU"
"tGlTp86dDuh9wMjDDz/+xBPOO/e8/CbfsNXX13/mM5+54447qSlF969wcRZqEkTpmAUi6t//"
"kL+/4u+LA65atWrWrNmzZ89ufO+9pi1Nm5u2IPru3XsOGjxw9KjRH/nIR6o755w5c5wD752j"
"3qDTy5/4eg96foy+atjXoqXt27fPmDHj5ZdeevfdZevXr2tuae7WrX7QoEEnjjvx8ssuL56j"
"R8T+/fs3N7cEBcgTf+mdGwR9LzcPyBmNmTNn5if6du/evXTJkoUNDQ0NC997r3H16lVbNm/Z"
"vmN7XV2Xnr16Dh0yZNy4cRdddFGPHj1ySUaMGOGoR0AzebqylM4DcDUnDyxDj9CrZ887pkyp"
"ctL/vffemzFjxsyZM1evXrVh48bW7a09e/YcMnTIsccee/75FwwcuAf9KHauPwC35py9qIbM"
"RTqznCFnW1in1LFB770LzzFERL4mz1OO7vmKHMTQtgjdZn6kamC4UELzNBgu6zr//AtMLzgg"
"CkWVUTPdnFBA7soG3dLpFORn4vJJrWABc0aUvgrzcCtTrmvk86jhnA94AHz4//7fAw44IFHu"
"xz728eaW5tDFpIaCh3Ennfj97/8AAObPn3/9DddL+4ARAQBwyy23fvjDH05GW7169ac+/Sm/"
"e7eowbbHUe928ABw1dVXTf6HyUWT79q166mnnnr88T+8++4y5HGQlEgfwp8HH3LINZ+65hxt"
"LUdbW1vbVVdfvWnjRoBYcSCaD+qGTs49Mm1a9Ut8Nm3a9Oij0x577PFtW1tA1ExdfmrA9OjR"
"45pPf/rii8pF0+7du//hk598f/06yWpUNyDNb3khMyL4T3/6M1dccUUyzooVK37xi/9v5szX"
"AdNrVe3lFsOGDvvC5z9/0skn5ZKsW7fu6qs/ETRhnMnzdQvsAAAML8o+plXV0s6dO3//+9//"
"9uHftrQ0izSoJ0PggD4H3HXXXcWzDddff938BQsszpAMrx0r4Otl5eVTvXsfcN11144bN27X"
"rrb33lvxzjvvvDlv3ty5b2xtaRHTkF7JNci7+h18yD133533H9e+//4nP/kJlJOvQsICZwxf"
"Bt2R/u68447jjz++qJOGhoZf/+evX6MLwuKL5qjX5yZeeOFnP/vZSqEUANra2i6//LLt21sB"
"2ONNHxqCmeQ1wCBsDnKONraEnFUBPo3L5ys4HrGc5tY2ud5AbvOVdqSc3GUgI0B4YjaXIOR6"
"4boah2Q+DFlJaJqH3jVQlxINg1KB6QGcY14M3ylSeX/KecJQdEYG5b3gpDGHCDt27MgV3a1b"
"15atzSjrQujevccNN3wt/DpmzJgBAwauXb1GnjIc1jF4yOBiS+vRRx+l05G8L/Eh65cfJo5j"
"xx7zieCQ2TZ79uy7f/rTlatXhYQ/nFVA0JeHccECgP79dWt/+C8/fGPOnBtuuCG/EqVTp05n"
"fOQjTzzxBJ/YpyAn/Xkk6/jhhw2vzobPPffcPXff3bJtGwCEy6WR3lqFQcEBFVu3bv35z+5Z"
"OH/+jf/4j3k155w788zxjzw6jT070AyEhwzTw9iDrimOupNOKtDZTTfd/P6699GFPDMwhuQC"
"/LYkDyveW/Ht73znv/7rodztf/f732PICMHx9ICha2hOv4dR+VQkDD/ssCpaWrBgwV3/dteK"
"ZcvZcOCBzhaFEsyD39zU9Ktf/eqmm27KD2/avNmh854wLwlLML+kqHSpCJdgzc2bf/D974Ne"
"rhD+4+X5iuZlI0AJFYIHWL9u3bPPPvPxj/9dIsaOHdup2qRCQozIPqkWQwfeO/zYZZcV2XDX"
"rl0PPHD/7x+ZBt6HM6licZTMAAG8f/LJJ+fPnz9lypRK1y0tW7astXUnFRBUI3MiTQSIcu5W"
"8g9w/MYEz16DYkxKBejcr4tfPszNEkoZXcgc+Hwwp2NAJQ6njUgnpMMNL9zVcFSQU0lPnUFp"
"eCI6CB3qcHbEce8vvM+JO4fU6QegNgWV6tTaCN1MCKdwqHdJ3W+q48EBnaHh0wjbt2/PFd2t"
"WzduOtC8X/rSFw8++GDZ4ayzxjPDUQrtHF5+2eW5t7e0tDz956cpKmjv1YVmKXU6HYKDHt27"
"f+ub3+rUqVMygvf+gQceuPnmm1evWe2ogc5KhbB86h5JwuLQOQfPPvPsz3/+8yKSxo4di9oL"
"Jp04qnuQah5wY0ZXrAR3trbeddddP/yXf9m2bbtDdEAapZ4JABuO9Ajo/vu//+eBBx4ojnbc"
"8ceR6Z2cvZJlOgQjq0MAKF6dO378mQIHbQLx2ZiAITol4vDFF19KDp8+ffrjf3g83I9ArfjQ"
"EQoVMmq7myzgqHFeqV723j/88G+//vWvv7diBUMwdPuAmkpBPnDgcOXKlfkI27Zte3/NWpoQ"
"HK0i6INbdojqWaQ39bVwjKPlcL/POI18ARh67wCbmjbnkqxZvYaOd9Jio06aA0etXxEMcejg"
"Iddcc00+zoYNG77xjW9Mm/YoAnBTljyZTtY4JAg4h4BLly658847i+oFgPnz5wfKEKuQYslC"
"wOdtw4kN6ecB8XjQGZuaznjwiVlWowzLJ2T4RAr9YXqL7JYuuAMggxmY0NDp47+EK0yvQTJ2"
"0De3ay+RS1wn7xRHPqtN1z3Q+wdCvAyvRZPECx3xsvc+9D/IRSVv8ABYzBDr6+sdOI/0YqqT"
"Tzn5vPPOszucNf6s3/72d1TxewSEnj17n3NO4b2df/rTn1q373DhvRgUkSiMhASPAot3kyZN"
"6tu3b3J4W1vbD3/4w7/85UWg9AQ51IUvHN/SRTmy3oQCCIh/evJPp5122oknpuccDx02jDXE"
"5gF+lwOyiRBGV3D11tbWW269de6cOeFdEQjIURn5UAn6IFkDIE6bNm3ChAnDhw9PBhw2bBiy"
"Vfl9N/R+UarlwQM6p2/cKWyf/exnJ0yY8PLLL7/99tuLlyxpad5C2kAjEd/O9cD997e17Trj"
"jDN69+69YsWKp59++oknn/Cwm7BNjRi60MfUIJ6vG+NUAWDM6NG5MM3NzXfd9a/Tp88I4yCV"
"J3RsKI6oIHfgPBYvuV+0aBGl8EjOYN78hyZL00YTt1F46FBgK3CAq0YACK9co6uKgCpeP3jw"
"4FySJUuWOLoFFEWPDELPAZASJgT45D98Mj8luHHjxm9+85uNjY0g5waA8zBKXrkREMjAgQeY"
"M3v2jBkzTj755Fyq+fMXkIXCeYqQMgHLSEk8529aQ1FhjPKCALnkht9CH/QXJOK3b4HUq1wl"
"IJ2z8F7SZHaBwJcA1EQSkX0dEqjp6mvKrEUiYPd2dIMKv0iKkipPz3ZgLqRrwvlEIJfNQbzw"
"siYt7akOoLPlfDsI3/+EiFgmxJAhBsX26t3z+uuuS3Y47LDDDh02bPmKFUGJHuCiiybmdVNb"
"W9vjf3gchQ1pJSBEKu9iOPiQg/MTKd77n/70Jy+99BKdlCfG51MgVD+GC6OQvCI8HcKxCTw8"
"+OCDOSH2O/hgut5HGsP87AR2H/AAo0uuDgB33XXX3Llz0Qm707Mb+E3CDAKuN/i9WOi9f+qp"
"p774xS8mAx544IHIZgwrF5+RMhC5BkLAlStXjhgxIhdsxIgR8n1TU1NjY2Nj48pVq1a+t2LF"
"8hUrGhsbd+8OyPetra33TZ06dep9tjXkkGJyiLnapdV3VWljwdO3vqilH/3oR9NnzKTwHEiN"
"TC2dcLquMXw/atQR+SDvzH+HlULu7p2iRnt10iTgy+Oo4UixBes61R05ZvRRRx89eMiQwYMG"
"9e3bt1u3bvX19d77rVu3btu2rbm5ec3atWtWr169evUxpWf2NDQ0qLMiIupcfDqBTjMC4IjD"
"D8tv2tm5c+ftt92+atVKckpEJgoELuWD9VHtDeEM68svv1wkxIULF/DLneVUF0oLHNF5sS7K"
"8KAEhcqGjpvF9j2azM8IACCvEPdg324deIZTAjA+BGBOB7PXQh35JsVqjVEiHiBnbbIaMMmG"
"1ZTW/EryTs6pcFopL7gSkWkAx7HNvOq8EiGG0hs8fvGLXyhG7/Hjx//mN7/2AOhd5851F5XO"
"GLz44osbNq5HfSce3QpCovHKEOCjH/1oHlER8Wtfu/FrX7sxH3kft/r6eqc5BNK/FCBJpvpu"
"XYtXQT7xxBOvvPwKQYCtBSD9KkllORvjMxIhmZ8ze04+Zl1dnbNv/xSjoZwF5Ne+eg+AL7/8"
"cpEQ7XbAAQcccMAB9kmOO3fuXLp0yYIFC9+cO3fum282tzQzuUj8oDnorAWC4xca8hlldfxw"
"TH23+qKW5s+f74K38dXlkqIFiqfhObM4+ujCue8F8xcAWg8IEd28wdI7T2/15bRFYz527tzp"
"pJNPPmfCOSeccELXrl2raAkARo4cWUWZCxcshABhTkwDWxARyPl2RAD46EcvztP4Bx98cNHi"
"RYjybnTPqwnHSj/Ox/QIiLimdHFbc3NzY2Oj+HQY0mO4QN1LGWsH4lt++KYNDtlIzyihK7iR"
"GTukXACUZEhhBmjPbjFaPUdtyk0CS/MdKZwI1YVVOzlJz1TIYS/SHHkN6d7LT3xWiFYGdBqd"
"a0TjiJLTA5dd1M1S1fP9Dh6g0kmVrt1C8X3qh04966yzixA588wzH3zwwRCJzjrrrOJlE48+"
"+qgAyHMKZVI60mmnznUTSuX2B7ft2rWLwg2pVF4wrmcvRx4xKj8h09LS8p//+Wu6tYQjKYNB"
"XIRPaITz+eEJZdzaWL9hfS5PS0sLXYdEXX4hHRpZizFEAP/444+fd955e3qbUOfOnUeNGj1q"
"1OiLL754165d06dP/93vf79o4UKCPXIZKIU/skDAeJboHopRgCNGHpFrafXq1Vu2bGGC4HAv"
"FE+OJNoG18nlaab3/p357zghX3EYoDc9smCGz5mdunTucuHEiR+7/PL9cr/KunXrNm7ciM4h"
"l56kLaGVUGEAAEDXzl1OP/0jyQjr169/7LHHJMzJqwQ99zK4gwTIAAqLC4bvVLpFfeHChYAC"
"EIRw4k2G1hpJUmZOxRzSKRWNIcQHjl/D6yT7BuBZwD6dj+pq5+V2nVCuOuC2HVX83PVhMq0L"
"p+j4kRJUVVOAQXT2JI4QGvGdJnLKvmk7QHsN5EvAmANgKpQMJpzlAbWF962tJUKs74bO9erV"
"68tf+UollAwaNOiIkUcsWrwIAC699NJ8hzfffHPp0qVo5WS7EEEzW5x4wgkH7Pn9//uyNTU1"
"OdSXU1OSh9xtAA+IxdbYf//3f2/btjXsZAkw8AQ3PDgmscqBxnYIvniB5JYtW7ggUOTZvhc3"
"mYOYbsf2Hbffftttt91uz3Tt0VZXV3faaaeddtppf37mmfvuvbe1tRUADAD5vYBcUoDSIS/P"
"owcYPaagpYULFyIiX06FEqa98AdrPAw5YsSI7t27J4M0NjaGKxCRsycAfjYfpRWhh+T0Lwfg"
"4cMf/tAXvlAua/ZuW7hwIaJQF0cMOXNKG+VM4046MV/Ln//857ZdbVS9ob5KUI9mN+fmGPLZ"
"AgDAopUXLFiAaFTJF2fycNzhBErTPRMfSDBntcrLXj0GouRSha5npj4BsTkfSUgPZqQOMbCp"
"pD0JLEmgNXovM+dsIVFiE7L3MH1RikD9D+1qUhMmpMNmolipunqO9VLmSE6qWS19gdu3V+wh"
"fvGLXzywaoA988wzFi9ZdOIJJx566KH5r3/4wx8oZ1clqeBapoIbO3ZslVk+iG3FihXS/WFd"
"aeswKLjYGpsxYwZQdw8gXE8AUksQOLlw4z45d2nCPvmJIwBYtmwZtwwdc5KeP6C6RBDjwTvf"
"uHLV17/+9cmTJ48fPz4/NV/7dv555w0dMuS2226j6w0UKlqEUr9MYQPApeuokpYaGho4rXBa"
"mFBOwPTFsR4Ajjqq0Labv2AB35UgybOzAoQ/ueuJITH8yle+fNZZZ+21NorbwoULqV9MaKEu"
"CLfzJUMCj1C813PWrNloe2dSewLwMw64Cw5E+CbthdGjSkpe2KD5jnbq5FpI7uUgZaCANvUS"
"KuCWoZYmNhtFoGqOOQ8MGWrHgFJKEKoFTjARbBz1/GQHqeeRL5sBvQMJkW+jAQjXXvDdP3RB"
"DgLd/Ix6BLcHzPB0+psulEG+OQeMAHLdQTgOHHJqEG1du3Y9/fTTP/KRNPNPttM/8hFE99GP"
"fjT/qbGxcebMmXZWPTsP/CVfyFHj3SD7cVu8eLGoiu454iuCHN+fOmrUqPzAZcveFW3zJRhq"
"FACU+//4l7BW0jsgDh8+PB829Ox1R9CLJ6zhga0WwLN5c9NP7777c5/73P33/8fcuXOLpqxl"
"O/LII6+99lqRFkFsRksJODQ76KfRJS01LFwo4A43wQbR+Y5xuk4KeIbiK2sWzJ+PwFfZBM8w"
"AjKW6MogROzdq9c///Pt+50NAaBhYYPqJSwD5C5eko5gAGXYNK5sZF3yVS6UJpHNnXgqmJtj"
"ERCxrq7TKaeekgzovWfMIF/qgzoY845ce4Wqp0CQhkZYkbKHDCZoQ4El3YxH5lTzCsvwAGRt"
"vQeQ9CMvqrfNPUPevEAAAB/gwpdmEi97cHKbm9GI5DQSZyGKAJzqch6azkh0XvSi/v37//3f"
"l+8Ms1u/fv0uvPDC4qvKH3/8cZGNU1abxIsWaJx259q/Wyg3ki/5ciUEgL59+xZrrq1bt+pZ"
"OKCgB1JlOjazhsWweZnhtNNOy4edO3cuop5RU5tKhAb5WmYmOTZt2vTHP/7xj48/Ude5bvjw"
"4SMOGzHs0KGDBg3u379/v379anws2Omnn/7ss8++8cYcOfcvPX+6SsFL2kvJInro2++gXEu7"
"du1a+u5SNCoKKJS1mQ485R3FW6EXLJgvvBEDxxRFjPiuXbvcfvvthx12WC2L3aOtra1tyZIl"
"UuayIaUG0CuZAMAjFMvbna2tgYo88ikrbhwZr418WVZ8xhln5EpetWpVy9YWPSAmB1MORxSj"
"qazczCt1o9mHL6fjS8AySjGn/0LJ4sNj6TlXBzDm9+ZYRC/vZY4sapJMU7JrY98SFyZMxuPH"
"30suzysxYpj1eNVTmG176aTKhRdemLe6Xp/5+riT0qcJfOpTn8oP37x58wsvPM9PMgNpIaVr"
"4G1fHuG5F9vu3btDkyt89FAgnCOOKFwFAgC9evXe3LQZzKHqrbENaWCQgREADup7UP5EhuXL"
"ly9btgy5TWXO5QGAXGsUFzsQuVLo8LS17Vq0eNGixYtlJc65vn37DhjQf+jQYcOGDRs9evTQ"
"oUPzSBC2C/7mb+bOnRsBXVldTsNRm4O1VEyil+3c1WZnkSt1zEeUQm9A/wH5Gbnm5ubGxpVR"
"TC26AX/xta/dWIUN165dO3369CVLlixdunRz0+bmlmYAqK+v71Zf372+W79+Bw8YMGDgwIHD"
"hw/PqXnFihWtO1tlOdYSGOsoSFN8cOcBvQ/YuGmjHCXdOLrgIl8Yf+zWrdtVV12VD0gATtVh"
"oKF8Y05K6X7UhuDJJPyhNtPBHiYSG7KjqYAHkxHlTGAhkKcvqteZLF+bYj+xvrhXtGK7O5gR"
"DfR8shbTw+bfHQC0lggxZ8MXX3zx/vvvnzp1atKuKp4iePrpp1t37kKU3BRzm3uvVF9sgV13"
"3XWNK1cmrJ+atcixhuEKewjbcB8EEfP9ioUPABx66KHz5s3jT8VpMPo1zoo/+9nP5td/PPfc"
"cxiFPbQIsGnDaaedNnHixOHDhzc3Nz/33P88/PBvd+/ezQfEETfI5f3769atW7du3ry3wjcH"
"Hnjgueeee/nll+eZ45jRo6ESWYpgUagua6mhgXtbDLz4sJhyPRx1dKFeXrhwgXX4xK8Sc51/"
"/vmnnJIWlWGbPn36Y489tmDBgvyn5i3NzVuaAWD58hXhm9M+fFpOiHwFYiZ+ha3oEUeMHjVj"
"+nQ5WHprhlnMZpb3+c9//pBDDskHDIRobuCXgTwYxlCfjzfDL5YtMfqPkIYe4bzcj5qJLuWN"
"JawwhpVACDGpF6uIiHbHhAt5rHSR+eiSPfD5wgiUMkItjaeNGzfef//9zc3Nc+fOLRbIdtu5"
"c+fTTz/tUgFTTjN5MzQ3N+eXR0yYMOE3v/6NsRGkyvMVImRij2QryqVFAv1cKUM8/fTT33rr"
"rXbGrPDTBRdccOqppya/NzU1PfPMMxiH8Ego3iZPniwXe3bt2vXjH/+7pqbNTz31lBzgc1hk"
"wXHTpk2/+93vVq1adcMNNyR79uzZU61WIdZIXAv/FrW0aNEiwjxGlF5pzHK9PH9hZXaOhqmv"
"r7/66qvzfRYvXnzvvfcuWbIEoMJI2XcjDi9c2qnLKW1ZnIempqa8BXT2WWfNnDEjnhkra5n+"
"/bu/+7tKL+doaGhAOfESJQmF2FNhmsAYeTaQFcn8N7FhfHJdjsm+YxhH32NUMlffyuEi/bm8"
"S2XPL0ch+VQLId53330tLS2I+Morr7RLiC+++OLmzZvbSTXibdWqVTkhTpw48ZVXXnn33XfL"
"A5m2QDujJ0dVk4s82DlX6U0pZ5999pNPPtnY2NjORKUDP/OZz+TfP/bYY62trbm6kspg7Nix"
"+aXvZ5xxxtNPP61LqLh6my8AALz++uv5TsHE8RGVRqqmJcMgFTAXf1k8pbawYWEWU8vbueee"
"mz+w67XXXvvxj3/c1tZWhcuSbeDAgfkdTUDLgUoaSb9F2LRpU06Ip5xyytixY01tUT5aNufc"
"VVddVTxdCQA7duxYsWIF5gxkh/SRsapJnq+hwpYwSk3HZFtaMpfn8bUOV8u+yT7VP7ZLiH/5"
"y19ef/31sIoZM2a0+y6RJ554gvMdX5C2tID58+fnaULnzp1vuummKVOmvPvuu4XDKhnZV/qU"
"HoV0VV/8OwJ4GDp0aKXHt9TV1X3zm9/87ne/29TUVNwhF6lrl66TJ08u3ui9ZMmSp556qoiQ"
"5KuJEyfm+wwZMqRWh0cAr2Oee+65+S5Lly61bfZ2t6KWtm7dqo9Dr2Hr3bt3/qCKtra2cBlA"
"8ZBEvGKxPHXq1N27d9coxmGHHXbppZeecsop+RXm27Zta2xcGR7Sk06tH6KvFy5cWLzp5cYb"
"b7z11ltXrFhRZS1h69Wr1/XXX1/lWrQlS5bE97SXhqkc+H173+znLZ6gOiHyvsWUr4KYWOk3"
"ycF9Ol4cKnw4aRjooDohbtiw4Ze//KVk5tu375g9e3Ze98k2Z86cxsZG9qua6hQAeO211y67"
"7LL8+z59+nzve9977LHHnnzyya1bt9ZkOwQA6NSp0+DBQw4dNnTI0KFDhw4dOHDgQQcdtGvX"
"rjfffPOBBx4QLqPuYTbCqAr1ctgGDBhwxx13TJ06ddasWVXEQA/d6uvHjx8/ceLE4pnH1tbW"
"n/3sZ7t3ey18fDwEf+7SpUvRPbp3737zzTc/9NBDS5cuFSShPvowHtQBehg9ZsxFF11UfHrY"
"X//6V9PUstKUY1tRSymRyWEG6ZZGivXy8uXLi1kzHRcPUbxxcPSoUTNLWbBd1rBhw04++eRT"
"TjmleBVt2JYsWQJynzQPUCp29cPMmTMvvPDCfKiePXt+73vfe+CBB1544YXCYQAA2LVrlwsv"
"vPDiiy/OL+22W9bW1JEKfEfPQKi4a5FWK1B/JRcsZRfW4k6PwkmTrkwHNwAzIxFb5bMls0B2"
"ZDVRo9/IksZncNSoI/S1k9l25513Ru/JBjj11FPz9pNsP/jBD+a9+SZdEJ4nYEYW5Hsvwm63"
"3HJLlasRt2/f/uabb86bN6+xsXH1mtVbt25t3dHqve/StWu3rl379OnTp0+fvn37DhgwoH//"
"/gMHDhw4cGCla5XnzJljn6ckujj//PM/8YlP7NG76957771XXnll0aJFjY2NW7du27Fje/fu"
"3Xv26NHnwANHjhw5atSoY489tlKmuXv37rvvvvvVV19N1JKqCQAAjj766G9/+9tVJFm5cuWs"
"WbOWLVu2auXKDRs37ti+ffuOHQ6xc5cu9fX1Bx9ySP9D+o8adcQxxxxT6e2Xq1at+sY3vtHW"
"1pb+kHnGHinq7bff/vGPf7xly5YoXO+htt9+++2f/PjHm7dsCTLwjVz06wMPPJAreffu3a++"
"+ursObMXNSza0rxla8vWurq6A/v0OfiQQ4YOHXr44YePHj16L27yMcsBxQ6nIOTGiD/60Y+q"
"vGW0sbHx+eeff/vtt9euXbtt27YePXr06dNn6NChJ5544nHHHVd8Xney/du//dsM05GkTahB"
"KLASI5Dg7SeLyLfv+WSHnCJLJVz+NSLgVVdeqVxk/smzZEu8AAAgAElEQVRFrpD4GiYDAFkL"
"qDkgDcO6mGxciQd0enXYsEOnTJmS6QwA4Pnnn7/v3nuTsqBz587//u//XvTz5cuXf+tb/6dQ"
"p+pDdY2sUbKAI0eMuPW22/b9tcLtbtu3b//Mpz9tddq9vvvnPve5Kmnvft+89/fdd9/zzz8P"
"EJIx8wRiu/FXl19++cc//vEPTp6dO3f+8+23L1q8GOlZJWUsdu++N4pat27d9dddx1DH7t3r"
"926Q6/iRS4F/hBKnTJlSJcXb79u6deuuu+56C2LMFHbSSSffeOPXPjgZvvzlL29q2gQQu32S"
"askvCeVUrPXNV6pd8t3oYIiOkqSmOBhCVLAAQB1IRcRXJ6Pc1AlC5yoOJtPxJlknPecwSMv1"
"MbJMSuLhV7mb1qbHNCl6hJ07d0FpW79+/a9//WsRnjQLvm3XrpkzZxZvYnnyySeRrtgV1fKk"
"iFYwrxYJS/CLlyyZNm3axz72saIw+3Fbu3YtPx2ZJJ0yZcpe3xG8F9v27dvvvffe1157LTze"
"hgSRE6EKD7IeVj7lvV+2Xbt23X333YuXLNVzlkES+S+7+94pql+/fnw6FPdlENsyC4gKD5t+"
"9dVX/zcJsV+/fvbSU37wt3UyeH3W67NmzSqeotn3bf369U1NTSivzE24GdBwCgLIM+9jAhDm"
"o6d5MKMhhvBMTzuBQDJEM/pEDRclaAh0G7M4uU0P6UZl/lpvmJE7ZABQb9ujW2b0lrpwi4t9"
"cx9/z++6kvuqwm1E4YYW4H3oeDKYDkqvIkWBUzim+HAHAJg6der27dtZBgB6ESoioi30ZNu4"
"ceOrr74q60QnTyPm24qA16m7ICLIjWnTpk2z7ZUPYvPeP/LIIyRK0K1z/5tsuHLlyltuuXX6"
"jOnIOmFQiFaQYhkqSoYPH/4BybNhw4bvf/8Hs2bNDtPrnV58Z5fckYZYfsRALZsgz+Hea9v4"
"AqiaEJ999tl169bt3Zh7KYneiKmYFkCFu2t+/rOfL16y+IOYPZzEly3kGgZMwdOceh5xhLlV"
"z/HdoeFJ/Qj81tCwJP7gkF8Cyq9cleOB79ADeRchiAxy0ynorYIqYZ2TGzWArhMLT96kN8sH"
"DvfhhimpILkXED5RfoZADV7w4VFE+vyYyF66t2Rn9D1Fs5BfhgdqFk+qPPfcc/PmvUXPCNXM"
"jkZ86615zc3NyRX5zzzzzO62NmderQ4QblMyUvCTllFegIX8VDAPCDh16tQNGzZccsklH0Tt"
"vG3btl/+8pezXn9dEkRIVfcBbtu3b3/sscf+9NRTbbt2BdhJ5EZ51i3QkzdNk9Uh+krP8tuX"
"ra2t7fnnn3/44Ye3bd1Kt11JyKdyABEhPP0TMpDVvq1d+z6bv1SH17a9v3atcwwm8Q4PALB9"
"x4577rnnm9/8ZpWXMe3Hbe37axGAH9vBTyAy2gnp0o7WHXf961033HBDpSv893ozl3nyf819"
"JfKAaBDBwl502sGePqAfEOnuFD5bhQD01qKwGz8UXh47QsiwUlGWJQdpaS01Iz24og7ohY6o"
"GSxhj8tQEQ44GfcAliDlEz1lWN9Zilz8coFFIqC8xyvqM+gj84MozuOuXTsTja9bt+6hhx5y"
"jl9bAJQXSw7d1uZnzJhx9tlnyyE7dux47rnnQLKb8DS/gF6UNgLKc5PMwrn+Dy9M9f7RadPe"
"eOONz3/uc4NKT3Lfi2337t1Lly597bXXXnrppRZ6oaXcUOrBw7p16/v1KzyBxm5PPPFEj+7d"
"zxw/fi+YuqWl5YW//OVPTz7ZtLkJge9+ldgmNY1AQfDNhf0bb7yxH1ucO3fufPHFF//4xz+u"
"X7ceQM9xS12E8tGzOyAAwLp16/b0rvMZM2ZMnTrV0SNLwYNbt35dv757Mch/AAI//wcQ9UlO"
"HmHJkiV3/sud1193ffGhnNW31tbW1157bdWqVflbDCtJwo+sta8300Y5qxFaWpp/8IMpH/v4"
"5RdNvAjTk8LtbCtXrnziiSf/9m//Jn8T4ZKlS7TZEjYGCni+XZa5zjCT6cSAtIuArrHGyENj"
"buGql6tgdPJCCYgbiMjo1QNVQpl58uTJ9O4EIPhRBx1Bl6UFuFkl8qkgeS6nFumsAPqeDuBn"
"CTF9/f/svWuQntV1JrrW7kb3RghhgW7oYgECgRACIQmwCfg44zj25GJ7HMdxPEmlKqk5yZ/5"
"l5q/c6qm8j+XSiXxhJNyPCZO7MqkbJxhXHZsjm2QQEJIjdDNyLq50V3qbqkv+/xY63nW2u/3"
"dUsY59T5kS8VrP6+993v3uvyrGetvd+9/YQE6ZZaPZR4PfgLX/gC/bzW+kd/9Ef79x+AavP8"
"Gd9RrPdt3PiHf/iHVNKLL7743HPPIcJEVRYhxwWUQns0qghqpEUiolq2PrLlmWefffDBB9+t"
"MYnIpUuXjh8/fuzYsUOHDh18880rV0ediAtmmhR7ZlZZNDT0+3/w+7OcJyUi/9d//a8H3zq0"
"auXKD//8hx9//PHZV0XYZ2pq6s03D7788g+/99L3rl+7JrATV2F1ZedlBhrf+N8iUmtdsGDB"
"Zz/72YcffriU8vLLL//P//lPmzZt+uAHP7B+/fqbF8709PSbw8Ov7Nr1yssvX7h4seVr5s4R"
"oVnDjAivddGioT/4gz+Y6ViFzmdiYuJv//ZvX3zxxRyUVeqiW2/9/d//P2eXdm7kS3/7t//r"
"xRcDZqjDcBd3pnnz53/yk5/4wAc+cDOE+vr168PDw3v27Pn+9/+fK1eu/sIv/MKv/dqv3Xg4"
"//vFzC/ggpj1DoZVs3iXL1/+ix/9xZ1P7JzprHB+rl27tmfPnpdeeum11/bMuWXwT//szzqL"
"JSYnJ3/v935vYmIiEs6aqBC6JqE88n3hXg+2RVgADtUeAnVUqzRPb4DTuIqyIEJ5VT6OO+qJ"
"IP9NZq6f/4+f57NUGyMUwTkzIVH7CUDWmSimAASZll8Ug4nRiITrhxYJra5HgaiQ0BKi/VMj"
"1+XwMnIRtiURZjzHOxcaSBseICSkKehK1o4hLF68eMP771m/ft3atWtvu+22+fPnL1iwYM6c"
"OVNTU5OTk+Pj45cvX758+cqFC+fPnj37zjvvnD59+uSJE1eujjrqslNppkIUR68nEaCP2co7"
"E2w+oFtuGdy06cH3v3/9unXr3ve+9y1YsGDBggVTU1Njo6NXR8fOnDl9/Pjxt99+e//+/WOj"
"owIUZhuIpYB/yCnHOYReLEoKsEwvWtU6dOvQxo0b169bv2zZsve973233nrr3Llz58yZU2sd"
"GxsbGxsbHR09ffrMj398/Pjx44cOHbo6OupnTMK8kfGlBSQpevFT07+j0zyrGd0L604pFU22"
"CeCkoxywbzWPg8c1yKtkCuHZneeGSZL+yvT8efMf2/bYPffes3bN2sWLF8+fP19Vx8bGxsfH"
"L126dPLkyRMnT/74+NsH33zr+sT1xpATpWlyc1oHtqHKO8nUFjSAF5BV8sShoUX3bdy48b6N"
"a9euu/XWoYWLFs6bO29sbOzq1asjIyNHjh45cvjogf1vXLs+YV5x7z33/GHPuaxHjx7lIjn4"
"ZZZAOFPOjXmBOPtofVqaESSjpC1IZ/CCd6j9ieSb5BnAqO6jVfQ//sff6lItprR0EXtG9Uen"
"g+UrlVEhYmz1SIPgD8lx8mhYDGJZodm9p6J+lJfG4LGEcS4a5DBSaBHKACrxsCE14XPWBOsK"
"JA8AVkGEcYlyKhZV1JqkhpgStkzthm0GYPuHNCzcmOkpLCakkMNsEDiLy9jFvMEBNghbcOvy"
"WJ5muBEtBHYlNXc1dAX9wQcbI6JiXOE5KjEfSd9JEjRGHnKiXbvhCGgBxN/CQ3hcArmIzA3j"
"DDSOeNDpalynSXHsRgiH/9Lml2RjNEFCmFkRV8lKvOcfWkurZ2ujCqFrKn/m/rBp1YgDDQDE"
"v1IXN/ouUUUPCw6vEdGPfvQXPvWp7vHQ3/j61//H819OvaEINPVRaGJ9GV2ojvaKmE3l0/LR"
"c7u1MiLRcnEyek0kC17eyWClVtFBq9sVurP/piJp9hrxs1D9OFlc1Dc09G6XWtjxEtq0AovU"
"fKnkgp7PNRkKKe24qhSf+lAc/AcK4af3wDGoS1RQSSsADma2pRT4jdU8FYbiW0dphCQpWpwZ"
"eEyAMHyHaRq/swXsvx6BWX1z/gRL8F7EFalih2m7H+PcJP9RbUGURtFTkwilKnFPVe1/BFUM"
"EfGJuhR8XG9KnswUD1ViFal2ArLBogmYlpvQx1dW4Bilmk7frJIKfIoQgUbULU2hKloGnUJ9"
"RYTTY00mrma2UlnzxLBLlVpUapqaEkQ8L4pikqzakaV0+1qgZnGkLjzrnOe7sTSndmicl/2Z"
"6RSmFUUwJVcYxLRwGOhYweDdXZQer0Urjdn3mrZDURmjWOIGg+XMQoW4FQJRVwoAI/SNyOUn"
"K1k/EEhYrYND2cmi23f0qRrv3bu3YEiVtUyegyruSmFoZm016R1iQVFXobwAOqvowzZNLd5R"
"M+sqceinilZgnJ2oagrAMY017LpqUR0UyFDFdoD1DciLOO+C4NxSzB3sbC6ctqoIhO5uylMd"
"rDSvIrXGLrIEZJiYH4yLEwgKooEdk+u9qX4IWqrqS1S11EwqCLEKjzk1syCuuMGrFB6iWcUX"
"Tpnq+TQHSzDRQjxwxED5tNmlX9PUJyxBHJAFdNTnLCROotGS2oCl+DAV15tbMyXkQXoKVAHi"
"uDebxyDWqBdPimcUADifVajuG2gB0c6Vagjn4qmqDUjD4RXH+2CwxUO5kkyqrWEo1fSsRWv4"
"Itiun5ZBh2TsYR6Bvx1AOFpV/MfCSct4awCMFJwFlTehN/FA2qoF3D+ApYDVllLAqnwkCK68"
"3aOdmQWMXzVSJY42DDQBiLcbPEQBY+rfKf8o+WwXLQIup+4ATkBNfyXOtUNIcnFaIz5k7BaL"
"SOjje3LnzrtXd19JHBkZOfjWWzYz6v5EY/bI6IMwp6rVjg0XMwQeVlgBU9YGkV1M5DzGwvye"
"DNDxTRGUzI5K4xQwN/oYIMh0JiIyaHgIzmCWEyuD0XPneBFyTeJwaROq6cDkLX5GlSMrsb/i"
"7HYh2kS3EH8KynlGncgKsWc03FdCxP7fAsutpnOf16p+XKVZYwGfLlqY0NiQwMUqFnLB8kiM"
"AMCC8xiR6KS8Jy8DVTpGckeRiolNjcOgC2NlDXxLLEeRo/r51iWtilIEQY+aHIrRLKAS6RpD"
"nKL3DHb+NOEGuoJYXh08LRaKBy9cRO7pk7aFx5p5mlACx1NHECQrLApoVjB2pHgeCkA9SmRB"
"GJkPymzfeCIrTs6TWd5RGLxRsRoMxgNAcgzh4SsgS84BVPEasbs5GKTSZyMtIiji4RUJhP8q"
"arZq2ZQGDICzEtuMUga4mG0XdFDB+rElDw8npqKCvbgFO9X1KhAaVKSoFJ3ee889n/vcb0rP"
"53+9+KJDNIdcKS5GDcFUPKDJ9zBkJSvpvgLMEF9VxTa/FgM68XN6DQpVsBCsSgwWNm2eYG6A"
"53nQKUF8ZRBBNUo1huSOpwBy130gNvM/Cagmu8ZZtH4Dwl4YSXB6E7t48lUBWbXi2DmB5wvq"
"dZmQOLIqs2fL40KAiUjAEgAIqYlIFSRoq1t6gfE5ECYxVziTgC8VZZojal4rBYdlO5IZKWOi"
"ACABlazcOUxTzkwehqwakdPtyflXIsQOGC4BrEYAGRJ3Zq1gddhumqQ8yoISrqqw7ipxLiBj"
"G1c54GjiJvsD0GoUABE1nFB53hpuLm5J6UBqu4WFjIKjeo1qCi6FCZSoNptlkRR4WsvxeDxN"
"d3GLeqfXiFEwGgNpX7GL85OE1qWiFafuGbDAJwSZhcIaATqCkzrcdVwt7oxwJ7pnZBuweNBc"
"HipQQRIRTxUxpUZVDCdgCzyxgP6LVBxwWgYH/48PfeiXf/mXe+ejR0ZGvvPtbxcvaCCOO3Or"
"1LRzFxu4hx93AVImFw3ipoN7teqV7eyjSq0QpDBI4jrKHaYPJB4OnMKTl1nEMGEMwlNyFSsp"
"XmGaGhjnoa3y/xB9g3orMhbxLjjt88DhDmq+XhM2mRVz9ZHrRtDjsMPUE8+kPdcRKR55RAQn"
"cmEGqdZa0jDh+4hjEbJZdvMyAlQX8BlBGaTXbcBtLH8FGibASWX26mRdifIF2nXX8nAV1Qri"
"IRAwX5BeWmKhIUikRoRR+qCqVMZZmA+CCFdrEsCUhRBh15FoesYAw29SQlT3VcQrPBXBwCWU"
"j4hzjq2IM2rcMKV3wTqCiiFPKwIcNkahDmeJicUw1cMEYMcMFOGnknyoihcA4pwSBvKCUYKk"
"1loAvm4VFc/ybldLsau/AuFwLpI2ckfSEeFQgu+H0UWdg7CPkqIyRUHso0mBYnmlN47qoyBU"
"RXRgYHDRooXLly+/776NO3Zs73uSz/T09F//9V/bhmbUVyiJrgs9OX+LoiklrZ4yiWvc74u4"
"poE7ERsF6VytiAyk2aXEvIiyx+FawDUv9Ogg5EewFU3PFwR3oIT7Miy6iDERycRBvdwC7Cjq"
"1BZkRtlpU4AbNeAI3wAtYMQANuY9Wnm91bOL17dwuDPpP2xX4TZRaHD+phXADsUh/WOFy7HB"
"Q5agpujSBJhVBQbhvGDPZfNRXJi6hVc7uSX986sQZtIsIW5AhakU4D4CEnMFcCu/XDgSCsV5"
"ZBzl7V4SRxbhPYBUJWegszSkNjQ2plTAl4rjfUpHMwWu4ourq/gchueIlrAJA7AEoiN6eBRT"
"BVSCtlp1yqFMeAeYloBXqYjXfcA3vVoSNpCyVZHgKVEbwUQky/gWS0RRbfQBmdFVrX6EL9CR"
"qZMi0iIgwiUpJowAdfmwZZWKzLqYNdqbM0agC/MGT1iripbB8swzzz766KN33nnn/Pnz38vL"
"V1/72tfeOvQW6KCV7kE5wYrhMwIWJBS3wq+QPnE2xq4QGwtZF5DQomNiSpEJCOiiMtBH4s+H"
"i4hPA7OAIIPk38HPnEx4Ka3ahCM4troZV4Z1f9fFTYzxm+FWPGqSMyhKSpHjRiLCTNobkqCc"
"2PeNtSgJMAjKHxFTw+4wEjgojANsDgxIxOwVpqT8ujK98Xwkwr+kRNQiScG/qohZGgMksy9o"
"FwWiOA4beMMpq4pXOk3uzg3MX1E5Up72ZG4hLlrO1YgVX8QvrPQqx4rIcmy8hlTVH5DzOGup"
"Cg+TjERAGLWQXCSKIu62PpY0c8FAgQJopEUsF7hHFTe+4qE7SHuYGOfigg0C//wvt2pH1yqe"
"8aYCDvrsdCU1kae4jV/ACDgtaApuDQ21QufbIO4FhYrKMoPQs9Ub8hJ4CpdRx61gHd5HAXCi"
"ZhtC9g4UlcHBW/7g93+/76HV7/bzz//8zW++8E1lUYQnszv2QBkRkirjJqzCx2EDcvoKWHQq"
"E/pEKzBjGAC+RtCFZ3gi6rOWZu/UAbATiFEHC46mRH8t6JveFRmIOxWcJoiO+MoEPlbyzwFa"
"0Wf/vcbEBKZ9YNEVrgBCEpFRiJrEP5FcBsOBkjF9gUwyMWbWLnwwMS+qTVKJ+SuDAL9VwKEt"
"eohBEdytBlXIiTfuEqgV0crZPVbruCbER8KUoIMbShzjwha6JSZ26HuwCWlJqjitLqiR+FoG"
"Z96FlI5zlUykqmhQQUEPSo6AOQtKdR2U97w8ZTaQVeMkqxTACtM98RdDGfShs5rE4lGDRWzA"
"vEuqxotdggw7hxRI3+dYoMNUdySwQvZOdq0ZvDOXzkX3ciwMA0JyJo+JnERlHQOc5MLz6Dr4"
"R6npVVelLVoPvaisODkT0gFS6L//9x9/72g4PT39la98xc4gS3gbqS5xR6MIJ6h3+nArBFlQ"
"gAHGxAWYv/WkXlyORshYihJQHOaCoGZgow5pwosYrIWQOCheYwGZ8tU6puVmQQmEKUhWCOKi"
"bZiHzjXwz9ojPgQH8GZqQGFMCFszFZ4uIsie3Cbdn0DpaDsVDLV6/OFpswKnFIHoDd0s5VDB"
"IgfXb3RIvDmLGs701PMBfC/u9yICpii5cCRe5QVvgxfyeuX0X3Vs8MUcynzCIyaCJ6ukkHqw"
"JkaS4AgVOVnxeioRLNSGyEJlamThElN/ImFyAUricQ5rsVR5BfJkSdAYtwlwxm1C0EayoPgP"
"eH3N67nExaieqVRiXkHeJCqFLhdlDUHeSnrgZCOPLLzE2Ylg0W2SX2MHHLBi0K4g4iQwmzEL"
"0nDO4L4NXPfqeuUTU2yDt3uUQNLl9IAMW0SkbtvW/xTAm/+MjIz89//+18eOHcn0HbIICYnX"
"3ARnukO0qEAVWjx9F04ZXQ8pp4KgeWwcV4rkCigZIBZOxosZKGJOzf4xWEqAFXPaFFioxhQO"
"yXZEBIaJSl5MFVC5QVgkQp4NDiaf0xtN3lH5e43JZI5TwISBVNrwWMB0cEuicTTg46msqgMK"
"TanqwSIkZnBUBatmK+eA3GYBpp42cPWG11FjuJkXhViTcdgAPYxwxtMvEwofBC4aU47CnacC"
"PQvqbO44nlpHJ8StAQgSNSDk8SkJh3nnZ2LoUd2Imphrz4efHudIhIBR6WUuCzaBAlvUhttm"
"1Eu4VuxQSZfUWE5GkPOYE0EkqgcJgxuww1iBNoySuQ/0Geux2Ay9LydJKSyDi+EjKTFiBbES"
"QzHpoHQZfgbjhrenwdA5qGiR97JB0bVr49/85j+/+L//98TEBNbOI/OHlKCdsGnkzRhRARNX"
"ah73Cqmz83XOLnh+4NqEjzPUoXKW2XtNys6kQFAKdJxT5+6DqFaUmjy+nQ4jvHryltgCvclr"
"kpYK+OxL5a2V3gLlcj4X5IEUEahZsaxDXOMRRPgPNqv8FpVmjiYhrgNEU03yYRSCgjbS5Stp"
"KU6ldIYjT4Gn1tQ35leELGATH9RWIwn3SAqY8gssITFgaUeSsJFhCa4k1ZdihFQq1WvCl7hY"
"oMJYLx4YK81XDRbSfCtbwArjiDl5QUisUMQ8hcCV3ZuqcNFx9DvYsYT0U8RwW0ji9egFeGe0"
"8GXzgsQf6BYBHYIHucavrLCn0dMfsE+aI7uqwv2zJpmLqQgzHY2+azsk998moCTrC+QEUWqd"
"j05Zhg8MP7L1BudT9n4uXrz43e9+9zv/8i+jV6+IhFRdsKTPAmZtfULVPsaQEhIARE19TJV9"
"QSCTgFlKMDCIzuhhOBfBhL3in1QBzAYJgvohUz7bGrLT/OCgC2qMgb/Q8Bj4A4cicJakCjgc"
"IFpzr0parJLof+g0W3hIXMJbBL0lmgf8ZVaTP65GX5iZkQwUB9Ec6K3J5V3nMGHcQiSi4OmW"
"jOwUZEiezktVOx+Nx1EiMcvVjIbhMGgiQ1YUCbqC19RFoHGYWc9jkgjbhMCwKkAvZo58hk+C"
"9FodLOallZ0O2XCceV4jHEtq/E96aCP4rG4Ir3IeM5ltGmVuS7NFIg/CSgCADspBGv0J+cFo"
"WX8CduWhU141nAbcJHPybAXiDMg4TrBvIdJgvIwcNjvxpf/xpbHxsfvuu2/x4sUzbXJTax0f"
"Hx8dHT1z5szx48f3799/9NgxX6KXI6tI6Jz1rARUmofV8T0RUUTKNMRgCMm3Gq/3oSseYuzA"
"65DBOfCIQMPmyYlbIpYoz4LokUdIv+sPdebv83P6N8a/KwA9/ZJdvzaxrV8r+ZeEmZLwrxcx"
"Zhkc5ybxcw9L6COF9D8zPCyPOUoWPS3XjoA0NVmjN507awSeoLN9utkRVsvZe7vOEJNYZHsN"
"ZVT7WHltpNdXFjNKCsNMfCd3OiowbUOdJlvtvKtPH2Gn3KhnvH0NbfY/Z3hubhn88oYdbfqc"
"KPNN9KCPV1LniQn3c0Xe0fNDLzj0s0AXmeSLhHKfCWB6Hh/FmIaimPkE0+gzin5aq5IOqo+e"
"NOQg95nNaPq+48aZFqUBd4cVD2g1ipqBsB7QvauPVCTy0XRN23ynp0l2CXeahSB9GuvbGx+q"
"Ee0ZpCwoh2vbzcTFGstChCQqpSS/O0ANwtzWV5OyWiWleb6KzKTrRtHRKlmpLVFpTSU9HErv"
"i+FdeXaiftSpW6fOdTRaTVhXR0WdPnEUaW5qpk9qjlLw72qea2qeRNz0P1vcjEfnkXZspfFE"
"SdWKLjpDT42LYuoIj+sgYn/UCkmEmnt0mqpP3U8fmEwAhyyrz209XWocA200iWG355J63PDx"
"pmP8oTOKfvBQpNZBMha/sy0RzzzaHgBq+md/hmPO0mR0Jr9I1Ht9T7RAs3ozT+j+3NpS6/Iz"
"+kpEr274a0y/IwX7r3ZbwTyY9jSRhdv7W9NMNN3ndu1+kf7Idcv2xz7PyuGjebi2I5vhWcnl"
"aRHZ1vs10Bl3F12I272d5SfpSHMCrMHTey+9QWO9VzWsBIunasxlNwPrVVKfx3r0056r+uBV"
"bq8/UPcbCudopDXF/oPqA2h9ruv+TWzO+Ujtp++ZGu3hJd0gcBONdBBwNj2bRMgQtfk+92vG"
"m29sR96HGy+BT1240TP7fnGTUorPLNFzlra0/Y99V/sJr/8zE9HLmu3B+Zsa2bsec3t3P9yb"
"8Tkzyv6mUsHe+/RG0u8mYr1WOUMO1++e5kJj0jNeeuPGen7oob1BkH+qz3tTrH1md97ZcLrb"
"hRs6+Gx/o/mOZGb5JLiYrVc3tLsZbflGdw7OzobrzT9VfaLypnp2s58bduGnbOs9Gl3T1M32"
"8Saj281e9K7k8rOUY6fdm21d1V9ya3o0o/HN8G3Fv94j3OSmIiOd7ZE3aDFdqCL1X1Pitd+/"
"e375aTowgw+/x8+Ne9Jo46aa7NPmzJJpr5q9O30AsRc4qOEZ2zJr5zKZd/W5gbwUMy8+Md/n"
"2pvWfV/weteWA3E038x68c39Yr2bTc/5O7x5NtMTul9r70+zqnTmJu02LK2dFdg692cqnUU4"
"0xC4ArS/w/yUoMMuJznkpjvN6k09CaSwouexMHvmTjTNzv6Mtq/dP/tzn9LGoJla5lU6A/Ge"
"0fFu7tMlpG0oQl9pizdPMDptct12+EffkMHTNke4s6gAACAASURBVHt7kSdV8m+9AYYaLn26"
"qyK1t9jcOHjypZ47e5+XDEt5cAhWhvSRSjqF2+/oMBIVreHBPZ+ZVMa/ZkKXLi/oNtP0uFfT"
"/WxPey7p41l6w1RdNUs+oK8DYOVGjtvptcuZW3Cm727w6UpVW32p9lqIpv9JEFN7W5mVLPVh"
"PRoxPAMLO6C993X9T3vQRtvLtNusdOJPjKvPM/p8Yg1a34tbSGYPZgCgfi3nRnstvg/ZCd+Y"
"qW2aYU9P47nJg0SbO26EjUmofnPzUyFCsi3KLuTiI/Pfu4DYGUyD2Jq+l/RlHwdLlsUwhb50"
"LCncSQPMogvaq+OOSDDMNFYuvRMsMbKr8uoR5UvClMpMj4nJj86jhd8n5Ev9beFGw0EyWKb/"
"2DhqMhE01nGb2gG87geXsJutrZin1FYvYdfNwzhuiIhm0UKEybqTFYuPN+TTsRcR7Grj6qk1"
"OqQhh7ip83Wv9TaWn2CpV1Y901GqHXPqh1Chai57awGhK8Tk4rk3/u5FnQkCaCPJCPDSU99u"
"+VsgsB/FizTxatbMeNd5IjvbxZx+9zRX9jas6QeazszOJlzJ30elfYNexn3NP+GH/HdHmg0o"
"q4gMNoeo2qLGzKZ8Lxn/VghlmoSX7FwErw2LCJfrA9XiomTGnSVfzfy5iuD8aX8DpJM+pcc3"
"SBJ9Urw9nGxBRbgjQgoOiS50yYGKxCEZBKSAMm8LQmmhotNgRCSqj1+hZy0kWZNaO61iU9Da"
"KACS6fah+UeSdZIlgD/E70v+G0EEJoUCODbt0MVWQMq34Kv/7QIvYVh4r4XWx0CRrIcWUDzc"
"RXDA9ckLaoIcf9WMBiXWwdBPRtI0hubf+TcYWOPXGlKCBOgw1iFtVo3m39NiGxUEVb8g3rfo"
"hMeIkfZ6ve2nwk19YFdwaofJ/qmPpsure3rtCiT5f2sH2VDzE5M8bMgNeWxdmkI1yVkP8hho"
"B5qEldvT+FKq5JggHCFCUkMlnSHihUIqlpwSi79gZinXV6KkeL9jOXpSKTbsjqVBHXUyUHfs"
"ODSkyReFJYAAPb/H9j/ueEf4fQYyFcEmpBCuSyiW4LrPKCFHpdl5Ig5aUGwUjbCcVOZDTfVV"
"f6WgDaTJhbJZ4kfuhRc3INeMjX/6MaeEOslsE06l6xwfIPJW+wCyimezw207+C2HOUbTOGuE"
"QQG3xzlcLkM3jSwSQKnku20DgJowDjYKvXPDpfBZ6E8DnUNESSceC+mIzViTJqAz4rpI9e15"
"CIcwWOxga7s2VElqDpGQcWgycWV8QAlZRWKHrazMjDqFMYGe4DBbNWQSjuR2jo1o0ONmfJ0C"
"FroDHTSCxnV4H5mqSVZWJbxPGDY1+6vjSg65SMEaRpGlABMRnAAGx9O8K5QEKx30Te+9Ub7M"
"lJqE0UCsbnOpJBdwyRigIohs2CcPEnCPkLTfOzyNoaQNXi5VDgfWix0flFLWMKGsJjEhozZE"
"gyUQ8iFSgLahCI3fuXsUeTPtgMl5Z7ZBm3/GsNq6TdQVgodq04y/GwUncfg1idDaG7BGI8H7"
"yfgqzaQRBQhw+BJNgwaOLYk9UGiV1gwDILxhhhTfVpXOKClMmONi31XIh1N1eOM+PZhmAZoI"
"1atvlhPssnH9prd2OSEBQm+qRX7eg3lGKK5mtWE5f1AS3zUpDEtYK4L1CrsVoI9WRWyPZW5/"
"5D1A0DZnJLRwIzG2k+wMlsO3AZtxeKkjxMNv44wfg13fIMDJrZtMU+SyFnz/O5pT8hT4CccK"
"qIRpYPt+Fec9OZOj/6XKr+9oX0NlWnPbTGNjv8gYNzUjtLtBNUVpYEWAYAjQfiGEEOZoHf6I"
"2lRePb4r84pgDrRD8nxKE0ApwYkTc8M5F9xCwXVXaerFgmKu1LsE8DqsM5BgL7QFeFwjDHqX"
"Cs78SMGF4Q0bXpH/JFE2IS1OnlSN444B0+JYJZX7Akhl7UgBgxE/Aw1drtz33wWbX9TTRI4R"
"QACl4imqcwEJ2Qg7hCJhEW4zW9n9xglJ4hx8FbuJ++2Z+onzHN+VMXmLOm6GqsXdH1FHq5VV"
"GJ09EJegI9hyt/r2lmGmyW0C8qMyEqihriDz8iqC7UabehdeJ/a+o2JkmiaJg90n7wWnxQVa"
"G9Ll1+MlCjcLhhSEu8wawwKgSfd7vooVOMaHZEKBKEK513h1pmA7PfyOcmWD7UTPtDsujBAK"
"RUcCcIomEFO/qDZdVbFD2hzmuOE9wgzULgwozOGSL9InqpAgKmqIsGps+wE7iP2jsX8MiIzt"
"9YHsNGCcYauCTiaa6abHqeOqKulYR3Oc7Ft+cC6HTnAHe7aBqiZu7bbkwM1rcEH0nCJOACVx"
"ugR5GNQUm/FqwVGpTPIISqZXPy/KIaegtAA9BTYbQgYyKAyxEt6JaC4+MFCQAho7xhAjFct+"
"Cs8086dUZ6IO9wlHMGYwyqgnF/I+oKd5KQN5BVujTE10xPlCa0QDuA+y45UMnCnkAixAZYFx"
"ycUY6hXjIa91B3WqxZcFQNEUagwsqQwCeIABIcmi4uyflM6gHeC/upxqdF2bi0NWSgZBDCPT"
"EGV3EEWDkeb/VEm1GUnTBlpge9ZwxRl0FZGOAJdDpEMd94YsWitPkU3RG/vIUXI+ijgYIgWT"
"pqwl3jFGbZ6hwXQojceRhdtVaB690yCWRwVszwzeNpnFUyh/aE1EpA5iA1LbqqJUCJQmlvap"
"hHShGupEBXwL44TqCzMNHs9RpaazN7zT7jhIKjTNzCh7C9D1f1ZAAWJbSv+TdQbUpRPyQh1h"
"rM7GSzVlq+bBCrRQwQNExA8/ptFHLcADrHIjuCI8f1YS/JiLobwv6WvQGrA2ZrxNDtmm7da0"
"n8EKZDM1+VXk3WY1waDMKfA4w1PgKzi0Sp4yi+jkY7IqoO0xlo7P0yjJkxFZSHQgwLHQKoqT"
"wJWTH0QEsLjasiEyrcRUOWXBW91LLEJGSQNS597ZLi7swipsLpNGZpJoBaeahZfb88jMXf6c"
"6KBU6Tf+jQSnEMYCCLkmQWgQm4rzY+ivhEJiPGKI23EyQR8NgEMI/O47NnTl9oW0i8ZDQboL"
"tjYnHorE/k9ix/kxADLNMReEWZBe2NkVlXgUYyLTA3C7D0dplYmzFD/IAMWNEhtve9Mp8umg"
"t64VR6f4hvhmjurGqWbi7mTEbVKoIJaQJ1Tv7KuA8LFeapLynbSCE0G6hdWvqtjrO1cYEQdw"
"/KN3Jx3hYKGNybik6BjHSdGNHNKS5WnABE45sEIXinP5CExVWBdjJLFMBfItYJ8eEtXNglwP"
"bkDKALdAlE9dzJ7jjunRvERDkX1AazB2C0sK5mSZp/Dor+C+Xhp13u1Hbje012t2gtq7Y0iE"
"hoLMzsRdxA4bd4AVGKo5XuSO6DSiUBXWGSUf6SWuQ+feigOxMLsMklFjtEin3RadnEZeb8Fc"
"xPmheTmpYclzb8FmVQUBsCpK2fBdu9TcMBA7Iiz83k+bw7mTwYRhlxWnJhI9wTesqyWYsRmS"
"ndzpBodYAM16/1IBC+m5h0u3T2ExlQSXxxuTv7jJu91pKnm7SQeplYBWhmT3OjMgpmoq2OBU"
"vDhSE/QxiBCtXLiowFrtglgvnpYJ+D5yCB/eoBart7oQg1nzJEFyIVa4w99rIXIiKanplMzk"
"yq4es8AUFx0Ig0SAsbGUh1BEZh1ObfVl1tBdDerhCUVCMDvMDlnFwXQMx8XpUC6ZiKDCkym8"
"s2DEXucJTBRosgb0EVrdwkDuyDIhHI8kpcKfUZFBIRBH23tHMB+XJtrUySPbZmkvYSfYnkTd"
"wtEAo60dzg/dWSwPes9ShTuSY7K7jgcPvzAChe/WLZCY5t55KIm/8GMs/xARzc/HWXZC91CA"
"Q1HFQR6O7yGbqEEAL/GTlgJyUFmfsxJLQfiEyCpGrTQYkhlNBzUnIbnGpIqdzRZZWfE6Drca"
"tHu8ZA6/EkjB46kJIZ9UCkBK64tcgYV7DqPgzvkogInEumFNd3odDthL5kLvDrhVUBgkFL5Z"
"uIAtAWlU0v8Hs0I4VDAjgBUPCAHDxWxKXu7BdUnIXN1nq2Bvb/HA6gFQwCVxSNcgTxBLOExz"
"Rrj3ihyPIBIef+dorc5tVYBIzoOq8MxGVqf80BKy1fAOSso8oUR6UB3CwfgRzHx/0Yo9JhGv"
"xH1GwIQq9mLWOHVUBYTL6St92TsA6BHlqdVm4xXeyw2TEaFMd3lPdZ74h8gryeSQWNCvyH2L"
"+7ojpNcCLUhUTZvz10QYcTY9D0skJvoAxTHRHQgsgK0o+lUFvNVsCxmoUh3Kofjh4R7DfZ7c"
"z7WvWbVcF4WhszQlHtI9wBb1Y9bAsd1AkSl4PCfxrxi+AOfSkfTmoeIGAZCKLMjCjWaChAjj"
"gq4AtTT8lA57tMOEOJ9J7EF531M4jEE97pVq5ZQSM4oKhIv4ZXfVPI1l/4FpG9CHjgFqFdEC"
"pKXyZxtgTPbgZ9qoOyJL+S4m9N9/hxtkkFFl1EEk5XyOnYYbqyrBRzwylXh5yZ0EhctgC+Aa"
"JZ/mTToFXbiLcPohGRD5KVOaWkXLIBFVzJcVdb5CJ4AkfWB+CqylToXn+pbqJz5FrkVzARdy"
"HiR0PBa4S1iZJohxPqNeAK5IbgggsDzANglV7GqtbEgBmMID652/ZIqHDjq8VYRK+xsujBAD"
"BzFwBcFhlUzUVpYjHCDrsfZRsU2yYlgRnKdrZAIptE9teFqESOg9RfCsYuPTxNpTLd6ZscFS"
"dbhDuRcRRmHUlQbufkURwCtqKhOpRxnMI2P2iOqukezCJivYU1UE61rzLEBlAhI8Eq4ooODp"
"CRVdcl/Df8kzAch0aZXsVzXwWyB28kbYNqpO1vNK1/F6gmeuyshMUAaSxbyepcCAbVGWhg0M"
"iCzMSVBYq1pLLQbblcbDgxIgeo+SjhWqAG24RKFXqiOWJ3+eiXi4sxicKJRTZIZduIOfQq3Y"
"odGdFNGN6OxAgAIWUhp19gfBmaE4WVeXscu32gRnbCtqbWee7WIyI0w+liOriNe0BwUnSsJy"
"nK7GODMEV2Y1QlgvOBpbEc89tJgoLWoiQHvrjHbI3PCVWZ8pzmuDLLqxTuo1Xq8VGkaj6hYT"
"tjSlfO6T901N4yiYauXJD0jBvHzjh+a6+QnSPmV5I5idmy/k7Km7xFyFkDvgfLQQMeeDgCVC"
"c3VrL14hBN54Nd3XBTAMiJOC6lPh9OFI7BXGy1TXd/MXpLFemjUvxAk5FXK23jWUEtTCbwdn"
"RdHMe0cyU0NF7qEwMea4rjK4oULicHy3fX8A55aRvhA1PbMm4SAE+v9zutnurhRjYWxFGEAY"
"854rwlUI0p5eSSCgCZxYj9udOReUjyqJA/wgpq9NiYjkUWsC/0/1K6Vm3U88rmIiJ8h+RErM"
"XoGx836IurjlI9EHwANdpDPrW0WZMVT6A+IC+As8BNdUO46+eqqfiA+SUlevg71XvH0q2BkP"
"0MBtnNMTtdQ4L65Wn8RzwkehGaINavEelCRfQQQMcu9QXlEHKy6I1FcPP0wWMCBDax+YargR"
"T2J0MVZJeIKjIyMJEPoZGolAB5XR2h1PGGNxLQ0tWJlaT9KKpZStGXAyOoigAEPmqRiXJRme"
"FmlN9f0IFIGBIEgciJMnn4NLhSmhnB3gyGQkWTDEZt+mQ30Fy5BgUozIyqI6pFe8eFbFin2e"
"MtfKZTqOYDwblS4KYPT/OP7XyPS9N05XTTvm+1F4qInno6NixzRXxE8hzat4FHFKyH8rPBaK"
"wfAtqSejcLfxo6CDV1TOTSHTdImRWsEFI9cTpWWhz3HstIsOCbLazEwJi7PDQ2rFy4swlGa6"
"2sFEIIWYzHfVOfYgS0OOirkIoALiYKoR4GcEHof4CkVi5AL5AJppXJABVJgEh264R8JPK6Tq"
"1A2L2VF2rLRbyXJkKR5SYFop7owgAVaVZ0ehBkx4u6DF7VRUBhFTmvDC2pGkZ1vdy1cVVTVa"
"GiQjVxLoGR39KRhQBRmHjDgW16hjREQFQYVKfCk8vSC3QGPwEGNAhLaA8Xj3wJDOc1CQ18oj"
"b0CnkD/4ckYWMhE4xFdtpJsRoX3VvvUqWywXSzNTR8yh3skhFYkbbok8McIX3FvAjRD9ETE8"
"X0KCIBa5SiWnql6fEsGLLgQgkaAkmWNgogvWxuJ7hdo1fhRfxcy5aCXFjmWZoUayJlRK0ihh"
"q5gPqDFoB23h4rcKY8GwLf8gmiGsOrfz9v2JqLYAQCEd5Ow8gyxKtvQ5W/9pFCumnCtTmpQY"
"SUH1h3VeZmaBNZB9BWlPtuqPBObH64pMXsIWai7jaxI3ijBuLbB+x4FwbAdMpvImfFo2XLZ4"
"GoqPGiuMVWmwGq/xk/YRY42o1wBFm/bk0pTGXSICu5+EeLBGBEOAAVXkV+rhqsqgBWtkxSky"
"xzyxSCrzqOuhJmlWuJericaPpIOejITEGB1glzxO1KtIDmCYBbNpGgjJDRDc0UOB0qO82uMc"
"i9yEhAynlse6SFXW2VyURqx9MCxYgGsV8GbPi9A8OoQsx+3ZGVae4jfheikWgyOcSfQWZktY"
"kLg/Zevoi1uNIja4ESYi6qZTU8WKE7/QIaMx2Bye6CALnSNNA7UHjvrMCdcBMX7QJAkpitAB"
"vKxIFFBKRtjzr1nYCfhRUGchX3Mh0BcBoFXEtZfIJqqFSn17AimCWmhF5HAGI44BTtNQwwnk"
"ZO7sT45gjdbdyQVTGlboigDtrzojUVERB9jAfp9o9TQTOYdGZHC8pd3kB4cQg7NpxZGvBREO"
"FRwGnTBjjl6KnzsNjyuaxI5kzdXja3eAKBpZf1RsfL62aJSQlA2ReZQWEelmysyJyRanGgQo"
"HfCV/hAZNAHDTAHNSXMCSmrxSYICmZVbFqCSs3eYBGmJfZng1cu+qDpqsgSHHzQnKLRY4PG3"
"HQs1b30hN0X2p7BKVzO8Hb0BNgKhhGtPvehc8QY8pzYC8JCANG/0IklF+aGABNeS2xCOkEKs"
"uTqU0k3ELvdjoBlVxT8BLdA66zdAigo+UUUiOQpC5TwoaIS4bCrclcaLyOCQV8FK3J69NAOf"
"UEFxsvE6VB0kTdPDO2NUHnb86pjRYBlK4G9G0RTGL2DEjEAREKAlgh65eThumF04qIcFjwV2"
"kb9tqDArhhR/ZEXTMXaua+TI4vr0ioU5vjD+eJdZCAmBKHJzDWFAJRIomEobNYk24AAZgXDU"
"VchbxepeTkdVkyLcZrwlK6d4Jk/CAxFXHFBPW6JY8hIp+iYXCkDgKJUFpWFaItQ6wNtHU+N+"
"sdoclYMKCQRV/VxmGhvE7d7HxiXCmVMDgArLBQRPiWkwILCmio35Gnrg+oIWAVaEtcYMwtrd"
"Z1E/IfCRQNCvDXssM45KDUOKKGaCJD7WBVcBWH/KAgFhJg3W5jCIjNOe5Vi/RXnGLBImZC0w"
"MVKR8FBNZuRcEAgAG0xsJxsNLANBLCDUOahgbW1alxSaDBcW1HolTJ9EVMiU6LW8naGLKbkH"
"fqUho33vVHUwj5WxmvilDwf9dCOgOkSjbuPEszRjqVnR5oPFbRKQEpAYU4lN1UbdG53mpTmJ"
"LGDI0VE5dJqIeGJdZlIC9Pb7JdlBFlRgLlTigmLwIFcFIPvfxh00MwmCkEbB0AZos7ce7YEO"
"iVsA5lFNrMrVfA65VpdW4YpM0z+66pYBulTFO1ORYPmSp1ggQEIX1hpFSWUPcyUsvXadjTNF"
"NqK1KDZ3qLAlpQhD8C6iilzAQgUUFVpMxhsciJmeW3sB9aSACec5/iZlZwsTKhFr9a0bFaaX"
"pvLNtlGsjEDDJE9IeyOwEpoIgJqwhzkQaBJ0zocmG0yhyZzYA0kLWTAx5g9I0QRrBzLCaKhZ"
"Y+qniCL7QWwScAIbZSCUiMRkj0U8LSz8B1+DYPk+pUYFJdI0wDqrUUlFkszX3c1YocKOIToU"
"Q0JiZK2E2ZBlhB54oWazTewHEQygjTGSz5vEgm45NKag2/SVZopOkSZn3Aq+Zby7plXLaUi0"
"beA+Yiv1FK6P1sMOajJBQEv4hAfxFHWwvI15VlAt2nkSa8WbeYoYLcqOeMkrVXOya9AFwlIJ"
"thSEJb00GyC5SrhChlAJ5pqCF2ZpfUljAAWqexE50EFNlRqRZIvxoMEC0cG+0h9UWvyDmkrX"
"MLUQwoQYqRAlLJUUW9wuAqhYBWWo1RR34FQOVDmJTfKXhFrOHUSwvjfifo4mZN0JBAJVxGUf"
"N4Vl+p3pUSD8cAUUkQQET51sNi1yQk45GEFcFo9gLPzCGt3rGTFJDmMIvmQpl9GoOngCTBCa"
"r81yIMiVCAsSS27oFsfAB6cWc40UY7Kog4Qw6Hu0B+ahrOpwUJVTDahiSto3xyGW9gFqGFYq"
"ISz0BrpFLpDgPF1KfmdDslGExJhDJAUoKG7JoqUATTXhSsk/ETvC32IqyaerMagwEDigGhnm"
"MBS2FdWySBiwqjfAgDzTH0b+YCaTGDcpd5TLyUGDWviNjKBmGMVBKXAMpuHRsCpeqk9Gg04o"
"AxGgxd2C4wUGsxexNKwGUQyJZPwIQx3MxQY+F2DDBCZpPP8P7U1yXhDto5KlmAHJwMUIw7DG"
"LDmSpITPqRaRIMIySp8yrs0T2EBC4GS/NUCbphGji9Cs8bO3j7IYVmmYelBi1WQ2HpMlBfgM"
"FSyo+/gsxaChedG0tY9UF/MvM8VNI0g1xdAZQwBSlyaVo7VJbSNHYziJmYtmnicpTOeHJoCC"
"GWD6NVJcQaWLurdnhU0rFsbgOWHSCXvyP6lHTCjnHlXmB/A26Ir693nSPKOY7US8jOpAmBZd"
"JDHEtVz+5FaPWRHTdZ/KjQdbTfwoJrGakCqoFtfmsSoNX0pS41LfgjVx+BExLlGf6uhWE3Qi"
"iEePwzWTWhKjc3lhkYnAQs2CCJHxRBSrmD8KhpwXs6KiLZkRsYDCt2WDtHqEE9DSrE8DxI4R"
"h0E57pQkAcn3p6VINTlDeGLiku1zCYWB1cquFscR3gHYbL5JuSrrXzD21rLiEfZvLDGFj1ea"
"U0cKYHmAQIrFGmIgCX8Mt8ntNJEf38IPKnINZQRkeFaCqSTrzE1BTko66RYijUqaBzM5kSh3"
"csgs2VZBaGKYSY8GgbQ/UNZOkbejgU5A9YErWSgdTfNjMsR12yR6miwl4qv4FyZHOnpr+trp"
"JDWQVC9wgbSOKNklHDF1LQehRCzcSmKMjC8zwWcVBekrtEC1p6fyQqqlefPxVE0412CYiEQZ"
"P6YgUysBi+F8TSNd9QKjEvstySFoE27OwXfAR1N7DL0EwFZXLVFBwihNuSfU3IZXNu6a1s7l"
"VkPMrLHnY+OrVYORJ6uW1vyjP/3aS86HEbXFia59Nn3o/qN27msCdwo0mVeF7+T2u7om6dBM"
"Idoh9ff67AXNV9rzR/Y49CM/vSuM/AdygeCovDv+0/ChpoFuOtuEmUbW2rSREaVtN/H4ztgR"
"3DOLjdEiFLXBMlTX8yHA2GUsgCVTNyhMzhGl9JBv202iHQtYTYeTMjoD7HqOZ6mBPAmN2kDG"
"hhqnShm09IuyqcnaRXQ2HNlwu/RIVFIahsc0EoDlR4yFr7QQ0qMclP6zNSex1NRI+1DtGL1A"
"wW20zU/tDAkzDylXaX9n01lbufH0c5VBbefh+ltiVJx6fuyHfH0/HZDs+u8MHjBzr1rSmRPH"
"mLvLZlcyren7sI4yO4uu2AvytR7b8DjZQcR22P0jbG8/gpI0LQYnbwTaB4dj5P1H2gfcbqiI"
"PNKZRtjeAPft733h9n1cbOYO9PO6SKgsN0nDa8XU8ZLUSoKb7nNm6EyaPOE3M9yXwlDtjqB7"
"f56Ujht6CiAR7hvz0Gwkvb2YYShtd7BCNZPhzuUdk2yLGJ18iNeTwFpVrxlPEou2N+cScNdQ"
"ovw24wdP73NJN0ZpOqg+0twZm60z045388m39+McfSH5Bg1ljbYJfPq5McMOkvVDjR636IJ4"
"p2OtPlOVeQZdz/bRfv8zI5hq77c9Yu3/gP4/zB6d3r0F9FGG9Ep25k9Pf2YG0NnG3XngjUZ5"
"ow8YdX+DnvHpNzbuXEfA7zVmwhvkbxq4KSCftZ8t9M1itf0Mf7bnaxAGyXWZftbQzSVuyn9m"
"1eYNrCe+CkDsTXMVBCW+eI9Q2PvpekrtfjfTXXWG73tbvYkn/wyG1dulRDN+5mL71/7cXIdn"
"UsPP/nNzAfI9t3rDy7v+0Pe7n6Ijnp7O1lCPbb8nEShmTXqs9t03dVOXNI7dfwzRmZnG/lP2"
"o0e8+fmdRw72nKTbXPde7e7mXaYfGKa7exoq0UVftocrVaT2fWqnjXjirF2MwuONxtJHzPy2"
"q496w8f+bMCmbzOzP7/n19kudzX082je3ehnxg7ZP3rauRlZ9WvpPX36wVwVaedxU+/ayxov"
"67l8pkcqfp9dM+9iFDdoIKh0/nrGzvd/OL69oZpmCB615+nv4dO3E5pKZTOgQrpOBntm76Kb"
"nUfMAk/9sCXd1u/mLo6JE9D+ASsMLz1KWfdNNMyfWfp1BXGxC79QVrSgsRCd7c00cpHWJlJT"
"MVrtVVOSenNrFtAMT+25kRf1sQft3gph9rNR+5QkbpsIZHfbO2wxgz0+qarngY2BJd30Rug+"
"0/F9+jqL//WIuueC/j4cRiJYv9YdBKbye3rXfpVH03FxtNvPYdL12ulXOItK/9vSGPrDW+rB"
"DCGJ1xe20h1bH0CNi/AGT3bvRsb90C7Wltws2vcBDgw8rYJuvInP4ql46a70L5WcMudfuPs0"
"ms+ApDWeE8altb/X9/3Lb25vSP3vel0zsDxuaIEywRFCaD6ZlSp2u+xjyAnA3BUKk4qQLt6M"
"7vYx1px4DbhrlfmaTIKwnAXP8aczoWkBI9sCjU+jy9x2WGvziHQLPbGvmfp6n64WMw3sY9Xa"
"OkojzJBX9tVQfPeJvYCu3Ke45rTAm5GGZATOdHrYoSLJlAH+tZVDc03T8U5HW6uu+aZ+I8ra"
"aDGni/GtiroPVj6j19LaTml/1hSy8r0gw0AxJQAAIABJREFUwggRDvoBWMaXDt7404SnuwA6"
"2kGGHBqQ79Pt1O8eIc7QsZINK6OUf3AIQHwhIiZJt60uIApAm2uGkgrohl1PBdpgfycRrLLt"
"xGL3hLbV+KvELYGVKtKcsa18aoBJdLnVEesB9CB0rwQQ1XR3YwgJ9FsX6eN4tjIbvJIiiHYo"
"dV8dF5jV0nmiT4KfuLzEPkNV08CpDbwPkBUfO5coui/Akg5CQ+/JXwhYuI1LKPqgPp8kgQt0"
"U3S6jW+Sdul2CtaEMh5nh35hyVFSTcP9E1rZ+3JhI8mdNWa9/ImSjKXjfqHSRlL9saiqir/N"
"36dPouEFsU4oSS89mC2m5pV3tPs1+qenAMZjfts+hAzcsxukwz46HWGkfyv630ZevFulXQFW"
"6RJO27W1wb1WSuhLH4PpiCmRiKrJdlutoWdoLKJW2hrdD5kCLKDnZEeO5P4InxrqEyUCMTX3"
"hdbKt2i6CyIqj4BU9NifXQinprDakuJQid3nbpM2fezYEh5JOMMxwSUQt5lhghh9azHJFMl3"
"tpIcpVXiiB5Bw9GTvCCCuBOInAgt7vPN2kWl4iACTbHXdVF4cEl+VgIySEoJEhAwnKvjicUl"
"EQNR7Nhp3wFIITcJcaHfgke0TpVsJImt8lv2pCRmwzfYkhozAJpeNNMyvKcNqRkS4zhuvzVA"
"nGML0OU4mz/Yv8bpMvSRSHBZHg1OJTrj9+a46liaaiAwQ1qPO5ImHw/nay0069MPo4ELJ9YL"
"yaY+CEbLF54xBBXfdcEuKy1U1LSLWmVSGaSj9UPhujhrVcIGgNQJZjpOHRKyrqog9gXyNCoC"
"8LbkQ4FAEJqKbRCrWA9mD89+y1H6m9Gpu4r9Yzrg7cWWgiE7TqngRTVqDq5N02y0okwL+Ca+"
"oZggFnB/LU9+NAwJj8kShsvFdhMKa1YMOMce0EvFxgxATBWkLAiuicsl929ASkX8dRewPf7s"
"RxlKjimaQMnfO1OTbRGp8HDBgkucseaX50OGaY1uEfDDEFSnwwiWGsJhpCBiYDye+RChlW+N"
"QOYcfcK0ktSvPcJC5xztkRsqNco4xr7H1rP2LbXevoLIdz/dUFOdi2OLkNZT/gCGMJBClOFA"
"NSQUI6EXkySXxjwxXmqFhMWVWPFkSYaWs+Y0KmBjdAfuHBLDyRhCNDWbCSLAa1VE02ncotj5"
"JllDwlT+V1srSBLEKDQ9QpB3MQWKQUU2T2adbu4UMWE5xFcEJYbGLHu2TB41yCNRTMX+qm8K"
"tSI1aBc0BBQyB63iNAXuxv3IOjcbslq6ysNcqwpOeEuG5ELCpoKaZRRyLzywRAEFDnlUdwkf"
"Rat4WSnZcgSyxprBS6G5HA6zp5UInL7JiTem2DoqvYKVEloiUGEOT+/Sgg3i/dU4xWBtizD0"
"SpFGMbfDbiboKhJ1+rWyZzA6tCDVdySCXfmukImEBHr6nmax6yTdg2yfciXY8BLlppK1EPKs"
"Ee8n+s2jiqhBYoCIn5frL0pUR0PFKOyykvgrLYg+RnKpjICBHD5SbtQqNhpY+kyTKuBk/NkC"
"O7onsVlRpnYKboSvU/81yUSEDIueX8NAw7r97WnYVpT3jFSLcj9ef8dYfP8FwQ5SCF1RT6DR"
"RuaHPWozvJuTC3c+TSURUbzqkYr8pToMlASfaJuvPIUCvREQw1TrVdHqjCW9WEHpqyjcEnS9"
"Cqj7INSM2nI6fBZdwFmi9ifAl8cQ2d7G2DgtWYbJK8c6G2kFmLdSpsvif5wRxJgUYTLSB2oN"
"reMdTQ8jjr3JVkQc/gsYqzssWVeeHuLmce7tvtmQoyN0LC5P+8ZrNjA+snTl6YRUgzoaaugN"
"D0qhQcNdKBGFN2FISsmxCuggBJDAO+muFyYFzD+Rk7NqB9Mkz+OUlN8YsmppORpOIUbBQXI/"
"3VXSq5ReaiX9hlOiQY1HV9oYraOqOkOqilZSGkc+G9EwyckROg8NDps72PufhGKVGMItZZic"
"KrbOlbSnveszKZs1ak2t17AiVcnTmgVVrmLDNvQLsIQJFbHtV+x2pXZx+DDsyQkCKYbUrGZI"
"w8dVGemohyjbYldVsLTk8A5ZIqqVvM8Pa/Kpz1A6rEfQEIMIO25XldADQUoiha0sktkQlGVk"
"Bqli5zK7EOzkNZgnKouKCFVE4gAcgAhBG5oXEdWScDud7is8a1s9tlYQHLxUiI2ryOqIpBp5"
"ojj7Eq12xBfCBw93JVbARMSwCKkv1Bd2KB4t8VRl/DJ7wFYm/mZsQHQAhdfkNGxdsLthHEKA"
"H6hyNiOIz24f4ueQJaSPiFAiHkI6Cm6fKtpASSWy0fRjQxBxBauFEA5XxWSWkhI3MxyDCSFG"
"FEnwqMERYYR96A1okVtS5WHcbqcVvoeiMjejIo2D7WnaTxMnmRaCnDXoBzZ5YNb4hyEZDrur"
"wtMreHQHYw0DoDNxzwQI8kgJkKXFOMxVFRucQQxuZjYuD6yB3nBfgUAcKQNoAzm8KoKkj75L"
"ZPElNeTeOSVS1WQo1kuXsL+Qw8y9isRWcTjTGhzN7g2+Zl+5upkKgU8QTQQBX2mM/gQ30bwv"
"HyQWFqdEEXIn+4tlbKerEEYIBI2o4giBqtxpqnqgFSuagnJnNE0FuEp5Y+sucTygdxLX0+I2"
"JNQQf6p8OJjyTIegtRhrwle4XcE/peDgagbHZA+GKgV5a7Ij8qYwilKBC+ED4rt7ORkGTuNx"
"JTqQmY3vGa0eDxw7yUWEMZj8BRm3eocIpnQG7GfFzUyxT6pvuwgm7upAeKgwS9AOiKoynDvT"
"T/KHp1WilQsbaSO1F9upWf/zzAgV7uSoIqapsYOoU5ZCWytITwzePLxVHA6s0UXiVlHQXxQS"
"YyUWgy42mLPbvMDtP0Fhvu2WHVqUGDzJGhh/EeMhUHs6iBsTvLbpaHWsQDaGGBIh3X2U2TQN"
"jWV/JMaK+pMkCKmwZeFJkeJ1FyQlNdtI0CUYAfALNul7CTtg+TCYGtqB8wVOkObpUphH4+rH"
"MHhFzUo/guOfGxhwXuoDENSNMKlrvFaCmqD4huNaHC88iQV4ILELYmXjd/+0Xg4WJ6/Yjh/7"
"5fv5VuQ0PPncgUEqKBX91+cekCQ7WkqzPxUnA9VpVLJLllOLEy5BBFaj4kEE0QnTJ2a7LFIw"
"V7EaglORXKlkvAXWOQ3gOccCVPOEpF0Wwp2TfIeNUCkwSZxru2FUfyjkD0RskFaB9+SUriX2"
"tdYU0msmVlpRaIszPkQcSoO6B8Bz9BBs7PNO61Nl/z2awB+ErM7Ugd1qPE2NxBChTzSbJvzE"
"oyMYIR00gqP97ifEik2X+v9wut99QEFf3T9STUx8q0wRAwSJM25I3+i/yIEw3yuoR3lwMmet"
"UVYETDG+MRqXHEuKpvwqZfZFtVacZRmaSaeQ+YI2IRA6bUrS82zQZBEBCICrYaDFd9f1M7PM"
"vTgr5qqhpcC0BCkEYgxppoAAGUbADCyYic2ReJ1S8R9s0Rqwk3ie1RBB4GiHEuVQMljrXoHh"
"WfERJ8MVgh+nQ3nkGc56cvivAbdVB0kSXJDguKwzA64UFMkzCYT3Yvgh/jjwPmbeCntRpxgC"
"qwI4oFghlUmnw7SSkFBJdENYT408HPEL4gwaHTkJAAiSrk5aUgKmID2AbmjEQUzAPrwPmAmn"
"7HFymYONZjtJxumuHBPCgoqBMKQzeKn4cXF+ugfrjkADRx7SjUKfQs2AQQHuhFl6xUHmMHUf"
"GjhYBHsHbPGI5SHKQwS4ckFZIlXHFWfQoXpqx2VwStoAhBNLBfqwqxSUoYJbIlmgpyS3jJF0"
"14h4LAQJD5pPfsjo7H6BEYJyub0VVDHwYGCWRAKNnE3EolUNSocFK6B2WnFcl6hUwVGazjpd"
"/Eym3SKwVhCnS2nidiBAhD7h/BBSrlqSUuNNjAzoAlOxaAziQ5RlJSPCvapkPk7gAF9HyunW"
"SK0hPsK9JI4qC+SnHBiOWfVgfGfFjewF1F7IhCPtcsxNgXswfFfcj0BFuRLQoq6w0gLu5Fhm"
"wEa3weQh8VlEIklhoctUIyQUiSGbQtEhkHzxs1TSVupGJwV2C8phMIgk2CNVuIFdhNoYcyfw"
"lNQ3CzyIlMyqqRnhmSIo3iOwtrUKQDPMtXpwdltMbSKWCODdTExdqKjMSEESySQoMNF5jyIs"
"gxI6w2FgqkCy6rOmlf0SkKGKQhDZSUQV+AmR1cBTC1JyFzS6bK6eJhviZA+HAWWuHpMhAO+S"
"FZMipUMxkFRh3YamWAmp2vIUuB05KSRUJLRfBamEmUCUJyXzRalSS/FgJbR5SQoPQuQdwunQ"
"2NRfwfGc+KBeVoKYFTqLgbYPviBwuaZQP+LppMKDsBCf4L82ChwpziwcBpNIBAaknsYkEg3G"
"hCPhQWecd5klCj9AVW+55LlucbIHJRJPbBCswzrIwpS1wDnBPhEENTH3dO61ETdUVEraArRW"
"GdSwcXXsdshGmDEcMh7Gdbg1TDkgGMJzK0Hs93iuGml8OnUzpgvMkAvCkBiVVMwvIaazAAk+"
"VWM9tJJScO1ACfEjimuV6jMWTnqj0s5x0oI8uiHLEFtAUxKeAsgKfVixNkaibC+May6HknQY"
"AUAahAnmojDISkMUSAAiKo05lwxiDJmC8jTqTpYK1/hftwHFmwTFsIy0I1Qg/Bt4G/UZTdwG"
"X0FOThLFSrKgNuhs5VS8jb+gyA7MFalSGPq9Huae7WEYJ7GnJUoFMdxtiK4kcYYB9JDQtppN"
"oNjg9iV+1iYrqB7uMatXXbMwaqtL0EOqC4UVT5cRksAEcQ4cAdiuiApTiGBt012IoVi86+5c"
"vAd2H7tC/BJ1kxTMs6mzHyWtAPttIElwZCO0KMhahE7NUGrKcnLITEaUsRfkH2qqYoOqtCdQ"
"Cj5UEC695IbDIJGIwgbBGbm0BdVkkA3RIoOgLwkuWChjNorBCRsXRMoEZGbFsbc2Az/ZJl6L"
"QQ1GnK4jMvoJxgJrAOUm7aRN2+AKcjWhpmrBEjDvtmZGUIHkhbldqktoepRg+YwiLYgyIUJ0"
"mAXR3cKnJwBKO9YqlhM5ZDFO2qkdjnvqOrAeig8H2Y7Xrr1mL54rwQhQh3V08o5WrIdinRj0"
"swLGHaWwbbWbbeCPIgwJaT4nFjDXWG01q5umECidqWDjZpdUKmJr7g3yxAAjEzQR2EyDA4ok"
"k0QGGQB0a0XHQjSKYIPDlIHI9m84NPIh97mUDjLyeyhWSga5jXcc8Y+EQBxqCqAyeTnKI/R5"
"AIKgBhRlS5zNGb3JMdkTZC49hZXStFDpp4sZdGPmqYbOvSYnvlgCPkSmE87II+Ol4iRSS638"
"bEZH5lptZiIAAp5SNaKJOsXzTJTqDT8R2rkLHZMI7jUsFSoShYqD2zVc1Roqiq8tIg0Wzsp5"
"tBVF1dMBJU3aA8Fa0YjJDBNbDO4ifu5vYDZSO5wKpazUwrTIi/w7n5+oPMyRUmQ6A6VXFF4L"
"DrQPViFg8QABQCqqrzlNQ63Za/iCKWF4F6wP9RdO1ICn+lRLghg+g7gXpc4KFEW8QS9V3Yra"
"w8B98hJsntAMcKtREQUBIOiDw6JEhVKCgNw5qnk1W4HoqEWrn/RUGaxC1UIuLIy+iGEmXkXe"
"6IJQrmOgrboKaszxg8qwdKicOVIRHMPFzqhnNBgqzMa7KinUwY4M7mKZZmu9VSIJTO1CWWTI"
"baYjWO4AskFIUszvulpBDBSuDsgWoDxorCMP6yvxMnNh4bEQIwyBScw0JyiBM9XPaqGNZxNU"
"Jl9Ks0LB0eJhrVIKwqm5nbhXhvZAid3LKQ44MXQOART0QDBHopjSSCDniEsi53ITL6PjuJs0"
"yyjChhxsSrBfjyGD4LqgH+I60sgyvS3vKViYVFtjIjmVFsx4i1CkaTCcKxeQ6iBvPiY4oBkq"
"1pIIDUuVAaNabNGw1arCTEoh/R4bLuL1K7eICkthJTQkpkAiwz/MT9GH3QNoBuT+iM4VqoM1"
"wzEBAC551MarknvgrxLgGykAqnSYnmBU0EIbYR0zhTDPqCNlFCoa0TlQApUZ9Cmde+ckBo7q"
"pE2hvYqTOCtQRkFGEHbsD1sGgFwHgVAiG0IvkMcoCkGRFwiZhiOBMleRSg9O70wB4mDWLBCn"
"OIbHZ74KVVm3zPuLgTJ7axPG6ngPv1WELGWIZMGOsKuIJ0bu03F/FArYMWTunu5KQ0HRYhaK"
"8/6XCgKF0GAk2JqnLOANxtkdNhyhxBm4p/jgTKTCAG91joUCKNAOER2xlXYY1iss+TpxKhVv"
"VxRLWpNNiadPSirDuF9AcTTWcKlo6qZrgIzDqwaD4vkblvKZmBhQHUGqVAINsEkjylh0IHiI"
"gAYoj1r1u0PBvK56F+2BKDOXILWCoyAb6yeJj8gdOGPMjqlwAjERmrRPFdKRoGcu9LDxoKKC"
"zBzGAe1KAaBIBN+EhyJMu1iscz8X1EZxvESUB718ap4XkdEuyTNFQnsi+otiqVDUmI1zkcai"
"tOouSjKTi5mwP6gBZsJqKZZo+TjiFstIWcWB9iEb97UCR4FtSJrGB6w4jtXQrI+WLo2g7aFH"
"QJML5wdhbmRY4hekKoC5kYM/x5/zTg8ZkbUpDYyczztDEKvV0gpUhbpJDeKKcQQGoCJSP//5"
"z8u/ff6VP7XW5/7v5yJTVfGUGc4s5tkkfIY/VQuKXkJrYIBI+O/YIjUm+YVMm7DkDmREXLFb"
"i9MSlMPh1x6AfM2JOE9x0ECuw4SpAiwUSFRhYDg6HBiUDZsknCcqJ26IEEcERrdEONwkl2hR"
"mEqIWCSrjoYEBbvPIzJKTpQZO4CIKFFWj6OhKVm7IEpwaSBUDBi3eK3X0S8UXjh2QmzFaPx9"
"1YKoy1lSBiTQS8dCoElV1KSiPOOwir56M2Tl1fkPSGdUINxQSVHs0V48wvuYnFUJEELQB4gJ"
"SonxIaV2U4ucCPOxIjQ/8D9QS/Qk4nomDRKlsGx8JjNhSKcsc7f+7fOv9VFbpcJSjh8ylaJh"
"FeY8VDFIIBvx/7Wb6KMaroR4766q5FOASIW3tVxLaRaO0RXUPXyavl25pE7pwILZAyG/tmIO"
"axvsnoMwcuTUfTdLpa1r8DupzHsksmetCX28Bi582yaP1JpDXh+kyjmIT4Up6nuCWqVi/qdZ"
"W0U0oTpiUUOT+FUU0wUYF0Dr4MzBCur5QnaZHxAFLnVFlBQAvFovSieHRgFwNTFtMvMSAk3w"
"pZFCiQgK49lo0NPqeklC9ViMpTCpqqOIyVxZGPGuVYjwATDsCIsWwPwVYY9pNWw9RUcXKp0C"
"bzz4FXhsDfCObP7fPv/aHysxkeEMqmQTjewzW0bFygSEV2EBXhUpL9rHfYAvhaUTVFlB6rRI"
"QhLrRItwjhwoKwLPVnbQ72ORHCAnzjAj3DI7A7AGosDvqohKVKma1QshGQwkIruPtokJ+foK"
"opOH4tdYpogZjpRENRpBIkgOFTKPYhPEl8eMel5CnXyz3VMpxwJ4TAVi1HP9cRFS7RGqocCc"
"cyNqBHonUUIZlYESqQNKfO3btpFw0toUeFYC8fjI6I1Xditml7JNoGxP6UQPyeqs5IDVYumI"
"cpFktg6dtGxp+CDKEn/93HPPPP1zd6+5O10gwrn4KGeKiFy8ePG//bf/Vkphh2ut09PTixYt"
"mpiYmJiYyJvil1LuvPPOpUuXLl++fPny5XfcccecOXNI5FV1dHR0eHj4jTfeeOutt65fv85m"
"VbWUMjAwsGHDhscff3zdunX2/blz5773ve/t3r17cnLSWiilLFmy5IMf/OADDzxQSrly5cq3"
"vvWt3bt32yNKKStWrPjYxz62YsUKERkcHDx69Oi3vvWtiYmJycnJycnJqampWmsp5dKlS1NT"
"UyKyfPnyz33uc3feeefw8PBf/dVfXblypZSybt263/7t377tttu+8Y1v/OM//qOIzJkz5zOf"
"+cz27dvfeOONP//zP7927dqcOXM+/elPb9++/fvf//7f/M3fTE9P33777b/1W7+1ePHiL37x"
"iwcPHrzllls+/OEPP/HEE9/97ne/8Y1vqOqOHTs++tGP2niHhobmzZsnwAFqfOATn/iEoYsC"
"ZLTgL4n/Fb9E+WX7cUrgiIALpPfKpqHmdzOozvXshOSWpPtzbrpp0N+fbO+U+CNcrWdInUf5"
"0xo5gHJpKZ3nuqc0D0xS1k4rBdd15dwOLV8bA8S/oYMy42NyJ6T9tb0sxF3SjQX/KzGUjkCl"
"pO5K81O3T7x71u/6X9RPV/1FJ6lPpfS2RsOV3uEALJRyl07T7E1/46E0aYkq+tWvfvX5559/"
"9dVXa63L71o+Z+4cXAJ5iaroQ5sfEpHx8fFvfetbBiVTU1OGLLXWBQsW2L+JyLXW0dHRCxcu"
"nDx58uDBg7t373755ZcPHTr0zjvv1FoXLlxYSrnllltWrly5efPmJ598cvXq1dPT0z/5yU8m"
"Jiamp6et/XfeeWfPnj2vv/769PT0smXLhoaG7rnnnq1bt05NTZ04ceL69euTk5NXr14dHh4+"
"dOjQbbfdtmzZso0bN953332nT59+5513pqenL126tGfPnmvXrq1Zs0ZVX3zxxWvXrk1PT09O"
"Tk5MTFy/fn1sbOwjH/nIhz/84QMHDly8ePHSpUs/+MEPlixZ8vDDDz/22GOHDh0aGRk5d+7c"
"K6+8snbt2u3bt69evfrVV18dHR3du3fv1NTUk08+uXnz5tdee+3ixYt79+6dnp7+4Ac/uHbt"
"2l27dtmj169f//TTT589e/b48eOHDx+emJj40Ic+tHDhwn379h0/fvzs2bMbN25U1Tlz5txy"
"yy0ism/fvqzygU9+8pOtBfQ4roQzpK/7+VOPJSSbyF5c+lzQ/ti9OUC659ES/1v6uYTfLB1k"
"nKG3HH4fB86Xdzylhc0ORjp3JFDNgr1Af3cgCcDs0xu20/tD22gj1I7XdwCyj1byV9kQeu6b"
"YVRtjOwCdQemO3fyy1YdbUuiFn+wpKw/QuGSzredf4qykegEMLXv8Gb9U1vfUVHVZ555ZufO"
"naOjo9/85je/9rWvnTp9emjRwmXLlrW3yEMPOSC+8MILhiYTExNTU1OTk5OLFi0aHBxUVSNu"
"Ru6uX79+/fr1WqtRyFrrxMTEhQsXfvSjH+3du/f73//+kSNHLl++PDg4aLcvXbr0oYce+sAH"
"PnDnnXdOTEycPXvW7jJgPXz48Msvvzw6Ovq+971vaGjo/e9//7Zt20opJ0+enJycnJ6evnz5"
"8uuvv3769Olly5YtW7Zs69aty5Yt+9GPfnTt2rVa66lTp958882xsbGRkRHrnohMTU2Nj48v"
"Xrz405/+9JIlS3bs2HHhwoW33357cnJyz549ly5devjhh5966qmxsbEjR46Mj4+/8sorCxYs"
"2L59+9atW19//fUrV64cOXLk1KlT27dv37lz56FDh86ePXv48OF33nnniSee2Lx58759+wwl"
"ly1b9uyzz05MTBw9evT48eMXLlx45plnVq5c+frrr586derUqVMPPPDAggULBgcHHRCT1gwQ"
"+yqvq99+ftwFAu0XZpvQ17m9tXEaesczGuPv30zXZGUm2JmxHcDQzNfznzO6xw1vzn9Ln/G2"
"cNFPhnGVzCzozvNmQczO3aXbhsiM1/eAVJ940yusPty0M4x+emtQscc6ei7ukUP/oDJzcOrc"
"NgNip3+2qumniDTAW2+99aHNmz/60Y+uWbPm0KFDX/7yl7/73e9ev3Z9+fLlc+fNs4sefPBB"
"y4JXr169ffv2HTt2bNu27dFHH3344Yc3bdq0bt261atX33XXXXfcccfixYvnzp174cKF0dFR"
"45KENn6mpqbOnTt39OjR3bt3v/rqqxcuXBgYGBgaGhoYGLjrrru2bNmyffv2BQsWnD179urV"
"q3bL5OTk8ePHX3nllYsXL9pTNmzYsG3btsnJyZMnT1rye+7cuVdffXVsbGzVqlUrV658/PHH"
"jUsODAwMDAyo6uDgYCmllCIiExMTV65c+cQnPrFixQr7acuWLUuXLn3jjTcmJibefvvtAwcO"
"PPDAA48//vidd965b9++69ev79+//8KFC9u2bXviiSeOHTt25syZ06dPDw8Pb9269amnnjp9"
"+vTJkydPnTr19ttvb9u2bevWrcPDw+fPn9+3b9/Q0NDP/dzPzZ079+DBg2fOnBkZGXn66afX"
"rl27b9++n/zkJ8ePH3/00Ufnzp0rIm+88UYOhgOf+tSnZrOI2T8ymx0nEyl9L+pa7OzPmckW"
"+8X9nttnQYSeXt6ch8z066xftP5fZnrWrD3IPtftSheR+jGlGZ8w01NnGrL0KHA2PdxgTDdx"
"i/QG235xc/aodsNQ9lP0U0OxM5t3Jy8SVZ2cnDx27Nhbb701MjJy9913Dw8Pf+UrX7lj6dK1"
"a9cqANEqcYsWLVqwYMHChQsXLFgwNDQ0NDS0ZMmSO+64Y+XKlXffffeGDRs2bdr0xBNPfPCD"
"H9yyZcv73//+O+6445Zbbrl27drVq1cJjpZWG+s8fvz4nj17du/efeHChTlz5ixevHjevHlr"
"167duXPn2rVrx8bGLP81JD19+vSuXbvOnTu3dOnSW2+99d57733kkUeuXr166tQpu+DEiRN7"
"9uyZP3/+ypUrN2zYsHHjxrNnz95+++1z586dM2eOEbFr165dvnz5rrvu+vjHP66qIvKtb32r"
"lPLggw9u2bLl4MGDly5dunDhwssvv7xixYqtW7c+9NBD+/btu3r16vHjxw8ePPjII4889dRT"
"ly9fPnr06MWLF1977bUNGzY89dRTk5OThw4dOn/+/JtvvvnII4888cQTR44cGRkZGR4eFpFn"
"n312yZIlBw4ceOedd3784x8/9dRT99577969e0dGRlavXr169eoGEFVV9b0BYj+rePe//cw+"
"fR7S3/OlxE8z9qwfiHaY07v99I0RN7ynP+37/9tHZoCp99LST9vUDHg/q8xneJTM9uPsDc1M"
"ZA8dPvz3f//3f/Inf7J///5HH33xWpYMAAAgAElEQVT0P/2n//SLv/iLH/nIRxYuXPilL33p"
"l37pl1R106ZNBmFjY2MkepzGUfW6of3DvhkcHBwaGrrzzjvf//73b968+emnn37iiSfWrl07"
"NDQ0OTlp8xgZGa9du2ZYtmfPnvHx8VtvvXXevHlWztu6dauInDlzxuZtpqenz5w5s3v37osX"
"Ly5btmzJkiWbNm26//77R0ZGzp8/Pz09PTExcfDgwaNHj65YsWLFihWbN29euHDhxYsXWeK8"
"cuXKpUuX/sN/+A9Lly5V1XfeeeeP//iPv/vd7y5atOihhx7auXOnYdbExMTu3btVdevWrTt2"
"7LCM+Pz586+99tp99933xBNPzJs3b//+/aOjo6+99trKlSu3b9++aNGiAwcOXLly5cCBA/ff"
"f/9TTz116tSpkydPHj58eGxs7Omnn16+fPn+/fvPnz9//PjxHTt2PPDAA3v37n3wwQfvvvtu"
"Edm/f3+jpy9/+cvyb59/+/zb52f5ybPQ3c9//s//+eTJk1u2bPnQhz60detWq6/Z59KlS7/z"
"O79jLvmpT31KRC5fvvxnf/Zn4+Pj169fN8omIqWUOXPmzJkzZ+7cufPmzZs/f/6iRYsWLVq0"
"ePHi22677fbbb7fpAnPw6elpg6Tx8fEjR44cPHjw4MGD58+ft0w2Q8HAwMC6deseffTRe+65"
"x7Y+GR0dffnll1966SWb/OWjja8tWrRIRIaHh//pn/7p3LlzqmrzNk8++eQzzzxzyy232EzI"
"yMjI6Ojo6dOna62f//zn7777blX9+te//pWvfMXKi9u2bfvsZz87b968F1988e/+7u+mp6dL"
"KY8++uhv/MZvDAwMfPGLX/zOd74zMDCwcOHC3/zN37z//vtfe+21L3zhC1NTU3PmzPmVX/mV"
"bdu27d2794tf/KJNvn/uc59btWrVF7/4xe9973sDAwMf+9jH/t2/+3eHDx+2C1asWPHZz372"
"ypUrU1NTDz/8sIg8//zzWTua/47Z/tk+tt5C6s1e33t35y8snZi1tZu66P+rz3voxbu+lUtB"
"Ov/7M3jwexXmTz2Wf9VP/4f0sbsZrp2lkyr+SspP1y0RqSLPP//8s88+u3TpHfGcnkeqyCc+"
"+UkRuXDhwn/5L/+lYisn/qMULgcLhmgXDAwM3HbbbXfdddeyZcvuvPPOu+66a2hoiJfZf0dG"
"Rvbu3btv377Tp09z8Q3xcfHixdu2bdu8efP8+fMtKX7llVf+5V/+5eLFi3bNwMDA3Llzn3zy"
"yccff3zOnDmTk5Pf+c53vv3tb09NTdmv73vf+37pl37Jlu8cPHjw29/+9jvvvGNz4j//8z//"
"zDPPlFJeeOGFv//7vzf4W7ly5e/+7u/eddddBw8e/NM//dOrV69a8fR3f/d3b7/99n/+539+"
"/vnnVfWWW2751V/91Z07dx4+fPgv/uIvrly5MjAw8JGPfOTZZ589fPjwF77whevXr8+bN++z"
"n/3shg0bvvrVry5cuPBDH/qQQf+RI0eee+65OXPm3HnnnZ/61KdWrFhhy27+7u/+Lith4NOf"
"/nSfUnr+FO1OpJZQgbQX5spYLpGVWBDeeyW2Ym6a6fxRBG/z9+neLAlNUZUbXDLrrf3/JTP9"
"3jO2fs/u/D1T73pWP9m3vQXRzhNv9GmqviGfvsPpK768Eme2AfCX7g038ZlNft2rOtf2Xm7T"
"zzbtj32ste1QesxMj2tubG/r00mTXPIbW9pTim7atGnBggXNHT1OIaoPPPCAiIyNjb3wwgs2"
"T8KPgZR9aVmwzYHYT9PT01evXh0ZGTl27Ngbb7zx8ssv2+zq6OjonDlz5s+fLyLz589ft27d"
"9u3bt2zZYrntlStXRMSaunbtmk2/XL58ecmSJQsWLFixYsX27duHhoZOnjxp88hTU1PW/tDQ"
"0LJly9avX7958+aRkZGRkZHp6emxsbHXXnttdHR0/fr1y5cvX7t27dGjR8+dOzc5OXnw4MFj"
"x47de++9lnS/8cYbo6Ojly9f/uEPf7hs2bIHHnhg27Ztb7311oULFy5fvrxr165169Y9/PDD"
"q1atev31169fvz48PDwxMbFly5bNmzcPDw9fvXr18OHDo6OjO3bsuO+++/bt2zc2NrZ///47"
"7rjj6aefXr9+vRHwn/zkJy+88IJJ4Nq1a0eOHNm0adPChQtF5MCBA1m1Boisr0ivUgFD9I0Z"
"y9q9XpIULB7UeA1sBU2G18CWNK2Tkc5KxK6JhjUVtR1seFHT315j7vm3agvQvmqnB5CaJgM2"
"Oq4vpZ+QtCtPPLd11s6Qu6LyQacqYw92hUeKpgV52mcQ0sSs9LOkeJZ+p6A7kJmQtdNgF2l7"
"PqFFiK0P/qePx0rrfTue/ABR9VeVtOex/UMcTCn/3VppIwqBPOI763yO3zxbp/t0/0/rWHr/"
"/fcbIP7jP/6jLbuxNTd5CU7+0qAwgyahc2xs7MyZM4cOHdq1a9frr79u6e3Q0FApZcGCBWvX"
"rt2+ffu99947PT09MjJiTdmqnZMnT7766qvnz5+//fbbFy1atGrVqscee2zu3LknTpyw2uL4"
"+PiBAwdOnDixYsWKpUuXPvLII3fcccexY8cswf/xj3/8xhtvrF69evny5Y899tj4+PjRo0en"
"p6fPnj27a9euVatWbdiwYceOHT/60Y/subY286GHHtq+ffuZM2dOnjw5Pj6+e/duWyG0cePG"
"vXv3jo+PHzt27Pz581u3bt26deuhQ4cuXrx44sSJc+fOPf744w899JBVGIeHh2+99dbly5dP"
"Tk7+4Ac/+OpXv3rx4sW5c+eabG1a3FaPDw+/aVshmt0N/Nqv/RoU2AOLjjUOZKay0qBU58Ow"
"aI7dsT9rpahISeYFQOusq5UEdY3ZpkDa9Vxrpkgy5d41aQXWSZTX9t89VpvA0UGj5SSJu/ay"
"6ehzRhUblLR+WATrh1pukQbT6Ke0EgaOZKFHTwo1oNJFaagLPeyCgfagEkJGA6uOCA1GNxSz"
"iy5JgtyGTRUZQ9GAlE7A8v5FwDZY7J33xvXFB16yqtuL8G8Q85IMUFphaDYWDkGakJ1CnYm1"
"uNu1T8y6hZvZD/ffv1FExsfHf/CDH9x1110rV65cs2bN+vXr169ff88999xzzz3r1q1bs2bN"
"qlWrli1btnjx4vnz55dSrl27lhdaGywyE5+enh4fHz9x4sS+fft27dpldb1bb721lDI0NHTf"
"ffft3LlzyZIltl6aSxrPnDnz6quvnj171maZ16xZ8+ijjxpc2tsmNukxNTW1atWq1atXP/bY"
"YxcvXjx16tT09PTo6OiuXbsmJyfXrl27adOmNWvWDA8PX7t2bXx8fNeuXaWUjRs37ty5c2pq"
"6tChQ9PT02+99dbJkycffPDBbdu22Z9TU1N79+4dGBh46KGHHn744eHh4cuXL586derEiRNb"
"tmzZtm3b22+/ffbs2TNnzpw4ceKxxx7bsmXLm2++eeXKlbfeektVX3rppd27d09MTMybN8/Y"
"okl+06ZNy5cvF5Hh4WGoW4qq/sM//INX8nxPkeaVMvFtA3OhQ8W3yulTAlFWSvBKVLfwpyLi"
"m//xJ8XLTXittKdQk2uWyrfWaupMv+IOq53dLit6i4k7jDJf4e9i2aycuP+nF6XS9c0AQ45R"
"be0diPQpG9XminRr91/RuG8cwcGqSO2Rg93IHQ9TU814o1cQWjSmfdUSXRe+KhlCpVZmrxSb"
"zeD1vYp355phKPY6m6XqN8Mv0q8k2Fu0S91uHpreTU/jUWxJmkbnD1LsMOyvyiehwaj8tonr"
"EyIy55ZbejqvIvWXf/mXRWRqauonP/mJ+bBBW3rlWZQxTsQmT4y1GUCMjIycPn361KlTly9f"
"HhgY4JLAgGORUsrcuXMtgb377rtZmvzRj3700ksvHThwwNq3Gw2VPvCBDyxevFhERkZGvv71"
"rx86dIgNLlu27GMf+9iaNWtKKXv37v3a1742Ojpqt69du/Yzn/nM0qVLL168+Nxzzx05csS+"
"37x586//+q/Pnz//hz/84XPPPTcxMTEwMLBq1arf+Z3fWbJkyfe///0vfelL9rbfjh07Pv7x"
"j4+Njf3lX/7l22+/PTAwsGbNmt/4jd+YO3fu3/zN3+zbt6+UsmHDhs9//vOTk5N/9Vd/de7c"
"uUWLFl2/fn1iYmLOnDkDAwM2Olsg+ZnPfMYWNn31q1/NvjDw65/5jOcnyH8igIrPLPmr/B7H"
"PfSZnCwse0THj4mg9BReuC9ACrxSJPGyJrFhVhisQiyy4qnCHbwQi7sRmlQGYzMywoSmaJPL"
"q3oaZrbBRF/QMy9JgVyqtpyUibBLy8SoLjo/w0GFsvOBSRqq0VQBFWqEmZ4ueK4pKt0UrLPE"
"nudo3sdst/+/zL15dKPHcS9a/QHgCgLgCoIrSHABd85wNlm2ZcmSLI1txbGSyJFkyZIsyfuS"
"5Pqc+/7JOfedm/eHT05yjp8TS4n9nv1iKydWHG+SYnk0lqx1RprRcMjhNtwXcAFJkCBIAATB"
"7/1RXdX9AaAsK7k5F7Y4JL7+eqmu+nVVdXU1kcYwVBvkI2AtXrDkySqtU8T9kMk5hGHgObcM"
"qsoBUaNK7QetBaCSuioHICy6OajxUV+JLpqRrewQyaW6Q0UNg6gtMvRyoJ4ZlEfYIGannmga"
"siF7AILnmnulDUQIAHH+/Pm//uu//t73vvfjH//45Vdeyc/Paw60GEKJmhBGMNiO8IexhLj5"
"AOTmI5YARkneBXY4HG63u6ampr29fWBg4Kabbjp16lR9fX1JScn+/v729ja7HfFFxNzh4eGh"
"oSE8SVJQUODxeDBC8ODgYHV1lU3y1dXVS5cuJZPJ6upqt9vd399fU1MzPz8fj8eFEPF4fGho"
"aG9vD+O0jx8/Hg6H19bWAGB7e/vSpUtlZWWNjY2nTp06ODiYnp42TXN1dXV4eDgQCLS2tnZ1"
"daETMBaLXblypaGhobOz0+/3Dw0NpVKppaWllZWV7u7uEydOLC8vr62tbW9vT0xMdHV1nTp1"
"KhKJLC4ubm9vT09PHzt27MSJE+FweGNj4+DgoKSkBLfdcZJdLtfHPvaxYDCIAZLj4+M082AY"
"hu3ee/8UQOrrhhRH4DkEKe5sFmZYBZz1hNiDeASY2TUo0w1FHURYREBjfYvAqNoUy8ilUbcw"
"dVlAoNB7YBhK5jTUIE4WhkXopOlN7+ARYWC3mSaomjWq5EOJtlCSBUxUyc9CWEgmwYXEU46J"
"h8ZYQS/RNAptFjQPnAEk8zoAgMJSoqDBI5JdkrMjiDBCko9dcQyO2E8GQ/SICAAV6qmZzIZg"
"7FFk0oaveEsvgnPFbchViGeK0FQQkTQzVGhTLjsmycNrqN4LHRWZZJStnGSEV3kqjFXTekPN"
"MeNpbA5CCHj22Wf/4R/+4cO33PJHf/zHN930oby8vB/96Kmi4sL29nbF0ALa2yUgRqPRVCq1"
"s7OztraGEXYLCwtzc3Ozs7Ozs7NLS0uhUGh1dXVjYyMajeKOBypBCAGmaebl5VVWVra0tOCh"
"t+rqarvdvr29HY/HsQyiKgZsv/3222traxgBXlhY2N7e3tfXl06n8dAeAuji4uLVq1fz8/Or"
"q6srKytPnDhxeHi4sLCAwL28vDw6Our1er1eb19fn9PpvH79Ojo6Mda6paWlvb3d5/ONjo4e"
"HBzEYrHLly97vd7m5mZ0C25tbSWTycHBQTwrHQwGr127huHi8/PzXV1dAwMDkUhkaWlpb29v"
"ZGSks7NzYGAgFostLi5Go1HExM7OzqWlpWQyabfbUT00TbO1tfUTn/hEeXk5n2WemJhAZpCi"
"+fOf/xxNAGAjVzOYQNfkBdsE+IDNaWRyk1MAk81stRGwHOfwlGmtVGY9NnA0O0dZErnMImG1"
"yYXIZS7SaMyMYCHBZjSWYpPZ+o1eUoC0hsDqXhBsL1p7T8/kKDUTlfugl0XCZdjNOuWpL4o6"
"VitWUFFKWq03x1UC+SZApWcGU5iaVajGIvQOqXFoZLVYxfIZOkA4dbiZ5XjhjlFm7hwUIw6i"
"ydDbpTxvXJ3JuZl1htC8Kmr4iqhCugREpg8H9HZplPiFST+YLtTPLA+JoPyvJtUgwAR4+KFH"
"PvWpe26//Xaet3Pnzv3oRz/63vf+HxwbJlr62Mc/BgA7Ozvf/OY3Y7EYYhZKih6PDQB6LA6C"
"u8PhcLlcHo+nvLy8vLy8srKytLSU7WUg+3ppaenatWsjIyPb29t4OFpf2aurqwcGBliTikaj"
"L7300qVLlziwEXPb3HHHHRjhvLKy8rOf/WxxcVEIgRb6DTfccMstt9hstnA4/MMf/nB1dRU7"
"2djYeO+995aWlq6trX3ve99bXV1FwDp79uwHP/jBZDL5ox/9aHBw0DAMu91+11134ann7373"
"uysrKzabraam5oEHHnC73T/96U9fffVVTMDz0EMPVVVV/eIXv3jppZewY4888khBQcGzzz67"
"sLAAADab7UMf+hBuVQkt280vfvELTe6F7d5776VlVdcINb2PlBI2lIAWUFCKDCi1BUAAOcnV"
"Yq3MVGk6qup1VVSabMCmpVIJpWVrGHq7ynTGbgLrm7on31BKhFIdpG7ENgoNgYx6KkGLPEiv"
"OMhucoXcMVIl5If2qsleVhq1rrdq6pF6mSxhzVlgCCEMTkNIzwVIv4Yw2Esg5YRKC007Eaxr"
"yjtwyMiVozF0w15XsaSCJJQSzdOt9CuhexekJmfZbJAbahbWAta4DM7jAzKZq2QJVvclj0qG"
"AUFLgGQEqZ4ZytHCdBTWfRBWVlndxqmmcagtadSjmUUlXrBdLtVzg7iBGFawIiqn05AjBfj+"
"9//fRx97zFlcLAzpuShxuX784x/j9qZBo2prawOAeDz+zDPP8AllPYkW75zw4TzeYj44OIjH"
"41tbWysrK7OzsyMjI5iIYWdnBzPl4ODdbncgEDhz5kxTUxNu/u7v7wNZ5dFo9Pr162NjY3l5"
"eRUVFQUFBW1tbT09PdFodG1tDQE6FosNDg7u7OzU1tZ6PJ6BgYG8vLy5uTns0sLCwuTkZEND"
"Q2Vl5cDAQDQaXVpaMk1za2trcHCwrq6urq7u+PHjoVAoHA6bpnn9+vVoNIo6aTKZnJ2dNU1z"
"YmIinU53dnb29/dPT09vbW3FYrHx8fH29vZjx46l0+mZmZlkMjk8PNzS0nLixIlUKjU9PY2b"
"Kv39/R0dHevr69FodG9vz+fz1dXVYUzi7u5uVVUVAFy/ft1Q/jWw3X/ffRzjQpIofSYKvhAG"
"BZmQ8gvBrjGDUiNpdhLg3p5kVDIa6CnluwIAQ3E+gZTCRMnthDXKRgT2/Mm0TtJiI+kjBwDZ"
"jTLuVL5C1g9LGABoWGXIARuM/0LZlcqnKqEZhHpX2UdCKwJCEYK9mAjbhCdKViXAkh1PKIGD"
"UDv0BOyS8Kwv0JYqrUyak0HKJk2QnHCFayA0lBS0MpFTANc9EJRYlRZC+bdB9RPDsHGp0FvQ"
"GIRGbtr5RVoaTHPmBol5lB1W8OBBpTciWFIQLWhpMwj0kDuA1xBQXhRD0cxQUEn1Aa/0cklR"
"29baMsY8TnYzrlNENfa+vPXWW3U1NfV1dYzGIyMjS0tLd9x5h+CmQbS1tSIgPv/88xxyiEE2"
"nE0rbf1gGQQjQbtEvFe0t7e3vr6+sLAwPT29sbGRSqUcDgfutJSWlra1tZ06daqsrAzPFLNr"
"MpFITE5Ojo6OOhyOioqK4uLinp6eQCCwsrKytbWFKBwKhYaGhtxud1VVVWNjY0dHx9LSUjQa"
"RcS8evVqUVFRbW1tZ2dneXn5xMTEwcHB/v7+lStX8vPz/X5/f3///v7+zMyMECIUCi0uLra3"
"t2OcIO7qzM3NRaPR7u5uRk8MNmxra+vr6zMMY3JyMplMjoyMNDc3nzx5Mp1OX79+HTGxr68v"
"GAxOTU2Fw+HJycm8vLz6+vqxsTHDMFCxxc1oYjhhu++++0ixAtBd8EoHAvKgKYkUzKmK+VhD"
"4WVS14OAXHAKCaVMSgRT7jpa3KXeBRoMKCggGVRiSXs6JFagpEvwlUVkVWg9l5ij7xSwl0hT"
"QCXeCmEhjhZxroEauZ64HUYPVm3YDya1IwWYys2qCzcLK2jyTMsRg46CdFbCFBoaakXh1Yu7"
"QHSXiEg+Y0ZOSy3a5gnBqUEk4F0ooGWM8IzWNGBvNaloig4SNPUlRKq+BnA8DjMeKPWSp17o"
"20Ok0AtQfZXrDYM8+1Z5gaUgDLloymmTvdBWeKXAS+jVd8DoP7Jftrej+8lkMpmsr6//zhNP"
"FDudTqdzLx6/8MaF73//+4989pFaXy0odRpaW1sBIJFIPPPMM5j6MAP7OJ4GsY8DEhkZ8Xde"
"Q9CMRYdaMpnEyMSFhYVEIlFQUFBQUGAYhs/n6+/vb29v39/fxxBrHRbHx8dLSkrKyso8Hs/J"
"kyfdbvfs7GwymQQAxKNwOOz3+0tLS48dO4ZeRUTMycnJaDTa1NSEsDgxMbG3t4dRNdvb221t"
"be3t7S6Xa3x8PJ1Ob2xsXL9+PRgMtra2YtobxNy1tTWMvEFfaiKRuHbtWltbW29vb15e3sTE"
"RCqVunbtWiAQOH78OOuJk5OT6E/EYO/JyUmXy3Xq1Knq6mo0mSenJnX1QDz77HMqToDcSgCa"
"30MluDZBj34AIUtR4nuZ+R5Lk1tQFsKp02qSXKXe4mgSoXmLpF9Kd2Sa5PpCfjPZPcmeIfmL"
"UM4VsFQo/Up0S6nsp3QLsjNNXTWkPFDoQBIy+kbeCIXkkuqLSY+F1meNqNJbpY9OTx5usttO"
"8JBwlFRCu0hEev9kum3lBwQmBM+fnFzlHlYkVze30ZRrM5HhoQXqvj7D8k/lRCawJr+ppAmY"
"7FLT41lA5ian/tMsgzUQRgDXzUSR7ECjV45P0COfiBOIq4jiJncbaFJUFnK6/IdvcACKd6EU"
"10D8Dcp1DqCoCjzpIIQJ5tk7z8I7fp579lmN1OYdd94JAJubm1//+tcdDkdlZWVVVVVFRYXH"
"43G73S6XKz8/3+FwsGfQNE0MMcFd2lgstr29vbm5GYlEUJUrKioqKioqLCwsLCy02WysbyJu"
"VlRUBAIBv9+PzkQAiMVir7/+OgbxYf0YeVNfX3/LLbfU1NTgDvivfvWrt956Cwh2nU7nJz7x"
"CTxmMzs7+9Of/nRnZwdf9Hq9f/iHf1heXr63t/fUU0+Nj4/j5k8gEPjUpz5VVFR0/fr1f/qn"
"f0okEoZhlJeXf/rTn66qqpqamvrBD36QTCYNw2hra7v33nvtdvvTTz+NYYxOp/Phhx+uq6t7"
"6aWXfv7zn2Oo+SOPPFJbW/vMM8+cO3cOI34ee+yxw8PDf/zHf1xbWzMM4+677/7ABz6AgPjc"
"c89JkwQn+N+f+3c9fI24KiM4kNKQg8neZ8n1yFMmSRVjGXMc85ApZYPFlDL9o0hIKSO+F7Tt"
"AjpCURp71Rvssb5VI0jWlPOeNhlAqGolwxI8m5Za6A5gU96pYZIEkcxLEmq0UWDFOeQlIANJ"
"O6NBxnYE0UiKJt2qwvgjiUBPgVswFWxJ4qkeEBHURTGm2u9SKwhJLdFVrXQaygjTgvQK0Xix"
"kR+NiSSPqfJENt5846WPewHaUgPaHgrVrvMXrUs0VlrRgJlMu+FG42juKi2H2sIONBcMgExl"
"2n4h5QEYHHntYM7Rl33qgjl49eo7A2JvXy/QFg8AfOSOOwAgkUhcuXLF5/PZbDZ9fScjiSZP"
"j/6kAhxbAwAbGxuYmWZnZyeVShmGkU6nMQM2mt5YD+54dHR0VFRUIMDF4/ELFy5cuHAB38LK"
"bTYbJnN1Op2maU5NTf3kJz8Jh8Osip4+ffqjH/2ow+GIx+M/+9nPpqamOB7ojjvu8Pv9h4eH"
"zzzzzKuvviro7PP9999fXl4eCoXwIgEhRElJCWZqWFhY+O53vxuPxzH9xAMPPJCfn/+v//qv"
"Fy9eRAhGBHzxxRcRE10u12c/+9mampqnn376pZdewqsRHn300UQi8eSTT25sbOTl5f3Zn/0Z"
"6uC/+vd/p8UMBJi2+z/9aWkFAvmO2L8M5CsTwOYiSJcd2QXSsiIHtjRAQToV5VdkQEjRJJ8f"
"GUhoiQgAAaBsIZCBDtI/BLS6A7CjR1mAZK8JbTACQLDFoto3+B/5HI1FaXkpu0l/h92RaDTS"
"M3LgkY2kBYfIAQi2KTVLymqvCeZh8iySQQ7Kmy949LK3oMoIzfplX6KkoXTkymZlCfL6C5Yr"
"3ZFgaDWRsahtQaDVSH9IOCc2EBSdJDmMZ5rcwdLyBXbtAduYku4AvNNjgBA8T8QttHvC5dW+"
"iJCuB40OhpwVIqPBRKbQS32uDWU2A7lpqC2yxDWvCCiHieJADsNi9y9S1Vtd7av2VldXe73V"
"vupqr6/a5/XhVz5vdbXPS94IySEtgQAwdQSrpKADn0mfDHzM+BiGUVhYWF5eXldXFwgEmpub"
"S0tL8/Pzk8nk/v6+vkWTTCaXl5fRp1lQUOB2ux0Oh9/vP378uGmaoVBIzwY2NDRUUFBQVVVV"
"Xl5+6tQpzFCNBvvi4uLQ0FBzc3NZWVl3d3deXt7CwoLD4cjLy0NfodfrZTMZD7QMDQ01NDTU"
"1dX19PSMjY3t7e3t7+8PDw/X1tb6/f7Ozs7h4eFEIhGJRGZmZnp7e/v6+nZ2dhYWFthSxobQ"
"dh4ZGQkGgydPntze3p6fn8coxdOnT3d1dQ0ODsbj8WAwiPkQp6enmYcBDNsDD3xak1IpxSTt"
"gjZHpSSCFgirizlLKvCei4Izbb9SgYkGO7y9p7YSqCBQrSQNQigfvMbMJKyC0ZU2ilUsn0RK"
"wW4eAYQRCqgE7UgS0hFoCOWBIuQ2WIjlWAxVmfIQanCsgSwAU1QQ4eSUCFBLhQA1IYZsgobM"
"Sw2jGsfUcZcNWiWk700bqoV61BHZaeDpIKpy30FNGShUEyDkVpwu1Lw5BdQP2jAm8KDVjpYv"
"cp/KBVKtsaB6KQiNJcwRo67SONAAACAASURBVFgCo9UuMi+T5BhWbEH/J2rTHo10Csp1mP4n"
"13WaAJ5pxswMb6icXuwZvwi6U5dhnGaeaSOECAQCAIC3lDAI7uzsYKa/0dHRq1ev4sUpFy9e"
"fOONNy5cuHDx4sVLly4NDg7iNVLz8/Pr6+sYspOfn88H1zCVVmVlpdfrxRSE6IJETyV6JzHm"
"eWZmJj8/H/OJYY7FeDy+srKCvsVUKnX9+vWFhYX6+nqn09ne3t7U1DQ+Pr6zs7O/v59IJAYG"
"Bjwej2madXV1+fn56+vrGF6zvLwcjUYR/urr60dGRvB09vDwsNfrra+v7+3tnZqawgSOw8PD"
"Pp+vqakpGAxi2Damie3q6urp6dnZ2Zmfn0cEbG9v7+3tBYDJyclUKjU2NtbV1XXixAlMrL25"
"uRkOh0+fPt3e3n7lypWenh4ExKmpaYYOwxC2T3/6AeWqVmhIM0fTyMEyPK2a9LBOiOykrYv8"
"r3WLgaFFE28GC/odQZqBUUZ9MAMxmCiFgOBb6GzPYgBcK0Mar+8KqTQ4UWImsVMLCxEaUQBp"
"yQMGhefcBOi1M+2suye0AilpAyEpQL3UZUnuDUhNjKSRN8gtYxMEL+pNHjUIWgY0oFZrmYqF"
"JqDX9Utas9TuvsJ9xjiKR9DoK+kmK2d7hDFPG4zssGCWERl10AKJZGDiY+csWyxC4jZxFNcj"
"CMaVOqCUAkU/AljZZ+61Wll5ug1cXIGsJG6dg4CAdyYVWNL6JoRobm5GQJyZmRkdHb148eL5"
"8+cvXLhw7dq16enppaWlcDiMWWri8XgikUB1L5FIxGKxSCSyvr4eCoWmp6fHx8evXr168eLF"
"8fHxlZWVvb09vL8FAFAfRNs5Ho/v7e1xBlmExd3dXcyfiIljCwsLca9jbW0NN5EBYHt7e3Bw"
"EK8iqKysPHPmzPr6+vLy8he/+MWWlhbc/9vc3HzllVeQSfEnqmz19fU1NTUdHR0jIyPxeDyd"
"Tl+7ds3tdjc2Nh47dmx2dhZverl27VplZWVTU1N3dzfqiTs7O1NTUz09Pb29vdvb26gnjoyM"
"dHR09Pb24o5KIpEYGxvr7e09ceLE3Nzc2tra6urq1tbW6dOnm5qaMOcYAMzMzMhFWAgQwvbA"
"gw/ovKD+FbSG65hBkKVYRmhfKGEWhH7AcqjJuaG9pcxDVRFhgbDoi2rxN1h0DIYvxVGC66GB"
"stksNKVEUxWFVodQHefuWIgjTSiGKaV0ggIssIxTgoMKp7EW1zGQia6AX5GHFGYAqXqQZ0No"
"L1PNRDwNYECbQtaQ5AY3jVmp0LIJJc263qv+oPETYS3lMz6gaAOCtXjFHZKJmD4K0mjCeRBC"
"bQDz6+R+EFoLVvBEBOOuZ7CKorcKTmJaMNhpEqLYUJsuWvsNVbuhJolYKEMsJBIq7REQEKPR"
"6N/+7d/OzMxsbGwkEgkOrMHYmoz4RN4h4Q0TjsI5ODjY2dlZWloaHx+/6aab0LKOxWIrKyuI"
"p3t7e5wYwul07u3tsQtyd3cX9U2Px+NyuZxOZ39/v9vtRhjCYjMzM/Pz8w0NDU6n8/jx43hb"
"Hr6+vb39y1/+Mp1O4/6Pqd2JOjs76/P5Kisre3p6JiYmYrGYaZpjY2MYjtPb27u4uBgOhwFg"
"bGysrKysubm5s7NzaGgILySYmZnBfeeNjY3l5WW8g6Wzs7Ovrw9RMpFITE1NHTt2bGBgYGJi"
"YnNzc2lp6fDw8PTp03V1dXhSZXp6WoYggzCEsH3mgc/QdEgYAsVWVmlVHKNLCUkOgD7npJfh"
"zY/EaHowGr8JHMUBhnodsn+VS6rGp8LSoC5z7BtUT4GrI0ECC4pQr2Wd+vlmJXMoGJoMWsCc"
"/gdCxgRpciklweDSmgoiK2Js1QwwDZCxlxZ8t1JBU86VtmOlhtDa0+irKYiseevfKmOcSyhr"
"V6OO0H7Vf4CivraWWt5iBmSblTlEb1THa1oQrA1ngp3Si3lQzNa08oLyVeoE5cbJALeQWYDe"
"NvadPa8K7rRCsgaL2c3wzs4GHEFTUzMAJBKJc+fOMQjqkTeId5wQjH9mfDhaG9VAp9P5vve9"
"Twhhmub29vbKysr29nYikUgkEtiKx+N55JFHfD7f1tYW3gGAkBqLxUZGRlZXV6urqwsLCzFA"
"Z2dnZ3V1lQO5h4eHnU5nVVVVcXExomEsFsOwIaAPYiIiMqbU9vl8VVVVHHeNuzQ2mw2N9JWV"
"lbW1NQRKj8cTCAQ6OzuvXLmSSCS2t7fn5ub6+vqw2MrKSjKZHB8f7+3t7e/vx+ic3d3dmZmZ"
"gYGB48ePYwz5zMwMqrp4Amdmdpa4HEAI22c+86DSr9TcKObiNVlXbSTvsPhI6Zfiz8a3Ksxx"
"WsB+Rr1WcjZlyJSFa5SplgnXYJVkK1yq1y1smfWleiK5nw9CsZJl6OKlhEJ7X+D72aCtdQT4"
"B4krKAWb93xopVDTIDKN4MyxcNMW6knCaj5XwnXtCzlLam8FqIv4NuG4AXq92jc67GcOHmce"
"uFkqp0ZGRFGqNmuF3A05WkZr/X0rdRUTgLRoqTB/KS1WELwxwsgtSwLHGBKOsXNaYzTVYw3R"
"eEa5AzyDRCdNALTtJbbChRCiqakJAfH5559n7GON75CuExBC4C1RZWVl5eXlZWVlpaWlJSUl"
"RUVFhmGkrJ90Ol1dXX3y5EnWEFGNQsXQNE2bzebz+Xp6esrKynp7e+vr6yORSCQSQX40TXNz"
"cxPvR/b5fAUFBcFgEJM7JBIJ1EMxQaHf77fZbHjMJpFIYHiNac1Fhh3Y2dkZGRmprKysrq5G"
"lXBjYwMVt8PDQ9wqQWsXMbG8vLypqQlzI2KuisXFxb6+vr6+vrm5ufX1dbzFtL+/v6+vD72o"
"eLRmYGCgp6fnzTffxGQZp0+fxrCbudkZxXEgxG9+8xvsHgdAAYamaeEz2pFS4EAHoAA99QWF"
"Hlge04FWfq6HHgohgKMltDgcGRcnsFb1torhsxaWBUAAx5loUTeghabIdV91J+sN+iKjW5kf"
"jiPh6BYAqt2kKD3BgUd6F0HFD0GuzpsqpoWi2ATHd/CMWMZCPaI5s/yhh6vwrGYOFwdEAZiS"
"ZrKz8kuRXZFOHX0urJ1RnMJRe3oxihri6ZLhQ0wknWQmcBhWxlzgbHFQFFjGxjGDWrCXlVLE"
"BmpIKqIMQFWgZpjapUBFJULETzgMAI72FCZNm6koLD/CMv0fuvlmAIhEIn/+53/OKpUQwm63"
"19TU1NfXe73eioqK0tJSFOzsoBzEuGQyubm5ubm5ianAnE7n3XffjQ3Oz89fuXIlHo/v7+/v"
"7u4mk0nU7z72sY8BAG7CHB4ezs3Nvfjii6FQSE8dVlZWdtttt+EFKfF4/Nlnn7127ZoQArdN"
"qqqqzp49e/HiRT4ljWiI6SEAoL+/v6Ki4sc//jHGdRuGcdddd/X09KRSqR/+8Iejo6NoX3/w"
"gx+84447Dg8Pf/CDHwwPD9tsNofDce+993Z3dy8vL3/729+Ox+OGYXR3d99///2pVOo73/nO"
"wsKCYRiBQOCRRx5Jp9Pf/va3Q6GQzWY7derUJz/5ydXVVURzzCAJAC/+5jcyaA4EANgeeugh"
"pR5oOoBSPfTtM6EIri3LvEwDaY7awm3RqqTmBbrJxyaJtUJSntRWBtchSImxqEKas9+yKCv9"
"SjOJc+gW6k2mh3W8GaqnPjBtA5L0GsuGdnaj2JhFv8xQwbSB0RvqqBq9L3/VlDOtLpoHi5sB"
"LBSwvEODZYUFuCY5J8DaomzdYP0paxKzaaw6oFRBOZQsMhlWcmQNzdKIwV0X7OwFakvSmdyt"
"+mFJxU+qLp0clp6SCqF1Q3CDctYzd310DqCvgPgbNH7RnDqytN/vB9IQhRBut7u3t/eWW245"
"e/bswMBAIBCoqqpyuVx2u53Pk6gFgtx/GP2HWf5bWlr6+vowZBqfRiKR7e1tjHBMp9M2m62w"
"sNDj8TQ3N2MNWLPb7e7r6ysvL0eHIz6Kx+Ojo6OY6augoKCrq8vj8UxPT+O1Kvv7+3Nzcxgm"
"LYTAL7lvfr//xhtvxM2T8fHxZDKZTqfHx8eLi4vr6+tRJVxZWQGAhYWFg4OD1tbWnp4eTPll"
"mubo6GhdXV1jY2NLS8uVK1cODg7C4XA0Gu3t7e3t7b127VosFtva2gqHw319fd3d3UNDQ/F4"
"fHl52WazdXd34w0zGNYOAHPz8zqL2R566DMs0TzNGqfT/AAJOj4BofhKHofV0VRvIhNbCeM0"
"fgAL1mhwZmUrUPVl7IiwTOjORFBSocu2Xr0ui6A8i/oQQG2gC0FQQJUCQLYVnYME2pfWMoZe"
"GCxDzKznCPRQ71oikcAyMAVjan2xNkHYaa0bhKCtZWGBG7UlnjVz+jD0djLhP+cwtLdUSFQG"
"I2QSQHIfGDkeamUU88n6tVq5YxkdBwtyav1jNSHXbLxzX7ikTm4L5UAIIRobG4FuCr3jjjtu"
"u+221tbWsrIyQciiwx/+PDw8TKVS+/v7GHGdcbaPgZIDudfW1iKRCOc6xBv1XC4XZ4o9f/58"
"OByuqqqy2+2VlZX9/f12ux33JbCe5eXl8fFxr9eLB5k7OzsXFhb29vbw5uji4mJWKnn/uqqq"
"6tZbb0X10+Vytbe3j4yMoMU9OTmZn5/f2Niom8nz8/OHh4eYLXFhYWF9fd00zZGRkaampsbG"
"xsbGxsuXL2NPDg4OOjs7u7q6Ll++nEgk8Mhzf39/MBi8fPny/v7+7OxsaWkpnrHBe7IYEHke"
"bQ89/LBaoY6YOmaaXNuGFue/NqHZ6z69wAKoigqhu7OFLj2Z3KIBlqWQ6gFkfKH/qVZxoG7o"
"wHCkqFoFggDCqrzmRiqLXKl/Mvqq3tLpkovkR38y+89AwY60bLnO0XSOcWR0gvoIGeX0Kcwe"
"mIVcILK6xN0HS6OZzWesnSAsm0wK0iwzq4NpTi3vKCC1znGOhSqbabh6yPo2oyGtq0r8qKsI"
"iHa7vbq62uPxCD4sRhbx7u5uKBSamZkZHx8fGhq6cuXK5cuXBwcHh4eHMdvr0NDQ1atXx8fH"
"Z2ZmVlZWIpFIIpEAgLy8PKwE8SUej+P+Mh5z9ng8mPUAAC5fvjw0NDQyMmK32ysqKmw2GyZt"
"3dzcXF9fR4TFM8UYbFhUVNTb24spc9xut0EfbC6dThcXF9922215eXmCzho6nc5gMIiR2AAw"
"NTWFmNjb27u6urq8vAwAs7Ozdrs9EAj09PRgtsR0Oo2Bh36/v6qq6sqVKwAwNzdXVFTU1tbW"
"1taGJw6Xlpby8/M7OzsbGhoGBwfxogJMxI37QgCwgIBIc2J75OGHLfNmXRCl7aeMRgvYgFIY"
"QGS8p3MxWHELtOdZiJDz7wxJhQzeydGgtTzwSHQhyZRnkeMlEqHM7czsXurbk0eXg2wtOru4"
"xaNgLZJjAaDfs+EsswTZvUd1LvM1puKRk3QUTbI6cXSxTNg4ov/CyivZ/HbUJ2uWszabs4ta"
"1WvjiEdZLb0DhwC3RjyVIUNZ5RCVDg8Pd3Z2cM4wEeH6+vr4+Pibb755+fLl6elpjK3Dcx0Z"
"x5NxVzqVSu3u7m5vb4fD4YWFhevXr4+Pj+P2K25rJJNJTiuLgFhXV4ctXrp0aXt7G6+pQyhB"
"11swGCwvL5+bm0PsS6fT8/Pz8/PzjY2NBQUFzc3NbrcbL7Dn7BIYHH7bbbcVFRUhkg4PD//i"
"F79obW0tLS3t7OzEy6EAAHPSNDQ0dHV1hUKh9fV1IcT09DTeh9XV1TUyMhKLxTj0uqmpqaio"
"aGRkBHXM6urqlpaW+vp6TN04OTnp9Xo7OjrKysrwaM3y8vL6+nowGMRhLkiTWU6d7ZFHHrFw"
"Q7ZXSWcNyOnUyZr73CyhMwAIfd3OFl39Ff46e59YMIPxD8h4L6NrhngPI2Dxo8ayJPnIunL3"
"AHJp20eUz3qWA6GOfJF/fQdMs7SSq2dH9zELqTPXrCMqOIrqoJbYHI+zkOOodeMo0H9nsh29"
"UBwxbCths5cfS8lYLJba39/fT+3v76f2k6n9FP+Jn/z8fJ0xGBC3t7dREbt+/frFixdHR0fX"
"1tZQ1+PdW7RGOREOR9volrKeLTEWi5WXl3s8HnwLL11Bba6iogKvoxNCXLhwAXETAPAqu0gk"
"UlNTg7myu7u7w+Hw5uYmqpM7OzuYKNvtduOu98rKCgYqCiHS6XRfX191dTV6NkOh0NNPP725"
"uTk6OtrR0cGYuLu7i3piSUlJfX19V1cXhmcjUJaXlyNQYtjN/v4+Jj1saWnh/Inj4+MtLS2B"
"QMDpdA4PDwPA+Ph4W1tbR0eHzWaLRqMY193f348nVRYWFi2s9Mqrr5K3E+T+WdYOXqbTwrI3"
"CvrzHIVzvn9khb/n5z2+9i7ezPU8a5/2P/4x9S3Rd9uPd1Ov5bXf2UruOrLeyuqM+uI9U+U9"
"s8y7qFnfpn5vXfldzzhs4Chu4d9uvPHGd2791Vdf1d983/veBwAHBwfj4+Ojo6PXr19HcAHa"
"bkYh5YMlemQi3gLK/jvOiMN3RZWWlj7++ON47u3NN99cXV3F7Re73Y6HPbDyqamp8+fPr62t"
"IVxiDQUFBTfddFNPTw929tKlS+fOnUPtVQhhs9luvvnmkydP2my23d3d119/HY8ebm5umqZ5"
"1113+Xw+0zTPnTv38ssvo/5YVlb2yCOP4G1/Tz75ZCQSwVzZd999d39/fyKReOKJJ3DvOD8/"
"/8EHH2xpaVlbW/vWt76FAT1NTU2f/exn7Xb797///atXrxqG4fF4vvCFL5SXl//0pz998cUX"
"cbxf+tKXXC7XL3/5y7GxsUQicf/9958+fRoAXnvtNX0WbJ995LPvuGz+js+7UIrec+l3UVtO"
"ZeY/t5X/xA/kVif+k+p+9w//l0za/7ZUt3z+V/fy6PqPHTt29uzZs2fPnjgxcOXK4N133/3g"
"gw/efvvtFRUVCwsL3/jGN1pbW/UqUH/Z29v7/ve/z/eZmKbJJ475VimDrsrDApxSu6ioCO+W"
"QqchOgrx6Z/8yZ/U1NQUFhaur68PDQ0dHBxwVYlEwul0Op1OIURZWdnAwEBpaSneTy+EwPpn"
"ZmZWV1fr6+vz8/NrampaWlpmZmYwxSGeWonFYoFAIC8vr66uDg8R7uzs4Fk6n8/n8Xiampqi"
"0eji4iIGBo2NjXV2dpaVlXV0dOBBFACYmJioqanx+Xzd3d14n/3h4eH4+Hhra2tNTY3f70ej"
"GPObdXd3Y6ZFtvFxS31xcXFtbS2ZTM7NzfX39wcCAYwf6uzsRB8i3nnAE2f77KO5ANF4T6zz"
"3t76r6jsP72R9/pi5nv/JaM8sp3/qtb/d/38V46/pqampramxlfz5JNPfvKTn3zwwQcbGhoa"
"GhpOnz7tcrleeOGFO+64Q+8Yerji8fj58+czzqiAdmoArdfGxkbM54qQh8hYUlLyB3/wB729"
"vXh/3smTJzs6Ourr6xsbG/v6+jBN7AsvvLC9vX1wcIC1pdPp9fX1sbGxhYWF4uJi3BiprKzE"
"bDdLS0usSUUikZGREczP6HK5enp61tfXeadldXV1YWGhra0NvYGY3hXP0oyNjXm93vLy8vb2"
"dowMF0IgVnZ1dZWVlbW3tw8ODqKSOzo66vf7Kysrg8Hg4OAgBujgCeWampqysrKrV68CAO5H"
"t7S0dHd34xbz7u4uJpTFsJtYLBaNRre3t3t6evx+fygUamlpwSsElpaW9GmyPfrooxkzlxFw"
"k/noHeb83TKXpRprneqOck2dym723eDK7+J2HOdRNWW9bQihn8J+h0qtr1srguyi7zDMdzGU"
"jEeG5cCI+hYg1zvw+yCChQD8ovzF+s87vvqf9Mk5UZZbSIUhtAxdRhals6hiPQr0zv096qlh"
"qdXaKNb/P//n//z617/uKinhJx5P6be+9a2HHnqIXwACxGQyef78eU7SBQCCcl83NDT09/cP"
"DAw0NjZWVVXFYjG8GADVNLyJOJ1O19XV8Yt4v2hZWdnu7i6ezEP3JYY3G4axs7ODXsidnZ2x"
"sbG5uTmXy+VyudAybW9v57QOuEc8Pj6eSqXwXDDe3zQ/P4+Iub29PTk52draWlBQ0NLSglfX"
"o4KJkToVFRXBYBBTeAEAJuXu7u7GQEgMMMT4xGAwWFlZGQgELl++jKkopqam+vr6KF3NlBBi"
"bm6usrKyoaGhubkZNceNjQ3TNIPBYFtb21tvvYX3qebn57e3t2P+noqKCgJECkEwrIBoHOmw"
"1883Hcn0BuXysBTLURyy/lBspHaQtfCwLKnPFf6V2VBuftX9/hmlLDvJWSiZCV5ZB+lyva6u"
"Ac7+aBWi5EJmmxmd5O1RUEVoY8rgDkIOnAPru1mnaUWmIGsNyDdVThlL/6lWmqnc3AFCZNNL"
"b0Z7qI87R3f4V233T+MUyzIE+jcaM2q3EWhdtnQhUw6OYHrIYgTIIHz270899VRPT09jYyNX"
"Ozh45ZVXXn3ggQf0XmIuFgzM5gsDhBB5eXl4W3wgEMBAP8Q7j8czNjbGmxj5+flCiEgk4vf7"
"8UgGqo2cXlsIYbPZKioqWlpampubi4qKQqHQ1tYWl8Q96NHR0XA47PP58vPzi4uLu7u7CwsL"
"5+fnEaCFEMvLy/Pz883NzXl5eX6/v6KiYnJykq+gwi2O4uLilpaWvb09tJFRy0MVLxgMbmxs"
"YBg2njvu7+8vKyurra0dHBxEAJ2YmOjt7a2oqKiurr5y5crh4SGmpejr68PbXVZWVkzTvH79"
"ent7e11dXXl5OQfi4O2m1dXVly5dAoDp6enm5mYsg3GIoVBITTcI22OPPaZP2RHKmw51mTBH"
"8TPWVKiMUZp06i8ZWnijVgqUeOXWx47W6zJ7n/NjGZy1ZsndBmhqRQbM6T3KaigHPAoQ70rp"
"4MQ+1uosI2IIsGCBQPG2BNrlgrYMOloXEEMcsS+r5lPdXcLgbRDFAPOycnCWEJnzyxeCqZXY"
"Sl5VA+do1ShGmp4eDWOBsgx9NVNdxbIysYQwBOertawKhl4R8BfECRnqI8XAZq53GYqoUP0X"
"8n6VSCTyve99r7y83OVy7e7tvfzyy9/85jfvvPPOG264Qac8AmI8Hv/1r3/N0dQdHR233XZb"
"Y2MjnrJglAQAh8ORSCSi0SjGV3N2a9TghBCGYYyMjFy7dg3VwIKCAjzlYtLFzX19fV6vNxqN"
"bm5u8umXw8ND9DPijStCCJ/P19bWhnciYxPRaHR8fLyurq6kpKSystLv94+Pj2NETiKRmJiY"
"aGpqcjqdLS0t8Xhcx8SGhgaXy9XR0YFXRwkhotHo6upqd3d3VVVVSUkJbhMnk8n5+fm+vj6f"
"z5eXlzc2NgYAeAl9a2trR0fH6OgoDgo3nf1+P7oRhRB4k73f70fENE1zYmKir6+vtLQUaRha"
"DilOMoTtscce5+kXujJgkSvO1KSvhRmsIfSn2iKbtahr8qfwkQUGWKuyYoR2f7j2qlJdszHR"
"qlFlgZEmsrrCBGDhcuCmDTlaHSdERo2qy6CkUHVHWP9UI0ZQUSsCD1GmeSU4yqXHaQsVN83E"
"0HAk813QaWohMo0UdEoLnVBCCJ16AJm0By2VMCfWUQskM4IQgjITs4rLkbza+IDnWJJMRXYr"
"QhIdLOuFoMfK9kCbLpPjNfTXiaT/YTF/+Bctx6G6jxaHamiiQcx74sTJcDj8xBNP/PCHP3zq"
"qadeeeWVsx89+7Wvfc2Ot8tT+xj7kkgkXnjhBQAoLS298847e3t7Oc8rKoYHBwfr6+u4B+J2"
"uycnJ9nDiBvK0Wi0sbExLy/PNE273X7+/Pn5+fnr168PDw/Pz8/v7e3l5eVxbGB5eTl62ba2"
"ttbX1/lESiqVmpubm56e9vl8eCtLd3d3PB5fXV1FYu7v72PmhbKyMpfL1dbWhmfy+FEgECgq"
"KmptbcXr5AEAzeFAIOB2u7u6uvASKCHExsbG7u5uR0dHbW0tZuFGAzwSiXR0dPj9/kgkEgqF"
"UAEsLy+vr69HoxjTQS4vLx87dqytrQ0vF0yn0zMzM8eOHWtpacFA9P39/aWlpRtuuAE1xOXQ"
"ssIAELbHH3+MWRgoCRLdUEYiRPgkCCqVwFHgKiEaL/nIKxrUaLaJetF6UJqtMsBGNcUCFDcq"
"ETboa8WoXJ4aJKuGX8nIb6oDqVIQaPxaYZBgCao9vr6VIUSTYyUYsmeCC2M2Ux4JC426YFWT"
"N/4f3+uaiftMe5kwhy41Rsln/URDFKkI83d8ySow7XRUMbTFDEAbKE9RlgtBAzhGc93qJn1Q"
"ULtKqZSjNUj1VfaJRDawIiZd9Sq4crn4aDBsEBE1pY8uo1XeIKuaCAotdf3XUACvVmMQfGsf"
"3U2rJhQ0MgphN4z33Xjjn/7pp27+0Ic+8YlPfOWrX7nl5ltsNptcbYnjEBCTyeSLL77Y2tr6"
"8Y9/vLS0VBIWZO6sa9euvfrqq9euXWttbXU4HPn5+RiDjSHWeMeeaZq7u7v19fWHh4e4rYzb"
"LwcHB3hR8ujo6NTUVDKZdLlceIakpKQEA57X19cjkQhiIoYrDg0N2e322tpaPDridrvx2gAh"
"BN6i53A4ampqioqK2tvbJyYm8Owz7qU0Nzc7nc7W1tZIJIL+xFQqhS5Cp9PJW8kAsLy87HA4"
"8LQyn1dZXV11OBzNzc3BYBCTGwLA5ORkMBj0er21tbWXLl0yTRMPI7a2tgaDwUuXLiWTSbyi"
"uru7G3Nl459+vx9dkMvLy6Bxju3xxx/XRBgnl1I1sVDSzAMhiq7gAWgGtUyYTtwP6pJa1guI"
"g1myqRpdAhl/WDAVVGhSx0JpOYfMoss3YVqYXb4le6FroyCtQc0/QH+onqmeyyT7EuskOxvA"
"fK0rQyRdwMoOK250k41Q8JjtnqD1ihQyHqXULilmHnRPLqc0ZcglTUpqpHQIicco1KSDknxQ"
"7zEheCINYVi6y2PSrqfRkFAhLq+ripnIk4pYaICwvMe0VDjOXMozLiTqsQ9HJp4jJtTdP3L9"
"YDRkuDeUDqAWaUOjQklwCwAAIABJREFUj3XvimY8kzq8DNFxVeqiEHl5BVXeKq/Xm5+XT3PD"
"urAhBGBQSCKRWFtbu/nmmzlnjBACD6799re/DYVCe3t76DfExDMej2d2dhZ1Q9SAUqlUOBzG"
"LP9Y4O233zatmSAw98Hw8PDm5mZRURGWLCkpQc8dZ/dCGJ2ZmVleXm5qarLb7VVVVU1NTVNT"
"U/v7+zjv8/PzeIyvuLi4o6MDMRH1RAyQLi4ubm9vX15eRhsZ7yHo7u52Op1tbW1XrlzBYzMz"
"MzNerxcPmaBFDACYUNbr9QaDwbfeeiuZTB4eHk5PTx8/ftzr9QohJiYmAGBxcbGmpqa2trax"
"sfHNN9/EU9tOp7O5ubm+vv6tt94yTRM3owBgZWVFLXqGsH3uc59TNppg3UgKF02TlCCrePFt"
"KYKlSwjOGScUXxGb8KXrLAZAqWE1n5DGd4yrQpNvTZdjiLOapvpNQ4Za5smMQ5tCKaKqdql+"
"KOYUSnBYK9G1CtLgQJmDEso1SrHYszgYqj1ZnSDFXFFUW5Z0Dxr1VqUNp1mTYMtrFwGwMJT5"
"SsNBInINPEyiFbcsqMM4FA3vFdjJ5lhfFgqCeJljhwBoGpN1FQSqH+RqBVq/WQsmm4FIKkjv"
"I22S1jhQnGhoZRjX1KonR02zJuSybsgyBJnKhpIrJ2vnoM2j0FUCJp6+CEpKSubWlXbiVxCG"
"4aNDHZidxaTcM9vb288//zzun2AqBwCIRCLd3d12u93hcOzv70ej0by8PLvdjlZkKpXCwEAA"
"KCoqWllZWV9f57ulGBZxZ3ZsbCwUCpWUlLhcLgBA32IikcBLlvGtzc1N9gAWFxcHg0HM6YAE"
"XF1dPTw8xHtUOjs7x8fH8dH+/v7ExERHR0dhYWFnZyeng8XNlr6+vuLi4oaGhrfffhvDvCcm"
"Jtra2txud0dHx6VLlxBzJycnMTqnoaEBVUJcM/r6+vx+//z8PGbYnpyc7O3tra2ttdls4+Pj"
"ADA9PR0MBhsaGux2+/j4+MmTJxEQV1dWFEyAsH3+c5/jtUtb5ZUtqIxGycTkRdc40yBWQ+GQ"
"XKjNv0FrvizCcKQhhqGKAljlncBaxxJSAehL5EG+8wwbUb3nXwg9+RoWlm/QVDUWbVYOSVqk"
"yku3fLCKZpFtiRAKwQyhvAr0K64nIAhRDeoQwQy2p5RKaS0quCYIl1qQ0lAMghYCSxqOKqua"
"ktjEuqPiDjrHC5oFq4gn10MN6lgvpwVBzq7FDwJKJ+atdeDVldHf4AKMWYozgKgvFU2miMJo"
"wjkml7LItUknAvEyqfWcGFvIRZYUXmB+JpVUqbJqT46YkPVZ5lqaOqKXQQhMc2CAAMBTboeH"
"h3w2TgixsLDw3HPP4THhVCrFsdDpdDo/Px8PgXg8nvn5ecMw8DY7zFa9s7Pj8/mcTqdhGDU1"
"NR6Pp6ioKJlM8j0q2F/UHHd2dsbHx8PhcGVlJd5O1dra2tjYODk5GYvF8Kw0plkNBoMAkJ+f"
"jxsjsVjMZrMVFBRgQA8e8uN8NgCAex3d3d0FBQV4kR7uzOAV0p2dnaWlpS6XCw8mHx4e4gUA"
"LpfL5/MhUKbT6dnZ2WPHjlVWVhqGgfsk6+vruMfd3t7+9ttvY/bvxcXFgYGBpqYmvpsFXwwE"
"AnNzc83NzVJDXF1Va6gwbJ//wueF0nqINwkESBEANs6ANBn20CE7kXIF5FJUGoREDQlbggVc"
"qoyGxnm6LOirt1rKGXWFYM6ju+e0vUe+jY0QmPoCJPXk4wPO888QRDgh1Uo6wA0STxWzCxYJ"
"3QYzeOg4bJAoIHUZUmW4DEqnwRjBNyqRvsBGPfVC+hmAyhMeKiPUkNCk34NFQ1CanABa+oBU"
"HYZNQWOV3ZXzhxQBXoSUA44WGoIgXiiFUnI1V4Ru5Ru8HPHIZecITdROCz8SPOuEpLTuKFue"
"OYE1S1BUVCyjTaHEOyKhckFIgeF50pYrySy8zuBAaDEhFsdSpE0IIRJ7CZvdZmPOoRURO+qt"
"9iIgYjwdAFy/fv3cuXN44CSVSiWTSQyoBoB0Or21tdXf34/GACaFxTQ2fKZlZ2enpaUF8aum"
"pqa1tbW/v7+rq6u0tDSRSKCyBrRzjc64kZERTKeK5+GOHz++sbERCoUqKiruu+8+PFyI47Lb"
"7QUFBVNTUwUFBQUFBTabbXNzM5VKYWLt1tbW4eFhVPH29vaWlpa6urry8/ODweCVK1fQ5A+H"
"w+g3rK2t3dvbw+DEeDy+traGljtSQAixu7u7u7vb1tbm9/txn0QIMTs729LSUlFRUVdXh0bx"
"9vY25g1rbW3FLZe9vb3d3d2urq7W1lbcNAeA1dU1KWSGEAC2z3/hC5IbBQi6sQMIKARJi7Ym"
"03QbJM4EEIZhYWvQdA7SiViEtYWdlBSwYKkOldSEoWple1I6ewjnpGJDvssMOVA6DAMLsL+J"
"HtOCLRVBya9EMHZbqS0MRhWDUYtFFvGQRIMUKNb0VANKOOXbinwZ9GG0AzZghbzSBnSkU/Cq"
"XI+sMilLjdwLPBYCddLnDRJlhiHBeiMDiIUZ+F9FaOCFjpCD6qU+s5LLtiv1hYcvNLRkS5Rd"
"jBl9F8RzBk+SYWme9FSaJyG1RMGkpEnnjgoCd3LUEISRCkzPiL2F0HwfEutMM/3tb/3ff/EX"
"/+3v/u7vPvbxj7k9nieeeGJ+fqGjs0MzN8DrlYAYDodN05ybm/v1r3+9v7+PpzXS6TTqXNi3"
"9vb2O+64Iz8/H+HM5XJNTEwgGgJ9YrEYZl4wKXcsABQUFPh8vs7OTtT1wuFwKpUSQvBFBcvL"
"y7Ozs3hNCt5PX19f//GPf7y8vFxQXol4PP7666+/9tprDoejsLCQDz7jwZXq6uqCgoKmpqah"
"oSHcftne3o5Go52dnYWFhbW1tezTnJmZwQjBtrY2PIRnmubGxobNZvP7/Y2NjfPz8xsbG0KI"
"5eXl8vLy6urqtrY2tKbR7YjOxIODA4zWnp+fb2pqqq6u5hRhKysr1dXVdXV1dXV1eKJxbW0N"
"iF0NMGxf+MLnpYTL3XogBYQFi1ZgOfVKFWEfOO2sGZrmb1kTQTDUSJ4hDpJgw7IFbE4oW125"
"YQxCX0IJVYCkUDagkEI+1N4D2pkA9pBJU1zqjiRVUqkBHggrmoL0VPIaaqAhoVpXZUg+AFh1"
"ktimaSSg/dBVIFoJZOW6XUeiSVhCqxdBgNLJFJQbSg1RRJPKECk6jAq4SCorGugdMJTWROMi"
"2CJpQ31dbrAwkWgGaDxA7yA4CeYLqprm1iBSUbeVKi4ZSRAKymXIAOs8kIdHEGFZixQGo7zC"
"Yh3AATTMB2IYZktB/Ct0zha0LoOs2RAG/N23//6ZZ5/5b3/xF7/97W8/dc+nPO7SjfWNp/75"
"qT/6oz/iOgQYXm8VA2IkEnnuuedQN8STJHhEzzAMt9t91113nTx5Mi8vj8MS7XZ7IpHAmBjO"
"BwEAkUgkHo9vbGzEYrF0Os15CTFlg9/v7+/vdzgcmGwVYRR9fGNjY3jdihACbVXWJfGCgbm5"
"OSFEYWEh1imFDWB1dbWgoKCioqKkpMTr9Q4PD2NPwuGwYRiNjY2lpaX5+fkTExOIrXhYpbCw"
"EI3fVCplGMbs7GxzczOe9kOLGOGvt7fX4/HU1NSwM3Fra6unpycQCIyPj6POizdM+Xy+WCyG"
"p2jwxhWOQwyH1whHDGGA7Ytf/AJhFwGlHI20uTSBl1PL9zgC8aVQRgVBIUsQuZ+0+oEnnqCP"
"QcxQQspSq4OHVCgl8FrCeuQPkn8hDB0NJYiwYSMslhlrjZIQbJ8JtqlIaiSjy5P1UnRAMoB8"
"gTRV0rYJKQg7DB6jBGVgk5iAQFMQleklrUsigDK2LLMBpIwpdwDVoF1FjGBgaFPIkyqbkO8S"
"klMPFEOodtS6ZzDmK/gC7Q2hd9hqpDJRaAGSLKQxhKAVB1czRkH6yiAOYHVNV22tSi6pYkIt"
"xFIoqAPMpWR2q95a+it5Q5cRq93P+A5CiP/jv//3//E//s9bbvnwd7/7j/fcc09pqSc/L/87"
"T3znsUcfF8BGucCTtul0emVl5dlnn93a2sJjedhMMpnEm4XvueeeyspK5C6TUmGHQiE8dsIR"
"3VjA4XBggOHS0tLU1NTY2Nj6+vrBwQEDmcPhqK2txauN8YAw4trh4eHi4uL6+npDQwOWBIBk"
"MnnhwoUrV67gFnBBQQE+Qtngd0OhUHl5OabUdjgc169fx+/n5uZqa2tLS0sbGhr4zoBUKrW4"
"uNjf319UVOT1elGtQ1w7duxYcXFxVVXV5cuXkTKhUAidifF4fG5uDtW9yspKvNj+woULeNcg"
"HmEOBAIYc3NwcLC5uXnmzBkExLVwWPO/g+2LX/iiFHS5PrLos4nIbKpMP5Jr4l9DiQdb1aTM"
"EZaxoEtZYRQWSpJ1iCKHlpI+JQzE0FK/MDTGlGs2gbpuglt1V0YPibYA+APDaZVfi3cPaTCK"
"2/XeYO10Uzro8kRaNZVWokxrCKMCE40MLSAhQ0Bl0tCYQUMnIdQuCCEIE5gMALbm1DSBULPF"
"VrIETYMaN0h3FLTiUCckp5B5LnVu2T1SM0FpejTj2pSgz0bQ2qiWTNDMEsJt/hBbEgYz9uqL"
"OXGOwZu47E+BjLWC58OQ8KhRl4aqvcPVGxzkQKPmnSutC+Ts/Zu/+Zsvf/krJSUl//APT95z"
"zz0et2c3vvtP/98/ff5zn+epEQCVBIjnzp0bGxvDDDccTGOaZmNj4z333MM3TGErs7Ozb731"
"Ft42x2HViE2FhYW404K2MCYKw/uLR0dHcV8Cd13sdntjY2Nra2s4HN7Z2RFkGm9vb8/MzGCS"
"m7m5uTfeeCMcDuMuNqKhdlEl6Nb60tISOhPr6urwUDP2eWpqqqurq7CwEJ2MmAwxGo2mUqnW"
"1taKiopEIoFIl0wmI5FIT09PRUVFPB7H4B48ltPc3BwIBPj1mZkZvAGmsLBwZGQEUdLr9WIg"
"zptvvgkAGxsbra2t6EMMh9cVSxvC9qUvfYmWacYJ6UcB3l9h0GMWlZspzOHAnKoWS0JCgwSD"
"VQr6T2EHigCxlNQYZS28ZyIZ3iAZECj0zHRCb5mHIb1LLD1Cta6c3aqzhtAKZppoTB7WXEDK"
"suallP8C1cJueOlCVfuJLHuI67p6oZQRuU4ZgtqSCGbpU5agKjCWfWFdWj1H3yItO6BBLfXe"
"oOUFFFXkEA2mHCGuknziHCD00Y7KGQr9BS0HVJ+0P3gtVB5mnkJFXB4PDkFZKPyCAkmlAdBM"
"KxeJoBWHhYLKECILqfhqAiGpJWigio1khWQgaBMpq/vFz3/e1dXpb2x88sknP3XPpzyl7t/8"
"5sXZ2dlP/emfUhEQQqDel0gkfvCDH6DfMD8/H/d8hRAVFRV333036jiokaEjD0OsTUoHy5oa"
"QhU6HzmBmEnXmWIa2snJyaWlpeLiYgy4wY3gvLy8paUlVveSyeTU1NTe3t709HQikUCzHe8e"
"4ERkfGja4XBwwtq5uTlMCBYIBPBOeiFEKpUKhUK9vb24R/zWW2+hk3FxcbGxsRFvYR4ZGUFQ"
"Xl9fLy0t9fl8zc3NCH9CiPn5+WAw6Ha7McAQ29rc3Ozu7q6rq5uamsI9+rm5uYGBAa/XG4/H"
"8ehLX18fpuDdWF/HBRKVJdsXv/gljftJTePNQsXvCkcYEqSIqt0EIEWI5YTBRkIZqwFqwSW1"
"RfGh9GyTagLams7sbAC3AFJwDGC8Y9e8kSkgZDSSV0zwmPQOADkWGUJZX5EYp6BSjRL0LoJe"
"XNsulWIvCcRLhGFYQUYDGqIuQ4lu3OvVcL9YgSQth1cEORdsoUt4YWhRsmvwMIFdCIaqQtEE"
"GOGkcCqa8fCACS1fVPqcqsJCIvkKrU+gT5hyrFBbBq80+iCVP0ASUnmALIYCIziQ3kxTIudF"
"e0GuCcrBYhmThY+IAuRuRfkSh4fmd77z913d3T//+c9vve3Wy5ff/qu/+qvHHn+8u6tLITuI"
"ysoKANjd3X3mmWfwuDHiTmFhYXFx8e233+5yuVj7i0aj58+f397eRgBC0OQ9aAZEjtHhCwYY"
"N/GX3d3dqamplZWVsrIyTAmBV54uLCzgOTwAKCsri8fjiUTi8PCwoKAgPz8fz8OYlDkCAJxO"
"5/vf//7m5uaZmZlUKoWXyq+trXV0dNjtdkxmg9i3s7OTSqVaWlpKSkoKCgrwnDIAYLg13q+C"
"LkIhxOzsLIZw41YyllxYWDhx4oTb7cagcVQAMT1EIBB44403cA1Aw7mpqQkNZw7M3thYp9UZ"
"BAjbl770ZQQb5iXiDVYHiDt1bYpATvqyFGOSOqbWX4v0aegqgU0olqXaDL0dgSqKQeu4RZSE"
"6qFERIIE4mWUM+J66rpEO8IAfXtYEGhxK0qW2Z4kIQWSSg0mDMZmDSq0quX3mc9UESWoBhdg"
"bFSNM+4aLON6BfoU8GOpMTNK0s4TUV+ujUJHZwYIjUJqvBm9pxkAXpYsY1PLj6GmhpjL0AZi"
"QTaFaVot0iQl5Y6zCmmQZ2E9hkZ9/VdLE/Csa71mJzQRGwj21ZuABKS+a8uOZdDSdQNC9Pf1"
"Rra2/vIv/zKRSPzkJz956aWXHn7o4Ycffpi6i1UDBpokEolz584hGmK+GZfL1dnZiakf0KW4"
"s7Nz7tw59I6h9Yr6Gsdd22y2qqqq2traurq6mpqaqqoqvM05lUrF43F0TYK2/RKNRjHApaqq"
"Cq8S7ejowGtYPB5PYWEhHj4pLi7GyzxRN8TmhBBNTU3vf//73W53UVERbpjE4/F0Oh2JRGw2"
"W11dXWFhodvtxnucASAUCtXV1ZWWltbV1eFl8wCwv7+P5+0wLn1qagoA0un0xsZGb2+v2+3G"
"W/SEELu7uzabrbm52e/3Dw4OouY4Ozvb39/v8XgcDsfY2JgQYm1tDceOmW84MHt9Y1MTPbB9"
"5StflvOgMEAKABsNhDUaY4ECFgVnSmiUDiCdSoIMEk2QDEt7OrsKicVsLINmqVleo3YMjfsJ"
"hEG1z/sP7B6TkCJIM1ECqJMB1HfsMAVDdZjB1CJJCveEIL+TQnQhDF6R5LAV3UCwD56a0rcx"
"NEIIVkK06VTSqNABqAugtryQZKzo6HitqXLATWvLI6lzil940SQkpdWDScG9IiIROBhCsY/Q"
"Sum9Yjy2jFNz4BAtGLlodjnqh1cXoWOgPmiFoDon6DYCf0sRWUJyFzECra1a94HGjMjKqurp"
"M2ce+syDH/7wrX/8x3/8jW984/0feL82UNmdivJyAEgmk6+88orNZsvLyyssLHS5XB6Pp6ur"
"S9AWysHBwQsvvIC7xmhWOxwOtnBdLld/f/8NN9wQDAZramq8Xm9lZSWe/A0EAp2dna2trS6X"
"Kx6PY5ZDIIMXtyyWl5cxuDovLw9zFx4eHmLyxOLiYrvdjrqn3W5H3TAvL+/MmTPBYBCdlUKI"
"ioqKzc3NUCiEXVpcXMQk3j6fLxKJ4EYKAGA6azyqfOHCBQwmRwjDKJnx8fFoNCqE2NzcLCsr"
"83q9fr//6tWr6DdcXFwMBoMYv43qJJ7U7u7urq+vxx1nAJifn0fDeWtry+fz+f1+ANjY3BSk"
"3Bhg2L7y5S/TKkrsrkSPGVBjR52PFGMyb6LgGIrNCT5IAEAXCcYuUKqfUqzwC1p7eRdGAO9+"
"y+0/7rFh7ZJCNEHmjCb7zO8WDY53KlloQJCJLusBJXqkIhrcM+0ltYgIkkmyShV2KZmmBlg4"
"NKIqDYT2hehUnoYYmqYO7BzQCA/a/kQuQOUeIHkNRSRmBd5D179TU6/boqoEYQOvUYJZTo2S"
"FWrSUuk12uXTgFrxgdYDJoL1V2YlNWidxLwEZPaXfuXRgqXL9DvPpbZ4EanAot+qqQAhhMPh"
"qPZWV/uq5aat3mcAIUR5eTlqiK+88kpeXh7mpCkuLq6pqeEsD6ZpXr58eW5uDtHHMAzcFUGt"
"8NixYwMDA2VlZXzHAFj3OrAbeF64ubk5kUjwqRgsGYvFpqamfD5fcXGxEMLv90cikVgsVlRU"
"xGergexxn893ww03YN9MuvUFD1nz6T3TNBcWFnp6ethw5hMsu7u7wWCwsLAQr9DDwni2JC8v"
"j3M3CCHm5uaOHTuWn5/v9Xpxx9k0zdXV1ePHj+PFLHgrQDgcbmhoqKysrK+vv3jxImqdBwcH"
"wWAQoRDTZ2xubJLLBIQQtq989atqFVUTwuCoLErIXFmP/oD1N8WbLGxHFbVAGEsKMNICCKXP"
"4hsGAbxm6Wf2hvekCZsgs2VC7SwrzwJQoCBT4qRVolTPuV5GK01aqQyQgILmIgM5PZndIzBQ"
"PzOIzMKmhkHYo6m48ltp/mkyLTuq7EXGPSBjIYtgSoQzmlCwqAiiY6WmS8mKDJHNXvyHvmJk"
"PKK2j/hIDtKASrE616gtf9qKxq8r1OPnXFb3Jls4SjMftN5I0iNPWySBWIQmv6ysDJHijTfe"
"yMvLQzUN5Rn3UgBgd3f35Zdf5o0Rj8eDOFVYWHjDDTdg4A4Sh72NuO3AqMe+v6Kioubm5oaG"
"hs3NzVgsxjomJl8oLS31eDyGYdTV1WFwOJDbUQhhs9kwlwymJsMWl5eXMT+rzWarqanBbK84"
"or29vUAggHc9Dw4OYvm1tbXa2lr0/U1NTeFNe8lkcn9/v6OjA9VYDCTEg4MdHR0VFRWYCkwI"
"EY1GUUP0+/1vvvkm6pjoXiwtLd3d3cV3Q6EQXt7CgdmRzU1JfgAhhO2rCIgWtlBKgIUzLBMO"
"Vt4kTUlbNRUHWYQ7AzFI5HSxZlbSeIvDTJSqoHG5hR+zpAPIYuL2Ml13sufasHgkZA5ldllr"
"Faxt6jWD9leGfkr/ghA8xEyiKAuPiamciga3odVppbU2paqvqn1COgX3OjQYwsIHVq1WrQrU"
"ku7x0EkB/Eq2eZxJjozvpHJoJS5HHVi0caGgF7i3ygZgdVAnsbVxtQRkTF2mcKheWpYoVQcj"
"tFVohBDiIJ1+6aUXPR5PYVGhRXW0EgZAAuL+/v6bb76J+8uGYaTT6ba2Noazq1ev4oV5drsd"
"c/0LIex2+5kzZ1wul34NKeZ3iMVia2tra2trkUhkY2Nja2trf38ft4OxVUzblZ+fHwqF8Iwz"
"fmZmZsrKykpLSw3DqK6u3t3dxUPQAJBOpwsLC/GiPiEEtnXhwoVXXnllfn6+trbW6XQWFxcf"
"Hh4iJGHumbq6OrzMIBKJrK6uYuuhUAgve2loaHj99dex8ysrK21tbS6Xq6Gh4fLly5jMYm1t"
"DZOPNTQ0XLx4ESF+cXHxxIkTRUVFRUVF6KBMJBIOh6OpqQlREnF8dXV1YGCgqKgI15XIVoRD"
"GwQI21e/8hUpkYptdTyzfHieM0SDXgQlQhqrQgaHaWyiQwkLNxD+gLWpbITW+mURdMtjAtNM"
"/MjkV27aYvKBcnip7yxCSgQTGTVZemsdZhZtIVs4LMPRAAM0NBfal1kVZjQDOYabWTZXJ3Mo"
"o9bSSrs7on7tGyJoZpO5G87iDQ11LMEM1rfYQZvZW3YrGBkd1fV87pBhRTphbTVjiBY7X6se"
"lEAJIUDYbLY777zzvnvvd7vdIvt1jWtKy0oREAcHBwsLCx0Ox87OTkVFRW1tLUcdvvbaa4eH"
"hw6HA7MQItx0dHSUl5ezimea5vLyMt7ticodpkoUQmDaRCEEAorNZsO3qqqqGhoalpaW8EwI"
"qp/T09OVlZWoJ5aXl6+vr2MwUCQSiUQidrsdzxqapvlv//Zv09PTuLc7Pz/f1dVlt9vr6uow"
"2gZ7FQqF8OR1fX395cuXsQOJRMIwDL/f73Q6E4kEbhkDwPLy8qlTp+x2u9Pp5K2YlZWVgYEB"
"XCcmJiaEEHjEu7293efzjY2NbW9vCyGwoZKSkuLi4qGhISHE9vZ2eXl5c3MzAuLW1haxpQAh"
"bF/72tcyJtcigvp6p/NWltwa+pprrSiD6/XKmH013tfOn+hMz2adXkWm1IHysUP2U2t3MkVJ"
"K0NGUg6FJucLxNGgupDVqC7goL8lW1TSYEGCHAhPLwBkwa4m9xmdU5SxFJbVGJr9mNFty8hz"
"klRJfTbZsxZWQgbGDcguxRBiZPTBYvtmLEJEDKupToS1zqQ+KG0uMndQVEH2beRAfq17Gd+z"
"6wN03fiFF17o6+/DtP65KwEhDFFaWooW6+joKO5azM7OdnR04O3yQojNzc3JyUmENq/Xi1E4"
"xcXFra2tbCCnUqmpqalUKuV0OhFxAACPG+fl5eHpEY/Hg3coI4LgVgnegrK4uMgQdnh4ODc3"
"5/f7i4qKMNfD6upqOBzGlA0rKyt1dXVFRUVCiFQqNT09jUlxUJFsamoSQlRVVaHhLITAnRmM"
"8daPrywvL2MuHFT98O7TWCxWVlaGmRCnp6exn7jljVsuV69eRR/l6upqR0eH0+n0er0Yl5NO"
"p2OxWHd3d3V1NeZVFEIsLS198IMfxLgiTNNN8wy2r37taxpk5eR3xTwi+x8r23AV7ygU6nsj"
"87FCyCPhzPKrVV4yOT4LjFXv9a8z8do4kg65OwQc6au6DlZhy+Egy9GrrN9ztmgZFeSsI0v2"
"VeieHlCU1Zujpjir42Bp1ziy4BH9oj4YkPXtUe8djcfv8JbePdBNl4ynADme4XNtDjUhgYw+"
"86OsaixENAQIn6/6r/7q/6qurnY6nZivMJ6IJxKJoqJCvbOeUg8i2vDwsM1mW1tb29zcxDNt"
"QkgDE08E86m4dDrt9XoR+wDAMIzJycmDgwPMy4C5HjAUxmazud3uysrK8vJyu90ej8f39vYS"
"icT+/j5G7QAABlHPzc1hkjEhRDqdnpubQyem3W5Pp9Oomgkh0BBua2vDEJ+ZmZlIJIJGdygU"
"wrywbrd7c3MTDWTDMJaXl3t6egoKCmpqakZGRjAPGB6dxiYKCgrwThUc7OnTpx0OR1VVFV4X"
"JYRYWVk5efKkw+Fwu91XrlzB16PRaG9vb2lp6erqKp7mXl9fDwQCHo+nsrIST6rs7+83NTVh"
"xmyEV/5QKABq6m/LAAAgAElEQVRyJu9Z0kzSf7zzCpZ/9Ok28Cu5WaMbgIbF2aeQS+XTVDWR"
"49DIoWcJy3cG/VQvqsMtqpbMagx93GBo3xmqgJGr7awPjRPkoWAipRCG/EkoRNVlVwvqARxZ"
"SNZCjw2946qrulNPtxuF6okwqJu5nHkGHRDRSSGsiAdcj6H3nT9GRqWQ8RyI7hI7Ml/OXVXG"
"wyNgEbLr0L+QFGHfkP7UMKxlM6oD7LKhyvJ0q0kXgkwbI7sG/s0QDz308Ojo6KOPPnrmzJmB"
"gYGBgYETAycGBgYyuJsdhUKIg4ODxcVFm83GZz8w1yEmH3S5XBjmsr6+jsYyfjADGF6/h1GK"
"BQUFiKdFRUVlZWUVFRV5eXm4y4HmakVFhWmai4uLeLlzQUHBXXfdVVxczIYzRoDjVnUgEMA7"
"Trm5a9euYYc/9KEPcQaKg4ODc+fO4es333wzRgXhKeOXX34Zy99666180HBkZATvSj558uTt"
"t99+6623fvjDHz5x4gQeza6tre3o6MCS0Wj0tddeM02zs7OzsbERvxwbG5uenjZN8yMf+Qgn"
"ofjVr34FAC0tLT09PXxyUdFX4ym7EEKYgDsi8rkJ6nfg3+U3ln8yHmrfmQDCpD+MzKfCUpXI"
"rkn+YeZuAMCUpUxt185UDdHOo6XCHJUJ1cqRTeXqB/9N3+otmRoxs/phqcwUOmlyt5BZCz4Q"
"GSXNLHLmalnRLaNJ9W/WhGf8LbAW6cPK2WFLpbnrymzFFLknSGT8auboYHbDRJEczSuKZD3R"
"W9enMBdFzIzGdG5QZWVvM2v+53/+55w1CSFYdDLaC4VCGHGN9+Shs29/f99utxcVFeHucywW"
"i8fjvM8LAOFwGM8X42G+goICvBsAb2fGG+y2trZ2dnbQlC4tLcUrRsPh8ObmZldXl81mKy4u"
"Pnv27L/8y7/gKT0hxOTk5NjYWDAYFEJ84AMfePrpp/FIjGEYly5dCgQCTqfT5/P19PSgKofG"
"/uTkZEtLi9PpPHny5Kuvvmqaps1mu3r16okTJ7xeb1tbW0NDA17Id3h4+OKLL953332GYXzk"
"Ix9BTqN1yzg8PLztttvwQItpmq+//vrJkyeLi4s/+tGP/v3f/z1i4vPPP//YY49VVFScOXMG"
"EXNubm54eLinp+fs2bOYcYd9rIagaTPBFHTO2LI2ZukH7+FjWUWPeP67a36n13WlVSv8+3YX"
"MqLL31U/rPqErlaz8nb0x8jxm3rbqinTl9rbmV3UFL4jPtob7zQluVrL/cnhL/5d1ULGv7le"
"/t1qeS4t8YiuGDlI9U5KuCWC0Po8uxoyaY5yhchWcqjQhhDv0z83ql8zOicB0zT39vbW19dN"
"ymQDWrxLfn5+QUGBECKdTk9NTRUXFwMdOOEIaswY5nA4UDfE/WjMUbi/v49xhel02u12l5WV"
"bW5u4vm/nZ2d0dFRBA6v13vjjTdizWgFv/rqq3h0r7i4uLe3F/uTTqeTySQmZxVC3HTTTeiO"
"xP688sor+MuZM2cwbyN+Xn75ZSz/4Q9/2KQj2LOzs3zhPdPBpIM3lZWV/f39SKJkMvnaa68d"
"HBw0NDS0t7djsYWFBcwndsstt3Bb586dS6fTFRUVJ0+ezAjG1AlvCCFsOafTUuw9fI42E3/P"
"mt89wLyX5sgA+r26lLsWquKISt5V3TnftkzQuzPnf++G/wP1/z59+I9Unm1ev+O8GdmM/Q6t"
"G0eXMjL/JPw2fseofk/CW//8/9v77jgrqzP/5znTOwPTkKEMMjQBjUNXFAsawBgxiSaoGFfZ"
"xMRETVwTI64lKsSfZTWaxFUJq4vuRoMllmBHVwVsCCgDDEyhDFNgYHq95/fH9zzPe+6dAYYS"
"s2bv48fh3vee9/TzPU87z9EVi9DTJC6ELEeSIf/CYwa3JKv3jLUW5ggIhklJSTgpHBcXl5aW"
"hkgwCLIN9jAhISEnJwdOObAd4+qCHTt2oNwTTjghNzdXUaypqUmPEo8dOzYhIUHPRxcXF+/Z"
"s8dam5qaeuKJJ6pAXV1dDYVjQkLCxIkT9fmmTZuqq6uttUOGDIHYi/q/++675eXl27dvx4EZ"
"GHBqa2v37dvX0NAwfvx47Z/Vq1c3NzcD/hQ0IaSnpaVNnToVBVVVVa1Zs8Zae9ZZZ6kjZxgg"
"MhuIzNSDANFLYiJLzORd4hX+q5fsoPloDbtlFylO2/18ZSJ70LIOXptDerHbQya23HMBZn8l"
"91wlFSOtE8tUCdGbtw+fejMbeirz0Lte1QC9rBh3S3sQ5c3+mxI5b/aXxBsEfegp+A6p8yPz"
"Oki1SI6LaPyFtrY23KFMRMnJyS0tLczc1NRUVlaWkJAAxR8RMTNkUoQLw0V68L7OysrKyMgg"
"on379u3evbulpSUuLi47O7utrQ1+Np2dnYBR2D1wrYq1FtKxAtnatWvHjRuXmpoaExMzbty4"
"Dz74AK9Ya9esWTN9+nQimjJlChhGIrLWfvjhh4jdP378+A8++ADgbq1duXLl7NmzY2Jipk6d"
"imBf8fHxHR0d7777rh6RBuAyu2CLuCMFv3Z0dKxcufLMM88cNGjQsGHDSkpKEOh7w4YNo0eP"
"njZt2nvvvdfR0RETE/PWW2+NHTs2IyNj8uTJKjL7ew/5J99ku9NdTx73uM/58jQdVMjaj3Jc"
"v0e67PdYnun597BXTc9xaHp4Yz/84v5/claEbt1BkW8FhskIVX0398Gwinevh9oxTcS/HCH7"
"9iQJho1mwNQcrHMO0AEHZ68ixY/uySL3Y2Jf9j3gVGNmQ15f9pjgoFX0Os0bnG5qCiO1843J"
"xksbUGTnByJHN0WAuDb10Ahv9sgHIuAIhFN81ng21lp11QYEEBEufgJgIeACJOWUlBRjTHx8"
"fHp6ekZGBhwS6+vrYT7OycmJj48vLy/fu3cv7mlBbrjLFOweM/fv3x9mWTCJnZ2da9euheQ+"
"evRoGJ1RNz2ohzgUyrVVVlbiOubk5OQxY8YgH2vthg0bILYPGzYsOzsbEbaJCBEbNZkK1DU1"
"NY2NjRpZh5k//fRTxJtQuZuIVqxYQURJSUnKJNbW1uJgzPTp05VJ7GGeeN4E3nARjKj+K2Eq"
"HONZVntKsb+54Wcnip5wrU14HRUFPKQzYbl7gpPzSYlc1v5H40324P3gYq0elgcaSlJEt1zd"
"W93hhry/3ZZSWAERbaYw43FYBkYCVfVkQw96O8x4JhgdeVzMf9FDXeoROv1BEjM6h3VmWGK/"
"Mt1LkyzCcqSg573/A8T0jMM976CRFe9eNgVPSUc1HM9NWEleLkTcXaMR2eFMOqE9KGXWkHlG"
"H7U0NVt35jdy/NkDRNzBBP0dIsHgOe4MgNaPmUOhUF1dHX7Vm+NhmAYyQksYExMDYRlRYXJz"
"c9PS0srLy6urq5ubm9vb20MSlBsHYPTSAmYeO3asHosGkKFi8fHxw4YN03hi8J1EsvHjx1uP"
"Pv30UwCKyrzA1jVr1iDnqVOnZmZmQh2Jc3vt7e2wVsPTGwoEtR0BFjs6OuDhOHjwYMSwsdbu"
"2LGjpKSEmU866SQYo6y1b7/9digUSktLGzVqlABi2JBGbsomWPFGBrZH0DPkb9hCkt4Ec4MD"
"FCHXzyyzTudd2IToVif9nwAHEtnBZan4p+tpP7gW9laQElyzQ3hhp0wYH2nY4890gbgUxuNu"
"yGu+lzYcXzxc18Vp/I2DWIIEGcE97yijv6sFg2WCZewzXeFF6v0w4b+6sBMs7TZev3r19qKP"
"yd5GQc9IH3iuPT5QaHQOeTPIT/ZFr5uoh56LaHb3BFJc+DxATbw+6YaBFDbndHJoMFgtwog3"
"lQnPQYBYZnbYhJZZJrcdhkKhRYsWjRo9elhhYUVFBTPfd+89Tz75ZNA6JuYAd1TkDIVCcKxj"
"ZvBZbW1tuAQZyNLa2oobSiEvwwEb+sT4+Pi+ffsCQ2tra2tqapqamlJSUgoKCmpra3GrMtAH"
"cjcLIHZ1dVVWVsL7uqCgQON1E1FraysiV1trYdBQTNy0aROq0b9/f0SpwFeEAguFQtnZ2QMG"
"DFDW77PPPsOLwClUA4EU4R0JXAYbCDd15dQA95999hlqfvLJJ1sJfPvOO+8A/qDNBHe5fv36"
"UCgEg0/A7snU9XdjhUB/ZyefNTABNGBdkjAt3mwzxL7h2tvW3QPSLVjBMZij3hcBQbflgpc0"
"UiFPxHf7sQSv784X+CsmDOS9JR9E3g5izxoN3McchN0hv8PINZm068h39pNDiIF7pq5Tbz9g"
"KVBKlaF23cnkusFrWiS+UjifLYjl567/BqF7gi70T1KH7VLhUc3cyiePYTZee6SzMEcCRtcw"
"UzApTFA/16lkAi9IB8lG2Sxv0ir2skCTdw4vbLcP32P9njBhvWO8x347McElRiMq5YZMJqEx"
"Afy7k9+6UTDLFBW+UAq89557nn322YULF8JDkJmPHTZs6dKlwe6E3hLyOazq6mrYeZW9amho"
"UP5RzwvjLZwOxNk++GMTUUNDA9DQGHPsscfu3r17y5YtLS0t8M3GPVaqiwyFQvv27du8efP7"
"77+/evXq8vLy7OxsvVTPGFNeXg6gycnJSU5OVvF29+7dgGZmHj16tLYCoSIAQ8cff7ymb2ho"
"2Lp1KzPjJlL1tYQYTkQ4WqO3FGgd9BhiY2NjSUmJMQaxG0huYkHksZNPPpnkogW1dwsgBiuP"
"AvTy1gEZdzWIsCYyWyTeNVIAlBxn4u+HxMaEb5Caj84PN1/IQwFZB8GsCNvsnQgfdsCUZUF4"
"UWXdCxGIGbY4wvgWWdTS9KC1RuJB+FmrW7Rm4BDBW8zkMvTCl0kNwjDB7x9lQxREya3IboFv"
"9KMPcuR6TAEoiOsg+eo2Z/SgJTI3wFLFMe3x4IN0pTES/tZrQdiLsgFQ+M4k/l2ex4oxwuzL"
"8Oo4OL9vhd1gq/OueGDZIb0ZZjy48/o5XO1DEkTNhE078jk8aS+RMr4+9rpciQ2Rdie6SuSV"
"8J50O6phNk8+9dQ9995z/vnn4xwxsznhhBM2btxIQf2IPUQMSTAbSKMI6A97AiI4MLMiC3yS"
"8SQ5ORkrPzExMSkpiZmbm5thSLHWjho1ipnXrl2L2DbIHGgIdsxa29LSgrtZgIzr16+vqakB"
"gwZSjtUYM2jQIGZWaN66dSvqr1FwUMONGzcCm0aMGKE2E2stDqWEQiFEr0DNYRdCsB8YiFAu"
"wuVqeFp0F4zIxphJkyYpoK9atYqIsrOzR44cibK2bdtWVlam3WsC/sUwm1gMl6AlbKQYEUtk"
"mKx1rIxn4WQOTG5MBmY3MTjji2NqkM4QW01H8FiFNdgQk4FI4Dm4qn2P5aNXsqZga4gsG3YZ"
"Ov6X2JJ4mhsWu5511kH8rDuwNcSW2LC1VqehVM/xZUTWMpNl5GKJGC5hqI7Viknj3G/SRpZ0"
"1msdE5OxcCy30reGyVrXu+rky2yZjCXyzY/SXstSoHWjZLXTGGKX61xiZssYTxLbmiYmr6uZ"
"dSyZyFoMnDQKUGLdq0RkEP1OBlcbxsyW4cLt/OekHA7cCNzckq5hJms1OqYkICvus54fPssE"
"sEzGyuirFZc9a7DY9lkbzFJ91smB0wmYTXqqQOedYbYyh7VaYeRmD0uW7gOzzE1pva2trR0+"
"fLjcksGGKS4urqOjgw2WFRsZRTdHxUoAJmvz5s35+fkArAEDBsCuwtLLe/fu3bFjB54TUVJS"
"ktpAoG2sq6vr6OgYMmRIenr6qlWrcJu7agnxGVnhWKFKtR0dHTgpSESA8piYmNbW1ubmZng4"
"DhgwoLi4WKuKaKzMnJWVlZqaipv5iGj79u1w9ElOTh44cCCiXgPKW1tb4+Pj+/fvn5mZqSf5"
"srOzzzzzTNUbRFx70N7e/pe//AVcZGVlZU1NTVZWVlFR0WuvvYYWrVu37utf/3pKSspJJ52k"
"Ss/3339/woQJwdixDqBVgc5jg6DfVjaOhSXw9j1PK0gqKbIIDZ50i83eAZDumZ4EKSKMCWqi"
"27Zh47T3sgGDSQlYKdeUINagu53TeOFRHRei2zv+JRNwoBSoVl0yMsrOBEpHEp46ULQGnEeg"
"TCO54oUDPlG+Bmo+73oZ7VSSAIsqibk7jYXf8gxZAWev3KfYwSQPoI+8RsLby/uuHsaLlI60"
"ZAhvOMVeGFvq/jiZiV2wQY8/lFsgSPOB6KzuCkb6Qh8GvBxpN+uwS/c69wEVyFWLEExTyUT5"
"fCP1N0ZsGjowTqXnHe129oJgTioZ1rAVmH/ug5vcGCwnJ8jU1UH0g1sSsxk8eHDxF8UkIXGJ"
"eeXKlYWFhZ6Qoxt24AutTGJFRQVUacYYXACPOadi9Zo1axQyEL+gpaWlqqqqoqKiqqqqqamp"
"b9++gwYNWr9+fXl5eUtLC844+9cAAGt8CbqjowMBCsGdIb5ObGxsbGwsAllDLejqTy5iq9Z8"
"0KBBygl2dnaqXF9YWKg1b2trAzgaY2AY6erqqq+vr6ioQJyb1NTU9PT0zMzMrKys7OxsXAYw"
"ePBg9AAIp1CSk5OPO+445anh+jNs2DANELlp0yY1QBl2gi7sCPoxmJeyiJ0A4kQAmQdusMKm"
"oC8ys8wbVh2QK1altAAGvBD25KwaWKEG0MbyooAStmZj1LKgshRmOCZlACrkJB0Rtb0X/RPG"
"SMAOfXQdyUXRmPbCXBsD4Day6ESICzKT2KueJUbAWxY/GSNBGvUItnaahtZ27Rd1IwkQYkGS"
"QxfjnaaW++SMqlRJBTm5C84YaUCwgqUGTo0ioyeNDEbQSHtk1EXe9HKRLI27CFZ7QGzJstMQ"
"BfYcD0lY510wrbzv0kJRRcueGuyj7M1W1wukZZDjxlk6Q3YaV0M0PMzHx1sfOu2Y/PnrewaQ"
"zDIH7LL1QOVw+eWXL7hpwUcffkRENdW1y/785xtvvPHyyy8XDYmTwd1y9YRKUGdnp5osmPn4"
"44/ncEDcs2fPli1b8JYxJiEhobGxcc+ePXV1dU1NTR0dHVlZWVu3bt24cSO8DuHaAicbNSJL"
"Qwg8IxwekQZqO8VEGEmstX369PFGi2ABR2648FMriRDf1lpgGR4SEeR9Iho8eDCuSIVlGUCp"
"zVQdAkqBHQbVQzALFis2cv7444+RGKI0WgSHR8KSJFECEsVCnmUnThgIDsL5W7JuZKyIjyyS"
"lwg7eETk5CoVRkSQcP1r5XApi8AqogXKtIaILFsRi9jJnGFiJhKqbGbZlUqWnFTpnluH2pDz"
"rGGybKRJTrDxxDRAD+RE9oVHdo1i6BE81QBL1ZAdqgtxypATFiErGTJO6nbJjYi6okRAZpCT"
"pL8ZnSlyqVE9sJHusGTYaH+50QlGii1DISAthBgNgZDR6WREzSED5jYqiPJwMGcmsoaJrLEQ"
"HIMyoKcmNyHYyZuuQtJl6A8ReckJqew6GTPSKSR8adl1A1GgStCfVEfiCpVRMETWqNhPOiiu"
"N9G0QHpn6Wr8Y9wycIngc4Y5iRnkFB/6NuNYt0we1xRyIfTRUqc8kIyYf/CDH+7du3fOnPNa"
"W1vPOWd2QkLCNVdf8/3vX0ZkjWSs2pWkpKS8vDwc8tUV/vnnn48ePRrglZGRUVhYiDuhSJwE"
"16xZk5eXh7j/cXFxCDGNBMaYL774Yu/evXopVUdHB1AJ6IkYEDDCwtkFEJOQkIADM8BN/Qs2"
"EPIp7p+ycv1eXV0dPArBPLohtXbnzp34nJWVlZaWBiWmtRbAR0SIQdvV1QXl45YtW0aMGEFE"
"1dXVK1as0JtUzz///D59+gwaNCgxMREnc3Bj/bBhwwoKChB9loh2795dVlZWUFBwwgknvPLK"
"K75VioiMYRuowIyBWKPiLLPID+yLKIGgI6yNbM0qYTJzwFt4unO5IFPsgk6LJJsmGZIbTw2x"
"ylZGRD8iBm8GLTuEF8c3OU5BIFzzZxGGvIiecmeHq6YIRpBoSLhcJ+mxcrDOkuTa46QsFobP"
"aL9It4ErEmZIeDD9yXtB5Dqjb7hN0AR8hRGWz9VbjF2aDbHIghBAfQ9M6VXsf050h1TgeGbX"
"i+SEV+Wm0Fb3TcVOraQw3CKqG3aqaYdu7FyyMLfkNTU5G6+jlCGUmROULy1m1xNGaqXjrR2i"
"/j5E2jH+hReqn3EDG7CNQXlGy0SvGZV8SAtnln5kJ6RILzm5IphkTqhxvUCB+wEbZr7hhhu2"
"bNnyxuuvv/rqa5s3b/7FDb9wVkCZ5MohEpFKfyo1NzU1rVu3TlnC8ePH40SzImZbW9uqVasU"
"Q3HID1wSOK/29nYgHTSGSAb/lT59+uACKcVEOEv7YrLyieoBA+SC6UOVI7iUCsyjmj7Aw6IC"
"RJSfn69A2dDQsHv3blRb4+gQUXl5ObA4IyNj586d5eXl27Ztg30ck6CgoAC6BURLQ0vHjRun"
"OcP/MSUlRSPlaPcydj6ZaoYMxEQ31wQUBEs8NY4Kr06OEqHRFywUG0RFJfo2cujhvLFk/Ttx"
"TARAdldDKmCiUHFSM2roViHJzUcpn5z4K+BAvqBjZBGp9EUswrQggixgAUDjXB8FQBT/gtpC"
"VjKeQ1CwblyvGYeTirw+oDh0UHlZ6gaRGYgYuKBAGY+SArWtESmQdfdxY6y6U/cVA0jayeRG"
"RZACwOCcD4zuE4YMZDk3rwUm1QDsOdUE44fdRhBb9h4U5KDDKA642ciCDZ4+1esr3Vzc+vL2"
"W3L7oXSj+yGYoEbAkkl1n2pGNqKjkV/VMi4T2fhTj3Tnlpqrupy1C2VOsmz4RnKj5OSUovHj"
"J0wcn5aWpr5bqs0njwYNGgQ+SzWJ1lrc1A6pOSkpafLkydJABzo1NTWffPKJwihC3ajoTdKN"
"+KyxsBAIJyUlJSUlJTEx0Vqbk5MzadKkGTNmTJky5dhjj0W0REBhXFwckJE8ozCLT4wxBtI0"
"gBLaTCsWcMTyYub+/ftrna21ygvjFnlUvrW1taamxlobFxfXr18/xOYhIhzRgejd1dUFP8qS"
"khJwi4j0hdw2bNgAVak6igf965CCiGQ+Q+ssZgGBA3fdMrnFqkAks8itVId5LoUHLQIFqhTU"
"dcJqhCHZRCFeeHwCy0ySnZdF5eSeuTVijCxmlpnEgQwrqYUHMIbcVDfB4pcZzYIuomQ1bm2A"
"f0avyVJzM9gpGiUfFOXwSnR8we4RaO7c7DXGYapDQKdFFXxkb0kZ2Y3cxiB7gcNw4xSuwtmS"
"KOdkoQJJWcDbAxzpPXbMnlQHQ8PCmrleVR7bzRhSBCLZHo0gsnJiLLuOVi9AfkMUdCuTKD0F"
"mAO2CluEkdkKRJLtjUjnFqaLYRbEI7fXKwPodnLZmzAl2A2y6KOFpSN0nEEtg0mnwKdzQ/hP"
"t1m4muoaYq8/ZE/0DT5ui3VzI8wxGwFafGpra8NNcgC1goKCwYMH+5jIzBUVFSUlJUSE3OLi"
"4tLT0+G1p5cpw3MbH5KTk5EAnCBkZ8BKTExMv379xowZo5AHhhFZhbyjdaiDhm4kuecPxwcV"
"haFetNZmZ2f7OI6zfeAQ2dsYKioqkE9BQQGeh0IhhLSw1ubn58N+gr/QRebm5ubm5lq52aq4"
"uNhai5tYFMGJwBDqRCcxYMhsYVFUu6HCunb8mlsoAlPsmBIBHuUVWHHLaVrcZDAOHOWqMqNz"
"wbBI4UYkBrcNB/ysESyUqSp4yspyGGf6E+FRle4sLImwE7J2lZdlKUcwwmPgBM0Cy4Ezm2Ca"
"i2Fb94Ng3Rl/zWtvObZG5FvHXylH5PYE4gATnRyto+UQUfgOhRDjekjWM6lq0emPBGcwxIpu"
"yqG5Ze74YsEOt2sAfzAqRnBBhlQH3ARsFOtsUCGEZSqw2DGMsFi6VQS7nUB1wHwLAhmxNMmJ"
"PyNspoyasMbGwzknDYMddxydbsAyWEaF7zAe3rCILA6p3XuusWBRwdY7acjo1Ao2XzeuKomA"
"GZAdW8BdoQBlDxw4cODAgb6vMo4wl5aWWgkLNmnSJESKVYqPj4fHNaCEmXFtACTi+Ph4sHgo"
"Ij4+PjU1NT4+Hk9wNOVrX/uahqRl5h07dnR1dYE9hI90Z2dnWlqaRl6AOpLlthYfncEhApqN"
"MfX19YBRXFylvYzINwBKOK6jHzTuTkFBgXKOHR0dsM/gYj895a0XEowePVq3kHXr1qEmgHiF"
"WoeFws7JmMGuJSIWJoBuYaL8ooBJoWAGuRVvBM1YeEgPOt1O7CReJzu5wVf3bFkMrkC3ZElr"
"ohKXW72Aa5eARZ52kgpUb0YWL5CKhJcNGo2GGqe/FKgRrPBrJgvYvan8p+ChUxTJGhesDaQg"
"oyvcgT+5jUczDi61FE494Njld6kSOUx3XYk9R2+k0zxE8YG56DIRdsjNBSCEq7qvrXPwKdsN"
"+/pZ2RqxfBXmAslZdzzJjoUJk51V7bIOZYMZZYygl0NUB2cKUqips/IaYYdlsLxsSTpCeDDt"
"+WBjMKJMUBZfd3Tj9IJGGFR52fWDzgUH4ILNOpM0Uzc5iM899xv/7667Vq78oKOjPWDag83T"
"japyiECQadOmgZsDFsA6DKFYxdJTTz0Vx/WgDcQNAXl5eXCZZmakTExM7Nu3b79+/dLS0sDo"
"xcfHp6WlAYCAsH379sXdJoCkrq4uxNQBGkJX2NnZiUD/eKu1tRWAqN7UeoI4FAqBnVTCBfPW"
"WlxvoPCkFw/ExMRkZWWxcI56QjErKyspKQkaw87Ozq1bt2KKDR06VBnVrVu34gC4HljGQ+wN"
"48aN86Vm2bTdPIl1hwKthT2ZxbiKUbcwIpIhMS06S5uBLzBzECE6iKLMzgU6iDzMlnx/aNSC"
"rLXsPO3EVivWOfyRLPEPi78qM1kybuhciRp+2lgm+B8bNaKSFcOo1MPCgu1siczWWZJZnsOu"
"KoW5KkmbnG2cxYVarO7k2BLf4M2uN5xR3CVjOL5bIsPWkkFWanY21kIeJRK/ZLRV/IMt7Jau"
"9QbGTiZ2NlJUwrrlTM693ojtnBldYmVHglXVOBd0YqM9YmF8lwfsvlmL9yyzsWQNCiGj3vfi"
"Kw5rONqiRIsAACAASURBVH5lw6RWWmJxWDbuCABpJ5Bllz9bAx8GK3OC3bigxvIqBX0La6Ez"
"aaNnDGn5rgDxP0BVnHO3m8T6Aozy2A2s6wTSIwDS1Vo1cbhwvgjkRoQtW2PJOnsmTZw48fXX"
"X//Nb34TGxs7cdKkadOmnXLytKIJRQlxCbDMB9Z2IWttenp6UVHRypUrFSOAgCx6QCICtIFj"
"yszMxB3H2dnZjY2N27ZtS0lJgWcMEuP4Sk5OTktLS3t7u/J0kJetnPRgCb/4xRdfINAsqLOz"
"s62tTU0fzNzQ0ABsgrwcHx+vekMVpUl4XtwmCuCDaRu1am9vb2pqSktLI6K+ffvu2rULydra"
"2urq6jIzM5k5Ly9vy5YtyKqsrAw+QwUFBVaopaWlsrIyPz8/Ly8PV9cTUWdn5+bNm8eOHTtg"
"wABc1CVQxK5LidkS4iGyN5IOMAWikJTkngG3OBTT3L/AH4cLwCI38wI/DJ2kRIIxxh1hQB0w"
"dcXBwrh0cpiD3a/uRAHifTu9lvUA2UEUKxQ6oHMKXYUirEt41ZAsUgfkbuFp0ZjZ8KdgYuts"
"Igq3loyRMybiBiPuGBbuKq6RPupplxgrC8f1hOxO5E56EElnum7nAN6FY3Idxw7LiYMdCquY"
"3fblzo/4WOIgzW2BbiswRNawsQ4SSBxKRAXN7CBO3IOMAwKyOqLa+XKmJgAOthaFikeSw84A"
"sRykuFlmrTWyR7htybI/p4Jt3Jvs8D2SHNw8ICN+PiaY33psx6Vx/Kzb8B2Yk7iCBd1AcueG"
"3m/AQRWxY5DHHli65ZabyVJzS/PKlSvfeffd11977c477khISKitrdUJQp7Sn4RPPPHEE0tL"
"S3ft2sXMCQkJs2bNyszMDElYwLa2tvfee6+xsRHawLS0tOzs7Ly8vNbW1nXr1pWWlra0tOTl"
"5RUWFionCO2hGltIRFoSFgTU1dVVXFzc2NiIkFm4FAWhaAYOHGhdwB7W26PUSzExMVGt2CEJ"
"ooM0eqCFiFJSUuCsg+IaGhpwTxbgTxtYVVWFmwjz8vIQENsYs2fPnsbGxtTU1MzMzNTU1MbG"
"RhRRVlYGb/Bhw4Yhlq21dtOmTWPGjGFm5RwdllgZdrWhOVkI1Q37J5A1nOVRn+hxFoLQ5YSG"
"QLYkJz46vSBB0Uyiozaub1SDEKgLnaaG8Ao7eZsZ1hCRkJ2gKHp347dD7UAqQJJR/YATdalb"
"K9U07b3p5CkR37RrdG9xoqWo3QNztDRKtFKulqJy0t+9/ibRaLFDBWcPwG8oUlI4XaHKoi5L"
"I/op0SB635hViiN5S94NOscpuURrEswAl710siqVjfoBSI1lKoQ91a4X3WjQ+Sp1yrRg9Xkm"
"pzkR/WdYPsHrrvo6oR0yigOYbhvoCSLRRmrXG/3PdYtRoVigUTpfZ45oLnSyqzwD1kLrKNWW"
"lhlTX9+we/ee2pra6urquLi4oqIit6hEgsBy7ezsVKMqM8+YMQNeLzNnzsSFUNAqtra2rl69"
"evfu3bGxsbh2CuwhEW3atGnr1q3Nzc24qerdd98FugEQVf2nyKuEh7W1tZ9++um+ffuU7zPG"
"tLe3t7S05OTkpKWlqRS/Y8cOHw0BiIp6sPxiAvlHm4koPj6ePaqvr8ev6enpfq12796NBLiZ"
"Gj+FQiF4NcK0ounLysoQouLYY4/1RWl4oY8cOTLYbfyJyByrvc/iomv1+KUwTCo8+lwR6eZt"
"AxZOhCHhjyBZ4a8nQRMe69aMktkyOe9pS8TW+UGgIm6jtyS8h1U+ySUQhoWEw5WU6gbsGA3m"
"QNQVF1+jygLhHEWGdLlZgqOGuA44Z3ZwG86/3DlCG/ZuYfJ4IlQFMnLAxGo7AmZGBC4nRQpn"
"65rlmDk9HWxc610ebvG7OimwWidYO6Bw/tLk5G+ppixpl0BZafCV/ouO12Qr6xcgIGoBbYZ/"
"05L7DrZQFCjaBao50L4X5ouZ5YCx9oyoFEQ2cX3FFNmQIItg4iJDVglY/M/1zbAj4a5bReFg"
"oa4hwqPA/Vx0G4TZLnKLZddGJzD95Kc/eeeddyoqKiZOnHjKKaf87ve/nzRxYmJCglRDK09E"
"hEh/EydOxNeMjIwzzzwzNjYW19KD7Wpvb1+/fj04uJiYmLS0tPT09Pz8/JSUlLKysrVr16pz"
"HxG1t7eXlpaWl5dnZmbm5ORkZWUlJycrQqkZBCLqzp07cbcfMyckJOA2q46ODkSW9cPYNDc3"
"19XVqa8i/qampiofilv92HPKISK4XsPh0aGLta2trXgLV8RoERo+B/HEtEU7duwYNmyYMSY/"
"P/+LL74A5lZWVra2tuJoo+pem5qatm3bNnjw4IEDB0KJSRgxFbzYxmLDZIEdt49CwnGioeiL"
"SJaxqKqcHADdI8sZigAqVQSGmVWkVLekRbNGHIiGvobHiBRvrTvtASWQWyeGRMgRvHWClC5E"
"h4wOLvQBi+zHcgaFyLLo/jQTKNRY4MkEMpEU79CUrRE1WABjbMN7gtyaNg4DOBDmBaaNi6Rh"
"icQH0S1RB6HWErMRDaWqGEkQFX3soFF5FXIlolgHNz4aErOMhYjKQUgPUqxxIy+zRzJRwdG1"
"U049CWDqzuQwxgTHecjl6TbBAMOkKBKtnBsA0iZrKYr9rEpKycPreXI7nBtwJw9bIlEIsAip"
"TjHhthzLTlK2wY7qJqduI0Y2NtnPAoEb04ndZMfwsHnsscf69ev3L/9y3Vlnn130tRNjYmP1"
"+IvAeRhVVFTk5eUhWjURHXvssVY8rpm5s7Pziy++qK+vj4mJSU1NBQzl5+f369evtrZ2w4YN"
"tbW17e3tVmJoQ1wNhULV1dU1NTXMHBsbC69DABPwDpwUIm4BDeGpg0Mvzc3N48aN0+tJmXnb"
"tm3qnwhTtTEGQR+YuaOjAxyi+vrgWgISPhG1gkqxra0NLYXxRHnV+vp6fM3IyIBXEJJVV1cj"
"HzjZADQ7Ozt37do1ZMiQxMTE3Nxc6CKJCKJ0TEwMjkvLYgwYilgHQ9CnWN3TWfdg1pmJCQb1"
"vM58N4aqwHOnYESNJbpAEo2RMInCpznmiALkhJbQsACS5Eyi3tC1z8EKDpgG5U4UkgKEkJ9Z"
"WumYQ5I+oACWWTk3p/BkA+ZVlfPCoCo+irpJ2VcrJbHsJbIpOM5ZAMuwLGhhzgNElDNnFI5M"
"pFsDBxon5XApGEh8Mq5uunFRsAEo7JNyc8qb+xMCzJLwMHrYUDR6ROwQKYzZVgB3O6mRHdVq"
"M2U8XMVESnG8qPHaF7D27l1lYtmFDWL20kXsSE7nGYwTaXe78bYBC+5vJyK3OHx3XJ+8DgOb"
"FVwVvjKY/yoBMxF9/vnn76x4Z8U7K/74x+/V19efdNLUU0+dfuoppxSNH4+5Fgb9RKFQ6JNP"
"PtHo/3qGF/BUXFzc0NAAORp2jP79+/fv37+5uXnDhg2VlZWw/MIwrfo+NErRSqNFoESwVCkp"
"KbiyDsIyzMrNzc2NjY1Dhw4dPHgwoIeIcB8LpGkNDubDZUNDA9CQRcmYmJiIJpA4KpL4dcOd"
"kIgUNPEXYj5SZmZmAs2ZGYDIzDk5OUaOfhtjduzYMWTIEGYuKCiorKzEi6WlpaeccgqLGxAR"
"hdlE2BlVZF14PA1BYW2DCea0/kZAxgrUKFoGuBMo/5XvCIQ/GW9vCWhaX7p2IOA4qUB57kvD"
"XglqOpZFo2KVN7v0iLGo2jVbl7UsdlJ88xkO4aBFS+BnLWacoGkioKsc6bFAXjLtcP03gEQK"
"rKPOZkwOIpwtVAFewcyKpTpAGbBogGbbU6MVlnQTIi08aJS8b/yhIBKuvntTXJ8RadHGS+An"
"8thpYbykJykAThlRt9eRDWMcFeutNMYJH0QmvIZ+VVW0FYAkYfF8gQPiTpBOLe4KX1YDhPld"
"FtZIdFZh4bBhhYWXX3G5JVv8RfE999y9YMECHLQQ9A97zVrb2dm5atWqU045BadH8LCtrW3z"
"5s379u3TQLOJiYnZ2dmDBg2Ki4vbuHEj4uKkpaVNmDChpKRk69at8JT2tYRq+fV9Bjs6OmCk"
"BlunvGF9fX1TU9PQoUNHjBiBA86Azg0bNhiJ+wDesKurq0+fPmoPgfpPRebY2FjFI7SOiMCf"
"KqsI1hVZaXvb29sR5rZPnz41NTXIoampqa2tDRJ9ZmYmLoEhol27dgFYVbdIRJWVlYgf7s1V"
"Jk8y0HiIMhv83VUeWWvhKgbtjpvgakcOcCtsf/Rne5BZMHl99BVZS911hB/1VgZH5mjDEEgX"
"hD6NqIRUwfdokQkuCGTZn4wuG62nrIWAs1ShyO6nuAjk9zVqAZ6GczMBbARVZl+dikqFb19h"
"A+a2HiN7kSVLUDtwREmu2lpB9pBK9yRloQx5hZDjyf1KhJHTd4iwQDpogZ9M8G7EUAYaUseO"
"uj1OsIk8vlzLMjKK6sTgcW7S7dStumIK9mrus8w2EGG0GYFvguboD5H0nv9Fu812doY++uij"
"t99+e8WKFe+//35bW1tRUREcaEi2OfI8h8Hy1NfXr169eurUqYCVrq6ujRs31tXVJSYmQqqF"
"03VeXl5sbOyuXbsqKytbWlpSU1MnTZqUkJAwZsyYYcOGbdiwAYYF8sy+JICo5s0+ffrk5uZ2"
"dHTArIwIDvDOwX2kJFxbKBSqrKyEyUWVg7DSZGdnK+xWVVUp8gIQU1NTWRx6ALthU0eaj6K1"
"E1pbW+HPiNc1ZV1dXV5eXigU6tevHwAxFAopIOJ0oLoQVVZW6n2nqJI/82JV3cX+wicvibfj"
"eXgWaJlF5aLpLUfMdp0V4Qs9bMKELXs16cgLPeGrDSQtG5hNIlaaIp0PGfrAkujCpVnEPgKS"
"zvpuPEV4T/njGZHaCpfDFF4HG1bjyNbJ3kIew+bXRjm1sPyDvg1vLXvpgubLkPUMaEgnGmCt"
"gN+9YduHV22rP4Uz0QRVaZCkhymnPLG84HOWEa9pw3VTd+3zIN3v6QDeHT46k4f2VtC3wa6s"
"QlIwwzli9CKmto1kCcI+Z2Vltra2AgSvueaak08+GV4m5A+bvwbF07CmpmbNmjVFRUUdHR2I"
"WJOcnAxhOSEhITU1FRaSpqamXbt2NTY24i4RsFRElJycfOKJJ44ZM2br1q2lpaV79+4lIiuR"
"aRQZMzIy+vfvj+ck4b+IKCcnZ8CAAXrUD6xfXV1daWkp/BbxEMpHpES5HR0dVVVVeFFDh8HX"
"h+QqGF+K9w9Hs1pBiay16r2oBmh83bt3LxSI/fr1w+3PRNTY2NjS0pKUlITDObCihEKhbdu2"
"DRkyRF9XbRRmR6z0RDBJFFE8icGXs/xpofDVw/BHLjPe7xe3YiN5M+5ebkQWAuPBAvCb4pUT"
"Zvx0BeosVkTtseY+YHRj5bhbco/3lbUawT32/FpPzeu5MbIdkL8k/bE7QH7KobrO2z9/J5Xs"
"cdTCoCBsA/L7NTxJRHPwFkfiuawCTRbWAWLHi1R6dq+3vhxms414QUA7qII/AWTnCV8avhJJ"
"8/LQEIMdYVPTX4iI/vSnP/kg2DN5L3Z1dSlmbd++HW6AdXV10PGpSi49PT0+Ph7W3qampsTE"
"xJEjR8bHxyuggEWKi4sbNWrU6NGj9+7dixDTdXV1OKgHd+6srCyVW3EZS1paWkZGhp5Z1qz2"
"7NlTXFysOEhycSgRQXmHlODUFBDj4+MTExPVIwcXljq51RhjDHzLWTwlkUzVnXiYmJhovJB4"
"9fX1QG3cpqJUW1sLeTk3N7e0tBQPd+7c6UcA85V4RHJRfTCzvY3Qm4uRKzjYPQPAOOBy7AX1"
"tKbD4YciS+hhtRwgb8nGhD/zENVLxN1Z2ogeiPyxW50i4KNbPkfUYREo6S/nXryllexpxfqS"
"rI2AxDD4CN4P68uIfaynvo+oRbcNbH/4HpZhT/138Af7o+6jKX0QbgMOr2pP5ckGyN1+ISKi"
"mTNnek8PMgmgYoOciK979uyBNzVsuIAYDU7T2NjY2NgYGxurDtiQr1luJSUifE5PT09PTx8x"
"YgQzt7a2NjU1oSBYNnA+L8IhxmfWKisry8rKoDpkudQFAbiGDx8OIEYTtm/frgpK+OXA4xq/"
"IuA2SgEg6sV+yFN/JSIcWEQ+eOjbW4wx6enpWi50lwDEnJwcveMFpwB9QLTehFNAjJjZB6Ge"
"1voRoeHBM+n9aj/ERD28tj/m41ALO/CbR6XDjmJePVSsB73oAftmP/tnt3wPrT6H+ftRKqc3"
"G82h5HeoiQE0wBQwVsnJycnJybB1EBGswEjW2dkZGxs7YMAA/ziKtXbLli379u0rKCjIzMy0"
"4QpKZAsY8kVUxSyUHvKuMSkvL6+vr/fNIBpgMS0tbciQIVZCTjQ1NdXV1fmX5BljENABFduz"
"Zw97BmgcswEOqrlZ6+nbWxQNiaipqQk/+bpFZA7Djh6LZuampiZ48LgBiFAv+b0TpShF6X8V"
"dXZ26rld48WpVp4LP+GJMnF+uAQigoiNlCkpKb6NdX8UobzzRSgcj4tAVTV96H2nSNPW1gZH"
"a/LkYoTbQXpo+rRQmHTwaygUQhhEfRdXSxNRa2sr7njBT0lJSX379gVTCQ8bpE9OTsZpv9bW"
"1traWn2ek5OTmpoKNjOy4VFAjFKU/i4ED2Q1euyPWlpafvOb37z44ot6uOIwKD09/eKLL/7x"
"j3+szoZE9PLLL99333241/Twso2Lizv55JNvv/12X3m3efPmm2++ed26deo7fajEzEOHDl2w"
"YMHkyZP1YWdn50MPPfTkk0/ibN/hUVpa2ve+972rr77a74cwslGKUpS+XFq8eDEuCSGikSNH"
"Ll68+ACJv/GNbxz2+o+gu+66S7N96qmn9gsKh0jTpk3TC/aqqqry8vKOSrYpKSn+TQnz588/"
"KtkS0a9//ev99XYUEKMUpS+VHn300YSEhOuvv/6ll1566aWXrr/++oSEhEceeaTHxBs3boy0"
"+B0BZWVl4RiftbaoqOhoZUtEb731FrK95557jmK28+bNQ7bbt2+P0AMcCWVkZOiJ6QiKPVpl"
"RClKUeoN3XXXXQ8++OAVV1yBr7NmzRo+fPhdd92lT3xCBGkiSk9PX7JkyREW7ev+9Hjvrbfe"
"Onbs2CPMeejQofigN+rNmDHjyiuvPLzcVqxYcf/99xOR+sqkpKQ8/fTTB37rgQceePvtt4no"
"W9/61kUXXXSAlBE6UJ+igBilKH2pVFpaetZZZ/lPzjrrrINiR3x8/Jw5c/4W9TnppJPOOOOM"
"o5WbAs2QIUMOu8KqLdXc+vTpc9DcnnvuOXwYPnz4YRd9dJQIUYpSlHpJubm5n332mf/ks88+"
"y8nJ+XvVJ0o+RTnEKEXpS6VLL7308ssvv+eee0499VQiWrFixXXXXdejvEyeDXrv3r2TJk06"
"7ELT0tLmzp37T//0T4edw6HSc889F4H7vSe4yBBRbzyEji5FATFKUfpS6ZZbbqmrq7v88stx"
"+XpcXNw///M/33rrrT0mHjt27DHHHLNz587Ozk7cO3rY9MYbb9TX119zzTVHkknvqaamRgPS"
"HDZNnTr1qFSm9/TVEJmZ+Zxzzvl71yJKUToKFBsb+9BDD9XU1KxcuXLlypW1tbUPPvggXJG7"
"U2Ji4pIlS3Dp0pHTnXfeCRT+StBpp512/fXXf8mFRjnEgO68887Vq1d/+OGHO3funD179osv"
"vni03mpra1u4cOHjjz++Y8eOAQMGzJs371e/+tWXLw5E6X8PZWRk9FIEnjFjxubNm1esWKGX"
"qxwqNTQ0XHXVVURUU1NTX1+PEPx/a5o+ffpll112SK98/PHHDzzwABEVFhYuWbJk8uTJR8tT"
"svf01QDElpaWo+iFtD+68cYbc3JyJk6cqK4DR+utuXPnPvvss/Pnz58wYcKqVatuu+229evX"
"P/PMM0ej1lH6x6fc3NwLLrjgsF+vra0FIJJnt/1bU2Fh4bx58w7plQsvvPC5556rqKgoKSlJ"
"Skr68tGQ/r6AWF9f78c1OwDhTPvfmrZu3VpQUEDdg98c2Vt//etfly1bdtNNN912221EdMUV"
"V2RnZy9cuPDVV1+NcL+IUpT+L1NCQsIvf/nLH/3oR9baX//618uWLfvy69ArQHzllVdmzZp1"
"//33//SnP/WfT5kypbS0dPv27dCAtLa23nXXXU8++WRpaWlqaurpp59+5513FhYWanpmnj17"
"9i233HLTTTd98MEH+/btw37V0tJy++23/+lPfyovL2fmAQMGTJgw4Xe/+53y9nhRpdGampqb"
"b775L3/5S1VVVW5u7rnnnnvbbbf5ggDS33zzzT//+c8/+uijjIyM+fPn33rrrQdGOuDaodJB"
"31q6dKkx5uqrr9Yn11577aJFi/7zP/8zCoj/l6k3Z5nLysruvPPOIyzIv8hUl4CKXPfee+9/"
"//d/H2ERl1122ZQpU44wE+SzaNGiioqK559/fs2aNSeccMKR53lo1JvDRp2dnXl5eUVFRf7D"
"TZs2EdG1116Lr21tbdOmTUtOTr766qsXL158xx139O/fv2/fvlu3btVXiGj48OGZmZlXXXXV"
"4sWLb7vtNjyfO3cuEV144YUPP/zw4sWLb7755qlTpxYXF/svzp49G5/37t1bWFgYFxf3k5/8"
"5LHHHrvqqqtiY2NHjhypl3Ih/ciRIwcNGnTrrbc+8sgjJ598MhH927/9W28aG1Fc72l/b40Y"
"MWLEiBERDwsLC0eOHHmoRUTpH4MWL16sVwOPHDny0Ucf3V/Kffv2HS2LChEVFBTooeOj6ObN"
"zJ988gmy/dnPfoaH8+fPP7zOeeihh5DDt7/97UN6USX0G2644fCKtr0/y3zdddcR0eeff65P"
"FixYQERr1qzB17vvvpuZ3333XU2wZcuWxMREPY1oRXnx9NNPR2SelJQ0ZcqUA9XSw5pf/epX"
"RPTHP/5Rf33kkUeIaMGCBX56Zl69ejW+NjQ0ZGRkHH/88b1sLB1VQASzHPHwtNNO00u+o/R/"
"in7729/6Z5l/8YtfJCYm3n///ftL//vf//6oHGdOTU1dvny5ZltcXJyVlXXk2ZLHFdmjAYgt"
"LS24czUmJmbt2rW9f/GoAGJvw3+tX79+7Nixv/jFLxYtWkRE1tqhQ4emp6er72VRUVFXV9fr"
"r7/uv3XuueeWlZWptYGZhwwZoucTlQYOHNjS0vLKK69MmDChx9J9kXn06NG4QltnSSgUOuaY"
"Y7KystavX6/pTzjhhE8//VRzOO2001atWoV45QelCAm9l7S/t2JiYs4+++yXX37Zfzhr1qxX"
"X331sOMjRemrS4MHD77pppt8T+zFixffcsstFRUV+3vlf/7nf1555ZXDtjITUb9+/ebOnevf"
"JUJEO3bsWLp0aVVV1WFnm5CQcOaZZ55++un65IUXXlixYgURTZ48+Tvf+c7hZfvSSy+9/vrr"
"XV1d06dPP//883v51lNPPfXRRx8R0RlnnDFr1qzDK/oQot2ceOKJ+fn5YLlxiPruu+/WX4N7"
"TsNJ41YCec8666zuOS9btgxXTOTn519wwQWPPPJIQ0ODn4A85isxMfGMM86IyOGMM87Qa62R"
"ftasWX6C2bNn976xFOUQo/Q3o4SEhPLycv9JRUUF7iDumd57z1ZXHyjHxka7aJF9442jVMFu"
"1NFhq6rc5+pq+957R5rhF1/Y22+3ixbZ0tIjzapHeu654POOHYf06iEYtufNm7d9+/Y333yT"
"iB5//PGYmBg/pEQoFCoqKnqtGy1fvtzPpMcotXPmzNm2bduTTz553nnnFRcXz58/f/jw4Rs3"
"bux93brTUQyadIQ0YMCAHTt2RDzcvn37gAED/i71idLfl0488cSIM21r1649kPXgySepm1AV"
"Rn/+M9XV0de+dpQq2I0eeYT++tejltuuXXTllTRrFp16Kn33u1RXd9Ry7k4NDfT97x/aK73H"
"zurq6ri4uEsuuaS5uTk9PX3mzJn+r2PGjMFtpwcg6h3n9fzzzxPRFVdc0eOLo0eP7t+/P65Q"
"AHV1deXm5h533HEHKOjvyCFefPHFxpja2lp9Ul1dzcwXX3zxoRYRpX8Aeu2114YNG/b444+X"
"l5dXVFQ88cQThYWFy5cvrxGKfOHHP7arVrnPL7xgL73Uzp1rTz/d7tljrbXNzXbCBDthgi0r"
"s9baUMjecIM97zw7Z47VKffoo/bCC61q+m691f7oR/bss+1119nvf99OnWo3bHA/1dbaOXPs"
"t75lZ8+2bW3u4Ukn2UmTLG68W7fOLlzonnd12R//2M6ZYy+80KpId/fd9sor7be+ZS+4wHqL"
"NKD16+3ppzued80au3u3e752rf3GN+w3v2l94+ezz9pZs+yMGfbTT92ThQstosZee23At86b"
"Z88/3774ovt60UXuw8MP29xcxzvX1FhASlWV/eEPe6iYtfaQOMTs7OyZM2cuW7Zs6dKl9fX1"
"l156qf/rRRddVF5e/thjj0W8pVcfHID27Nnjf4X9vrGxscfE5513XmVl5RNPPKFPlixZUlVV"
"9TcKjhRBxcXF27dvP6RX5s6dGwqFEN8NBHv3gUO2RekflWbMmFFSUjJv3rzBgwcPGjTokksu"
"2bx589lnn50tdKCX6+ooL4+WLqVJk+i994iIkpJo/nyaP58GDyYi+uwzKi+nZ5+l736XHnjA"
"vVVdTdOn0733uq/bt9PMmfT88/TMM/Tv/07z55OKcXV19POf05Il1NFBZWXu4fe+Rz/6EeFw"
"YXMz6fx/800yhpYto1NOIY3VuHMnTZtGzzxDjY3U40o57ji6+GL65jdp0iR65x3KzHTPb7yR"
"bruNHnqIXn6ZKiuJiEIhWrSIli2j3/6WSkqCysMSUFZGegzxo4/oP/6DZs92XzdvDmo+ZgxB"
"xZmVReXlVFpKf/4zjRmzvw4+NMfsSy+99IUXXvj5z3+ekZHxzW9+0//pZz/72fLly+fPn798"
"+fLTTz89ISGhtLR0+fLl48aNgxX4AJSfnz9z5swpU6ZkZ2fX1tYuWbKEmffn5n799dc//fTT"
"KlPL9QAAA0JJREFU8+fP/+STT8aNG7dmzZo//OEPI0aMOPJjj7fffrt+Likp0a+wp4NGjRoV"
"YTk56FszZ84877zzbr/99urqapxUefTRR+fMmfP1r3/9CCscpa8ivfXWW0f0/sCBRERpadTj"
"qeSKCoJDz6hRYXJubm5kJgkJlJNDcXGUlETKkbS30y23UP/+VFVFcuPdfqmigkaNcmW98krw"
"PD//QDXcuJHOPpsuu4xqa+kHP6CRI2nGDCKi0lJ6/HFipjFjCMbGzk6KjaWEBBoxguTGhYB8"
"a3BKCh34nmvQxRfTH/9I779PS5fuL8mhAeI555zTt2/fPXv2XHHFFRGnR+Lj41999dX7779/"
"6dKlL774YldXV35+/pQpU3oTceiaa655880377jjDpxdmTBhwn333XfmmWf2mDgjI+O99977"
"13/916effvqhhx7Kzc2F0/WRu2vddNNN+nnjxo361QfEw3vrv/7rv+64447HH3988eLFxxxz"
"zIIFC2688cYjrG2UvqI0ffr0v2Huo0bRH/5AoRC98w6NHn3Irz/zDM2fT+eeS5MnU1eXe8jc"
"Mzgedxzdfz/98IeRZR1Yfb95Mz3zDD3yCPXrR1lZpH7pw4fTt79NkyfTwoXUpw8RUVwcWUu7"
"d1NNDT34ID34IBFRcjJVVlJLC23Y0KsSfdycM4eKiqiwMHJ7CE8fpShF6X8xLV5sS0rc5xUr"
"7F//aq21zz1nxc3WvvFGmIn5scfsnDn26qttS4t74ie21j78sFM43nijtda+/36gfdu82Z53"
"nr38cnvnnYHa7tNP7ezZtq7OWmtLSqx/JdZ999k5c+wvf+k0jNbaxx93Gsn77w90fBH08MP2"
"/PPtt79tH344eLhtm73kEnv++WEPP/zQfuc79rvftZs3uyebNtk5c+wPf2h/9atA/4iGKOnX"
"zk578cX2+eeDny66yD7xRM+1stb23g8xSlGKUpS+2lRcTFdeSS++SCkp+0vy1YiHGKUoRSlK"
"R0off0wLFx4ADSl6UX2UohSlKClFOcQoRSlKUXIUBcQoRSlKUXIUBcQoRSlKUXIUBcQoRSlK"
"UXIUBcQoRSlKUXIUBcQoRSlKUXIUBcQoRSlKUXL0/wF7Mt/ojArSagAAAABJRU5ErkJggg==")
getSplashData = splash.GetData
getSplashImage = splash.GetImage
getSplashBitmap = splash.GetBitmap
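
# getSCRTData() returns the raw bytes of another embedded resource, stored
# zlib-compressed in the byte-string literal below (presumably image data,
# like the splash resource above).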
def getSCRTData():
    return zlib.decompress(
'x\xda4\x9b\t4T\xff\xfb\xc7\x91\xec\xd9\x9a\x98\x18\xbb\x90&&\x89\xb1\x84d\
\x99l!i,\xd9\x92\x90\xec\xd9\xb2/\xd9\xf7\x10\x92-\xc9W\xb2\x87\xec[H\x12\
\x12\xb2\x9b\xd0d\xdfw\xfeO\xbfs\xfesN\xd1\x999\xf7\xde\xcf\xf3y\x9e\xf7\xfb\
\xf5\xbes\x0b\xd7\xd6R=C\xc3FCBBr\xe6\x16NI\x17~&\xff\xfbCE\x06\x7f\x97?\x98\
\xab\x84\x1f\xa7\\\x14o\xe9QQQ\x8dN\xdf\x9e"!\xe1&\xb9\xa5tC\xcf\xe3\xd5X\
\xfd=\x97\xab#O%~\x8dwJ%v\x92\xc7\x875\x91\'8\xbcN\xdd\xa9\x14g\xeb\x8dR\xe7\
LZt\x16R\xff\xb6\x1d-\xba\xf4\xec@\xb4\xdax\xd6y;\xa4\x1c\xad~\xa1\xebC\x82\
\xcaOrM\x0f\xbe\xab82~g~\xb2\x1b\xbf\xa3\x96]\xd6\xa8\x9eya7|\x8e\xbfI\xb9e\
\xfc7\xc0\x8a\x8e\xb3s]\x99x\\\\9\xe4\xf5\xb3\\\x7f\x00\xd5\xa1srr\x1d\x1b\
\x93k\xb3\xef\xb7_{\xe2\xb0\xbc\xee\xb7\xb0\x9f\xf9\x8co\xees\xc1\xc9\x01\
\xedQ\x15\xc3\xf2\xc9\xea\xae\xa8\xc3Q\xa3\xf4\xf4\xe7S~\xdb\x15\'=\xa6\xc7\
\xbdy\xeb\xf2\xc2\xf2\xc7[\xf2;*\x9b~)f\xc7\xf7.\xad\x9d\xac\xd1\x1ce\x9a\
\xfc:\x99\xe4<\x89\xbe\xbd\x13N\xf5\xac~bp\xf0\xdc\xc9\xa7\xc3Q\x0c]k\xd9\
\xe2\xfc\x17\xa3\xc2\xc1/\xb3\xce\x7f\x1b\r\xa3r\xfb\xde\xe9\x16L\xa5\xedv\
\xc9gz\xb3D\xb6\x0b$R\x9f\\\xa8Zp\xab\x8dV!|\xbe\xbcj\x90?0\xbd\x8ct\xac\x1a\
}"\x91.\xf9\xe1b\x92Z\xb4\n_\x8d\xe81\x16E?\xba$q\xed\x9e\x8c\xb3\xa8\xac\
\xc0\xde]~f\x83"}\xefR\xc3"\xb7\xda\xfb\xc6\x01om\x8a\xf4\x0b\x07m\xd7\xac\
\xdb-\xe2\xc7\xf4r\xfb\xfely\xd1\xd5\xea\x16\xc4\tV\x8e.)f\xc7\x11\xaa\xed$\
\xba\xcb\xd0\xc7\xd5\xd5\xd5B\xf18|\xf2e\x96HK\xc3\xf7\xfa\x974\x07\x8e|3qJp\
=\x83\xb6Q4\xf3T\xe4dc:8\xab\xdf\x9b\x9b\x9b\xa6\'[\xa6\xe7\x9f\xf3\xdc\xb44\
`\x89m\xc3D\xe9\x17>\xa9\x1a\x95|\x83\x1b\xd8\xb4\x93x\x80T)\xd0c\x8bV\x89l\
\xb7\x90H\xef\xb1*W\xc9\xe5\x91\xcc\x12\x8a0G~\x8d\xc7\xf1K\xcb\xc8D\xaa\xe4\
vY:\x8b\xa5N\x99\xfcv\xfb}/\xbd\xe7cyy\xf9\xc7\x8f\x02,\x91k\xef\xcaG\xde\
\xeb\t\xfb\xac6S\xa6j<\x97W|nn)62^\xeb\xee0\xd9p9I\xedbD\x9a\x86@\x9f\xf5\
\xe6\xb8\xfb\xe4\xd2\xce\x01V\x93\x9b\x91\x8a<\x10\xc7\xbf\xfe\xebqq\xd7\xac\
s\x0c\x01\x13\xeb\xd0x\xe4&\x97\x99\xa9\x95?@t\x95H\xd7\xcc\xbf\x83\xb3\x17K\
\xed\x9c]\xb7\xadT\xa38\x99j\x0e\xaa\x9a\xf0RG\xb3\xa8\xf3\'\xf8\xed\x86\xfa\
a\xa5\xa4\x9a\x8bf\x9d\xf7\x8a\x8f\xff\x16\xdb\xdb\xd9\x85)\xe5x\xc8\x1e\x17\
9\xf8\xd0\x8a\xc5\xa9\xe5\x99\x9e\xec\x0f9\x8c\x7f\xba\xc0\x129\xf9l\xa5\x9e\
A\xfe0\xce\x1c\xf9\xdc\\__\xdf\xf7\xa0\xca\xcfH$%\xa5{\xb6\xf1h\xb4\xb1\x9dn\
\xd8\xee\xe3\xe4n\xf4\xe4w\xdb\xca.\xcbu\xf6\xc9]\x8a\xee2\xb2\x00\xb3\x07\
\x0f\x08\x97L\xf7"\x14\xb8w\'v\x1a\xdc\x1d\x1d\xfd\xf7\xe0x\x8d\xf6\xe5#\x98\
\x13\x8bRdah\x8eP3\xd7Q\x1f\xd7\xf4\x9dR\xbc\xac\x9c\xdc\xdc\\\x07\x14\xcd\
\x80E,\xb1{6NH@\x8f\x9b\xd1LC\\\xf3\x01\rEP\xf3\xe0\xe0]\x91\x14\xa8\x97\xa8\
I\x1dVZ\xfa\xcb\xfc\xc6\x92\x9b\x91\x10\xe2\x8b\xa5X\xd9\xe1\xacs\xcd\xb8\
\xfd\xe4\xc4\x84/1/\x05EO\xa9\xac\xb3\xe5\xe5#\x91>M^\xb6-\x93\xa1%\xcb\xe9\
\xb76\xa3\\8\xd8\xbf\xb0\xe5\\\xf3\xca\x1c\x99\xdbW]_\xbf\xe4VkT\xa4\x9f\xdb\
\xd7\xe4\x81\x8dI\xd7\xca\x1f\xb1\xdb\xfe\xf3\'\xec-\xb1\xbd]9@a\xcaU\xba\
\xaa\xaaJ0\xf1V)\x9e\xbf\xb0D\xb7\x00\xc5\xc9)\x9c\xf4\x05\xdeJ{\xbb\xb1w8=\
\xcd\xb3\x1c9\x1f\x88\x10L\x0c\xef\xb3><\x9a<\xf1\tP\x88\xd3\x10p\x8a\xb4\
\x93`\x8fV\xf1\x91\xcbl6\xab\xf69\x90\xd0\x1c\x98\x9f\x9f\xdf\xfdr||l)\xb6vp\
\xe4\xdb8IXCqpD\xab\xe4\x8e\xd8\xb9yx\xe0t~|\xf9\xa2\xc6\xcf\x1c\xaf\x96\x97\
\x9f\x9f\x8f\xee6\xb9\x7f\x9f\x83\x83\xe34\x19il\x07a\xe7`gg\xa7\x83\xe0T<$#\
\'\xb7\xbd\xe7-\x97\xe9\xe0\xe9)\xc5\x16\r{i[i(\x84\xd0\xbfwORBB:CK.\x93+Z\
\xa5\xcf\xfa\x87U\xf9\x8d\xc6R7\x87\x08Lf/\x17\n5uk\x15\xa3\x14!\x9dA\xef\
\x12@\xb4BR\xa8\xa7?\x1c\x99v\xc4\xd11\xc9\x95\xa0e\x90tp\x04\xde\xf8\xe1\
\xd1\xd1\xd1\xc9IQ\x96\xc8\xa0f\x05\xe1$\xb5<\xdd\x9c\xf3\xb4\xb4\xb4\xec\
\xec\xec\xa2\x8d\x87e}\xd6\xa8\xd8\x8eh\x95SU\xdc\x02Lb\xc3\xe3\xe3\xd0\xff\
\xf0\x19(\x95\xdb\x13;;\x8f\xfa\t\xb7Z\x9f\xa7O\x95\xc9\xe59c\xcf\x86+\xa1Y\
\x06\xac\xc5\xe1R\xd0,\x07\xde\rf\x18|\xf1P\xf8\xe54\xad|X{JJ\x8aD\xfa\xd2\
\x1a\xa1#\xb7/\xec\xcfu\xcd\x81\xc4.KYN\x86\xd0V\xd3\xfd<S\xb1\xf4\x9e\x97\
\x03x\xf4\xcf\x95\x9d\xd2\xff^\xcb\xfb}\x18Zx"a\xc4\xcd\x8874\x8cP\xc9%\xac\
\xbd$T\xeb\t\'\xf5\xce\xdf\xcc\x8e\xeb\xb1\x1a\x9d\x98\x88i\xb7(\xd2\x8f\xcc\
yg)\x06c\x1c\xd4\x9c5\xb9R5\xba\x14\x1a\x1a\x8a_888PWW\xd7\xcaw\xf1\xf19Z\\|\
?\xbaD\x1f\xda\x9a\x9b\xcbW;\xbe\xbc\xe5u\xe4{B$\x12\x1d\xb1u\r\rZy\xba\x01\
\nfO\x9e\x9c\xb9\x07{\xfd\xe8\xd1#\x81D\x1b\'\'\x1d!D\xcb_o\xb5\xe0{\xf7\xee\
%\xa9\xb5\x9ba\xfe\xb8\xc2:\xf2\x07m\xaf\xb1E+f\x7f_\xd8\xa2\t!{\xfeX\x9c\
\x8d\xe9\xac\xda\x80k\xd5(\xec\x9c\xf7\xb4\x8f\xfe+\x18\x7f\xc3\xa2\xcf\x84\
\xb5G\xbd\xaf\x95s\xfb\n\x0b/\x05(\x90x\x9a]3(\xfaYTT4Z\xe5P8\xf8\x17\xc4\
\x95j\x8c?\x07&\xcbj\xa7\xe2"\xee\xdfXN\xce\xf1R\x14\x85\xa3\xd1\x8a\xeb\xc3\
\x90`(\xdb\x17\xa6B\xe3T\x9f\x83\xeek:;\xd7c^\x8a-\x08\x08C\xc2\x97\xe4\x0c\
\xe9\x16Ko\x9e\xbd=<~\x14iat\xe5\xe3\xae\xee\xbd\xa4\x15\x01\x8dt\xd9~\xca\'\
h{\x0b\x16\xc3\xacH\x8b\t\x06\t-\xfe\xf4\x18\x05%zs\xc8\x01X\x08\x0ea\x04\
\xb5\xb5\xb5\xc5\xc5\xc3\xa6xu\x1b&\xb1\xa2SS\xb1\xf1\xe1\xb6,kX3d\xd4\xd5q\
\xca\x86 \xa3\x9e\xc7\x91\xe9,\xb3]Ttb"h\xfb\xe7\xc4\x1c\x98\xe5\x08\x8f\xcc\
o\xbd~L-\xc9\xa1`\xaf\xb9\x1ehp\xa3\x9a\xbb4\x18\xd4|\xc5\xa1\xf5\x07kd\x02\
\x8f\x14\xf5_\xfd\xce\xa7\x87M\xce\x11\x7f\xac\n\xb9\xc1D\xd2C\x9ae"\x18,\
\xcaX\x1a\xe9\x12\xa5\xee\x9d`\xb4\xf0\x88\xd4\xa9C=\xbf\xf7{\x13\x96\xb5\
\xf9\xf9Am\xca\rBr\x1b8\xd4\x06l\x8f\x15\x1d\xd1D\xe2\xc8\'E)\x14d\xe4\xffc^\
\xc0\xf8\xd7r\xccb\x11P\xc4\xdf\xe4|\x05j\x8a\x92\x8c;\t\xb0\xddq\x1bz:\xe1\
\xb6\xd7\x04\xb6\xfc\xa8\x1c\x10\xc5\xdf\xcf\xd2\x1d\x8b\xef\xea\xb1\xc9\xae\
\xa7\xea\xfbb@\xc8S\xf9\xdb\xf9\xc9\xb2\x8c\x81\x10D\xf6\'\xed\xdfxc.\x18\
\xcb\x8e\r\xfc\xd5\x04\xe0P=~\xcf\xc5\xed:\x87\xc8\xedB\xd0H\xb2\x8f\x95"\
\xf4\xa0\x95`4\xfa\x03\x19\x15U)BI\x8a\x19Xz\x0c9,\x16\xe5\xc8\xe6\x82\xe3\
\x07\xb2p%\x0e/,\x07\x93\xf3j(\xc4cjL\xc0\x88\x1e3\xa6\xbb\xb87\xc6\xa3\xb2T\
\x82\xa5\xf4\xca\xb3\xb7\xde$\xbbt\x9f\x965\xf6\x9e\x88\rL\xe7\x0b\xfc\x1f\
\x93\xad\x1fF\xd2\xdb\xb6\xc1\xd5\xaa\x91E!\x86\xc0-\xdb\xd6\x0c\xa9\x92\x83\
\x95\x80\x00H+\xe4F\xf8\xeey\xed\x8d\xe2J\x8b\x9b\xf3\xf3\xf38\xfe\x17\x8b9\
\xdd\xd8\x8bq{g\xdc{\xc8\x9a\xfb\xbb\x93\x05\xf5Z\x9d\x89\xab\x90\xc2\xa3\
\xcf\xa71\x01\xfe\xc7\r\xa7\xc2\\B\x81\xe4\xe4\xe4l\xb7-\x0fu\x1d\x87\x91\
\x84\xc8\x8f\xd9\xd9Y\xe38\xbeV\x8f\xdb\xbf\xfd\x1d\x16jLn\x07\xdeq0VVV\xa9\
\x8c\xfaz\xce\xce\x82\xc7\xe7\xe7>O\xbba\xe0\x83\xd4\xad\xdd\x1f\x14\xaf5\
\x9f\x1b\xca=b\xe7\xe4\xac\xd3h\x10>+\x1c<]\x0e\x08#\xf2\x1co1\xd0\xe1z\x98\
\x16\x0b\x99H\xa2\xb2\xdb\xea^;\xa7E\x1e\xc2\xff\xd9>\xa0\xba,O\xa2\xe3u\xde\
\x12\xad\x82\xaeecg\x97\xa6=<\x86R\xc4 \xcc\x92\xd1}Z\xdbu&\xa1\xd1\xf63\x07\
\xfa[\x9e\x06\xb8\x16\xe64U\xfdL\x88m?7iH_\x92\xfc\x9fo\x8f\xc2\xae)\xb2\x96\
\xd7\xf3\xde\x0e\xa8.\xea\xc6:@\xc1\x13\x12\xd6\x0f\xc4\x01\x10\x10\xe8\xcb\
\xff\xbf\x0b\xc0\xd3\xb6\xe9\x9aug\x1a\xedp+\x11\xbb<\xd8\xee%O\x99\xe4\xf8\
\\\xdc\x92\xfa5nts\xbd|\x07\xd5\x0e\xcc\x1e\x89\xa4\xac_B\xfe\x9f\xb8,o#/\
\xfe\xa5\xaa\x19\x1d\x9b^\xcb\xf9m3\x06\x92*\xcf\xbb1>\x7f1\xe8%X\x90\xda\
\x05\x1cq&\x17/z_/\x8c\xfd\x1c\xbcg\xaa\xb5\xdb\xe4:\xf1\x97\x93\xdd\x9b\x00\
\xbb\x9d\xe0=^\r"\xc59\x10\x8c\xa1"?\x1d\xa5\x17\xe7s\xf9\xf3\\\xb4\x07!\xbf\
\x19G\x845\x04P\x92h;I/\xe4z\xdfLpn{;\xcd\r\xed\x9f\xb6\xd5\xd4\x9c\xc3\t<\
\x050Y=\x7f\x9c5\xcd\xfc\x7f\x85\x9by\x9e\x01\xafe\x8c\x04\xfeg\xf7\xb5\x98\
\xaf\xb2\xa2\xa2\xc3\x1f\xdb\xb6\x8a\x04\x86\x84\xa2\x00 \x8f\x7fQ\xf7\xff\
\xf3XS\x18/;\x8e\x88\xbc\xf6\xfa\xa3\xa3i(\x9b\x9b\x9b%\xdc0\x19\xef\x10 \
\x19\xdd\xc9\xe94\x1b\x0f\x8c{\xb4\xd9lp4t\x83\xed\x97T\xd5c\x8c\r\x9c\xdf\
\xb1\xc4d-~`\xe3>\xc8\xae:T\x98\x8c})\xba\xa8\x96On\xca\x11\xc4}\xe4\x8a3\
\xe5\x93\x1f\xcf\x8c\xabE\xcf\xbd\xba\xa7\xb7\xbaJ\xfa\xc3@\x8c@\xdcv\xea\
\x80\xe6\x12\xcd\x9e\xcc!?\xce\x8d6%\xcc\xa9\xe5x\xffoiE\xdf\xef\xfd\xba\rU\
\xd0zs~\xfe\xe6\xff\xe9[`/\xcdx\xd3\xfav(D}\xbfn]\xd7\xd1\x9b\xe8\xd0\x9f@\
\x00\xe3\xaf2I\xf4\xca\xfc-\xf4u\x8dH\xd7\x84\xe8\xd4\x1f\xfe\xffK\xfbTm\x8d\
R\x15-\xd6+\x8ddX\xea\x80\x04\xfdo\x04|\xa1\x8b?\xe4\xecMw\xa9\xa0kQ\x9c\xce\
\xec\x85Mv\x99\x10c\x98\xf5?n\xd2\xae\'\xe2\xe1\t\xd5k\xd7\xf7\n\xaf\x18+\
\xa6\x011\xff\x82\x7f\xfd?0\xaf\x1b\xf6y\x7f\x10Ah\x97IZQj\xd3\xeb\xb2g\xa1L\
\xbe\xef\xb2\xf75\xf2g\xb0\x00\x91\x81\x10\xb9\xb5-e\x128\xe2\xbd7\x9aN\xc9\
\xa9L\xedC\x11\x8a;\xf9\xd1\xd5\xa3\xaa\'\xc3+\xcd\xdc\x96\xcaf\xb5\xda\xac5\
\xb2[/\x03\x08\xff}\x80\xbe\x8f\x9a\x9a\xd8:;\xcb\xc4\x11\xab\xd5h\xc1@ *<\
\x1d2\x12\xa7^~6\xaa\x00C\xb4\xee\x12\xa3\x18\xb9\x13\xf5\xe3\x06\x86bZF=\
\xe8\x90E\xb6\xb4\xcb\xfc#\x1b\x1d\x11T\x1b\x97\xf1\xb0b\x1bBG\x9f\xe5\x05\
\xf7\x1e\x98(\xbfb\xf7\xa8B\x17\x01\xae^>\xf1\xc5r\xf6\x99o\x94rg6\x1c\xa7\
\xe8\xdf\x9c\x0c\xecw\x1e\xfe\xd8}\xb6\xc7\x10L\xc5\xf3c\x135\xdf\n\xf9\x05F\
\x16\xc2\xd0A\xec{\x13\xc1\xba\x0cs\x92\r\x00\xf4\x08\x18\x81U\xaa\x86\x111\
\x8d\xf6\x94\'\xad\xbc\x9e\x8c\x88\xd3\xb7\xd8\xf5O\x1bKSG9\x0e\x94a\xde3|n.\
.\x13\t\x9d\xfc\x99\t\x94\xe7\xd1\x16\xb6t\xb8\r\x04\xc6}\xectHng\xab\xbb\
\x9e\x11b_\xc49r]L<Z\x1bv\x97\xd4\xf5\xc1\xe4\xdc,NX\xea0\xca\xc4\x81\x1c\
\xa7l\xfd\xdbq\xf2\xe6\xcf\xf5\x15\xee\xe9\xb8p\xcd=}\xdb\xc6\x8d:\xed\x07\
\x99\xff\xc7\xc7\x00\xcfe\xb7\xd3\xf9\xba\x1f\x9e\xdb\x94\xf1\xa3K5#\xbaN\
\xae\xbd\x91\x10\xec\xdd\xdb\xedL\xd4\x8ef\xbb\xfd.\x06\x9a\xef\x1d\x03\xd1\
\xf5\x98E\x96r\xf2/\xea$\xca\xacz\x92\x0c\x8d\xad\xa4+\x07B\x1b\x98\xf0h\xda\
\xd8@\x12\xae\xfdh\x0f\x1b\xfb\xc1`\xc7\x17\x03\x81\xd0\xa7H?\xc2\xde:\xc5\
\x82Z\xc3nT\x8e\x8c?\xec+:N\xc5>\x9e\xc2\xfdZHGV0\xb57G\x02\x9f\x1e\xf9\xab\
\xe3|\xber?\xf75\xc2\xaa\xeaz\xb5b\xaa+X\xfe%\x19\xf32}\x89\x14\xe6\xf4\xd1\
\'7\xd2\xdf\xee\x8f\xf3\xbd+\xda\xc7eIV2\x9eL\x88\xfb\xe2\xa6\xffvW&\xf0~\
\xcc\xbajlv\xc4 \x93\xadq?I\xcb\xd8\xe5E\x95\xf7[\xf7\xfad&\xda\xee\x171\xc2\
Q\x9e_\xaf(\xfaZQ{G\x9c"\x12\x1f\x1e\xb4\x10\xc9R\n\x95\x8a\xaa\xeb#\x01\xae\
\xb858\xcc4\xa6\x16Y\x17\x8b{\xac\xbd{\x06W\x9d\x84\xaaVM\xe7\xf2\xbd\xf6\
\x86\xf9m6\xf4!\x15\x055H\xbd\x8e&\x07\'\x98\'\xac\xd0,r+\xbd\x1c\x1b!\xe7\
\x1e\xfd\x06\xbb\x86\xfb\x7f\xaa\x89\xf4\xe65\xaaR\x14\x89\x13\xe0>\xbf\xf9\
\x80#v\xbf\x1e\xc1\xb1\xb4\\\x8bK\x84\x9c$\x1d\x80\xf7\x10[\xd7.\xa2\x87\xc0\
\x85\xf9&\xc2\x9d\xa3\xd7\xe9|\xe2\xb2\xa2K\x85`\x07\x01NCu\xbd/V\x93\xe3?\
\x9c\xa6\x81\xcd\xae&\xfa*\xed\xd1\x8a\x8a\xd0P@Akw"\xb1?\xcb\xbd\x97u\xba]\
\x0f\x17]\x97\x1c\xa3\xc1\x94\x9e\xe0\x9cR\xab\xf9\xd1\xc1\x0c\xab\xbf\x91\
\xd2\x12=\xa0\xe9{\xefhJ\xb6\x9d_i\xf5%:\x006\xdf\x102\xd4\x9a@D%\x08\x8b\
\x153t\xfc0\xdf\xdf\x84\xda\x9baZ\xf1\x98\x1e\x9e)}\xe0C.9\xa5\xa8\x165\xd7<\
.\x0e.\xf6\xf8t<\xf60k\x9c\x04\x11\xa2_\x1a\xc2?\xc2\x00\x19\x90\x1f\xdb9+\
\x1e6\xa7Bn\x8d\xfe\xed\xc0\x89k#\xce\xe7U\x81\x19r\tKQ-\xd2$P>\xb6a\x82\xdf\
\xf13\x06F1\xdc\x1b\x1a\xf7\x89\x9f\x17\xa6\x06\xc3\xd0\xbdtK\xdc\x99\xcf\
\xdb\x1ck\xf0x\xc5\xef\xb1\xad\x89\x86\xe6\xadB\xb9\x80\x0f\xa6H&\x98I\x10\
\xfd{\xb4\x01]\xf2\x17@\xb5\x10\x7f\xdbk\xad\x97\xde\xd3\x88\x0f\x90\xb6\x8a\
\xef\\\x05(\xafI\x82\x8c\x11\x1aN\xaa\x81T"\xf8\xf1\x13y\x90w\xb0\xdc\x1f\
\xd6\xd6\x90.\x10]\x88\xdfw\xab_\x94&2\xb9\x1b\xa6\x1f\x17\xfa\xfb\xa0}\xbb~\
\tD\x10\xd3\xd3\xce\x00\x9c\xb5\x0f1{\xcb,\x9e\xe2\xdd`\x10\xda\xf9Zg\xcb\
\xc9\xddB>/\x10\t\xeb{$z(\x13\x06\xddc\xc0\x17\xe4BxC6\xdena}\\CF\x8d*\xd2 \
\x9d\xda\xdb\x8em\xbe\xde\x87\xb0\xf7?\x02\xde\xa7\x19\xe6\xda\x9c\xb6\x99.\
\x1fF\xe4J\xfe\x811\xc9\xccd\xeb\xe2\x1b\x81\xec\x93\xb8\xd0B\xbc>H\xc7\xc2}\
\x1b&\xf5\xf0"\x1a\xdaO\xbf\x1c`aG!\x10\x97I\x91/^\xb6\x1e)\xe53x\x0bjQ\xd5\
\xc8\x90\xb7\x9e\x9f\xfb\xd8\x92\xd3\x8a:\xf1\xb4\x06D#\x93\xcd\xce\xed\xb3\
\x98\xc5\x08\xc5\xac\x06\x112dh\xf2\xc4Mv\xf0\x83\xbf\xdfx\x98\xb0rm\x84\r\
\x85\xcd-\x06G"\\\x05\x00\x85)\xdd\xec\xf7\xb3V|\xaf\xa5\xdb\xcf\x94,\xd0\
\x10\xe1I\x91\xef\x0cg~\x16O)\xa8\xc79S\'\xc3\xfa\x93\xc4\r\xf9p9\xc3\x1f~-\
\xcf\x80?\xfc\x1em\x7f\xcc4\x08AHX^!d\xf9\xad\xb3\x0b\\\xf8L\xbc\xe4\xd4\xc1\
\x05\x7f!\xf4\xfc:\xb4.7\xb2\x0c\xc1}@\xb2\xaa T\x82\x10\xa5\xd7\xce\x9a9\
\xb3\x07\xa2YN\xe0\xbc\x92\x86\xf7\xbf(\xda0\xea\t\xdc\x03\xa7\xf1\x83\xd9V$\
U,E\xfd(\x13D"\xf4o&\xac\xe3f\th\xf5v\xc4\x94\xf2B\x1e?\xa4\x94\x80\xdcu\n\
\x88\x94\x83\xbbC\xf2\x1b.\x8f\xff\xbc>q*\xe5\x85\x01|\x9e\xaf9\xf8\x9f\xe7!\
Mi\xba(\xc4L\xd8\xcf\x04,\x9a\xcd\xe7\xc2\xa6\x14x\x94\xdf3\xa0\x87\x0e\n\
\xe1\x9f\t\xb8\x1f\r\xf3\x89\xe9\x17\x12\xa4H\x0b\x0cQ\x1a5q\xa1\xc4\x90\xcc\
\x9a\x14\x10\xef\xcea Y4\x91Q\r-\xe0\x7f\xff\xbe\x02\xf8\x9f~^\x13+&\x06\xc2\
\xd8\x04\xcb\xb4\xbef\x98\xdbO\xda\xc8\xd8\x050\x08\xf1\x0b\xa2\x17\x16\x03\
\xea\xc7\x8f0B\x81\xf2CpG\x172C\x85l\xd1\xe8\xa0z\xd8\xe5~Q\xbf\x8f\x95m\x1b\
\xaf\x93\x12\xc9\n\x95\xda>\xbf\xf2\xc2\xbdJ\xbf\x8f\xbc\xab\xaf\xfe\xf4\xf2\
\xbc[\x9d.\xa7\x05]\xbe\xad\x07\x07\x19\\\xef\xce\xa2\x18\xa8\xc4\x9ew}o\xb6\
5G.\x84\xc7\xb9nk\x87M\x1f/\xb4W\xbc\x1a\xf9\x8f;>~\xef\x87\xd4-.\x0f\xcd\
\xca\x8c\x9f\x0fn\xed\xb3\x9f\xfb\x95\xb4i9>\xae\x9486\x97\xab<N\x03\x9app/\
\xad}\x8f[+\x9f\x1f\x97\r\xee\xcc\xbc\xfd\x10\xba9h[\xce\x0bo\x99\x9d\xb5\
\x9b\xe7KNG\xd3\xafa~\x82\x83P\xf5\x97\xdb\x9f\xef\xfd\x0f9\x8c\xdb\xbe\xaf#\
G\x0c\xc2\x92\x88\xa3\xddo\xfd"\x13*\x13{\x8c\xd5\xc1\xe6\x06t\xdc\xdc~.S\
\x8f\xa7\xf2\x95[-v\xdb\xa6\xb4\xea2\xefhy \x1a\x18\xac\xdd z\x01\xfd\xa6\
\xd2\x86\x0e\x90\x87\xe7\xd1\x83\xed\xf6\xd3u\xdc\x1c/aQ\xe0\x17^\x1f\x83\
\x85\xd2\xcc\xdb\x99\xa0\xb4v\xd7{\xce\x9d\xe7zll&\x8cu\xd7\xcf\x87e\xedLJ\
\xb9\x8d\xcdM\xd6\xc7H\xf96\xef\xdd(\x98\xe4\x03\xc7K~\xfb\xcf.\xc6>%\xc5\
\x1f\x05\x02)o{~\x9f\x90o\xd0\xd1#y\x9f\xde\x9f\x8c\xbe\xd7y\xbe/\xac\x10\
\x8c\x9b\x1eGgkMT\x8c\x9f\x04\x80x4\xdf^O\x17\x9f\x91`\x1b\xc9\t\xb0*\x16\
\x92\x87\x8c\x7fL\xf9\xbd\x7f\x98\xf7\xbd\xa4tv\x98CX\xf8\xed>\xfa\x9c\x9f\
\xd7U\xebv\x1d\xcc\x8dI\xb4|:\x0f\xddow?wQ\xd0\xb3\xce.{\x1emo;.\xd4\xf9"\
\xd4\x05\xd3\xed\x84\'5\t[\x90\xa2\x83&\xc6\x93U\xd5\xe8?\xb6\x1e\xe3g6_\xef\
\xeb\x8a\xac\x89KeC\xca\xcc\xea4<vBp\x18\x9f\xe6\xd3\x98\x8c)\xabA\x0cm\x87\
\'T\xa7Y\xa5\xba\xf5\x9e\xec//\x97\x1f\x7f\xb7\x7fu\xfe\x93\x92\xb1\x0cVb*\
\xfd\x9c-\x98\xf2}3\x96\xc9@\x14\x80DI\x07b\x88\x96U\xebD\xfcM\xe5k\xf3\xee{\
IV\xfe\xab\xeb1\xf2\xf6z\xb9\xdd\xeb\xfb\xfe\x0b\x14uLzrz\xa2\xdb`\x04\xfa\
\x97\x1d\xe2\xc4\xcd\xf3y_\x9a\xf5w\xd3\x936\x7f\xb7`w\xbeD\x15\x8d\xdf\xfd\
\xba\xd2\x8e$\xe5`\x8b\x11\xe9FrF\x04w@\x9e\x04\xb1\xed\xed_\x1a\x0e=\x97\
\xf5Me]\xc1\xe3\xec\x8c\x9a\xcfy\xe8\xed\xb4\x88\x0fT\xb4\x10GT\xc0\xc0\x94\
\x9dn\xd0|b\xcc\xe7\xf9r\x01\xa7\x9e\xbc|\x16,y\xa4=\xdc\x8c\xc8\x92\xa4~\
\x8f\xcb\xef}\x81w\xc9\xbf\xad\x7f\xfe\xca\xd9\xe9\xd9?\x1fj\xb4"O\nR\x07\
\x0f\xb4z]j\xfa\xae\x99j\xbcT<+\xde\xddq\xf6\xad9~<\xa7\xf3\x13\xceZ\xd8\xf9\
\xddV\x94\xf5y~\xbe\x03\x19\xbdn\xf2\xfc0\xcfNe~\xa5#`\x91\xe6U\xfd:2\x10\
\x86H\xb9\xd1\xb4<p\x9a\x00\xb6\x1f \xd1\xce\x1f\xd1\x8bU\x02\x14\x83I\x04\
\xbe\xe7\xd1"\x90i@\x9d\xcb\xbe\xfe2\xdf\xf2\xdb\t\xa0\xe0\xf4\x83\xcb\xfd\
\x02h~Y\x04Io}\x9c\xefG\xa9\xfb\xf8\x1co\xe7\xf69\x0ex\x03Q\x0e\t\x07$\x90\
\x9aq\xad9a\'\x8d\xa4\x94Y\x9bC\x1fh\'\'\xd1\x83!\xf2\xd9\x9a\xd3\xa4\xc9w^g\
\xf1\x96IY\x08kx\x8e\xf2\x1c(!\xc0\xb6\x99\xd7V\xcb\xee57"\t\xbf\x00\xc2\x94\
\n\xcc\xe4\xd5\xc8\xb9\xa5\x1a\xa1\xa0\xc9\x85t^\x18D_\x0f\x0f\x97\x056wO\
\xe1\xcef7\x83\xfc\x9d\xdd\xc8B\x04\xe5\xb2\xd6\xde\xeeZ\xbf\xdf\x99\x89\x0f\
\x9b\x14\x1d\xdetKl\xcd\xd3\x1e\xfc\xcd\xa1\xf3\xf5\xd8\xa3\xe41\xf7?l\x914\
\x9a\xfc^c\xd7V\xbcQ\xd7\xc5m\x7f\x17\xdf \x9em\xc5\xacsk\x04\x013U\xb4\x83"\
Y\x80\xd9*\x8a\x17\xa6M\xf5\xe2\x9e\x99\x88\xdb\xe4r\x02>#\x84n\xf9EPv}\xef\
\xbf\xea\xfc\x9c"\x99Y\xc1\xda\xac\x816\xeal\xbb\xe9\xf8T#\xf0\xc0{\xef<z\
\xeb\x14\xf4~|,8\xe5\xac\x11\xb4\xbc\x9c\x81\x92\x14\xcf\x10\x94\xa7\xe3\xba\
\x7f\x0f\xb3\xf5\x13e\x10H\x08\x0b\xc8\xfe\xfa\x9cO\x07o\x00a\x8b\x1e\x92\
\xcb|"\x99\x94 !\x0c\xf5\x1d\x1dT1\xfe\xb4(\xdc1\x98\x8bv\x8e\xb3A\x98\xdb\
\xb7\xb3\x17\xe5\xf7q\xaf\x1aU\xd7a\xb6/}\x9a\xba\xf1\x96\x83\xab\xae\xf65E\
\x9dq\xb44\xf0\x9e\x17\x88\x12\x13N\xd3\x9f\xb0\x1c\xb4<\xcb\xad\x9a8\xfdxO\
\x16\x89\x1e\xd2\xf1\x11`"\x190jX3gU\\[NVR\xc0\x07\xb4\xfdJ\n\x034-\xd6\xb4\
\x8eG\xe2\xe0{Rh\xb7\xfb\\\xabR\'C\xdc\xfe\\n\x0bw\xe786k\xbe\xf9\x1c\xf2\
\xcc\x04u\x16\x03\x82\x82\xe0Q\x92\x7f\x9b8\x02\x19\xc3\xbeT\x87\xce\x8by)\
\xe0M\xee\x00/4\x90\xca\x81L~\xee\x871\xdb\x9f\xbe\xd7\xdb>h\x0b\x99sL\xcf\
\x95Ir\x9e/h~\xe9\x1e\xe7-W>@\xdb\xa9\x05\xf3:\xad\x90\x11\x9c\xfeGf\xb6\xe0\
y@2\x1c\x16\xa9j\xe2\x84\x89\x01)\xb4\x87w=\x0c\x16\r\x93\x0f\xa5xY\xc5\x87\
\xc6q\x81\x90(\xec2 Y\x1b\xc6\xdd\xd70e\xd1\x1eT\xb2k\xea\xa8\x8e\x8b\xc0\
\xc6\xf6\xcb\xda\x06\x99\xd8\xdd\xa0\x1f7\xdb\xb5\xb3\xabxz\x8ff3O\xa2_\x1e\
\xe0\xfb\xa0\xb2\xfc\x18\xe6\x8d\x86@\x1c\xb1h@c\xbah_\x14\x14\x01\x8d\x19\
\xf1E\xe8i+\x03\'\xfb\xb3\xeb(o\xe8bS\xe3s\xb4w \xe0&\xc9\\\x98\x16\x882s)O\
\x19/\x81\xeaVq/\xc8\x12`\x8291,H\x9aq\xeb\xcd\xbf \x9e\t\x9b?\x9bG(%_.*y\
\xb2[\x96{&In@\xfa\xcd&^\xaa\xe0\x11\xc2\xa0\xc3\xa5Q\xcb\xdc\x87Xj!4\xcb\
\xeb\x1a4#\xf0N\xc7\xfdp\xe6\xfd\xa0\xa9\xaa\x1bC\x1d\xe3\x8d<!\xaa\xdf\xc8\
\xb4\x1e j\xc5\x94\xd0\x7f\xec\xa5~\xba \xb7\xd9x>T#\xcf\x1f\xb0E\xe4\xec\
\xca\xff\xb0\x17eN\xa7\x87\\\x03\t3\xc3c{R\xe6\xfbyL\xb6\xdaw\xb9\xe3uY\xdb\
\xf3\xcc\x03\xff\xe5\t\xaf7L\xd9\xfcr\n\xf5\xfemW\xb5\xf9\xa2\xfb5\x96gp+H\
\xc1\xffFQ\xbf\xad\xc6Z\xa0\xd57\x1c\x0c\xa1!\xafK8\x1d\xe6\x9a\x84+\xd6\x08\
\x95\x83\x8a@\xa3j6o\xc6$\t\xe8\x10\x1fX\x0b\xd1\nh\xcfN\xa4X\x80\xbc\xc8\tL\
\x9a\xde\xcb\xb0\xb2|\xae\xc3\x89\x12X\x11+z\x8ek4\x9b\xc4\x84o\x87lj/\x92\
\x95\x15\xf80r\xe1\x8e\x81\xc8R\x95U\xf2VV\xc2\x83S0\x03\xcd\xac\x7f+\x1a\
\xd9\x92C\x1b;\n\n^\xd6\x9a\x98\x1dHy\x89\xb5(\xa8d3\xb5\xf4z/\xda\xe4]\x1f\
\xee\xff\x17\n\x85m\x08\x01H n\x16B\xe0_\x18\xa4x\x00\xdd\xfa\xaf\x9d\xd9h\
\xd0\x11i\xc6d\x98\xd0;u\x19y\x15\x8da\xb4\xba\xb6:\xb31\xb7\xf9*\x9b\xcf\
\xe8\xdf\xfb\xba\x96\x9e\x0e\xc3\x07\x8a4wzz\x94&y1M<\x85\xfarY\xf2\\_\x8f\
\xcd\x13\xb0Zp\xa3\xd4\x0c2U\xa7/2\x154`\xf4\x86\xce\x7f\xdc}CXu\x89\x13H@\
\x0b\x9c\xd9\xba\xb3UQ\x1f\x92\x17\xa4\x15\xa5\x94\xa0\xa4\xca\xf1\x80mFuz\
\x8e\xb4TN\x8a\x89\xb9\x10k]\xa6\xa9\xd5\xa5\xe1\xfb\x0c\xbcu\xeb~\x7f\x18=\
\xca\xb3~\xdfywb\xec\xf2\x9e4\xe7\x0fRA\xcc\rU:Q\x96W6\xac\xbf\x88\xa0\xeat\
\xbd|\xae\xaa\xf3a_Z\x84\xde\x0eS\x93(\xf9\x9b\xe3\xf2P\xd5\xe7\xe5\xba\x80o\
G\xf7\xbe\xc1\xae\x1a\x853\xf4\x98\xcfN)c\x91\xae\xf1\xfeZ\x96;\xf9\x886\xdd\
u7?\xaf\xd9\xc4{AS\xc2\x9a!\xb3\xd0\r\x03\x94\x15\xb1\xec\x17>\xf1f(\xcf\xe2\
\xa3\xd5TE\xa81\xb0\xad\xc9{\xda\x94>\x95\xeb\xda\xf9\x92\\\n\x11\xc2$\x08\
\x1a\xd1\x9e\xe1\xf7\xda\x95\xe54V\x82\xe2E\xe3F\x15\xc5+\xdd\x1e\'\x89\x86\
\xe8\x7fD\xecl\x871\xa06\x1b\xcf7\xde\x0e\x10\xc2\x85\xf0\xcf\xab\xd7\xa2\
\x850\x0c\xb7A\xa5\x0c\\\xbe\xc2\x94\xa0\xdc\x19a\x80\x9d\x8fO\xdf\x1a%\x89F\
\xfc\t_R\xa0*\xb4\x16\x14z)4W\x1e\x1e\xff\x9b\x04q\xf7Y\x82<\xfe\xaa\xa3\x04\
=d\xfc/\x04IJ\xf0C\xe8\xe1\xb7\xac@!\x13\x8c\xe0\xf1\xa3|6)\xe9]\xafk\x8b}\
\x0f\xc8\xa1\xbc8g\x98Y\x1e,\xb1qT\xd2\xc8|\xde\'\x84b\xa3\xdb!\xbc\x7f\x84`\
C\x1b\xa0b\x19\x80v]F"L\t\x87K\xae,\x018s\x10\x855\xf4\xd7\x81@z"\xc2\x9c\
\x18$\xb0l\xed:\x9dXz\x18\xe7`/D\xef\xa0\x83 W\x0e1DC\xd9k\xb9\x97\xe7{\xea\
\x13\xf0\x9a\xe1\x03R\xbblf\xa3\n\xccu^\xe1\x84Q`5C\xfc&V5p\x06\xd4+ \xd2l!\
\xe4\x8aIV\x1a\xb0\x88 H\x07/3w\xc5e)\xe3\xf1\x1b\xf4\x0cB\xddTE~\xb6\xe0\
\xf1\x93\xa0>\x06\x02\x97`|\x95\x12i\xa2\x15:H\xfd\xbf\xa0]A\x94 \xd2\xcaU;5\
\xf7\xb2\xc2>:a\xd4\x13\xa2\x01\xa9\x83b\xc0\x9eTU\x10V}\x18J\x05\xd1\x17\
\x13b\x1c\x16cfqA\xe2t\x17\xb2\xca]S\xe5K.\x9a\xff\xd4W\xa4\n\xb2\x15V"\xff\
\xeet\x9ao\xa3 \x93-\xbf\xc3\x14\x14\xb4\xcf\xbf\xb4~\xb7yH\x98*\x11\x9e\xfe\
\xc2\x85M\x98\xda_\xf6\xaf\x8d\xa4\x91\xb0\xebC\x0ey\xf5\xb8\x18\xd2\x96"\
\xac\xcd\xbb\xeb\x88_\xbf\x1d\x89\xf7\x80\xf0bH\xa1!\x93I\xd5\xed1\xd8\xa7\
\xbb;\xf7\x83\xf75\xd4*\x93\xf1\xf6\xd7,\xa9\x9cy\xf77/~BE\xea\x8a\xc4,\xa1\
\xbe\xb3\xab}\x85\x0e+\x8b\xd2\x18O\x17\x1b\x89\x90V\xa2\x0e\xfc\\)\xbc\xfe\
\xb4\x8b\x193\xb29\x83$q\xbb\x87`\x9a\xc1\x11\xdf9\xfc\x84h$d=\x19\xab\x16\
\x8e\x82\x00/I\x90\x9ch\xad\xb0A\xe0\x7f\xda\xeb1\r' )
def getSCRTBitmap():
return wx.BitmapFromImage(getSCRTImage())
def getSCRTImage():
stream = cStringIO.StringIO(getSCRTData())
return wx.ImageFromStream(stream)
def getSCRTIcon():
icon = wx.EmptyIcon()
icon.CopyFromBitmap(getSCRTBitmap())
return icon
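# A minimal illustrative use of the helpers above (the "frame" variable is
# hypothetical and would be a wx.Frame created by the application):
#   frame.SetIcon(getSCRTIcon())
#   toolbar_bmp = getSCRTBitmap()  # a wx.Bitmap usable in toolbars or buttons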
#----------------------------------------------------------------------
# This file was generated by
#
from wx.lib.embeddedimage import PyEmbeddedImage
icon = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAACoNJ"
"REFUWIXdl21wnNV1x3/3Pi+7++yupF1pZWMLy0aABztuII4hhBASUibBwSYQ7ABjppRMKG2Y"
"dNpmSIeG4Gmhw5QQoE2cJoY0JYkJKJAgT2zTYOMX2mCwzThjG4wtG1uWZe1K2vfd5+U+9/aD"
"pAxD0+n33m/37X/Pved//+cc+P/eBIgP9t8/9j8mPwhgwPxfoB9cI0A8D/J5sBogMmCWggZ4"
"GZxlIOZBuB6UPbtpPdhnwR0Aa+PMAdeCml04C3wLOLvByYIF4ILeAOpTEAJ6EKx7IVmE7AB4"
"p8B9COIV0F4L5l7I7wa5HEaBKXv28Eeh+0KYdxjSDZA5MHuhMQ5nBmFqLejPgXsc5m6G3j+B"
"ZBnEOoj2wNQSGP0JBGeh8wz098DCTpi/GzLzIfwWTNwM/jhcXBTCEcZs3gkNIUBMQcercPnX"
"hbjpVSHmGrAE6BXGnOs2ZtPVsP86CLbD3Pvh6oJlfX7cmCzT/oxGtP7teTC0DeorYOlTcP1S"
"1/3MNss6vyRE0jMmviKOS4+H0cklae/jpywr9Zta7Y5XYcg2IPZA/h4h1ngdHXdeKaU16xbX"
"GH2sUjm6Ao48D+p66PuHROKBlz1vsXofDz4aBJd/qNU68jXw74Bbz2Wz637leclUMomUkglj"
"OBaGHf3NxqJyR6dMBD7/VCP/NNj2epB3Qe5i11093t3tZGxnmqbCUK83pa5Ulr0J7tVg/wDm"
"jiSTi9M9PVZKymkCGsPRsbHeObCoDYU3sh1fzvd0iznpNH4QEAQBtm3Tk8/T9jzZ4bqUm02T"
"g1iDlgAHQE5B0gC5rg66OjvRsSGKIl6EkWtA3g/uW+AoIYRt2+RzObKZDCpSdOvY/xeY85Lr"
"runu7RHd+TyRUsSlUpybmGg7pVLUbLXI53LYjkNKqfiz0PwhaHspmP1Q+1Ec77mzVLq+lkhY"
"lmWhx8f1mjA89R6MGOj5V0g8BvX97fbI9vHiglQqJaqVKtlSMRgMwj3HIPVgKrW030ujjWFi"
"fJyT1eorsTGv3wwDNcyatucltNZ4sVb3QW0OKOsX01+JRVpHyyJ12faUl/MDn4cmJo/dFkWb"
"noWgCgsfhq55UB+I42AHXO57KWtyaoobqtWft7TesgEuOtfZuby7p4dKpcIdxeLZ01p/9ybY"
"9SE4/bBS/T+27X7HtklXyq0Xlfr3fXBKGjB3QfMROFERtC1LEquYm7UefhCK35fyut2e99jd"
"rvvwMsjfAxNt25ZGG+aFYWjgv/bA0R3CyjiOgyUtlFKs1rr0NpxbCad+AIdu1XqL7/sEUcTC"
"OJ66CipLZzmwE9Sz4J+QMimlRCnFn8L401CYn86s9np7nV2Z9MAyWLAXsm3XlXEc88koHP8M"
"HPdhqiqxhBAIKXBdl+/Zdl8Kzv8V5DZCawkcuabVGmFyMtyh1H8ch6n1oH+vhM+CfUZKTwhB"
"HMfcAMoTYkVPNiO6cl2063Ueh74Hodu4CREpxRdUfPIFKH4f/H6tm8ZMK3I+n2d7rdY9VCrd"
"vkJr/0dw8CU42263v73a9xd805htBsoCjARYBeJxcKekTCJAK8XbkPuK637E8zwsYSEtyQ6Y"
"e5WUl7muQxAE3BfH774BlY3QesKYk0EQEEUR+VyOnkKBz3d1ffKIbd9XgWt3QW43vH6dMc+8"
"Bb97EpoGjAVQBvsv4MLXXfc2P5t1s/V6HIZhdUs2e4nneYRBQBBGmGazfMBxLqnm84lGo8Ey"
"3/9xP+zfAv4uyHTCDb7nubbjkE5P/4ZdQhQWKfXRrjhunAcjTTj5n1DeABHMBJQN4DwFF0+6"
"7lqVzToLajX9mFKJ5/L5jK7XqQcBjuMwUa9H48nkHKejQ3SUy8GbUfTMKLzzAoSXgDWslHfc"
"mOVGa2zHwfM8HNvmtGV572p9aTsM63thOISJ4xADSIBNIF+CpJLSMsZwgVLiu67b4zgOH6s3"
"VBhFSMviDeg1jitjpbhRRWcGYPw5iO8F+yGYqBjz6z2VyivFUoliqUi77ZPL5ykUClQKBe/1"
"dPrLd8GHC5CZDem2APEayC2QCoWQWmvmay3/uaNDJMOQTVF4qqCcASEE7wiR1q5DpBR/peKj"
"q6D8JfD+DApfAXsYhtNKPV2pVBq2UqvDIJRg6OnpwXZszoZh9o9arRstY377DJQBYwkQnZDM"
"w6UHPG8VrisajYaodnWRajRUKQh2FW17IMxmbVmry3fyOcIw4GijsW0IXtsP5x2F1bukXLLY"
"mFMfgxPr4PTNYVhsheFFJ8DLpD3S6QytVouDjUanF8ebPwFjO2d1YD9Y+yAVCSkSiQSHMhkc"
"x+FT7fbY38Ohfq0blm0x2JklnUoTBCEuvH0FmK/DpU9kMvcPed7fzIWBn0Hl57DvLAwOB8Gj"
"n5iYmCiWJlBKYVsWJcvKfBrSh2fcLw2YxWAfgpSypMhmMnT19tJutwmjaNsReHex1lVhIFXo"
"BQx9QdDeD2NXgP1tIa6pdmS9Ec/rXAIX/hoSK6H8d3BkFey5MQh+Wa1WabfbCCGJQB6ZIf8s"
"CeUDYB2DlJQSx3FwbJtCsxksh70Xw9mPaD0Va00+nyeKIm5X6tQFUOoFd4sQ57uOi+u6jMHH"
"75rOqtyVEO6H4lUw6USRiaIIrWPSxoS3Qvi3M3mkBHgA7K9CQUqJNoZmq8VNvj/8CBz7Syh/"
"1piSjmMQEIQhj8XxoR1QvhwS70jZbVkW6UyGW5LJKzfCioVwYRLmbYXzt0IhtG0hLYswilis"
"1GQf1DbPGGALMCfAzkJhgZREUUSj0aBbqa13wpkNIM8YU4y1JgojgiDgQa1PjELji5CbZ1kd"
"lpDkuzo5nM93rT43fs+zOs7fBlOT0DeUTHwxl8thSUmz2eRQFL16HKa6ZrJk+3mQk+C8KaZv"
"EgQBi5rN5rVwoAOmVkJ2CBoH4pgoigjDkGE43gf+1yD1pJQZ6VgIy6K7p4f1Klo2v1a/qBrr"
"6hW21W3yeXtuJkOtWqO/0Wh8B3ZXoMKsAQC3gH27ICuEoNls8o0oOLwPhrdCawsk/s2Yyk+0"
"xm+3mRcELQOjayD8BiTHpUxawqJULOE4NrnOLlqJZPK1OE7atk3aS1GpVvErZR23WoPr4XAa"
"WrMktAG+BfFmKe16vU4cxyyL4qE/htEpUO9BcD2M+r5PFEV8L4r2PQJjr4C6EkxVSisKApqT"
"Eya2HZFIJrBsC1vYGGOYKlfoaDbDt1qtrU8YM/gMnB6diQMA1iCI34GzV+uWCsPaOt/f/gC8"
"LOFEE8K9IDyw8koFE0FwbHEcv3AQDi6EZhpyWWlddzjwcxfUalt+E/hn3vb9ZLHddsJ22/Q1"
"m82/bjYPr/L9n/7SmBdXwsEumNo58/wzxQ7cDd6b0D8GhTeg9RycXgiTayAeBKsIhRT0Pwru"
"KIwNwZkSRP8IfU/C1X2wwIYTK8H/KuSHIFsF6zJo/xSKE/DeEzByLZSXv+/2vzcAkFdCogSO"
"C3ot+OtnyrEZpXRegeRGEE9B+GkIDJi7wbsB5twOqXVQHYBoApJ3Q2ITyA9DeBu0MtD4cwje"
"j/lBAxAgzHSlY/63gvQPzMs10xoid4JaCuYCkN+c0ZcMmLUQrwX9hzAB/hvDIQ467tn5ugAA"
"AABJRU5ErkJggg==")
geticonData = icon.GetData
geticonImage = icon.GetImage
geticonBitmap = icon.GetBitmap
geticonIcon = icon.GetIcon
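# The PyEmbeddedImage accessors above play the same role as the classic
# helpers; a typical (hypothetical) use is frame.SetIcon(geticonIcon()) or
# wx.StaticBitmap(parent, -1, geticonBitmap()).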
|
gpl-3.0
| 8,281,288,084,170,631,000 | 76.25459 | 79 | 0.815934 | false |
GSA/PricesPaidAPI
|
SolrLodr.py
|
1
|
4723
|
#!/usr/local/bin/python
import solr
import sys, traceback
# This file is for (for example) Apache with mod_wsgi.
import sys, os
# import sys
# sys.path.insert(0, '../configuration/')
# The purpose of this file is to take the standard
# datafiles and load them into SOLR in such a way that they
# will be searchable.
# This is meant to be run from a command line because
# I assume it is to be invoked when you change the
# source data directory, which implies you are changing
# files and it will be easy to run it from a command line.
# Later, we can wrap this into something that allows
# a file to be uploaded through the API.
# We may someday need to manage the SOLR index with
# an administrative interface, but for now the goal is
# just to make it reflect the directory. I'm assuming
# those are the simplest way to do these things.
import Transaction
import time
from configs.ppApiConfig import PathToDataFiles, MAXIMUM_NUMBER_TO_LOAD, SolrDeleteExistingData, PathToActualInputFiles
# Note: For now, these are explicit imports.
# Eventually, we want to make this automatic, and essentially
# create a dynamic array of adapters and loaders based on
# what we find in some directory so that it is easily
# extendable. But that would be over-engineering if we did it now.
from RevAucAdapter import getDictionaryFromRevAuc,loadRevAucFromCSVFile
from OS2Adapter import getDictionaryFromOS2,loadOS2FromCSVFile
from GSAAdvAdapter import getDictionaryFromGSAAdv,loadGSAAdvFromCSVFile
from LabEquipAdapter import getDictionaryFromLabEquipment,loadLabequipmentFromCSVFile
from USASpendingAdapter import getDictionaryFromUSASpending,loadUSASpendingFromCSVFile
from EDWGSAAdvAdapter import getDictionaryFromEDWGSAAdv,loadEDWGSAAdvFromCSVFile
from csv_rename import splitfiles
from os import listdir
from os.path import isfile, join
import re
import logging
import SearchApi
logger = logging.getLogger('PPSolrLodr')
hdlr = logging.FileHandler('../logs/PPSolrLodr.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.ERROR)
LIMIT_NUM_MATCHING_TRANSACTIONS = 5000*1000*100;
# create a connection to a solr server
# This needs to come from ppconfig
solrCon = solr.SolrConnection('http://localhost:8983/solr')
def chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
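# Illustrative behaviour of chunks() (values are hypothetical):
#   list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]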
idcnt = 0
def loadChunk(filename,chunk):
global idcnt
l = []
for t in chunk:
d = {}
# we need to look at the dictionary and map
# non standard fields to those matching our "dynamic field" name
# in the schema.
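# For example (field name is illustrative): a key "ContractOffice" that is
# not in Transaction.STANDARD_FIELDS is stored as "ContractOffice_t", so it
# matches a Solr dynamic-field rule such as "*_t".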
for key, value in t.dict.items():
v = unicode(value, errors='ignore')
# This unicode stuff needs to be changed at the source..
# We should not carry around bad data and then cover it up like this!
if (key in Transaction.STANDARD_FIELDS):
d[unicode(key,errors='ignore')] = v;
else:
# I think _txt might be clearer!
d[key+"_t"] = v;
# possibly the addition of this id field should actually be done
# when we create the objects! That would make the class useful!
d['id'] = filename+"_"+str(idcnt);
idcnt = idcnt+1;
l.append(d);
try:
print "about to add "+str(len(l))
solrCon.add_many(l)
solrCon.commit()
print "success"
except:
print "failure"
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stderr)
logger.error("don't know what went wrong here")
def loadSolr(filename,transactions):
global idcnt
chunkedTransactions = list(chunks(transactions, 1000))
for chunk in chunkedTransactions:
loadChunk(filename,chunk)
# Before we load, we need to delete!
# This seems a little dangerous, but there is not much we can do.
# We really want to make this a command-line argument so
# that we can load one data file at a time.
# Default param for SolrDeleteExistingData in ppGuiConfig is F
if SolrDeleteExistingData=='T':
response = solrCon.delete_query('*:*')
solrCon.commit()
print "Solr Loader Starts"
onlyfiles = [ f for f in listdir(PathToActualInputFiles) if isfile(join(PathToActualInputFiles,f)) ]
onlycsvfiles = [ f for f in onlyfiles if re.search(".csv$",f)]
for filename in onlycsvfiles:
splitfiles(filename)
SearchApi.applyToLoadedFiles(filename,PathToDataFiles,None,loadSolr,MAXIMUM_NUMBER_TO_LOAD)
print "Solr Loader Ends"
|
unlicense
| 4,258,783,536,388,830,700 | 34.246269 | 119 | 0.712683 | false |
ntoll/code-dojo
|
adventure/week3/team3/adventure.py
|
1
|
6088
|
from cmd import Cmd
import re
DIRECTIONS = 'N', 'E', 'S', 'W'
NORTH, EAST, SOUTH, WEST = DIRECTIONS
class Player(object):
def __init__(self, location, name='Player'):
assert isinstance(location, Location)
self.location = location
self.name = name
class Location(object):
def __init__(self, name, description=""):
self.name = name
self.description = description
self.exits = dict()
self.props = []
def __str__(self):
return self.name
def add_direction(self, direction, other_location):
assert direction in DIRECTIONS
self.exits[direction] = other_location
def describe(self):
out = ''
out += "Current location: %s\n%s\n\n" % (self.name, self.description)
for direction, location in self.exits.items():
out += "\t%s (%s)\n" % (location, direction)
if self.props:
plural = len(self.props) > 1
out += "\n%s item%s may come in handy (hint hint):\n\t%s" \
% (['This', 'These'][plural], ['', 's'][plural], '\n\t'.join(prop.aliases[0] for prop in self.props))
return out
class Prop(object):
def __init__(self, name):
self.description = None
self.location = None
self.aliases = [name]
def test_location():
startroot = Location('Start room')
kitchen = Location('Kitchen')
startroot.add_direction(NORTH, kitchen)
def test_player():
lobby = Location('Lobby')
john = Player(lobby, 'John')
def load_universe(content):
location = first_location = None
locations = {}
props = {}
#parts = re.split(r"(?:\n|\r\n|\r){2,}", content.read())
parts = content.read().split('\r\n\r\n')
import pdb
for part in parts:
location = None
prop = None
for line in part.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
#if line == 'N:Hall':
# pdb.set_trace()
if not location and not prop:
# first line
if line.startswith(':'):
location = Location(line[1:])
locations[line[1:]] = location
if not first_location:
first_location = location
if line.startswith('*'):
prop = Prop(line[1:])
props[line[1:]] = prop
else:
if location:
#print 'line', line
if not location.description or line[1] != ':':
location.description+= line
else:
direction, destination = line.split(':', 1)
#print 'direction, destination', direction, destination
location.add_direction(direction, destination)
else:
if not prop.location:
items_location = locations[line]
prop.location = items_location
items_location.props.append(prop)
elif not prop.description:
prop.description = line
elif line.startswith("A:"):
# aliases
#A:flashlight
prop.aliases = [x.strip() for x in line[2:].split(',')]
for location in locations.values():
for direction, destination in location.exits.items():
try:
location.add_direction(direction, locations[destination])
except KeyError:
raise SystemError("Your universe file sucks! %s" % destination)
return locations, first_location
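# A plausible universe-file fragment, inferred from the parsing logic above
# (room and item names are illustrative, not taken from any shipped data file):
#
#   :Lobby
#   A dusty entrance hall.
#   N:Kitchen
#
#   *torch
#   Lobby
#   A flickering torch.
#   A:torch,flashlight
#
# Blocks are separated by blank lines (CRLF in this parser); ":Name" opens a
# location, "*name" opens a prop, "X:Destination" adds an exit (X one of
# N/E/S/W, listed after the description), and "A:..." lists prop aliases.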
class Game(Cmd):
def __init__(self, gamefile, player_name):
Cmd.__init__(self)
self.locations, self.start_room = load_universe(file(gamefile))
self.player = Player(self.start_room, player_name)
print self.player.location.describe()
def do_move(self, direction):
direction = direction.upper()
newroom = self.player.location.exits.get(direction,None)
if newroom == None:
print "No pass around!"
return
self.player.location = self.player.location.exits[direction]
def do_look(self, where):
if where == "":
self.player.location.describe()
else:
# TODO validate where
newroom = self.player.location.exits.get(where,None)
print newroom.describe()
pass
def do_joke(self, ok):
print "that is not funny. What don't you try a pun?"
if hasattr(self, 'joke'):
print 'this is funny:%s' % self.joke
self.joke = ok
def postcmd(self, stop, x):
#pass
if not hasattr(self, 'joke'):
print self.player.location.describe()
#print self.player.location.describe()
def play(gamefile):
#start_room = _create_universe()
player_name = raw_input('Player name?: ') or 'No name'
g = Game(gamefile, player_name)
g.cmdloop()
''' while True:
if not player.location.exits:
print "No more exits! GAME OVER!"
break
next_direction = raw_input('Where to next? ').upper()
while next_direction not in player.location.exits.keys():
next_direction = raw_input('Where to next? (%s) ' %\
', '.join(player.location.exits.keys())).upper()
player.location = player.location.exits[next_direction]
'''
if __name__ == '__main__':
import sys
if sys.argv[1] == 'test':
test_location()
test_player()
sys.exit(0)
try:
play(sys.argv[1])
except KeyboardInterrupt:
pass
|
mit
| 1,014,150,448,326,991,400 | 29.813472 | 113 | 0.508377 | false |
amanzi/ats-dev
|
tools/meshing_ats/meshing_ats/meshing_ats.py
|
1
|
34933
|
"""Extrudes a 2D mesh to generate an ExodusII 3D mesh.
Works with and assumes all polyhedra cells (and polygon faces).
To see usage, run:
------------------------------------------------------------
python meshing_ats.py -h
Example distributed with this source, to run:
------------------------------------------------------------
$> cd four-polygon-test
$> python ../meshing_ats.py -n 10 -d 1 ./four_polygon.vtk
$> mkdir run0
$> cd run0
$> ats --xml_file=../test1-fv-four-polygon.xml
Requires building the latest version of Exodus
------------------------------------------------------------
Note that this is typically done in your standard ATS installation,
assuming you have built your Amanzi TPLs with shared libraries (the
default through bootstrap).
In that case, simply ensure that ${AMANZI_TPLS_DIR}/SEACAS/lib is in
your PYTHONPATH.
"""
from __future__ import print_function
import sys,os
import numpy as np
import collections
import argparse
try:
import exodus
except ImportError:
sys.path.append(os.path.join(os.environ["SEACAS_DIR"],"lib"))
import exodus
class SideSet(object):
def __init__(self, name, setid, elem_list, side_list):
assert(type(setid) == int)
assert(type(elem_list) == list or type(elem_list) == np.ndarray)
assert(type(side_list) == list or type(side_list) == np.ndarray)
self.name = name
self.setid = setid
self.elem_list = elem_list
self.side_list = side_list
class LabeledSet(object):
def __init__(self, name, setid, entity, ent_ids):
assert entity in ['CELL', 'FACE', 'NODE']
assert(type(setid) == int)
assert(type(ent_ids) == list or type(ent_ids) == np.ndarray)
self.name = name
self.setid = setid
self.entity = entity
self.ent_ids = np.array(ent_ids)
class Mesh2D(object):
def __init__(self, coords, connectivity, labeled_sets=None, check_handedness=True):
"""
Creates a 2D mesh from coordinates and a list cell-to-node connectivity lists.
coords : numpy array of shape (NCOORDS, NDIMS)
connectivity : list of lists of integer indices into coords specifying a
(clockwise OR counterclockwise) ordering of the nodes around
the 2D cell
labeled_sets : list of LabeledSet objects
"""
assert type(coords) == np.ndarray
assert len(coords.shape) == 2
self.dim = coords.shape[1]
self.coords = coords
self.conn = connectivity
if labeled_sets is not None:
self.labeled_sets = labeled_sets
else:
self.labeled_sets = []
self.validate()
self.edge_counts()
if check_handedness:
self.check_handedness()
def validate(self):
assert self.coords.shape[1] == 2 or self.coords.shape[1] == 3
assert type(self.conn) is list
for f in self.conn:
assert type(f) is list
assert len(set(f)) == len(f)
for i in f:
assert i < self.coords.shape[0]
for ls in self.labeled_sets:
if ls.entity == "NODE":
size = len(self.coords)
elif ls.entity == "CELL":
size = len(self.conn)
for i in ls.ent_ids:
assert i < size
return True
def num_cells(self):
return len(self.conn)
def num_nodes(self):
return self.coords.shape[0]
def num_edges(self):
return len(self.edges())
@staticmethod
def edge_hash(i,j):
return tuple(sorted((i,j)))
def edges(self):
return self.edge_counts().keys()
def edge_counts(self):
try:
return self._edges
except AttributeError:
self._edges = collections.Counter(self.edge_hash(f[i], f[(i+1)%len(f)]) for f in self.conn for i in range(len(f)))
return self._edges
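# Illustrative semantics (values hypothetical): edge_hash(3, 1) -> (1, 3), and
# edge_counts() maps each undirected edge to the number of cells sharing it,
# so interior edges count 2 and boundary edges count 1.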
def check_handedness(self):
for conn in self.conn:
points = np.array([self.coords[c] for c in conn])
cross = 0
for i in range(len(points)):
im = i - 1
ip = i + 1
if ip == len(points):
ip = 0
p = points[ip] - points[i]
m = points[i] - points[im]
cross = cross + p[1] * m[0] - p[0] * m[1]
if cross < 0:
conn.reverse()
def plot(self, color=None, ax=None):
if color is None:
import colors
cm = colors.cm_mapper(0,self.num_cells()-1)
colors = [cm(i) for i in range(self.num_cells())]
else:
colors = color
verts = [[self.coords[i,0:2] for i in f] for f in self.conn]
from matplotlib import collections
gons = collections.PolyCollection(verts, facecolors=colors)
from matplotlib import pyplot as plt
if ax is None:
fig,ax = plt.subplots(1,1)
ax.add_collection(gons)
ax.autoscale_view()
@classmethod
def read_VTK(cls, filename):
try:
return cls.read_VTK_Simplices(filename)
except AssertionError:
return cls.read_VTK_Unstructured(filename)
@classmethod
def read_VTK_Unstructured(cls, filename):
with open(filename,'r') as fid:
points_found = False
polygons_found = False
while True:
line = fid.readline().decode('utf-8')
if not line:
# EOF
break
line = line.strip()
if len(line) == 0:
continue
split = line.split()
section = split[0]
if section == 'POINTS':
ncoords = int(split[1])
points = np.fromfile(fid, count=ncoords*3, sep=' ', dtype='d')
points = points.reshape(ncoords, 3)
points_found = True
elif section == 'POLYGONS':
ncells = int(split[1])
n_to_read = int(split[2])
gons = []
data = np.fromfile(fid, count=n_to_read, sep=' ', dtype='i')
idx = 0
for i in range(ncells):
n_in_gon = data[idx]
gon = list(data[idx+1:idx+1+n_in_gon])
# check handedness -- need normals to point up!
cross = []
for i in range(len(gon)):
if i == len(gon)-1:
ip = 0
ipp = 1
elif i == len(gon)-2:
ip = i+1
ipp = 0
else:
ip = i+1
ipp = i+2
d2 = points[gon[ipp]] - points[gon[ip]]
d1 = points[gon[i]] - points[gon[ip]]
cross.append(np.cross(d2, d1))
if (np.array([c[2] for c in cross]).mean() < 0):
gon.reverse()
gons.append(gon)
idx += n_in_gon + 1
assert(idx == n_to_read)
polygons_found = True
if not points_found:
raise RuntimeError("Unstructured VTK must contain sections 'POINTS'")
if not polygons_found:
raise RuntimeError("Unstructured VTK must contain sections 'POLYGONS'")
return cls(points, gons)
@classmethod
def read_VTK_Simplices(cls, filename):
"""Stolen from meshio, https://github.com/nschloe/meshio/blob/master/meshio/vtk_io.py"""
import vtk_io
with open(filename,'r') as fid:
data = vtk_io.read_buffer(fid)
points = data[0]
if len(data[1]) != 1:
raise RuntimeError("Simplex VTK file is readable by vtk_io but not by meshing_ats. Includes: %r"%data[1].keys())
gons = [v for v in data[1].itervalues()][0]
gons = gons.tolist()
# check handedness
for gon in gons:
cross = []
for i in range(len(gon)):
if i == len(gon)-1:
ip = 0
ipp = 1
elif i == len(gon)-2:
ip = i+1
ipp = 0
else:
ip = i+1
ipp = i+2
d2 = points[gon[ipp]] - points[gon[ip]]
d1 = points[gon[i]] - points[gon[ip]]
cross.append(np.cross(d2, d1))
if (np.array([c[2] for c in cross]).mean() < 0):
gon.reverse()
return cls(points, gons)
@classmethod
def from_Transect(cls, x, z, width=1):
"""Creates a 2D surface strip mesh from transect data"""
# coordinates
if (type(width) is list or type(width) is np.ndarray):
variable_width = True
y = np.array([0,1])
else:
variable_width = False
y = np.array([0,width])
Xc, Yc = np.meshgrid(x, y)
if variable_width:
assert(Yc.shape[1] == 2)
assert(len(width) == Yc.shape[0])
assert(min(width) > 0.)
Yc[:,0] = -width/2.
Yc[:,1] = width/2.
Xc = Xc.flatten()
Yc = Yc.flatten()
Zc = np.concatenate([z,z])
# connectivity
nsurf_cells = len(x)-1
conn = []
for i in range(nsurf_cells):
conn.append([i, i+1, nsurf_cells + i + 2, nsurf_cells + i + 1])
coords = np.array([Xc, Yc, Zc])
return cls(coords.transpose(), conn)
@classmethod
def from_Transect_Guide(cls, x, z, guide):
"""Creates a 2D surface strip mesh from transect data"""
assert type(guide) == np.ndarray
assert guide.shape[1] == 3
# coordinates
Xc = x
Yc = np.zeros_like(x)
Zc = z
nsteps = guide.shape[0]
xnew = Xc
ynew = Yc
znew = Zc
for i in range(nsteps):
xnew = xnew + guide[i][0]
ynew = ynew + guide[i][1]
znew = znew + guide[i][2]
Xc = np.concatenate([Xc, xnew])
Yc = np.concatenate([Yc, ynew])
Zc = np.concatenate([Zc, znew])
# y = np.array([0,1,2])
# Xc, Yc = np.meshgrid(x, y)
# Xc = Xc.flatten()
# Yc = Yc.flatten()
# Zc = np.concatenate([z,z,z])
# connectivity
ns = len(x)
conn = []
for j in range(nsteps):
for i in range(ns-1):
conn.append([j*ns + i, j*ns + i + 1, (j+1)*ns + i + 1, (j+1)*ns + i ])
coords = np.array([Xc, Yc, Zc])
return cls(coords.transpose(), conn)
@classmethod
def from_Transect_GuideX(cls, x, z, guide, nsteps):
"""Creates a 2D surface strip mesh from transect data"""
assert type(guide) == np.ndarray
assert guide.shape[1] == 3
# coordinates
Xc = x
Yc = np.zeros_like(x)
Zc = z
nsteps = guide.shape[0]
xnew = np.zeros_like(x)
ynew = np.zeros(len(x))
znew = np.zeros_like(x)
xnew[:] = Xc[:]
ynew[:] = Yc[:]
znew[:] = Zc[:]
for i in range(nsteps):
print(Yc)
for j in range(len(x)):
xnew[j] = xnew[j] + guide[j][0]
ynew[j] = ynew[j] + guide[j][1]
znew[j] = znew[j] + guide[j][2]
Xc = np.concatenate([Xc, xnew])
Yc = np.concatenate([Yc, ynew])
Zc = np.concatenate([Zc, znew])
# y = np.array([0,1,2])
# Xc, Yc = np.meshgrid(x, y)
# Xc = Xc.flatten()
# Yc = Yc.flatten()
# Zc = np.concatenate([z,z,z])
# connectivity
ns = len(x)
conn = []
for j in range(nsteps):
for i in range(ns-1):
conn.append([j*ns + i, j*ns + i + 1, (j+1)*ns + i + 1, (j+1)*ns + i ])
coords = np.array([Xc, Yc, Zc])
return cls(coords.transpose(), conn)
class Mesh3D(object):
def __init__(self, coords, face_to_node_conn, elem_to_face_conn,
side_sets=None, labeled_sets=None, material_ids=None):
"""
Creates a 3D mesh from coordinates and connectivity lists.
coords : numpy array of shape (NCOORDS, 3)
face_to_node_conn : list of lists of integer indices into coords specifying an
(clockwise OR counterclockwise) ordering of the nodes around
the face
elem_to_face_conn : list of lists of integer indices into face_to_node_conn
specifying a list of faces that make up the elem
"""
assert type(coords) == np.ndarray
assert len(coords.shape) == 2
assert coords.shape[1] == 3
self.dim = coords.shape[1]
self.coords = coords
self.face_to_node_conn = face_to_node_conn
self.elem_to_face_conn = elem_to_face_conn
if labeled_sets is not None:
self.labeled_sets = labeled_sets
else:
self.labeled_sets = []
if side_sets is not None:
self.side_sets = side_sets
else:
self.side_sets = []
if material_ids is not None:
self.material_id_list = collections.Counter(material_ids).keys()
self.material_ids = material_ids
else:
self.material_id_list = [10000,]
self.material_ids = [10000,]*len(self.elem_to_face_conn)
self.validate()
def validate(self):
assert self.coords.shape[1] == 3
assert type(self.face_to_node_conn) is list
for f in self.face_to_node_conn:
assert type(f) is list
assert len(set(f)) == len(f)
for i in f:
assert i < self.coords.shape[0]
assert type(self.elem_to_face_conn) is list
for e in self.elem_to_face_conn:
assert type(e) is list
assert len(set(e)) == len(e)
for i in e:
assert i < len(self.face_to_node_conn)
for ls in self.labeled_sets:
if ls.entity == "NODE":
size = self.num_nodes()
if ls.entity == "FACE":
size = self.num_faces()
elif ls.entity == "CELL":
size = self.num_cells()
for i in ls.ent_ids:
assert i < size
for ss in self.side_sets:
for j,i in zip(ss.elem_list, ss.side_list):
assert j < self.num_cells()
assert i < len(self.elem_to_face_conn[j])
def num_cells(self):
return len(self.elem_to_face_conn)
def num_faces(self):
return len(self.face_to_node_conn)
def num_nodes(self):
return self.coords.shape[0]
def write_exodus(self, filename, face_block_mode="one block"):
"""Write the 3D mesh to ExodusII using arbitrary polyhedra spec"""
# put cells in with blocks, which renumbers the cells, so we have to track sidesets.
# Therefore we keep a map of old cell to new cell ordering
#
# also, though not required by the spec, paraview and visit
# seem to crash if num_face_blocks != num_elem_blocks. So
# make face blocks here too, which requires renumbering the faces.
# -- first pass, form all elem blocks and make the map from old-to-new
new_to_old_elems = []
elem_blks = []
for i_m,m_id in enumerate(self.material_id_list):
# split out elems of this material, save new_to_old map
elems_tuple = [(i,c) for (i,c) in enumerate(self.elem_to_face_conn) if self.material_ids[i] == m_id]
new_to_old_elems.extend([i for (i,c) in elems_tuple])
elems = [c for (i,c) in elems_tuple]
elem_blks.append(elems)
old_to_new_elems = sorted([(old,i) for (i,old) in enumerate(new_to_old_elems)], lambda a,b: int.__cmp__(a[0],b[0]))
# -- deal with faces, form all face blocks and make the map from old-to-new
face_blks = []
if face_block_mode == "one block":
# no reordering of faces needed
face_blks.append(self.face_to_node_conn)
elif face_block_mode == "n blocks, not duplicated":
used_faces = np.zeros((len(self.face_to_node_conn),),'bool')
new_to_old_faces = []
for i_m,m_id in enumerate(self.material_id_list):
# split out faces of this material, save new_to_old map
def used(f):
result = used_faces[f]
used_faces[f] = True
return result
elem_blk = elem_blks[i_m]
faces_tuple = [(f,self.face_to_node_conn[f]) for c in elem_blk for (j,f) in enumerate(c) if not used(f)]
new_to_old_faces.extend([j for (j,f) in faces_tuple])
faces = [f for (j,f) in faces_tuple]
face_blks.append(faces)
# get the renumbering in the elems
old_to_new_faces = sorted([(old,j) for (j,old) in enumerate(new_to_old_faces)], lambda a,b: int.__cmp__(a[0],b[0]))
elem_blks = [[[old_to_new_faces[f][1] for f in c] for c in elem_blk] for elem_blk in elem_blks]
elif face_block_mode == "n blocks, duplicated":
elem_blks_new = []
offset = 0
for i_m, m_id in enumerate(self.material_id_list):
used_faces = np.zeros((len(self.face_to_node_conn),),'bool')
def used(f):
result = used_faces[f]
used_faces[f] = True
return result
elem_blk = elem_blks[i_m]
tuple_old_f = [(f,self.face_to_node_conn[f]) for c in elem_blk for f in c if not used(f)]
tuple_new_old_f = [(new,old,f) for (new,(old,f)) in enumerate(tuple_old_f)]
old_to_new_blk = np.zeros((len(self.face_to_node_conn),),'i')-1
for new,old,f in tuple_new_old_f:
old_to_new_blk[old] = new + offset
elem_blk_new = [[old_to_new_blk[f] for f in c] for c in elem_blk]
#offset = offset + len(ftuple_new)
elem_blks_new.append(elem_blk_new)
face_blks.append([f for i,j,f in tuple_new_old_f])
elem_blks = elem_blks_new
elif face_block_mode == "one block, repeated":
# no reordering of faces needed, just repeat
for eblock in elem_blks:
face_blks.append(self.face_to_node_conn)
else:
raise RuntimeError("Invalid face_block_mode: '%s', valid='one block', 'n blocks, duplicated', 'n blocks, not duplicated'"%face_block_mode)
# open the mesh file
num_elems = sum(len(elem_blk) for elem_blk in elem_blks)
num_faces = sum(len(face_blk) for face_blk in face_blks)
ep = exodus.ex_init_params(title=filename,
num_dim=3,
num_nodes=self.num_nodes(),
num_face=num_faces,
num_face_blk=len(face_blks),
num_elem=num_elems,
num_elem_blk=len(elem_blks),
num_side_sets=len(self.side_sets))
e = exodus.exodus(filename, mode='w', array_type='numpy', init_params=ep)
# put the coordinates
e.put_coord_names(['coordX', 'coordY', 'coordZ'])
e.put_coords(self.coords[:,0], self.coords[:,1], self.coords[:,2])
# put the face blocks
for i_blk, face_blk in enumerate(face_blks):
face_raveled = [n for f in face_blk for n in f]
e.put_polyhedra_face_blk(i_blk+1, len(face_blk), len(face_raveled), 0)
e.put_node_count_per_face(i_blk+1, np.array([len(f) for f in face_blk]))
e.put_face_node_conn(i_blk+1, np.array(face_raveled)+1)
# put the elem blocks
assert len(elem_blks) == len(self.material_id_list)
for i_blk, (m_id, elem_blk) in enumerate(zip(self.material_id_list, elem_blks)):
elems_raveled = [f for c in elem_blk for f in c]
e.put_polyhedra_elem_blk(m_id, len(elem_blk), len(elems_raveled), 0)
e.put_elem_blk_name(m_id, "MATERIAL_ID_%d"%m_id)
e.put_face_count_per_polyhedra(m_id, np.array([len(c) for c in elem_blk]))
e.put_elem_face_conn(m_id, np.array(elems_raveled)+1)
# add sidesets
e.put_side_set_names([ss.name for ss in self.side_sets])
for ss in self.side_sets:
for elem in ss.elem_list:
assert old_to_new_elems[elem][0] == elem
new_elem_list = [old_to_new_elems[elem][1] for elem in ss.elem_list]
e.put_side_set_params(ss.setid, len(ss.elem_list), 0)
e.put_side_set(ss.setid, np.array(new_elem_list)+1, np.array(ss.side_list)+1)
# finish and close
e.close()
@classmethod
def extruded_Mesh2D(cls, mesh2D, layer_types, layer_data, ncells_per_layer, mat_ids):
"""
Regularly extrude a 2D mesh to make a 3D mesh.
mesh2D : a Mesh2D object
layer_types : either a string (type) or list of strings (types)
layer_data : array of data needed (specific to the type)
ncells_per_layer : either a single integer (same number of cells in all
: layers) or a list of number of cells in the layer
mat_ids : either a single integer (one mat_id for all layers)
: or a list of integers (mat_id for each layer)
: or a 2D numpy array of integers (mat_id for each layer and
each column: [layer_id, surface_cell_id])
types:
- 'constant' : (data=float thickness) uniform thickness
- 'function' : (data=function or functor) thickness as a function
: of (x,y)
- 'snapped' : (data=float z coordinate) snap the layer to
: provided z coordinate, telescoping as needed
- 'node' : thickness provided on each node of the surface domain
- 'cell' : thickness provided on each cell of the surface domain,
: interpolate to nodes
NOTE: dz is uniform through the layer in all but the 'snapped' case
NOTE: 2D mesh is always labeled 'surface', extrusion is always downwards
"""
# make the data all lists
# ---------------------------------
def is_list(data):
if type(data) is str:
return False
try:
len(data)
except TypeError:
return False
else:
return True
if is_list(layer_types):
if not is_list(layer_data):
layer_data = [layer_data,]*len(layer_types)
else:
assert len(layer_data) == len(layer_types)
if not is_list(ncells_per_layer):
ncells_per_layer = [ncells_per_layer,]*len(layer_types)
else:
assert len(ncells_per_layer) == len(layer_types)
elif is_list(layer_data):
layer_types = [layer_types,]*len(layer_data)
if not is_list(ncells_per_layer):
ncells_per_layer = [ncells_per_layer,]*len(layer_data)
else:
assert len(ncells_per_layer) == len(layer_data)
elif is_list(ncells_per_layer):
layer_types = [layer_types,]*len(ncells_per_layer)
layer_data = [layer_data,]*len(ncells_per_layer)
else:
layer_types = [layer_types,]
layer_data = [layer_data,]
ncells_per_layer = [ncells_per_layer,]
# helper data and functions for mapping indices from 2D to 3D
# ------------------------------------------------------------------
if min(ncells_per_layer) < 0:
raise RuntimeError("Invalid number of cells, negative value provided.")
ncells_tall = sum(ncells_per_layer)
ncells_total = ncells_tall * mesh2D.num_cells()
nfaces_total = (ncells_tall+1) * mesh2D.num_cells() + ncells_tall * mesh2D.num_edges()
nnodes_total = (ncells_tall+1) * mesh2D.num_nodes()
np_mat_ids = np.array(mat_ids, dtype=int)
if np_mat_ids.size == np.size(np_mat_ids, 0):
if np_mat_ids.size == 1:
np_mat_ids = np.full((len(ncells_per_layer), mesh2D.num_cells()), mat_ids[0], dtype=int)
else:
np_mat_ids = np.empty((len(ncells_per_layer), mesh2D.num_cells()), dtype=int)
for ilay in range(len(ncells_per_layer)):
np_mat_ids[ilay, :] = np.full(mesh2D.num_cells(), mat_ids[ilay], dtype=int)
def col_to_id(column, z_cell):
"""Maps 2D cell ID and index in the vertical to a 3D cell ID"""
return z_cell + column * ncells_tall
def node_to_id(node, z_node):
"""Maps 2D node ID and index in the vertical to a 3D node ID"""
return z_node + node * (ncells_tall+1)
def edge_to_id(edge, z_cell):
"""Maps 2D edge hash and index in the vertical to a 3D face ID of a vertical face"""
return (ncells_tall + 1) * mesh2D.num_cells() + z_cell + edge * ncells_tall
# create coordinates
# ---------------------------------
coords = np.zeros((mesh2D.coords.shape[0],ncells_tall+1, 3),'d')
coords[:,:,0:2] = np.expand_dims(mesh2D.coords[:,0:2],1)
if mesh2D.dim == 3:
coords[:,0,2] = mesh2D.coords[:,2]
# else the surface is at 0 depth
cell_layer_start = 0
for layer_type, layer_datum, ncells in zip(layer_types, layer_data, ncells_per_layer):
if layer_type.lower() == 'constant':
dz = float(layer_datum) / ncells
for i in range(1,ncells+1):
coords[:,cell_layer_start+i,2] = coords[:,cell_layer_start,2] - i * dz
else:
# allocate an array of coordinates for the bottom of the layer
layer_bottom = np.zeros((mesh2D.coords.shape[0],),'d')
if layer_type.lower() == 'snapped':
# layer bottom is uniform
layer_bottom[:] = layer_datum
elif layer_type.lower() == 'function':
# layer thickness is given by a function evaluation of x,y
for node_col in range(mesh2D.coords.shape[0]):
layer_bottom[node_col] = coords[node_col,cell_layer_start,2] - layer_datum(coords[node_col,0,0], coords[node_col,0,1])
elif layer_type.lower() == 'node':
# layer bottom specifically provided through thickness
layer_bottom[:] = coords[:,cell_layer_start,2] - layer_datum
elif layer_type.lower() == 'cell':
# interpolate cell thicknesses to node thicknesses
import scipy.interpolate
centroids = mesh2D.cell_centroids()
interp = scipy.interpolate.interp2d(centroids[:,0], centroids[:,1], layer_datum, kind='linear')
layer_bottom[:] = coords[:,cell_layer_start,2] - interp(mesh2D.coords[:,0], mesh2D.coords[:,1])
else:
raise RuntimeError("Unrecognized layer_type '%s'"%layer_type)
# linspace from bottom of previous layer to bottom of this layer
for node_col in range(mesh2D.coords.shape[0]):
coords[node_col,cell_layer_start:cell_layer_start+ncells+1,2] = np.linspace(coords[node_col,cell_layer_start,2], layer_bottom[node_col], ncells+1)
cell_layer_start = cell_layer_start + ncells
# create faces, face sets, cells
bottom = []
surface = []
faces = []
cells = [list() for c in range(ncells_total)]
# -- loop over the columns, adding the horizontal faces
for col in range(mesh2D.num_cells()):
nodes_2 = mesh2D.conn[col]
surface.append(col_to_id(col,0))
for z_face in range(ncells_tall + 1):
i_f = len(faces)
f = [node_to_id(n, z_face) for n in nodes_2]
if z_face != ncells_tall:
cells[col_to_id(col, z_face)].append(i_f)
if z_face != 0:
cells[col_to_id(col, z_face-1)].append(i_f)
faces.append(f)
bottom.append(col_to_id(col,ncells_tall-1))
# -- loop over the columns, adding the vertical faces
added = dict()
vertical_side_cells = []
vertical_side_indices = []
for col in range(mesh2D.num_cells()):
nodes_2 = mesh2D.conn[col]
for i in range(len(nodes_2)):
edge = mesh2D.edge_hash(nodes_2[i], nodes_2[(i+1)%len(nodes_2)])
try:
i_e = added[edge]
except KeyError:
# faces not yet added to facelist
i_e = len(added.keys())
added[edge] = i_e
for z_face in range(ncells_tall):
i_f = len(faces)
assert i_f == edge_to_id(i_e, z_face)
f = [node_to_id(edge[0], z_face),
node_to_id(edge[1], z_face),
node_to_id(edge[1], z_face+1),
node_to_id(edge[0], z_face+1)]
faces.append(f)
face_cell = col_to_id(col, z_face)
cells[face_cell].append(i_f)
# check if this is an external
if mesh2D._edges[edge] == 1:
vertical_side_cells.append(face_cell)
vertical_side_indices.append(len(cells[face_cell])-1)
else:
# faces already added from previous column
for z_face in range(ncells_tall):
i_f = edge_to_id(i_e, z_face)
cells[col_to_id(col, z_face)].append(i_f)
# Do some idiot checking
# -- check we got the expected number of faces
assert len(faces) == nfaces_total
# -- check every cell is at least a tet
for c in cells:
assert len(c) > 4
# -- check surface sideset has the right number of entries
assert len(surface) == mesh2D.num_cells()
# -- check bottom sideset has the right number of entries
assert len(bottom) == mesh2D.num_cells()
# -- len of vertical sides sideset is number of external edges * number of cells, no pinchouts here
num_sides = ncells_tall * sum(1 for e,c in mesh2D.edge_counts().iteritems() if c == 1)
assert num_sides == len(vertical_side_cells)
assert num_sides == len(vertical_side_indices)
# make the material ids
material_ids = np.zeros((len(cells),),'i')
for col in range(mesh2D.num_cells()):
z_cell = 0
for ilay in range(len(ncells_per_layer)):
ncells = ncells_per_layer[ilay]
for i in range(z_cell, z_cell+ncells):
material_ids[col_to_id(col, i)] = np_mat_ids[ilay, col]
z_cell = z_cell + ncells
# make the side sets
side_sets = []
side_sets.append(SideSet("bottom", 1, bottom, [1,]*len(bottom)))
side_sets.append(SideSet("surface", 2, surface, [0,]*len(surface)))
side_sets.append(SideSet("external_sides", 3, vertical_side_cells, vertical_side_indices))
# reshape coords
coords = coords.reshape(nnodes_total, 3)
for e,s in zip(side_sets[0].elem_list, side_sets[0].side_list):
face = cells[e][s]
fz_coords = np.array([coords[n] for n in faces[face]])
#print "bottom centroid = ", np.mean(fz_coords, axis=0)
for e,s in zip(side_sets[1].elem_list, side_sets[1].side_list):
face = cells[e][s]
fz_coords = np.array([coords[n] for n in faces[face]])
#print "surface centroid = ", np.mean(fz_coords, axis=0)
# instantiate the mesh
return cls(coords, faces, cells, side_sets=side_sets, material_ids=material_ids)
def commandline_options():
parser = argparse.ArgumentParser(description='Extrude a 2D mesh to make a 3D mesh')
parser.add_argument("-n", "--num-cells", default=10, type=int,
help="number of cells to extrude")
parser.add_argument("-d", "--depth", default=40.0, type=float,
help="depth to extrude")
parser.add_argument("-o", "--outfile", default=None, type=str,
help="output filename")
parser.add_argument("-p", "--plot", default=False, action="store_true",
help="plot the 2D mesh")
parser.add_argument("infile",metavar="INFILE", type=str,
help="input filename of surface mesh")
options = parser.parse_args()
if options.outfile is None:
options.outfile = ".".join(options.infile.split(".")[:-1])+".exo"
if os.path.isfile(options.outfile):
print('Output file "%s" exists, cowardly not overwriting.'%options.outfile)
sys.exit(1)
if not os.path.isfile(options.infile):
        print('Input file "%s" not found.'%options.infile)
parser.print_usage()
sys.exit(1)
return options
if __name__ == "__main__":
options = commandline_options()
m2 = Mesh2D.read_VTK(options.infile)
if options.plot:
m2.plot()
m3 = Mesh3D.extruded_Mesh2D(m2, [options.depth,], [options.num_cells,], [10000,])
m3.write_exodus(options.outfile)
|
bsd-3-clause
| 6,506,556,784,610,115,000 | 36.724622 | 166 | 0.507972 | false |
pecryptfs/pecryptfs
|
pecryptfs/cmd_genfile.py
|
1
|
2753
|
#!/usr/bin/env python3
# pecryptfs - Portable Userspace eCryptfs
# Copyright (C) 2015 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from typing import List, Tuple
import argparse
import os
from pecryptfs.ecryptfs import generate_encrypted_file
from pecryptfs.auth_token import AuthToken
def main():
parser = argparse.ArgumentParser(description="eCryptfs Encrypted File Generator")
parser.add_argument('files', metavar='FILE', type=str, nargs='+', help='Filenames to decrypt')
parser.add_argument('-p', '--password', type=str, default="Test",
help='Password to use for decryption, prompt when none given')
parser.add_argument('-s', '--salt', type=str, default="0011223344556677",
help='Salt to use for decryption')
parser.add_argument('-o', '--output', type=str, help='Output directory')
parser.add_argument('-c', '--cipher', type=str, help='Cipher to use', default="aes")
parser.add_argument('-k', '--key-bytes', type=int, help='Key bytes to use', default=24)
parser.add_argument('-v', '--verbose', action='store_true', help='Be verbose')
args = parser.parse_args()
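    # Example invocation (hypothetical file and directory names), matching the options above:
    #   python cmd_genfile.py -p Test -s 0011223344556677 -c aes -k 24 -o out dummy.txt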
    output_directory = args.output or "."
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
cipher = args.cipher
key_bytes = args.key_bytes
auth_token = AuthToken(args.password, args.salt)
for input_filename in args.files:
        filenames: List[Tuple[str, int, str]] = []
data = generate_encrypted_file(auth_token, cipher, key_bytes)
output_filename = "{}-{}.raw".format(cipher, key_bytes)
        with open(os.path.join(output_directory, output_filename), "wb") as fout:
            fout.write(data)
        filenames.append((cipher, key_bytes, output_filename))
if args.verbose:
print("Password: {}".format(args.password))
print("Salt: {}".format(args.salt))
print("Filename: {}".format(input_filename))
print()
for cipher, key_bytes, f in filenames:
print("{:8} {:2} {}".format(cipher, key_bytes, f))
else:
for cipher, key_bytes, f in filenames:
print(f)
# EOF #
|
gpl-3.0
| 4,288,613,616,355,062,000 | 36.712329 | 98 | 0.656375 | false |
ptphp/PyLib
|
src/fangte/fetch/fetch58_bak.py
|
1
|
29601
|
# -*- coding: utf-8 -*-
import time
import datetime
import random
import cookielib
import urllib
import urllib2
from urlparse import urlparse
from config import *
from common import *
from BeautifulSoup import BeautifulSoup
class BaseCrawl(object):
    # Listing type: 1 = for sale, 2 = for rent, 3 = wanted to buy, 4 = wanted to rent
    flag = None
    isStoped = False
    response = None
    header = None
    # Listing info template
    infoT = {}
    # Input parameters
    param = {}
    # Global queue
    queue = []
    pageNo = 0
    isFetched = False
    # Count of listings older than the time limit
    overTimeNum = 0
def __init__(self,param,que):
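        # Expected shape of `param` (inferred from run()/baseUrl()/getContent(),
        # not guaranteed by this excerpt), e.g.:
        #   {'flag': 1, 'city': 'sh', 'getPhone': 1,
        #    'args': {'city': 'sh', 'region': '', 'option': '', 'q': '',
        #             'timelimit': 3600 * 24 * 7}}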
self.queue = que
self.param = param
self.header = header
cj = cookielib.MozillaCookieJar()
self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
self.endtime=str(datetime.date.today() -datetime.timedelta(days=7))
self._initRe()
def getContent(self):
if self.__cInit__(self.infoT['url']) :
self.response = re.sub(" |\n|\r|\t| | |联系我时,请说是在58同城上看到的,谢谢!","",self.response)
self.response = re.sub("rlist\d\">.*?</ul>","",self.response)
try:
if self.param['flag'] == 1:
self.sell();
if self.param['flag'] == 2:
self.rent();
if self.param['flag'] == 3:
self.buy();
if self.param['flag'] == 4:
self.req();
except Exception,what:
print what
if (time.time() - int(self.infoT['posttime']))>self.param['args']["timelimit"]:
self.overTimeNum +=1
if self.overTimeNum > 5:
self.pageNo = 0
self.isStoped = True
self.overTimeNum = 0
def getPhoneself(self):
if self.__cInit__(self.infoT['url']) :
sHtml = self.response
self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False)
def __getLinks(self,url):
if not self.__cInit__(url):
return
self.response = re.sub("\n|\r|\t| | ","",self.response)
page_main = regx_data(self.page_main_regex,self.response,"",0)
self.page_main_trs_regex = "<tr logr=\".*?\">(.*?)</tr>"
page_main_trs = regx_lists(self.page_main_trs_regex,page_main,"",0)
if page_main_trs and len(page_main_trs)>0:
for tr in page_main_trs:
if self.isStoped:
self.pageNo = 0
break
self._initTemple(self.param['flag'],self.param['city'])
try:
if self.param['flag'] == 1:
self.__parseSellTrs(tr)
if self.param['flag'] == 2:
self.__parseRentTrs(tr)
if self.param['flag'] == 3:
self.__parseBuyTrs(tr)
if self.param['flag'] == 4:
self.__parseReqTrs(tr)
except Exception,what:
print what
else:
if not self.isFetched:
self.queue.append(self.infoT)
self.isFetched = False
time.sleep(0.1)
self.infoT = {}
self.pageNo +=1
else:
self.pageNo = 0
def __parseBuyTrs(self,tr):
soup = BeautifulSoup(tr)
at = soup.find('a',{'class':'t'})
#标题
if at:
self.infoT['title'] = at.string
#链接
self.infoT['url'] = at['href']
if checkPath("pagecash",self.infoT['url']):
self.isFetched = True
return
else:
return
#图片
img = soup.find('td',{'class':'img'})
if img:
if img.img['src'].find("noimg") == -1:
self.infoT['thumb'] = img.img['src']
#信息
t = soup.find('td',{'class':'t'})
self.infoT['belong'] = regx_data(self.house_belong_dict_regex,str(t),"",False)
self.infoT['houseType'] = regx_data(self.house_type_regex,str(t),"",False)
self.infoT['posttime'] = self.postTime(regx_data("更新时间:(.*?)<",str(t),"",False))
#self.infoT['room'] = regx_data(self.house_room_regex,str(soup),"",False)
#if self.infoT['room']:
#self.infoT['room'] = re.sub("一|二|三|四|五|六|七|八|九|十","1|2|3|4|5|6|7|8|9|10",self.infoT['room'])
self.infoT['hall'] = regx_data(self.house_hall_regex,str(soup),"",False)
self.infoT['toilet'] = regx_data(self.house_toilet_regex,str(soup),"",False)
agencyname = regx_data("(个人)",str(t),"",False)
if agencyname:
self.infoT['isPerson'] = 1
else:
self.infoT['isPerson'] = 0
#价格
num = soup('td',{'class':'tc'})
if num and len(num) > 1:
if str(num[0]).find("面议") == -1:
price = num[0].b.string
if price.find('-') == -1:
self.infoT['price'] = price
else:
self.infoT['price'] = price.split("-")[0]
self.infoT['price_max'] = price.split("-")[1]
del price
area = num[1].b.string
if area.find('-') == -1:
self.infoT['area'] = area
else:
self.infoT['area'] = area.split("-")[0]
self.infoT['area_max'] = area.split("-")[1]
del area
self.infoT['search']= re.sub("<.*?>","",str(soup))
del soup
del t
del img
del at
del num
del agencyname
self.getContent()
def __parseReqTrs(self,tr):
soup = BeautifulSoup(tr)
at = soup.find('a',{'class':'t'})
#标题
if at:
self.infoT['title'] = at.string
#链接
self.infoT['url'] = at['href']
if checkPath("pagecash",self.infoT['url']):
self.isFetched = True
return
else:
return
agencyname = regx_data("(个人)",str(soup),"",False)
if agencyname:
self.infoT['isPerson'] = 1
else:
self.infoT['isPerson'] = 0
#价格
if soup.find('b',{'class':'pri'}):
self.infoT['price'] = soup.find('b',{'class':'pri'}).string
if self.infoT['price']:
if self.infoT['price'].find('-') != -1:
self.infoT['price_max'] = self.infoT['price'].split("-")[1]
self.infoT['price'] = self.infoT['price'].split("-")[0]
self.infoT['room'] = soup("td")[2].string
#时间
tds = soup("td")[3]
if tds:
self.infoT['posttime']= self.postTime(tds.string)
#rint tds.string
self.infoT['search']= re.sub("<.*?>","",str(soup))
del soup
del at
del agencyname
del tds
self.getContent()
def __parseSellTrs(self,tr):
soup = BeautifulSoup(tr)
at = soup.find('a',{'class':'t'})
#标题
if at:
self.infoT['title'] = at.string
#链接
self.infoT['url'] = at['href']
if checkPath("pagecash",self.infoT['url']):
self.isFetched = True
return
else:
return
#图片
img = soup.find('td',{'class':'img'})
if img:
if img.img['src'].find("noimg") == -1:
self.infoT['thumb'] = img.img['src']
#信息
t = soup.find('td',{'class':'t'})
self.infoT['topfloor'] = regx_data(self.house_topfloor_regex,str(t),"",False)
self.infoT['floor'] = regx_data(self.house_floor_regex,str(t),"",False)
self.infoT['belong'] = regx_data(self.house_belong_dict_regex,str(t),"",False)
self.infoT['houseType'] = regx_data(self.house_type_regex,str(t),"",False)
self.infoT['toward'] = regx_data(self.house_toward_regex,str(t),"",False)
self.infoT['age'] = regx_data("(\d+)年",str(t),"",False)
self.infoT['posttime'] = self.postTime(regx_data("更新时间:(.*?)<",str(t),"",False))
#self.infoT['room'] = regx_data(self.house_room_regex,str(soup),"",False)
#if self.infoT['room']:
#self.infoT['room'] = re.sub("一|二|三|四|五|六|七|八|九|十","1|2|3|4|5|6|7|8|9|10",self.infoT['room'])
self.infoT['hall'] = regx_data(self.house_hall_regex,str(soup),"",False)
self.infoT['toilet'] = regx_data(self.house_toilet_regex,str(soup),"",False)
agencyname = regx_data("(个人)",str(t),"",False)
if agencyname:
self.infoT['isPerson'] = 1
else:
self.infoT['isPerson'] = 0
#价格
num = soup('td',{'class':'tc'})
if num and len(num) > 1:
if str(num[0]).find("面议") == -1:
self.infoT['price'] = num[0].b.string
self.infoT['area'] = num[1].b.string
self.infoT['search']= re.sub("<.*?>","",str(soup))
del soup
del t
del img
del at
del agencyname
self.getContent()
def __parseRentTrs(self,tr):
soup = BeautifulSoup(tr)
at = soup.find('a',{'class':'t'})
#标题
if at:
self.infoT['title'] = at.string
#链接
self.infoT['url'] = at['href']
if checkPath("pagecash",self.infoT['url']):
self.isFetched = True
return
else:
return
#图片
img = soup.find('td',{'class':'img'})
if img:
if img.img['src'].find("noimg") == -1:
self.infoT['thumb'] = img.img['src']
#信息
t = soup.find('td',{'class':'t'})
self.infoT['topfloor'] = regx_data(self.house_topfloor_regex,str(t),"",False)
self.infoT['floor'] = regx_data(self.house_floor_regex,str(t),"",False)
self.infoT['area'] = regx_data(self.house_totalarea_regex,str(t),"",False)
self.infoT['fitment'] = regx_data(self.house_fitment_regex,str(t),"",False)
self.infoT['room'] = regx_data(self.house_room_regex,str(soup),"",False)
self.infoT['hall'] = regx_data(self.house_hall_regex,str(soup),"",False)
self.infoT['toilet'] = regx_data(self.house_toilet_regex,str(soup),"",False)
self.infoT['equ'] = regx_data("配置:(.*?)<",str(soup),"",False)
agencyname = regx_data("(个人)",str(t),"",False)
if agencyname:
self.infoT['isPerson'] = 1
else:
self.infoT['isPerson'] = 0
#价格
if soup.find('b',{'class':'pri'}):
self.infoT['price'] = soup.find('b',{'class':'pri'}).string
#时间
tds = soup("td")[4]
if tds:
self.infoT['posttime']= self.postTime(tds.string)
#rint tds.string
self.infoT['search']= re.sub("<.*?>","",str(soup))
del soup
del t
del img
del at
del agencyname
del tds
self.getContent()
def __cInit__(self,url):
try:
request = urllib2.Request(url, None, self.header)
self.response = urllib2.urlopen(request).read()
except Exception,what:
return False
else:
return True
def req(self):
sHtml = self.response
self.response = None
#个人 OR 经纪人
#agencyname = regx_data(self.agencyname_regex,sHtml,"个人房源",False)
#if not agencyname:
#agencyname = '个人房源'
#联系人
self.infoT['owner'] = regx_data(self.username_regex,sHtml,"个人",False)
#价格
if not self.infoT['price']:
self.infoT['price'] = regx_data(self.house_price_regex,sHtml,0,False)
#500以下
if not self.infoT['price'] :
self.infoT['price'] = regx_data(self.house_price1_regex,sHtml,0,False)
#以上
if not self.infoT['price'] :
self.infoT['price'] = regx_data(self.house_price2_regex,sHtml,0,False)
#标题
if not self.infoT['title']:
self.infoT['title'] = regx_data(self.house_title_regex,sHtml,"",False)
#发布时间
if not self.infoT['posttime']:
self.infoT['posttime'] = self.postTime(regx_data(self.house_posttime_regex,sHtml,"",False))
#house_posttime = postTime(house_posttime,1)
#室
if not self.infoT['room']:
self.infoT['room'] = regx_data(self.house_room_regex,sHtml,"",False)
#区
self.infoT['region'] = regx_data(self.house_region_regex,sHtml,"",False)
#地段
#print self.house_section_regex
self.infoT['section'] = regx_data(self.house_section_regex,sHtml,"",False)
#详细
self.infoT['desc'] = regx_data(self.house_desc_regex,sHtml,"",False ,"<.*?>")
#电话
if self.param['getPhone']:
self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False)
def rent(self):
sHtml = self.response
self.response = None
#个人 OR 经纪人
#agencyname = regx_data(self.agencyname_regex,sHtml,"",False)
#联系人
self.infoT['owner'] = regx_data(self.username_regex,sHtml,"个人",False)
#楼层
if not self.infoT['floor']:
self.infoT['floor'] = regx_data(self.house_floor_regex,sHtml,"",False)
#顶层
if not self.infoT['topfloor']:
self.infoT['topfloor'] = regx_data(self.house_topfloor_regex,sHtml,"",False)
#面积
if not self.infoT['area']:
self.infoT['area'] = regx_data(self.house_totalarea_regex,sHtml,"",False)
#价格
if not self.infoT['price']:
self.infoT['price'] = regx_data(self.house_price_regex,sHtml,0,False)
#标题
if not self.infoT['title']:
self.infoT['title'] = regx_data(self.house_title_regex,sHtml,"",False)
#发布时间
if not self.infoT['posttime']:
self.infoT['posttime'] = self.postTime(regx_data(self.house_posttime_regex,sHtml,"",False) )
#house_posttime = postTime(house_posttime,1)
#室
if not self.infoT['room']:
self.infoT['room'] = regx_data(self.house_room_regex,sHtml,"",False)
#厅
if not self.infoT['hall']:
self.infoT['hall'] = regx_data(self.house_hall_regex,sHtml,"",False)
#卫
if not self.infoT['toilet']:
self.infoT['toilet'] = regx_data(self.house_toilet_regex,sHtml,"",False)
#押金
if not self.infoT['deposit']:
self.infoT['deposit'] = regx_data(self.house_deposit_regex,sHtml,"",False)
#小区
self.infoT['borough'] = regx_data(self.borough_name_regex,sHtml,"",False)
#地址
self.infoT['addr'] = regx_data(self.house_addr_regex,sHtml,"",False)
#区
self.infoT['region'] = regx_data(self.house_region_regex,sHtml,"",False)
#地段
self.infoT['section'] = regx_data(self.house_section_regex,sHtml,"",False)
#详细
self.infoT['desc'] = regx_data(self.house_desc_regex,sHtml,"",False ,"<.*?>")
#图片
self.infoT['pics'] = regx_datas(self.house_pics_regex,sHtml,"",False ,"tiny","big")
_t = regx_data(self.house_toward_t_regex,sHtml,"",False)
#装修
if not self.infoT['fitment']:
self.infoT['fitment'] = regx_data(self.house_fitment_regex,_t,"",False)
#朝向
if not self.infoT['toward']:
self.infoT['toward'] = regx_data(self.house_toward_regex,_t,"",False)
#类型
if not self.infoT['houseType']:
self.infoT['houseType'] = regx_data(self.house_type_regex,_t,"",False)
#电话
if self.param['getPhone']:
self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False)
def buy(self):
sHtml = self.response
self.response = None
#个人 OR 经纪人
#agencyname = regx_data(self.agencyname_regex,sHtml,"",False)
#联系人
if not self.infoT['owner']:
self.infoT['owner']= regx_data(self.username_regex,sHtml,"个人",False)
#面积
if not self.infoT['area']:
self.infoT['area'] = regx_data(self.house_totalarea_regex,sHtml,"",False)
#价格
if not self.infoT['price']:
self.infoT['price']= regx_data(self.house_price_regex,sHtml,0,False)
#标题
if not self.infoT['title']:
self.infoT['title'] = regx_data(self.house_title_regex,sHtml,"",False)
#发布时间
if not self.infoT['posttime']:
self.infoT['posttime'] = self.postTime(regx_data(self.house_posttime_regex,sHtml,"",False) )
#house_posttime = postTime(house_posttime,1)
#室
if not self.infoT['room']:
self.infoT['room'] = regx_data(self.house_room_regex,sHtml,"",False)
#地址
self.infoT['addr'] = regx_data(self.house_addr_regex,sHtml,"",False)
#详细
self.infoT['desc'] = regx_data(self.house_desc_regex,sHtml,"",False ,"<.*?>")
#图片
self.infoT['pics'] = regx_datas(self.house_pics_regex,sHtml,"",False ,"tiny","big")
#电话
if self.param['getPhone']:
self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False)
def sell(self):
sHtml = self.response
self.response = None
#个人 OR 经纪人
#agencyname = regx_data(self.agencyname_regex,sHtml,"",False)
#联系人
self.infoT['owner'] = regx_data(self.username_regex,sHtml,"个人",False)
#楼层
if not self.infoT['floor']:
self.infoT['floor'] = regx_data(self.house_floor_regex,sHtml,"",False)
#顶层
if not self.infoT['topfloor']:
self.infoT['topfloor']= regx_data(self.house_topfloor_regex,sHtml,"",False)
#面积
if not self.infoT['area']:
self.infoT['area'] = regx_data(self.house_totalarea_regex,sHtml,"",False)
#价格
if not self.infoT['price']:
self.infoT['price'] = regx_data(self.house_price_regex,sHtml,0,False)
#标题
if not self.infoT['title']:
self.infoT['title'] = regx_data(self.house_title_regex,sHtml,"",False)
#发布时间
if not self.infoT['posttime']:
self.infoT['posttime'] = self.postTime(regx_data(self.house_posttime_regex,sHtml,"",False) )
#house_posttime = postTime(house_posttime,1)
#室
if not self.infoT['room']:
self.infoT['room'] = regx_data(self.house_room_regex,sHtml,"",False)
#厅
if not self.infoT['hall']:
self.infoT['hall'] = regx_data(self.house_hall_regex,sHtml,"",False)
#卫
if not self.infoT['toilet']:
self.infoT['toilet'] = regx_data(self.house_toilet_regex,sHtml,"",False)
#产权
if not self.infoT['belong']:
self.infoT['belong'] = regx_data(self.house_belong_regex,sHtml,"",False)
#房龄 99年
self.infoT['age'] = regx_data(self.house_age_regex,sHtml,"",False)
#小区
self.infoT['borough'] = regx_data(self.borough_name_regex,sHtml,"",False)
#地址
self.infoT['addr'] = regx_data(self.house_addr_regex,sHtml,"",False)
#区
self.infoT['region'] = regx_data(self.house_region_regex,sHtml,"",False)
#地段
self.infoT['section'] = regx_data(self.house_section_regex,sHtml,"",False)
#详细
self.infoT['desc'] = regx_data(self.house_desc_regex,sHtml,"",False ,"<.*?>")
#图片
self.infoT['pics'] = regx_datas(self.house_pics_regex,sHtml,"",False ,"tiny","big")
_t = regx_data(self.house_toward_t_regex,sHtml,"",False)
#装修
if not self.infoT['fitment']:
self.infoT['fitment'] = regx_data(self.house_fitment_regex,_t,"",False)
#朝向
if not self.infoT['toward']:
self.infoT['toward'] = regx_data(self.house_toward_regex,_t,"",False)
#类型
if not self.infoT['houseType']:
self.infoT['houseType'] = regx_data(self.house_type_regex,_t,"",False)
#电话
if self.param['getPhone']:
self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False)
def _initRe(self):
self.page_main_regex = "<div id=\"main\">(.*?)<div id=\"links\"> "
self.agencyname_regex="agencyname:'(.*?)',"
self.username_regex="username:'(.*?)',"
self.house_title_regex="<h1>(.*)</h1>"
self.house_floor_regex="第(\d+)层"
self.house_topfloor_regex="共(\d+)层"
self.house_room_regex="(\d+|一|二|三|四|五|六|七|八|九|十)室"
self.house_hall_regex="(\d+)厅"
self.house_toilet_regex="(\d+)卫"
self.house_posttime_regex="发布时间:(.*?)浏览"
self.house_age_regex="(\d+)年"
self.house_region_regex = "locallist.*?listname.*?name:'(.*?)'"
self.house_section_regex = "<li><i>区域:</i><a.*?<a.*?>(.*?)</a></li>"
self.house_desc_regex = "class=\"maincon\">(.*?)</div>"
self.house_phone_regex = "(http://image.58.com/showphone.aspx.*?)'"
self.house_pics_regex = "(http://\d+.pic.58control.cn/p\d+/tiny/n_\d+.jpg)"
self.house_toward_regex = "(东|南|西|北|南北|东西|东南|东北|西北)"
self.house_fitment_regex = "(毛坯|简单装修|中等装修|精装修|豪华装修)"
self.house_belong_dict_regex = "(商品房|经济适用房|公房|用权)"
self.house_type_regex = "(平房|普通住宅|商住两用|公寓|别墅)"
self.borough_name_regex = "<li><i>小区:</i><.*?>(.*?)<.*?></li>"
self.borough_name1_regex = "<li><i>小区:</i>(.*?)</li>"
if self.param['flag'] ==1:
self.house_addr_regex = "address\">(.*?)<"
self.house_totalarea_regex="(\d+)㎡"
self.house_belong_regex="<li><i>产权:</i>(.*?)</li>"
self.house_price_regex="(\d+)万元"
self.house_toward_t_regex = "房龄:</i>(.*?)<"
elif self.param['flag'] ==2:
self.house_totalarea_regex="(\d+)㎡"
self.house_price_regex="(\d+)元/月"
self.house_equ_regex="vartmp='(.*?)';"
self.house_deposit_regex="(押一付三|押一付一|押二付一|半年付|年付)"
self.house_toward_t_regex = "基本情况:</i>(.*?)<"
self.house_addr_regex = "address\">(.*?)<"
elif self.param['flag'] ==3:
self.house_belong_regex="<li><i>产权:</i>(.*?)</li>"
self.house_totalarea_regex="(\d+-\d+)㎡"
self.house_addr_regex="<li><i>地段:</i>(.*?)</li>"
self.house_price_regex="(\d+-\d+)万元"
elif self.param['flag'] ==4:
self.house_price_regex="(\d+-\d+)元"
self.house_price1_regex="(\d+)元以下"
self.house_price2_regex="(\d+)元以上"
self.house_room_regex="(一|两|三|四)居室"
def _initTemple(self,flag,city):
self.infoT = {
'flag':flag,#房源类型 1 出售 2 出租 3 求购 4 求租
'title':'',
'posttime':'',
'price':0,
'price_max':0,
'deposit':'',
'belong':'',
'room':0,
'hall':0,
'toilet':0,
'yt':0,
'area':0,
'area_max':0,
'houseType':'',
'fitment':'',
'floor':0,
'topfloor':0,
'toward':'',
'age':1,
'equ':'',
'city':city,
'region':'',
'borough':'',
'section':'',
'addr':'',
'phone':'',
'owner':'',
'desc':'',
'search':'',
'url':'',
'thumb':'',
'webFlag':1,
'isPerson':1,
}
def postTime(self,posttime):
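        # A few examples of the inputs handled below (inferred from the branches):
        #   'now...'                 -> current timestamp
        #   '5分钟前'                 -> now - 5 * 60 seconds
        #   '2小时前'                 -> now - 2 * 3600 seconds
        #   '2011-05-03' or '05-03'  -> timestamp of that calendar date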
if posttime and posttime.find('now') != -1:
posttime = int(time.time())
if not posttime:
return
posttime = str(posttime).replace('前','')
#print posttime
if posttime.find("<") != -1 or posttime.find(">") != -1:
            posttime = re.sub('<.*?>', '', posttime)
if posttime.find('-') !=-1:
if len(posttime.split("-"))==3:
s = datetime.datetime(int(posttime.split('-')[0]),int(posttime.split('-')[1],),int(posttime.split('-')[2]))
else:
s = datetime.datetime(2011,int(posttime.split('-')[0],),int(posttime.split('-')[1]))
posttime = int(time.mktime(s.timetuple()))
elif posttime.find('分钟') !=-1:
n = int(posttime.replace('分钟',''))*60
posttime = int(time.time() - n)
elif posttime.find('小时') !=-1:
n = int(posttime.replace('小时',''))*60*60
posttime = int(time.time() - n)
else:
posttime = int(time.time())
return posttime
def run(self):
self.pageNo = 1
while 1:
if self.isStoped == True:
break
if self.pageNo:
url = self.baseUrl(self.param['args'],self.pageNo)
self.__getLinks(url)
def baseUrl(self,args,pn):
if args['region'] != '':
args['region'] = args['region']+"/"
else:
args['region'] = ''
if args['option']!= '':
args['option'] = args['option']+"/"
else:
args['option'] = ''
if self.param['flag'] == 1:
baseUrl = 'http://%s.58.com/%sershoufang/0/%spn%d/?final=1&searchtype=3&sourcetype=5&key=%s' % (args['city'],args['region'],args['option'],pn,args['q'])
if self.param['flag'] == 2:
baseUrl = 'http://%s.58.com/%szufang/0/%spn%d/?final=1&key=%s' % (args['city'],args['region'],args['option'],pn,args['q']);
if self.param['flag'] == 3:
args['option'] = args['option'][:-1]
baseUrl = 'http://%s.58.com/%sershoufang/0/%sh2/pn%d/?final=1&key=%s&searchtype=3&sourcetype=5' % (args['city'],args['region'],args['option'],pn,args['q'])
if self.param['flag'] == 4:
baseUrl = 'http://%s.58.com/%sqiuzu/0/%spn%d/?final=1&key=%s' % (args['city'],args['region'],args['option'],pn,args['q'])
return baseUrl
q = []
if __name__=="__main__":
url1 = 'http://sh.58.com/ershoufang/7489033818376x.shtml'
url2 = 'http://sh.58.com/zufang/7468246420482x.shtml'
url3 = 'http://sh.58.com/ershoufang/7544211350792x.shtml'
url4 = 'http://sh.58.com/qiuzu/7543125341446x.shtml'
link2 = 'http://sh.58.com/zufang/0/?selpic=2'
link1 = 'http://sh.58.com/ershoufang/'
link3 = 'http://sh.58.com/ershoufang/h2/'
link4 = 'http://sh.58.com/qiuzu/0/'
data = {}
data['flag'] = 1
data['city'] = 1
data['getPhone'] = 1
cc = BaseCrawl(data,q)
cc.run()
|
apache-2.0
| -6,180,525,446,337,366,000 | 38.575967 | 172 | 0.475727 | false |
AntaresConsulting/odoo-marble
|
product_marble/models/stock.py
|
1
|
25218
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api, _
from openerp.osv import osv, fields
# from openerp.tools.translate import _
from operator import itemgetter
import inspect
import _common as comm
import logging
_logger = logging.getLogger(__name__)
class stock_picking(osv.osv):
_name = "stock.picking"
_inherit = "stock.picking"
_description = "Picking List"
_tipo_de_move = [
('raw', 'Raw'),
('insu', 'Input'),
('bac', 'Bacha'),
]
def _get_tipo_de_move(self, cr, uid, context=None):
return sorted(self._tipo_de_move, key=itemgetter(1))
def _get_types(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids):
if len(pick.move_lines) > 0:
res.update({pick.id : pick.move_lines[0].prod_type})
return res
@api.cr_uid_ids_context
def do_enter_transfer_details_marble(self, cr, uid, picking, context=None):
resp = super(stock_picking, self).do_enter_transfer_details(cr, uid, picking, context=context)
return resp['res_id']
_columns = {
'move_prod_type': fields.selection(_get_tipo_de_move, string='Product Type picking', select=True),
'prod_type': fields.function(_get_types, type='char', string='Product Type', store=False),
}
stock_picking()
class stock_pack_operation(osv.osv):
_name = "stock.pack.operation"
_inherit = "stock.pack.operation"
_description = "Packing Operation"
#dimension_id = openerp.fields.Many2one('product.marble.dimension', string='Dimension', ondelete='set null')
#dimension_unit = openerp.fields.Integer(string='Units')
#prod_type = openerp.fields.Char(related='product_id.prod_type', string='Product Type')
_columns = {
'dimension_id': fields.many2one('product.marble.dimension', 'Dimension', domain=[('state','=','done')]),
'dimension_unit': fields.integer('Units', size=3), # units
'prod_type' : fields.related('product_id', 'prod_type', type='char', relation='product.template', string='Product Type'),
}
_defaults = {
'dimension_id': False,
'dimension_unit': 0,
}
def _before_save(self, cr, uid, vals, context):
obj_pick = self.pool.get('stock.picking')
pick_id = vals.get('picking_id',False)
prod_id = vals.get('product_id',False)
        # locate the 'stock_move' by picking + product, then take its units to record on the stock.pack.operation.
for mov in obj_pick.browse(cr, uid, pick_id, context=context).move_lines:
if mov.product_id.id == prod_id and mov.product_id.prod_type == comm.RAW:
vals.update(dimension_id = mov.dimension_id.id)
vals.update(dimension_unit = mov.dimension_unit)
break
def create(self, cr, uid, vals, context=None):
self._before_save(cr, uid, vals, context)
#_logger.info('>> stock_pack_opetarion >> create >> 12- vals = %s', vals)
return super(stock_pack_operation, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
#_logger.info('>> stock_pack_opetarion >> write >> 20- vals = %s', vals)
self._before_save(cr, uid, vals, context)
#_logger.info('>> stock_pack_opetarion >> write >> 21- vals = %s', vals)
return super(stock_pack_operation, self).write(cr, uid, ids, vals, context=context)
stock_pack_operation()
class stock_move(osv.osv):
_inherit = "stock.move"
    # Determine the type of movement with respect to our Stock locations:
    # return  0 = does not affect Stock,
    #         1 = product enters Stock (in: input),
    #        -1 = product leaves Stock (out: output)
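    # For example (assuming comm.get_location_stock/_recortes_stock resolve to our
    # own stock locations): supplier -> our stock returns 1, our stock -> customer
    # returns -1, and a move between two of our own locations returns 0 (zeroVal).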
def stock_move(self, cr, uid, mov=None, zeroVal=None):
zeroValue = 0 if zeroVal == None else zeroVal
if not mov:
_logger.info(">> stock_move >> Stock.Move no definido.")
return zeroValue
loc_propio = [comm.get_location_stock(self, cr, uid), \
comm.get_location_recortes_stock(self, cr, uid)]
loc_orig_parents = comm.get_loc_parents(self, mov.location_id, [])
loc_dest_parents = comm.get_loc_parents(self, mov.location_dest_id, [])
loc_orig_propio = (loc_propio[0] in loc_orig_parents) or (loc_propio[1] in loc_orig_parents)
loc_dest_propio = (loc_propio[0] in loc_dest_parents) or (loc_propio[1] in loc_dest_parents)
#_logger.info(">> stock_move >> 1- loc_propio = %s", loc_propio)
#_logger.info(">> stock_move >> 2- loc_orig_parents = %s", loc_orig_parents)
#_logger.info(">> stock_move >> 3- loc_orig_propio = %s", loc_orig_propio)
#_logger.info(">> stock_move >> 4- loc_dest_parents = %s", loc_dest_parents)
#_logger.info(">> stock_move >> 5- loc_dest_propio = %s", loc_dest_propio)
        if loc_orig_propio and loc_dest_propio:
            _logger.info(">> stock_move = 0 (NULL): internal move within our own sectors.")
            return zeroValue
        if not loc_orig_propio and not loc_dest_propio:
            _logger.info(">> stock_move = 0 (NULL): internal move within sectors that are not ours.")
            return zeroValue
        if not loc_orig_propio and loc_dest_propio:
            _logger.info(">> stock_move = 1 (IN): goods entering our warehouse/sector.")
            return 1
        if loc_orig_propio and not loc_dest_propio:
            _logger.info(">> stock_move = -1 (OUT): goods leaving our warehouse/sector.")
            return -1
        _logger.warning(">> ERROR >> stock_move = 0 >> In or Out? Operation not defined...")
        return zeroValue
def _get_sign_qty(self, cr, uid, ids, field_name, arg, context=None):
if not ids:
return {}
res = {}
bal = 0.00
ids_by_date = self.search(cr, uid, [('id','in',ids)], order='date')
for m in self.browse(cr, uid, ids_by_date):
fields = {}
# sign = self._get_sign(m)
sign = self.stock_move(cr, uid, m, 1)
fields['qty_dimension'] = sign * m.dimension_unit
fields['qty_product'] = sign * m.product_qty
bal += fields['qty_product']
fields['qty_balance'] = bal
res[m.id] = fields
# _logger.info(">> _get_field_with_sign >> 5 >> res = %s", res)
return res
def _get_types(self, cr, uid, ids, field_name, arg, context=None):
#_logger.info(">> _get_types >> 1- ids = %s", ids)
res = {}
if not ids: return res
if not isinstance(ids, (list,tuple)): ids = [ids]
types = comm.get_prod_types(self, cr, uid, context)
#_logger.info(">> _get_types >> 2- types = %s", types)
for ms_id in self.browse(cr, uid, ids, context):
cid = ms_id.product_id.categ_id.id
#_logger.info(">> _get_types >> 3- cid = %s", cid)
res.update({ms_id.id : types.get(cid,'*')})
#_logger.info(">> _get_types >> 4- res = %s", res)
return res
def _is_raw(self, cr, uid, ids, field_name, arg, context=None):
#"""
#Determina si [ids stock_move] tiene producto, del tipo is_raw si/no...
#"""
#res = {}
#if not ids:
# return res
# para cada stock_move -> recupero su correspondiente prod_id
#prod_ids = [sm.product_id.id for sm in self.browse(cr, uid, ids)]
# recupero is_raw por cada producto: {prod_id: is_raw}
#data = comm.is_raw_material_by_product_id(self, cr, uid, prod_ids)
# convierto de {prod_id: is_raw} -> {stock_move_id: is_raw}:
#res = {ids[k]: (data[prod_ids[k]] or False) for k in range(len(ids))}
# _logger.info("10 >> _is_raw >> res = %s", res)
#return res
res = { sm.id : (sm.product_id.prod_type == comm.RAW) for sm in self.browse(cr, uid, ids) }
#_logger.info("10 >> _is_raw >> res = %s", res)
return res
def _get_move_name(self, cr, uid, pro_id=False, dim_id=False):
name = ''
if not pro_id:
return name
obj_pro = self.pool.get('product.product')
name = obj_pro.name_get(cr, uid, [pro_id], context=None)[0][1]
if not dim_id or \
not comm.is_raw_material_by_product_id(self, cr, uid, [pro_id])[pro_id]:
return name
obj_dim = self.pool.get('product.marble.dimension')
d = obj_dim.browse(cr, uid, [dim_id])[0]
name = "%s >> %s" % (name, d.dimension)
return name
# ------------------------------------------------------------------------
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False):
res = super(stock_move, self).onchange_product_id(cr, uid, ids, prod_id, loc_id, loc_dest_id, partner_id)
#_logger.info(">> onchange_product_id >> 1- res = %s", res)
v = {}
if (not res) or (not prod_id):
return v
no_prod_id = ('product_id' not in res['value'])
if no_prod_id:
res['value'].update({'product_id':prod_id})
v = self.calculate_dim(cr, uid, res['value'])
if no_prod_id:
del v['product_id']
res['value'].update(v)
#_logger.info(">> onchange_product_id >> 2- res = %s", res)
return res
def onchange_calculate_dim(self, cr, uid, ids, pro_id, pro_uom, pro_qty, dim_id, dim_unit):
v = {
'product_id' : pro_id,
'product_uom' : pro_uom,
'product_uom_qty' : pro_qty,
'dimension_id' : dim_id,
'dimension_unit' : dim_unit,
'is_raw' : False,
'prod_type' : comm.OTHER,
}
# _logger.info(">> onchange_calculate_dim >> 0- val = %s", val)
val = self.calculate_dim(cr, uid, v)
# _logger.info(">> onchange_calculate_dim >> 1- val = %s", val)
return {'value': val}
def calculate_dim(self, cr, uid, val):
#_logger.info(" >> calculate_dim >> 100- val = %s", val)
pro_id = val.get('product_id', False)
pro_uom = val.get('product_uom', False)
pro_uos = val.get('product_uos', False)
pro_qty = val.get('product_uom_qty', 0.00)
dim_id = val.get('dimension_id', False)
dim_unit = val.get('dimension_unit', 0.00)
is_raw = val.get('is_raw', False)
prod_type = val.get('prod_type', comm.OTHER)
if not pro_id:
return val
#pro = self.pool.get('product.product').browse(cr, uid, pro_id)
#_logger.info(" >> calculate_dim >> 1- prod = %s", pro)
#pro = self.pool.get('product.product').browse(cr, uid, pro_id).categ_id
#_logger.info(" >> calculate_dim >> 2- prod = %s", pro)
#cid = self.pool.get('product.product').browse(cr, uid, pro_id).categ_id.id
#prod_type = comm.get_prod_types(self, cr, uid).get(cid, comm.OTHER)
#val['prod_type'] = prod_type
prod_type = self.pool.get('product.product').browse(cr, uid, pro_id).prod_type
val['prod_type'] = prod_type
m2 = 0.00
#is_raw = comm.is_raw_material_by_product_id(self, cr, uid, [pro_id])[pro_id]
is_raw = (prod_type == comm.RAW)
if prod_type not in ('raw', 'bacha'):
val['description'] = self._get_move_name(cr, uid, pro_id, dim_id)
return val
elif prod_type == 'bacha':
val['description'] = self._get_move_name(cr, uid, pro_id, dim_id)
val['product_uom'] = comm.get_uom_units_id(self,cr,uid)
return val
m2 = 0.00
if dim_id:
#obj = self.pool.get('product.marble.dimension')
#data = obj.read(cr, uid, [dim_id], ['m2'], context=None)
#m2 = data[0]['m2'] if (len(data) > 0 and len(data[0]) > 0) else 0.00
m2 = self.pool.get('product.marble.dimension').browse(cr, uid, dim_id).m2
pro_qty = dim_unit * m2
pro_uom = comm.get_uom_m2_id(self,cr,uid)
v = {}
v['product_id'] = pro_id
v['product_uos'] = pro_uos
v['product_uom'] = pro_uom
v['product_uom_qty'] = pro_qty
v['dimension_id'] = dim_id
v['dimension_unit'] = dim_unit
v['is_raw'] = is_raw
v['prod_type'] = prod_type
v['description'] = self._get_move_name(cr, uid, pro_id, dim_id)
#_logger.info(" >> calculate_dim >> 101- v = %s", v)
return v
# ------------------------------------------------------------------------
def _check_data_before_save(self, cr, uid, sm_id, val):
#_logger.info(">> _check_data_before_save >> 1- sm_id = %s", sm_id)
#_logger.info(">> _check_data_before_save >> 2- val = %s", val)
if 'product_id' not in val: return
        # fields to evaluate
        fields_list = ['product_id','product_uom','product_uom_qty','dimension_id','dimension_unit','is_raw','description']
        # if no element of [fields_list] is present in [val] >> leave, nothing to process...
        if not any(e in fields_list for e in val.keys()):
            return
to_update = {}
no_update = {}
obj = (sm_id and self.pool.get('stock.move').browse(cr, uid, sm_id)) or False
#_logger.info(">> _check_data_before_save >> 3- obj = %s", obj)
        # split [info supplied for update] from [computed info, required but not to be updated]
for field in fields_list:
if (field in val):
to_update[field] = val[field]
continue
            # >> if the field is 'read-only' >> the data does not come through in vals...
elif (field in ['product_uom', 'product_uom_qty', 'description']):
to_update[field] = val.get(field,'')
continue
else:
no_update[field] = (obj and (obj[0][field].id if ('class' in str(type(obj[0][field]))) else obj[0][field])) or False
param = dict(to_update.items() + no_update.items())
v = self.calculate_dim(cr, uid, param)
        # update the values to return
        for field in to_update:
            if (field not in val) and (not v[field]):
                # do not copy it...
                pass
            else:
                val[field] = v[field]
# -------------------------------------------------
        # if 'is_raw' >> validate the required data...
valu = v
mov = obj and obj[0]
#_logger.info(">> _check_data_before_save >> 6- mov = %s", mov)
is_raw = valu.get('is_raw',False) or (mov and mov.is_raw)
dim_id = valu.get('dimension_id',0) or (mov and mov.dimension_id.id)
dim_unit = valu.get('dimension_unit',0) or (mov and mov.dimension_unit)
pro_qty = valu.get('product_uom_qty',0) or (mov and mov.product_uom_qty)
msg = self._check_data_required(cr, uid, is_raw, dim_id, dim_unit, pro_qty)
if msg:
raise osv.except_osv(_('Error'), _(msg))
return
def _check_data_required(self, cr, uid, is_raw, dim_id, dim_unit, prod_qty):
if not is_raw:
return ''
if not dim_id:
return 'You cannot save a Move-Stock without Dimension (id)'
if not dim_unit:
return 'You cannot save a Move-Stock without Quantity Dimension (qty)'
if not prod_qty:
return 'You cannot save a Move-Stock without Quantity Product (uom qty)'
return ''
# ------------------------------------------------------------------------
def create(self, cr, uid, data, context=None):
#_logger.info('>> stock_move >> create >> 1- data = %s', data)
self._check_data_before_save(cr, uid, [], data)
#_logger.info('>> stock_move >> create >> 2- data = %s', data)
return super(stock_move, self).create(cr, uid, data, context=context)
def write(self, cr, uid, ids, vals, context=None):
#for ms_id in ids:
# self._check_data_before_save(cr, uid, ms_id, vals)
#_logger.info('>> stock_move >> write >> 11- ids = %s', ids)
#_logger.info('>> stock_move >> write >> 12- vals = %s', vals)
#if len(ids) > 1:
# raise osv.except_osv(_('Error'), 'TODO: A corregir. Mas de un registro a escribir....')
sm_id = ids[0] if len(ids) >= 1 else False
self._check_data_before_save(cr, uid, sm_id, vals)
#_logger.info('>> stock_move >> write >> 13- vals = %s', vals)
return super(stock_move, self).write(cr, uid, ids, vals, context=context)
# --- extend: registro en balance ---
def action_done(self, cr, uid, ids, context=None):
if not super(stock_move, self).action_done(cr, uid, ids, context=context):
return False
#_logger.info(">> _action_done >> 01 >> ids = %s", ids)
obj_bal = self.pool.get('product.marble.dimension.balance')
#obj_mov = [move for move in self.browse(cr, uid, ids, context=context) if move.state == 'done' and move.product_id.is_raw]
obj_mov = [move for move in self.browse(cr, uid, ids, context=context) if move.state == 'done' and (move.product_id.prod_type == comm.RAW)]
if not obj_mov:
return True
#_logger.info(">> _action_done >> 02 >> obj_mov = %s", obj_mov)
        # obj_mov is raw -> check:
        # >> if (move.location = stock_loc or move.location_dest = stock_loc)
        # >> register it in the Balance.
# stock_loc = comm.get_location_stock(self, cr, uid)
# bal_list = [mov for mov in obj_mov if stock_loc in [mov.location_id.id, mov.location_dest_id.id]]
# bal_list = [mov for mov in obj_mov if self.stock_move(cr, uid, mov) != 0]
bal_list = [mov for mov in obj_mov]
#_logger.info(">> _action_done >> 02 >> stock_loc = %s", stock_loc)
#_logger.info(">> _action_done >> 03 >> bal_list = %s", bal_list)
for mov in bal_list:
# valid data required
#msg = self._check_data_required(cr, uid, mov.product_id.is_raw, mov.dimension_id, mov.dimension_unit, mov.product_uom_qty)
is_raw = (mov.product_id.prod_type == comm.RAW)
msg = self._check_data_required(cr, uid, is_raw, mov.dimension_id, mov.dimension_unit, mov.product_uom_qty)
if msg:
raise osv.except_osv(_('Error'), _(msg))
#_logger.info(">> _action_done >> 888- stock_move = %s", self.stock_move(cr, uid, mov))
# set data..
val = {
'prod_id': mov.product_id.id,
'dim_id': mov.dimension_id.id,
'dimension_unit': mov.dimension_unit,
'dimension_m2': mov.product_uom_qty,
# 'typeMove': 'in' if stock_loc == mov.location_dest_id.id else 'out'
'typeMove': 'in' if self.stock_move(cr, uid, mov) > 0 else 'out'
}
#_logger.info(">> _action_done >> 04- val = %s", val)
obj_bal.register_balance(cr, uid, val, context)
#_logger.info(">> _action_done >> 05- OK >> val = %s", val)
return True
_columns = {
'description': fields.char('Description'),
'dimension_id': fields.many2one('product.marble.dimension', 'Dimension', select=True, states={'done': [('readonly', True)]}, domain=[('state','=','done')]),
'dimension_unit': fields.integer('Units', size=3, states={'done': [('readonly', True)]}),
'is_raw': fields.function(_is_raw, type='boolean', string='Is Marble'),
'prod_type' : fields.related('product_id', 'prod_type', type='char', relation='product.template', string='Product Type'),
'employee_id': fields.many2one('hr.employee', 'Empleado', select=True, states={'done': [('readonly', True)]}, domain=[('active','=',True)]),
'employee_image': fields.related('employee_id', 'image_small', type='binary', relation='hr.employee', string='Part Number', store=True, readonly=True),
'partner_picking_id': fields.related('picking_id', 'partner_id', type='many2one', relation='res.partner', string='Patern', store=False),
'qty_dimension': fields.function(_get_sign_qty, string='Unidades', multi="sign"),
'qty_product': fields.function(_get_sign_qty, string='Area (m2)', multi="sign"),
'qty_balance': fields.function(_get_sign_qty, string='Balance (m2)', multi="sign"),
'use_client_location': fields.boolean('Does the customer provides the products?', readonly=True),
}
_defaults = {
'dimension_id': False,
'dimension_unit': 0,
}
stock_move()
class stock_inventory_line(osv.osv):
_inherit = "stock.inventory.line"
_name = "stock.inventory.line"
_description = "Inventory Line"
_columns = {
'is_raw': fields.boolean('Is Raw', readonly=True),
'dimension_id': fields.many2one('product.marble.dimension', 'Dimension', domain=[('state','=','done')]),
'dimension_unit': fields.integer('Real Dim. [Units]', size=3), # units
'dimension_m2': fields.float('Real Dim. [M2]', digits=(5,3)), # m2
'dimension_unit_theoretical': fields.integer('Theoretical Dim. [Units]', size=3, readonly=True), # units
'dimension_m2_theoretical': fields.float('Theoretical Dim. [M2]', digits=(5,3), readonly=True), # m2
}
defaults = {
'is_raw': False,
'dimension_id': False,
'dimension_unit': 0,
'dimension_m2': 0,
'dimension_unit_theoretical': 0,
'dimension_m2_theoretical': 0,
}
# overwrite: stock > stock_inventory_line - odoo v8.0 - line: 2727 - 27555
    # override the method to incorporate 'dimensions' when the product is raw material
def _resolve_inventory_line(self, cr, uid, inventory_line, context=None):
stock_move_obj = self.pool.get('stock.move')
if inventory_line.is_raw:
diff_unit = inventory_line.dimension_unit_theoretical - inventory_line.dimension_unit
diff = inventory_line.dimension_m2_theoretical - inventory_line.dimension_m2
else:
diff = inventory_line.theoretical_qty - inventory_line.product_qty
if not diff:
return
        # each theoretical line where the difference between theoretical and checked quantities is not 0 is a line for which we need to create a stock move
vals = {
'name': _('INV:') + (inventory_line.inventory_id.name or ''),
'product_id': inventory_line.product_id.id,
'product_uom': inventory_line.product_uom_id.id,
'date': inventory_line.inventory_id.date,
'company_id': inventory_line.inventory_id.company_id.id,
'inventory_id': inventory_line.inventory_id.id,
'state': 'confirmed',
'restrict_lot_id': inventory_line.prod_lot_id.id,
'restrict_partner_id': inventory_line.partner_id.id,
'dimension_id': inventory_line.dimension_id.id # dimension
}
inventory_location_id = inventory_line.product_id.property_stock_inventory.id
if diff < 0:
# found more than expected
vals['location_id'] = inventory_location_id
vals['location_dest_id'] = inventory_line.location_id.id
            vals['product_uom_qty'] = -diff # dim >> m2 [shortage]
            vals['dimension_unit'] = (inventory_line.is_raw and -diff_unit) or 0 # dim >> units [shortage]
else:
# found less than expected
vals['location_id'] = inventory_line.location_id.id
vals['location_dest_id'] = inventory_location_id
            vals['product_uom_qty'] = diff # dim >> m2 [surplus]
            vals['dimension_unit'] = (inventory_line.is_raw and diff_unit) or 0 # dim >> units [surplus]
#_logger.info(">> _inv >> 01- vals = %s", vals)
#_logger.info(">> _inv >> 02- uom_qty = %s", vals['product_uom_qty'])
#_logger.info(">> _inv >> 03- dim_uni = %s", vals['dimension_unit'])
return stock_move_obj.create(cr, uid, vals, context=context)
stock_inventory_line()
#
|
gpl-2.0
| -4,009,658,643,104,912,400 | 41.884354 | 164 | 0.560398 | false |
CiscoSystems/dashboard-quantum-beta
|
django-openstack/django_openstack/api.py
|
1
|
19242
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Fourth Paradigm Development, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Methods and interface objects used to interact with external apis.
API method calls return objects that in many cases carry attributes mapping
directly to the data returned from the API http call.
Unfortunately, these objects are also often constructed dynamically, making
it difficult to know what data is available from the API object. Because of
this, all API calls should wrap their returned object in one defined here,
using only explicitly defined attributes and/or methods.
In other words, django_openstack developers not working on django_openstack.api
shouldn't need to understand the finer details of APIs for Nova/Glance/Swift et
al.
"""
from django.conf import settings
import cloudfiles
import glance.client
import httplib
import json
import logging
import openstack.compute
import openstackx.admin
import openstackx.api.exceptions as api_exceptions
import openstackx.extras
import openstackx.auth
from urlparse import urlparse
LOG = logging.getLogger('django_openstack.api')
class APIResourceWrapper(object):
""" Simple wrapper for api objects
Define _attrs on the child class and pass in the
api object as the only argument to the constructor
"""
_attrs = []
def __init__(self, apiresource):
self._apiresource = apiresource
def __getattr__(self, attr):
if attr in self._attrs:
# __getattr__ won't find properties
return self._apiresource.__getattribute__(attr)
else:
LOG.debug('Attempted to access unknown attribute "%s" on'
' APIResource object of type "%s" wrapping resource of'
' type "%s"' % (attr, self.__class__,
self._apiresource.__class__))
raise AttributeError(attr)
class APIDictWrapper(object):
""" Simple wrapper for api dictionaries
Some api calls return dictionaries. This class provides identical
behavior as APIResourceWrapper, except that it will also behave as a
dictionary, in addition to attribute accesses.
Attribute access is the preferred method of access, to be
consistent with api resource objects from openstackx
"""
def __init__(self, apidict):
self._apidict = apidict
def __getattr__(self, attr):
if attr in self._attrs:
try:
return self._apidict[attr]
except KeyError, e:
raise AttributeError(e)
else:
            LOG.debug('Attempted to access unknown item "%s" on'
                      ' APIDictWrapper object of type "%s"'
                      % (attr, self.__class__))
raise AttributeError(attr)
def __getitem__(self, item):
try:
return self.__getattr__(item)
except AttributeError, e:
# caller is expecting a KeyError
raise KeyError(e)
def get(self, item, default=None):
try:
return self.__getattr__(item)
except AttributeError:
return default
class Container(APIResourceWrapper):
"""Simple wrapper around cloudfiles.container.Container"""
_attrs = ['name']
class Console(APIResourceWrapper):
"""Simple wrapper around openstackx.extras.consoles.Console"""
_attrs = ['id', 'output', 'type']
class Flavor(APIResourceWrapper):
"""Simple wrapper around openstackx.admin.flavors.Flavor"""
_attrs = ['disk', 'id', 'links', 'name', 'ram', 'vcpus']
class Image(APIDictWrapper):
"""Simple wrapper around glance image dictionary"""
_attrs = ['checksum', 'container_format', 'created_at', 'deleted',
'deleted_at', 'disk_format', 'id', 'is_public', 'location',
'name', 'properties', 'size', 'status', 'updated_at']
def __getattr__(self, attrname):
if attrname == "properties":
return ImageProperties(super(Image, self).__getattr__(attrname))
else:
return super(Image, self).__getattr__(attrname)
class ImageProperties(APIDictWrapper):
"""Simple wrapper around glance image properties dictionary"""
_attrs = ['architecture', 'image_location', 'image_state', 'kernel_id',
'project_id', 'ramdisk_id']
class KeyPair(APIResourceWrapper):
"""Simple wrapper around openstackx.extras.keypairs.Keypair"""
_attrs = ['fingerprint', 'key_name', 'private_key']
class Server(APIResourceWrapper):
"""Simple wrapper around openstackx.extras.server.Server
Preserves the request info so image name can later be retrieved
"""
_attrs = ['addresses', 'attrs', 'hostId', 'id', 'imageRef', 'links',
'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
'image_name', 'virtual_interfaces']
def __init__(self, apiresource, request):
super(Server, self).__init__(apiresource)
self.request = request
def __getattr__(self, attr):
if attr == "attrs":
return ServerAttributes(super(Server, self).__getattr__(attr))
else:
return super(Server, self).__getattr__(attr)
@property
def image_name(self):
image = image_get(self.request, self.imageRef)
return image.name
class ServerAttributes(APIDictWrapper):
"""Simple wrapper around openstackx.extras.server.Server attributes
Preserves the request info so image name can later be retrieved
"""
_attrs = ['description', 'disk_gb', 'host', 'image_ref', 'kernel_id',
'key_name', 'launched_at', 'mac_address', 'memory_mb', 'name',
'os_type', 'project_id', 'ramdisk_id', 'scheduled_at',
'terminated_at', 'user_data', 'user_id', 'vcpus', 'hostname']
class Services(APIResourceWrapper):
_attrs = ['disabled', 'host', 'id', 'last_update', 'stats', 'type', 'up',
'zone']
class SwiftObject(APIResourceWrapper):
_attrs = ['name']
class Tenant(APIResourceWrapper):
"""Simple wrapper around openstackx.auth.tokens.Tenant"""
_attrs = ['id', 'description', 'enabled']
class Token(APIResourceWrapper):
"""Simple wrapper around openstackx.auth.tokens.Token"""
_attrs = ['id', 'serviceCatalog', 'tenant_id', 'username']
class Usage(APIResourceWrapper):
"""Simple wrapper around openstackx.extras.usage.Usage"""
_attrs = ['begin', 'instances', 'stop', 'tenant_id',
'total_active_disk_size', 'total_active_instances',
'total_active_ram_size', 'total_active_vcpus', 'total_cpu_usage',
'total_disk_usage', 'total_hours', 'total_ram_usage']
class User(APIResourceWrapper):
"""Simple wrapper around openstackx.extras.users.User"""
_attrs = ['email', 'enabled', 'id', 'tenantId']
def url_for(request, service_name, admin=False):
catalog = request.session['serviceCatalog']
if admin:
rv = catalog[service_name][0]['adminURL']
else:
rv = catalog[service_name][0]['internalURL']
return rv
def check_openstackx(f):
"""Decorator that adds extra info to api exceptions
The dashboard currently depends on openstackx extensions being present
    in nova. Error messages for views depending on these
extensions do not lead to the conclusion that nova is missing
extensions.
This decorator should be dropped and removed after keystone and
dashboard more gracefully handle extensions and openstackx extensions
aren't required by the dashboard in nova.
"""
def inner(*args, **kwargs):
try:
return f(*args, **kwargs)
except api_exceptions.NotFound, e:
e.message = e.details or ''
e.message += ' This error may be caused by missing openstackx' \
' extensions in nova. See the dashboard README.'
raise
return inner
def compute_api(request):
compute = openstack.compute.Compute(
auth_token=request.session['token'],
management_url=url_for(request, 'nova'))
# this below hack is necessary to make the jacobian compute client work
# TODO(mgius): It looks like this is unused now?
compute.client.auth_token = request.session['token']
compute.client.management_url = url_for(request, 'nova')
LOG.debug('compute_api connection created using token "%s"'
' and url "%s"' %
(request.session['token'], url_for(request, 'nova')))
return compute
def account_api(request):
LOG.debug('account_api connection created using token "%s"'
' and url "%s"' %
(request.session['token'],
url_for(request, 'identity', True)))
return openstackx.extras.Account(
auth_token=request.session['token'],
management_url=url_for(request, 'identity', True))
def glance_api(request):
o = urlparse(url_for(request, 'glance'))
LOG.debug('glance_api connection created for host "%s:%d"' %
(o.hostname, o.port))
return glance.client.Client(o.hostname, o.port)
def admin_api(request):
LOG.debug('admin_api connection created using token "%s"'
' and url "%s"' %
(request.session['token'], url_for(request, 'nova', True)))
return openstackx.admin.Admin(auth_token=request.session['token'],
management_url=url_for(request, 'nova', True))
def extras_api(request):
LOG.debug('extras_api connection created using token "%s"'
' and url "%s"' %
(request.session['token'], url_for(request, 'nova')))
return openstackx.extras.Extras(auth_token=request.session['token'],
management_url=url_for(request, 'nova'))
def auth_api():
LOG.debug('auth_api connection created using url "%s"' %
settings.OPENSTACK_KEYSTONE_URL)
return openstackx.auth.Auth(
management_url=settings.OPENSTACK_KEYSTONE_URL)
def swift_api():
return cloudfiles.get_connection(
settings.SWIFT_ACCOUNT + ":" + settings.SWIFT_USER,
settings.SWIFT_PASS,
authurl=settings.SWIFT_AUTHURL)
def console_create(request, instance_id, kind=None):
return Console(extras_api(request).consoles.create(instance_id, kind))
def flavor_create(request, name, memory, vcpu, disk, flavor_id):
return Flavor(admin_api(request).flavors.create(
name, int(memory), int(vcpu), int(disk), flavor_id))
def flavor_delete(request, flavor_id, purge=False):
admin_api(request).flavors.delete(flavor_id, purge)
def flavor_get(request, flavor_id):
return Flavor(compute_api(request).flavors.get(flavor_id))
@check_openstackx
def flavor_list(request):
return [Flavor(f) for f in extras_api(request).flavors.list()]
def image_create(request, image_meta, image_file):
return Image(glance_api(request).add_image(image_meta, image_file))
def image_delete(request, image_id):
return glance_api(request).delete_image(image_id)
def image_get(request, image_id):
return Image(glance_api(request).get_image(image_id)[0])
def image_list_detailed(request):
return [Image(i) for i in glance_api(request).get_images_detailed()]
def image_update(request, image_id, image_meta=None):
image_meta = image_meta and image_meta or {}
return Image(glance_api(request).update_image(image_id,
image_meta=image_meta))
def keypair_create(request, name):
return KeyPair(extras_api(request).keypairs.create(name))
def keypair_delete(request, keypair_id):
extras_api(request).keypairs.delete(keypair_id)
@check_openstackx
def keypair_list(request):
return [KeyPair(key) for key in extras_api(request).keypairs.list()]
def server_create(request, name, image, flavor, user_data, key_name):
return Server(extras_api(request).servers.create(
name, image, flavor, user_data=user_data, key_name=key_name),
request)
def server_delete(request, instance):
compute_api(request).servers.delete(instance)
def server_get(request, instance_id):
    server = compute_api(request).servers.get(instance_id)
    LOG.info(server)
    return Server(server, request)
@check_openstackx
def server_list(request):
return [Server(s, request) for s in extras_api(request).servers.list()]
def server_reboot(request,
instance_id,
hardness=openstack.compute.servers.REBOOT_HARD):
server = server_get(request, instance_id)
server.reboot(hardness)
def service_get(request, name):
return Services(admin_api(request).services.get(name))
@check_openstackx
def service_list(request):
return [Services(s) for s in admin_api(request).services.list()]
def service_update(request, name, enabled):
return Services(admin_api(request).services.update(name, enabled))
def token_get_tenant(request, tenant_id):
tenants = auth_api().tenants.for_token(request.session['token'])
for t in tenants:
if str(t.id) == str(tenant_id):
return Tenant(t)
LOG.warning('Unknown tenant id "%s" requested' % tenant_id)
def token_list_tenants(request, token):
return [Tenant(t) for t in auth_api().tenants.for_token(token)]
def tenant_create(request, tenant_id, description, enabled):
return Tenant(account_api(request).tenants.create(tenant_id,
description,
enabled))
def tenant_get(request, tenant_id):
return Tenant(account_api(request).tenants.get(tenant_id))
@check_openstackx
def tenant_list(request):
return [Tenant(t) for t in account_api(request).tenants.list()]
def tenant_update(request, tenant_id, description, enabled):
return Tenant(account_api(request).tenants.update(tenant_id,
description,
enabled))
def token_create(request, tenant, username, password):
return Token(auth_api().tokens.create(tenant, username, password))
def token_info(request, token):
# TODO(mgius): This function doesn't make a whole lot of sense to me. The
# information being gathered here really ought to be attached to Token() as
# part of token_create. May require modification of openstackx so that the
# token_create call returns this information as well
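# The shape of the identity-service response assumed below (inferred from the
# keys that are accessed, not from documentation) is roughly:
#   {"auth": {"user": {"tenantId": ..., "username": ...,
#                      "roleRefs": [{"roleId": "Admin"}, ...]}}}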
hdrs = {"Content-type": "application/json",
"X_AUTH_TOKEN": settings.OPENSTACK_ADMIN_TOKEN,
"Accept": "text/json"}
o = urlparse(token.serviceCatalog['identity'][0]['adminURL'])
conn = httplib.HTTPConnection(o.hostname, o.port)
conn.request("GET", "/v2.0/tokens/%s" % token.id, headers=hdrs)
response = conn.getresponse()
data = json.loads(response.read())
admin = False
LOG.info(data)
for role in data['auth']['user']['roleRefs']:
if role['roleId'] == 'Admin':
admin = True
return {'tenant': data['auth']['user']['tenantId'],
'user': data['auth']['user']['username'],
'admin': admin}
@check_openstackx
def usage_get(request, tenant_id, start, end):
return Usage(extras_api(request).usage.get(tenant_id, start, end))
@check_openstackx
def usage_list(request, start, end):
return [Usage(u) for u in extras_api(request).usage.list(start, end)]
def user_create(request, user_id, email, password, tenant_id):
return User(account_api(request).users.create(
user_id, email, password, tenant_id))
def user_delete(request, user_id):
account_api(request).users.delete(user_id)
def user_get(request, user_id):
return User(account_api(request).users.get(user_id))
@check_openstackx
def user_list(request):
return [User(u) for u in account_api(request).users.list()]
def user_update_email(request, user_id, email):
return User(account_api(request).users.update_email(user_id, email))
def user_update_password(request, user_id, password):
return User(account_api(request).users.update_password(user_id, password))
def user_update_tenant(request, user_id, tenant_id):
return User(account_api(request).users.update_tenant(user_id, tenant_id))
def swift_container_exists(container_name):
try:
swift_api().get_container(container_name)
return True
except cloudfiles.errors.NoSuchContainer:
return False
def swift_object_exists(container_name, object_name):
container = swift_api().get_container(container_name)
try:
container.get_object(object_name)
return True
except cloudfiles.errors.NoSuchObject:
return False
def swift_get_containers():
return [Container(c) for c in swift_api().get_all_containers()]
def swift_create_container(name):
if swift_container_exists(name):
raise Exception('Container with name %s already exists.' % (name))
return Container(swift_api().create_container(name))
def swift_delete_container(name):
swift_api().delete_container(name)
def swift_get_objects(container_name, prefix=None):
container = swift_api().get_container(container_name)
return [SwiftObject(o) for o in container.get_objects(prefix=prefix)]
def swift_copy_object(orig_container_name, orig_object_name,
new_container_name, new_object_name):
container = swift_api().get_container(orig_container_name)
if swift_object_exists(new_container_name, new_object_name):
raise Exception('Object with name %s already exists in container %s'
% (new_object_name, new_container_name))
orig_obj = container.get_object(orig_object_name)
return orig_obj.copy_to(new_container_name, new_object_name)
def swift_upload_object(container_name, object_name, object_data):
container = swift_api().get_container(container_name)
obj = container.create_object(object_name)
obj.write(object_data)
def swift_delete_object(container_name, object_name):
container = swift_api().get_container(container_name)
container.delete_object(object_name)
def swift_get_object_data(container_name, object_name):
container = swift_api().get_container(container_name)
return container.get_object(object_name).stream()
|
apache-2.0
| 248,219,022,108,928,350 | 31.94863 | 79 | 0.653934 | false |
leotrs/decu
|
test/notsosimple_project/src/script.py
|
1
|
1196
|
"""
testscript.py
-------------
This is a test script for decu.
"""
from decu import Script, experiment, figure, run_parallel
import numpy as np
import matplotlib.pyplot as plt
class TestScript(Script):
@experiment(data_param='data')
def exp(self, data, param, param2):
"""Compute x**param for each data point."""
self.log.info('Working hard for {}..'.format(TestScript.exp.run))
return np.power(data, param) + param2
@figure()
def plot_result(self, data, result):
"""Plot results of experiment."""
plt.plot(data, result)
@figure()
def plot_many_results(self, data, results):
"""Plot results of experiment."""
plt.figure()
for res in results:
plt.plot(data, res)
def main(self):
"""Run some experiments and make some figures."""
data = np.arange(5)
result1 = self.exp(data, param=4, param2=10)
self.plot_result(data, result1)
param_list = [(data, x, y) for x, y in
zip(np.arange(5), np.arange(5, 10))]
result2 = run_parallel(self.exp, param_list)
self.plot_many_results(data, result2, suffix='parallel')
|
mit
| 5,614,853,097,660,750,000 | 26.813953 | 73 | 0.594482 | false |
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/buildmaster/tests/test_buildfarmjobbehavior.py
|
1
|
11277
|
# Copyright 2010-2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Unit tests for BuildFarmJobBehaviorBase."""
__metaclass__ = type
from datetime import datetime
import os
import shutil
import tempfile
from twisted.internet import defer
from zope.component import getUtility
from zope.security.proxy import removeSecurityProxy
from lp.archiveuploader.uploadprocessor import parse_build_upload_leaf_name
from lp.buildmaster.enums import BuildStatus
from lp.buildmaster.interactor import BuilderInteractor
from lp.buildmaster.interfaces.buildfarmjobbehavior import (
IBuildFarmJobBehavior,
)
from lp.buildmaster.model.buildfarmjobbehavior import BuildFarmJobBehaviorBase
from lp.buildmaster.tests.mock_slaves import WaitingSlave
from lp.registry.interfaces.pocket import PackagePublishingPocket
from lp.services.config import config
from lp.soyuz.interfaces.processor import IProcessorSet
from lp.testing import TestCaseWithFactory
from lp.testing.factory import LaunchpadObjectFactory
from lp.testing.fakemethod import FakeMethod
from lp.testing.layers import (
LaunchpadZopelessLayer,
ZopelessDatabaseLayer,
)
from lp.testing.mail_helpers import pop_notifications
class FakeBuildFarmJob:
"""Dummy BuildFarmJob."""
pass
class TestBuildFarmJobBehaviorBase(TestCaseWithFactory):
"""Test very small, basic bits of BuildFarmJobBehaviorBase."""
layer = ZopelessDatabaseLayer
def _makeBehavior(self, buildfarmjob=None):
"""Create a `BuildFarmJobBehaviorBase`."""
if buildfarmjob is None:
buildfarmjob = FakeBuildFarmJob()
else:
buildfarmjob = removeSecurityProxy(buildfarmjob)
return BuildFarmJobBehaviorBase(buildfarmjob)
def _makeBuild(self):
"""Create a `Build` object."""
x86 = getUtility(IProcessorSet).getByName('386')
distroarchseries = self.factory.makeDistroArchSeries(
architecturetag='x86', processor=x86)
distroseries = distroarchseries.distroseries
archive = self.factory.makeArchive(
distribution=distroseries.distribution)
pocket = PackagePublishingPocket.RELEASE
spr = self.factory.makeSourcePackageRelease(
distroseries=distroseries, archive=archive)
return spr.createBuild(
distroarchseries=distroarchseries, pocket=pocket, archive=archive)
def test_getBuildCookie(self):
buildfarmjob = self.factory.makeTranslationTemplatesBuildJob()
build = buildfarmjob.build
behavior = self._makeBehavior(buildfarmjob)
self.assertEqual(
'%s-%s' % (build.job_type.name, build.id),
behavior.getBuildCookie())
def test_getUploadDirLeaf(self):
# getUploadDirLeaf returns the current time, followed by the build
# cookie.
now = datetime.now()
build_cookie = self.factory.getUniqueString()
upload_leaf = self._makeBehavior().getUploadDirLeaf(
build_cookie, now=now)
self.assertEqual(
'%s-%s' % (now.strftime("%Y%m%d-%H%M%S"), build_cookie),
upload_leaf)
class TestGetUploadMethodsMixin:
"""Tests for `IPackageBuild` that need objects from the rest of LP."""
layer = LaunchpadZopelessLayer
def makeBuild(self):
"""Allow classes to override the build with which the test runs."""
raise NotImplementedError
def setUp(self):
super(TestGetUploadMethodsMixin, self).setUp()
self.build = self.makeBuild()
self.behavior = IBuildFarmJobBehavior(
self.build.buildqueue_record.specific_job)
def test_getUploadDirLeafCookie_parseable(self):
# getUploadDirLeaf should return a directory name
# that is parseable by the upload processor.
upload_leaf = self.behavior.getUploadDirLeaf(
self.behavior.getBuildCookie())
(job_type, job_id) = parse_build_upload_leaf_name(upload_leaf)
self.assertEqual(
(self.build.job_type.name, self.build.id), (job_type, job_id))
class TestHandleStatusMixin:
"""Tests for `IPackageBuild`s handleStatus method.
This should be run with a Trial TestCase.
"""
layer = LaunchpadZopelessLayer
def makeBuild(self):
"""Allow classes to override the build with which the test runs."""
raise NotImplementedError
def setUp(self):
super(TestHandleStatusMixin, self).setUp()
self.factory = LaunchpadObjectFactory()
self.build = self.makeBuild()
# For the moment, we require a builder for the build so that
# handleStatus_OK can get a reference to the slave.
self.builder = self.factory.makeBuilder()
self.build.buildqueue_record.markAsBuilding(self.builder)
self.slave = WaitingSlave('BuildStatus.OK')
self.slave.valid_file_hashes.append('test_file_hash')
self.interactor = BuilderInteractor()
self.behavior = self.interactor.getBuildBehavior(
self.build.buildqueue_record, self.builder, self.slave)
# We overwrite the buildmaster root to use a temp directory.
tempdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tempdir)
self.upload_root = tempdir
tmp_builddmaster_root = """
[builddmaster]
root: %s
""" % self.upload_root
config.push('tmp_builddmaster_root', tmp_builddmaster_root)
# We stub out our build's getUploaderCommand() method so
# we can check whether it was called as well as
# verifySuccessfulUpload().
removeSecurityProxy(self.build).verifySuccessfulUpload = FakeMethod(
result=True)
def assertResultCount(self, count, result):
self.assertEquals(
1, len(os.listdir(os.path.join(self.upload_root, result))))
def test_handleStatus_OK_normal_file(self):
# A filemap with plain filenames should not cause a problem.
# The call to handleStatus will attempt to get the file from
# the slave resulting in a URL error in this test case.
def got_status(ignored):
self.assertEqual(BuildStatus.UPLOADING, self.build.status)
self.assertResultCount(1, "incoming")
d = self.behavior.handleStatus(
self.build.buildqueue_record, 'OK',
{'filemap': {'myfile.py': 'test_file_hash'}})
return d.addCallback(got_status)
def test_handleStatus_OK_absolute_filepath(self):
# A filemap that tries to write to files outside of
# the upload directory will result in a failed upload.
def got_status(ignored):
self.assertEqual(BuildStatus.FAILEDTOUPLOAD, self.build.status)
self.assertResultCount(0, "failed")
self.assertIdentical(None, self.build.buildqueue_record)
d = self.behavior.handleStatus(
self.build.buildqueue_record, 'OK',
{'filemap': {'/tmp/myfile.py': 'test_file_hash'}})
return d.addCallback(got_status)
def test_handleStatus_OK_relative_filepath(self):
# A filemap that tries to write to files outside of
# the upload directory will result in a failed upload.
def got_status(ignored):
self.assertEqual(BuildStatus.FAILEDTOUPLOAD, self.build.status)
self.assertResultCount(0, "failed")
d = self.behavior.handleStatus(
self.build.buildqueue_record, 'OK',
{'filemap': {'../myfile.py': 'test_file_hash'}})
return d.addCallback(got_status)
def test_handleStatus_OK_sets_build_log(self):
# The build log is set during handleStatus.
self.assertEqual(None, self.build.log)
d = self.behavior.handleStatus(
self.build.buildqueue_record, 'OK',
{'filemap': {'myfile.py': 'test_file_hash'}})
def got_status(ignored):
self.assertNotEqual(None, self.build.log)
return d.addCallback(got_status)
def _test_handleStatus_notifies(self, status):
# An email notification is sent for a given build status if
# notifications are allowed for that status.
expected_notification = (
status in self.behavior.ALLOWED_STATUS_NOTIFICATIONS)
def got_status(ignored):
if expected_notification:
self.failIf(
len(pop_notifications()) == 0,
"No notifications received")
else:
self.failIf(
len(pop_notifications()) > 0,
"Notifications received")
d = self.behavior.handleStatus(
self.build.buildqueue_record, status, {})
return d.addCallback(got_status)
def test_handleStatus_DEPFAIL_notifies(self):
return self._test_handleStatus_notifies("DEPFAIL")
def test_handleStatus_CHROOTFAIL_notifies(self):
return self._test_handleStatus_notifies("CHROOTFAIL")
def test_handleStatus_PACKAGEFAIL_notifies(self):
return self._test_handleStatus_notifies("PACKAGEFAIL")
def test_handleStatus_ABORTED_cancels_cancelling(self):
self.build.updateStatus(BuildStatus.CANCELLING)
def got_status(ignored):
self.assertEqual(
0, len(pop_notifications()), "Notifications received")
self.assertEqual(BuildStatus.CANCELLED, self.build.status)
d = self.behavior.handleStatus(
self.build.buildqueue_record, "ABORTED", {})
return d.addCallback(got_status)
def test_handleStatus_ABORTED_recovers_building(self):
self.builder.vm_host = "fake_vm_host"
self.behavior = self.interactor.getBuildBehavior(
self.build.buildqueue_record, self.builder, self.slave)
self.build.updateStatus(BuildStatus.BUILDING)
def got_status(ignored):
self.assertEqual(
0, len(pop_notifications()), "Notifications received")
self.assertEqual(BuildStatus.NEEDSBUILD, self.build.status)
self.assertEqual(1, self.builder.failure_count)
self.assertEqual(1, self.build.failure_count)
self.assertIn("clean", self.slave.call_log)
d = self.behavior.handleStatus(
self.build.buildqueue_record, "ABORTED", {})
return d.addCallback(got_status)
@defer.inlineCallbacks
def test_handleStatus_ABORTED_cancelling_sets_build_log(self):
# If a build is intentionally cancelled, the build log is set.
self.assertEqual(None, self.build.log)
self.build.updateStatus(BuildStatus.CANCELLING)
yield self.behavior.handleStatus(
self.build.buildqueue_record, "ABORTED", {})
self.assertNotEqual(None, self.build.log)
def test_date_finished_set(self):
# The date finished is updated during handleStatus_OK.
self.assertEqual(None, self.build.date_finished)
d = self.behavior.handleStatus(
self.build.buildqueue_record, 'OK',
{'filemap': {'myfile.py': 'test_file_hash'}})
def got_status(ignored):
self.assertNotEqual(None, self.build.date_finished)
return d.addCallback(got_status)
|
agpl-3.0
| 1,663,565,479,296,469,000 | 37.752577 | 78 | 0.667376 | false |
rackerlabs/qonos
|
qonos/openstack/common/eventlet_backdoor.py
|
1
|
4764
|
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import errno
import gc
import os
import pprint
import socket
import sys
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from oslo_config import cfg
from qonos.openstack.common._i18n import _LI
from qonos.openstack.common import log as logging
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
'%(help)s' %
{'range': port_range, 'ex': ex, 'help': help_msg})
super(EventletBackdoorConfigValueError, self).__init__(msg)
self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print(i, gt)
traceback.print_stack(gt.gr_frame)
print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range, ex,
help_for_backdoor_port)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:
return None
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
# since it won't interact poorly with gettext, and it's easier to
# read the output too.
def displayhook(val):
if val is not None:
pprint.pprint(val)
sys.displayhook = displayhook
sock = _listen('localhost', start_port, end_port, eventlet.listen)
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port
|
apache-2.0
| 620,482,699,583,324,400 | 31.855172 | 78 | 0.645466 | false |
MJuddBooth/pandas
|
pandas/tests/reshape/test_reshape.py
|
1
|
25248
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
from collections import OrderedDict
import numpy as np
from numpy import nan
import pytest
from pandas.compat import u
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, Series, get_dummies
from pandas.core.sparse.api import SparseArray, SparseDtype
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
class TestGetDummies(object):
@pytest.fixture
def df(self):
return DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
@pytest.fixture(params=['uint8', 'i8', np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=['dense', 'sparse'])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == 'sparse'
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype='object')
def test_basic(self, sparse, dtype):
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype))
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0.0)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
def test_basic_types(self, sparse, dtype):
# GH 10531
s_list = list('abc')
s_series = Series(s_list)
s_df = DataFrame({'a': [0, 1, 0, 1, 2],
'b': ['A', 'A', 'B', 'C', 'C'],
'c': [2, 3, 3, 3, 2]})
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list('abc'))
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
expected = expected.apply(SparseArray, fill_value=fill_value)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_df, columns=s_df.columns,
sparse=sparse, dtype=dtype)
if sparse:
dtype_name = 'Sparse[{}, {}]'.format(
self.effective_dtype(dtype).name,
fill_value
)
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
tm.assert_series_equal(result.get_dtype_counts(), expected)
result = get_dummies(s_df, columns=['a'], sparse=sparse, dtype=dtype)
expected_counts = {'int64': 1, 'object': 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
tm.assert_series_equal(result.get_dtype_counts().sort_index(),
expected)
def test_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=['A'])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ['A']
def test_include_na(self, sparse, dtype):
s = ['a', 'b', np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0.0)
assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame({nan: [0, 0, 1],
'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
exp_na = exp_na.reindex(['a', 'b', nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
if sparse:
exp_na = exp_na.apply(pd.SparseArray, fill_value=0.0)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True,
sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(Series(1, index=[0]), columns=[nan],
dtype=self.effective_dtype(dtype))
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = 'e'
eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
s = [e, eacute, eacute]
res = get_dummies(s, prefix='letter', sparse=sparse)
exp = DataFrame({'letter_e': [1, 0, 0],
u('letter_%s') % eacute: [0, 1, 1]},
dtype=np.uint8)
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, sparse=sparse)
expected = DataFrame({'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
if sparse:
expected = pd.DataFrame({
"A_a": pd.SparseArray([1, 0, 1], dtype='uint8'),
"A_b": pd.SparseArray([0, 1, 0], dtype='uint8'),
"B_b": pd.SparseArray([1, 1, 0], dtype='uint8'),
"B_c": pd.SparseArray([0, 0, 1], dtype='uint8'),
})
assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3],
'A_a': arr([1, 0, 1], dtype=typ),
'A_b': arr([0, 1, 0], dtype=typ),
'B_b': arr([1, 1, 0], dtype=typ),
'B_c': arr([0, 0, 1], dtype=typ)})
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ['from_A', 'from_B']
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
cols = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected = expected[['C'] + cols]
typ = pd.SparseArray if sparse else pd.Series
expected[cols] = expected[cols].apply(lambda x: typ(x))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix='bad', sparse=sparse)
bad_columns = ['bad_a', 'bad_b', 'bad_b', 'bad_c']
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['C'] + bad_columns,
dtype=np.uint8)
expected = expected.astype({"C": np.int64})
if sparse:
# work around astyping & assigning with duplicate columns
# https://github.com/pandas-dev/pandas/issues/14427
expected = pd.concat([
pd.Series([1, 2, 3], name='C'),
pd.Series([1, 0, 1], name='bad_a', dtype='Sparse[uint8]'),
pd.Series([0, 1, 0], name='bad_b', dtype='Sparse[uint8]'),
pd.Series([1, 1, 0], name='bad_b', dtype='Sparse[uint8]'),
pd.Series([0, 0, 1], name='bad_c', dtype='Sparse[uint8]'),
], axis=1)
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=['from_A'], columns=['A'],
sparse=sparse)
expected = DataFrame({'B': ['b', 'b', 'c'],
'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0]}, dtype=np.uint8)
expected[['C']] = df[['C']]
if sparse:
cols = ['from_A_a', 'from_A_b']
expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep='..', sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A..a': [1, 0, 1],
'A..b': [0, 1, 0],
'B..b': [1, 1, 0],
'B..c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
if sparse:
cols = ['A..a', 'A..b', 'B..b', 'B..c']
expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=['..', '__'], sparse=sparse)
expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={'A': '..', 'B': '__'},
sparse=sparse)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=['too few'], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=['bad'], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {'A': 'from_A', 'B': 'from_B'}
df = DataFrame({'C': [1, 2, 3],
'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c']})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]})
columns = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
expected[columns] = expected[columns].apply(
lambda x: pd.SparseSeries(x)
)
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True,
sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_a': arr([1, 0, 1, 0], dtype=typ),
'A_b': arr([0, 1, 0, 0], dtype=typ),
'A_nan': arr([0, 0, 0, 1], dtype=typ),
'B_b': arr([1, 1, 0, 0], dtype=typ),
'B_c': arr([0, 0, 1, 0], dtype=typ),
'B_nan': arr([0, 0, 0, 1], dtype=typ)
}).sort_index(axis=1)
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3],
'A_a': arr([1, 0, 1], dtype=typ),
'A_b': arr([0, 1, 0], dtype=typ),
'B_b': arr([1, 1, 0], dtype=typ),
'B_c': arr([0, 0, 1], dtype=typ),
'cat_x': arr([1, 0, 0], dtype=typ),
'cat_y': arr([0, 1, 1], dtype=typ)
}).sort_index(axis=1)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('get_dummies_kwargs,expected', [
({'data': pd.DataFrame(({u'ä': ['a']}))},
pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'ä']})},
pd.DataFrame({u'x_ä': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'a']}), 'prefix':u'ä'},
pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'a']}), 'prefix_sep':u'ä'},
pd.DataFrame({u'xäa': [1]}, dtype=np.uint8))])
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
# GH22084 pd.get_dummies incorrectly encodes unicode characters
# in dataframe column names
result = get_dummies(**get_dummies_kwargs)
assert_frame_equal(result, expected)
def test_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_one_level(self, sparse):
# Test the case that categorical variable only has one level.
s_list = list('aaa')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected = DataFrame(index=list('ABC'))
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_NA(self, sparse):
# Test NA handling together with drop_first
s_NA = ['a', 'b', np.nan]
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
exp = DataFrame({'b': [0, 1, 0]}, dtype=np.uint8)
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, drop_first=True,
sparse=sparse)
exp_na = DataFrame(
{'b': [0, 1, 0],
nan: [0, 0, 1]},
dtype=np.uint8).reindex(['b', nan], axis=1)
if sparse:
exp_na = exp_na.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, drop_first=True,
sparse=sparse)
exp_just_na = DataFrame(index=np.arange(1))
assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'A_b': [0, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(
self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_b': [0, 1, 0],
'B_c': [0, 0, 1],
'cat_y': [0, 1, 1]})
cols = ['A_b', 'B_c', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'B_c', 'cat_y']]
if sparse:
for col in cols:
expected[col] = pd.SparseSeries(expected[col])
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, drop_first=True,
sparse=sparse).sort_index(axis=1)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected.sort_index(axis=1)
if sparse:
for col in cols:
expected[col] = pd.SparseSeries(expected[col])
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, drop_first=True,
sparse=sparse)
expected = expected[['C', 'A_b', 'B_c']]
assert_frame_equal(result, expected)
def test_int_int(self):
data = Series([1, 2, 1])
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=[1, 2],
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(pd.Categorical(['a', 'b', 'a']))
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=pd.Categorical(['a', 'b']),
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
def test_int_df(self, dtype):
data = DataFrame(
{'A': [1, 2, 1],
'B': pd.Categorical(['a', 'b', 'a']),
'C': [1, 2, 1],
'D': [1., 2., 1.]
}
)
columns = ['C', 'D', 'A_1', 'A_2', 'B_a', 'B_b']
expected = DataFrame([
[1, 1., 1, 0, 1, 0],
[2, 2., 0, 1, 0, 1],
[1, 1., 1, 0, 1, 0]
], columns=columns)
expected[columns[2:]] = expected[columns[2:]].astype(dtype)
result = pd.get_dummies(data, columns=['A', 'B'], dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_preserve_categorical_dtype(self, dtype):
# GH13854
for ordered in [False, True]:
cat = pd.Categorical(list("xy"), categories=list("xyz"),
ordered=ordered)
result = get_dummies(cat, dtype=dtype)
data = np.array([[1, 0, 0], [0, 1, 0]],
dtype=self.effective_dtype(dtype))
cols = pd.CategoricalIndex(cat.categories,
categories=cat.categories,
ordered=ordered)
expected = DataFrame(data, columns=cols,
dtype=self.effective_dtype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('sparse', [True, False])
def test_get_dummies_dont_sparsify_all_columns(self, sparse):
# GH18914
df = DataFrame.from_dict(OrderedDict([('GDP', [1, 2]),
('Nation', ['AB', 'CD'])]))
df = get_dummies(df, columns=['Nation'], sparse=sparse)
df2 = df.reindex(columns=['GDP'])
tm.assert_frame_equal(df[['GDP']], df2)
def test_get_dummies_duplicate_columns(self, df):
# GH20839
df.columns = ["A", "A", "A"]
result = get_dummies(df).sort_index(axis=1)
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['A', 'A_a', 'A_b', 'A_b', 'A_c'],
dtype=np.uint8).sort_index(axis=1)
expected = expected.astype({"A": np.int64})
tm.assert_frame_equal(result, expected)
class TestCategoricalReshape(object):
def test_reshaping_multi_index_categorical(self):
# construct a MultiIndexed DataFrame formerly created
# via `tm.makePanel().to_frame()`
cols = ['ItemA', 'ItemB', 'ItemC']
data = {c: tm.makeTimeDataFrame() for c in cols}
df = pd.concat({c: data[c].stack() for c in data}, axis='columns')
df.index.names = ['major', 'minor']
df['str'] = 'foo'
dti = df.index.levels[0]
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(dti))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=dti)
tm.assert_frame_equal(result, expected)
class TestMakeAxisDummies(object):
def test_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cidx = pd.CategoricalIndex(list("xyz"), ordered=ordered)
midx = pd.MultiIndex(levels=[['a'], cidx],
codes=[[0, 0], [0, 1]])
df = DataFrame([[10, 11]], index=midx)
expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
index=midx, columns=cidx)
from pandas.core.reshape.reshape import make_axis_dummies
result = make_axis_dummies(df)
tm.assert_frame_equal(result, expected)
result = make_axis_dummies(df, transform=lambda x: x)
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
| -5,854,816,796,630,431,000 | 39.319489 | 79 | 0.486846 | false |
BinMatrix/camshift_ros
|
scripts/camshift_node.py
|
1
|
9812
|
#!/usr/bin/env python
'''
Camshift node
================
This is a ROS node that demonstrates mean-shift (CAMShift) based tracking.
You select a colored object, such as your face, and it tracks it.
It subscribes to the "/image" topic to read images,
and publishes the target information to the "/TargetPositionSize"
or "/roi" topic.
The position and size have been normalized in "/TargetPositionSize".
http://www.robinhewitt.com/research/track/camshift.html
Usage:
------
To initialize tracking, select the object with the mouse
Keys:
-----
ESC/q - exit
b - toggle back-projected probability visualization
s - save roi to file
l - load roi from file to calculate hist
'''
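# Example invocation (a sketch; the package name and camera topic are assumed,
# adjust them to your setup):
#   rosrun camshift_ros camshift_node.py /image:=/camera/image_raw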
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2
import time
import os
# debug with pudb
# import pudb; pu.db
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
# from mav_msgs.msg import TargetPositionSize
from sensor_msgs.msg import Image, RegionOfInterest, CameraInfo
class App:
def __init__(self):
self.roi_file = os.path.expanduser("~/roi.jpg")
cv2.namedWindow('camshift', 1)
cv2.setMouseCallback('camshift', self.onmouse)
self.frame = None
self.vis = None
self.vis_roi = None
self.selection = None
self.drag_start = None
self.show_backproj = False
self.track_window = None
self.track_box = None #rotated rect
self.expand_ratio = 0.2
self.hist = None
self.last_track = None
self.fps = 0
self.fps_values = list()
self.fps_n_values = 10
self.time_star = time.time()
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber(
"/image", Image, self.callback)
# self.target_pub = rospy.Publisher(
# "/TargetPositionSize", TargetPositionSize)
self.roi_pub = rospy.Publisher("roi", RegionOfInterest)
def onmouse(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
self.track_window = None
if event == cv2.EVENT_LBUTTONUP:
self.drag_start = None
self.track_window = self.selection
if self.drag_start:
xmin = min(x, self.drag_start[0])
ymin = min(y, self.drag_start[1])
xmax = max(x, self.drag_start[0])
ymax = max(y, self.drag_start[1])
self.selection = (xmin, ymin, xmax - xmin + 1, ymax - ymin + 1)
def show_hist(self):
bin_count = self.hist.shape[0]
bin_w = 24
img = np.zeros((256, bin_count * bin_w, 3), np.uint8)
for i in xrange(bin_count):
h = int(self.hist[i])
cv2.rectangle(img, (i * bin_w + 2, 255), ((i + 1) * bin_w -
2, 255 - h), (int(180.0 * i / bin_count), 255, 255), -1)
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
cv2.imshow('hist', img)
def show_hist_new(self):
bin_count = self.hist.shape[0]
bin_w = 1
img = np.zeros((256, bin_count * bin_w, 3), np.uint8)
for i in xrange(bin_count):
h = int(self.hist[i])
cv2.rectangle(img, (i * bin_w, 255), ((i + 1) * bin_w,
255 - h), (int(180.0 * i / bin_count), 255, 255), -1)
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
cv2.imshow('hist', img)
def expand_window(self, last_track):
x, y, w, h = last_track
row, col = self.frame.shape[:2]
n_x0 = np.maximum(0, x - int(w * self.expand_ratio) - 1)
n_y0 = np.maximum(0, y - int(h * self.expand_ratio) - 1)
n_w = np.minimum(col, w + int(w * self.expand_ratio * 2) + 1)
n_h = np.minimum(row, h + int(h * self.expand_ratio * 2) + 1)
return (n_x0, n_y0, n_w, n_h)
def cvBox2D_to_cvRect(self, roi):
try:
if len(roi) == 3:
(center, size, angle) = roi
pt1 = (
int(center[0] - size[0] / 2), int(center[1] - size[1] / 2))
pt2 = (
int(center[0] + size[0] / 2), int(center[1] + size[1] / 2))
rect = [pt1[0], pt1[1], pt2[0] - pt1[0], pt2[1] - pt1[1]]
else:
rect = list(roi)
except:
return [0, 0, 0, 0]
return rect
def publish_target(self):
target = TargetPositionSize()
height, width = self.frame.shape[:2]
x, y, w, h = self.track_window
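# Normalize as described in the module docstring: the center is mapped to
# [-1, 1] (x to the right, y upward) and the size to [0, 1] as a fraction
# of the frame dimensions.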
target.center_x = (x + w / 2.0) / width * 2 - 1
target.center_y = 1 - (y + h / 2.0) / height * 2
target.size_x = float(w) / width
target.size_y = float(h) / height
self.target_pub.publish(target)
def publish_roi(self):
roi_box = self.track_window
# roi_box = self.track_box
try:
roi_box = self.cvBox2D_to_cvRect(roi_box)
except:
return
# Watch out for negative offsets
roi_box[0] = max(0, roi_box[0])
roi_box[1] = max(0, roi_box[1])
try:
roi = RegionOfInterest()
roi.x_offset = int(roi_box[0])
roi.y_offset = int(roi_box[1])
roi.width = int(roi_box[2])
roi.height = int(roi_box[3])
self.roi_pub.publish(roi)
except:
rospy.loginfo("Publishing ROI failed")
def display_fps(self):
time_end = time.time()
img_fps = int(1 / (time_end - self.time_star))
self.time_star = time_end
self.fps_values.append(img_fps)
if len(self.fps_values) > self.fps_n_values:
self.fps_values.pop(0)
self.fps = int(sum(self.fps_values) / len(self.fps_values))
cv2.putText(self.vis, "FPS: " + str(self.fps), (10, 25),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0))
def callback(self, data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
return
self.frame = np.array(cv_image, dtype=np.uint8)
self.vis = self.frame.copy()
hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(
hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
if self.selection:
x0, y0, w, h = self.selection
hsv_roi = hsv[y0:y0 + h, x0:x0 + w]
mask_roi = mask[y0:y0 + h, x0:x0 + w]
self.hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
# self.hist = cv2.calcHist([hsv_roi], [0], mask_roi, [360], [0, 180])
cv2.normalize(self.hist, self.hist, 0, 255, cv2.NORM_MINMAX)
self.hist = self.hist.reshape(-1)
self.show_hist()
# self.show_hist_new()
self.vis_roi = self.vis[y0:y0 + h, x0:x0 + w]
cv2.bitwise_not(self.vis_roi, self.vis_roi)
# highlight befitting object when selecting
# self.vis[mask == 0] = 0
if self.track_window:
# lost the target, expand last valid track window
if self.track_window == (0, 0, 0, 0):
self.track_window = self.expand_window(self.last_track)
# print("Re-search at : ", self.track_window)
self.last_track = self.track_window
self.selection = None
prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
prob &= mask
term_crit = (
cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
self.track_box, self.track_window = cv2.CamShift(
prob, self.track_window, term_crit)
# publish position and size of target, has been normalized.
# self.publish_target()
self.publish_roi()
if self.show_backproj:
self.vis[:] = prob[..., np.newaxis]
try:
cv2.ellipse(self.vis, self.track_box, (0, 0, 255), 2)
except:
print(self.track_box)
# Compute the FPS and display in image
self.display_fps()
cv2.imshow('camshift', self.vis)
ch = 0xFF & cv2.waitKey(1)
if ch == 27 or ch == ord('q'):
os._exit(0)
if ch == ord('b'):
self.show_backproj = not self.show_backproj
if ch == ord('s'):
if self.track_window is None:
print("There has no tracked object!")
return
x, y, w, h = self.track_window
cv2.imwrite(self.roi_file, self.frame[y:y+h, x:x+w])
print("Saved to ", self.roi_file)
if ch == ord('l'):
if not os.path.isfile(self.roi_file):
print(self.roi_file, " does not exist!")
return
roi = cv2.imread(self.roi_file)
print("Loaded from ", self.roi_file)
roi_hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
roi_mask = cv2.inRange(
roi_hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
self.hist = cv2.calcHist([roi_hsv], [0], roi_mask, [16], [0, 180])
cv2.normalize(self.hist, self.hist, 0, 255, cv2.NORM_MINMAX)
self.hist = self.hist.reshape(-1)
self.show_hist()
row, col = self.frame.shape[:2]
self.track_window = (0, 0, col, row)
if __name__ == '__main__':
rospy.init_node('camshift', anonymous=True)
cs = App()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
|
gpl-3.0
| -7,288,317,239,279,681,000 | 35.073529 | 110 | 0.528027 | false |
rafallo/p2c
|
torrent/torrent.py
|
1
|
10334
|
# -*- coding: utf-8 -*-
import hashlib
import libtorrent as lt
import logging
from threading import Timer, Event
import os
import time
from p2c.exceptions import SessionNotBindedException, TorrentHasNotMetadataYet
import settings
from torrent.movie import Movie
SOURCE_TYPES = ("MAGNET", "TORRENT")
logger = logging.getLogger(__name__)
class Torrent(object):
def __init__(self, source_type, source, name):
"""
:type source: str magnet or torrent file path
:type name: str
:type source_type: str
"""
if not source_type in SOURCE_TYPES:
raise Exception(
"source_type must be one of {0}".format(SOURCE_TYPES))
self.name = name
self.source_type = source_type
self.source = source
self.torrent_handler = None
self._torrent_info = None
# dict where key is path and value is Movie instance
# this is files which are downloading or downloaded
self.files = None
# piece_length in this torrent
self.piece_length = None
# amount of pieces which made up DOWNLOAD_PIECE_SIZE
self._jump = None
# if first prioritizing task was run once
self._prioritized = False
self.priority_interval = settings.PRIORITY_INTERVAL
self._priority_thread_stop = Event()
self._priority_timer = None
# currently downloading Movie
self._downloading = None
def __del__(self):
self._stop_torrent_threads()
def __str__(self):
return self.name
def set_source(self, source, session):
self.source = source
if self.source:
self.bind_session(session)
def bind_session(self, session):
"""
Creates torrent handler based on source_type
"""
add_data = {}
if self.source_type == "TORRENT":
add_data['ti'] = lt.torrent_info(self.source)
elif self.source_type == "MAGNET":
add_data['url'] = self.source
add_data['save_path'] = self._get_download_dir()
add_data['storage_mode'] = lt.storage_mode_t(1)
self.torrent_handler = session.add_torrent(add_data)
self._prioritize_to_none()
def get_filelist(self):
info = self.get_torrent_info(wait=True)
return [file.path for file in info.files()]
def get_movies_filelist(self):
if self.files is None:
self._create_movies()
return list(self.files.keys())
def get_movies(self):
if self.files is None:
self._create_movies()
return list(self.files.values())
def download_file(self, filename:str):
if not filename in self.get_movies_filelist():
raise Exception("filename not found in torrent")
self._prioritize_to_none()
self._downloading = self.files[filename]
self._run_torrent_threads()
def pause_download(self):
self._stop_torrent_threads()
self.torrent_handler.pause()
self._downloading = None
def has_torrent_info(self):
"""
Checks if torrent has downloaded metadata
"""
try:
self.get_torrent_info()
return True
except (TorrentHasNotMetadataYet, SessionNotBindedException):
return False
def get_torrent_info(self, wait=False):
"""
Gets torrent's metadata
"""
if self._torrent_info is not None:
return self._torrent_info
if self.torrent_handler is None:
if wait:
while self.torrent_handler is None:
time.sleep(0.1)
else:
raise SessionNotBindedException
if not self.torrent_handler.has_metadata():
if wait:
while not self.torrent_handler.has_metadata():
time.sleep(0.1)
else:
raise TorrentHasNotMetadataYet
self._torrent_info = self.torrent_handler.get_torrent_info()
return self._torrent_info
def get_status(self):
"""
Gets the torrent's status with fields like download rate, number of peers,
state and progress level.
"""
status = self.torrent_handler.status()
state_str = ['queued', 'checking', 'downloading metadata',
'downloading', 'finished', 'seeding', 'allocating',
'checking fastresume']
data = {
'download_rate': status.download_rate,
'download_payload_rate': status.download_payload_rate,
'num_peers': status.num_peers,
'state': state_str[status.state],
'progress': status.progress
}
return data
def get_seconds_to_buffer(self):
rate = self.get_status()['download_rate']
if(rate > 100 * 1024):
# round to 100 kbs, 200 kbs, 300 kbs
rate = int(rate / (100 * 1024)) * 100 * 1024
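# e.g. (illustrative numbers) a measured 257 KiB/s is rounded down to
# 200 KiB/s here, so the buffering estimate below is conservative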
movie = self.get_downloading_movie()
# minimum rate
if movie and rate > 30 * 1024:
return int(movie.pieces_to_play * movie.piece_length / rate)
def get_downloading_movie(self):
return self._downloading
def _create_movies(self):
info = self.get_torrent_info()
files = info.files()
self.piece_length = info.piece_length()
self.priority_interval = settings.PRIORITY_INTERVAL * self.piece_length / (
1024 ** 2)
self._jump = int(settings.DOWNLOAD_PIECE_SIZE / self.piece_length) + 1
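# e.g. (assumed values) DOWNLOAD_PIECE_SIZE = 4 MiB and piece_length = 1 MiB
# give _jump = 5, i.e. priorities are managed in blocks of 5 pieces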
self.files = {}
for file in files:
ext = os.path.splitext(file.path)[1]
if ext and ext[1:].lower() in settings.SUPPORTED_MOVIE_EXTENSIONS:
first_piece = int(file.offset / self.piece_length)
last_piece = int((file.size + file.offset) / self.piece_length)
self.files[file.path] = Movie(path=file.path,
size=file.size, first_piece=first_piece,
last_piece=last_piece,
piece_length=self.piece_length,
download_dir=self._get_download_dir())
def _update_movies_progress(self):
"""
Updates movie progress based on number of downloaded pieces
"""
p_downloaded = self.torrent_handler.status().pieces
movie = self.get_downloading_movie()
first_piece, last_piece = movie.first_piece, movie.last_piece
# logger.debug("first_piece: {}".format(first_piece))
# logger.debug("last_piece: {}".format(last_piece ))
counter = 0
for item in p_downloaded[first_piece:last_piece]:
if item == True:
counter += 1
else:
break
# logger.debug("download_pieces inside thread is: {}".format(counter))
movie.downloaded_pieces = counter
def _manage_pieces_priority(self):
"""
Sets priority blocks. The first pieces should be downloaded first, so they
have the highest priority.
"""
p_downloaded = self.torrent_handler.status().pieces
movie = self.get_downloading_movie()
if not movie:
return
first_piece, last_piece = movie.cur_first_piece, movie.cur_last_piece
if not False in p_downloaded[first_piece:first_piece + self._jump + 1]:
# all block downloaded
first_piece += self._jump
movie.cur_first_piece = first_piece
# prioritizing
# [7, 7, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...]
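# Sliding-window scheme (describing the three loops below): the next
# 2 * _jump pieces from cur_first_piece get top priority (7), the following
# 2 * _jump pieces get low priority (2), and everything beyond that is
# disabled (0) until playback catches up.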
if first_piece + self._jump + self._jump <= last_piece:
for piece in range(first_piece + 4 * self._jump,
last_piece + 1):
# logger.debug("the lowest priority for: {}".format(piece))
self.torrent_handler.piece_priority(piece, 0)
if first_piece + self._jump <= last_piece:
for piece in range(first_piece + 2 * self._jump,
min(last_piece + 1, first_piece + 4 * self._jump)):
# logger.debug("low priority for: {}".format(piece))
self.torrent_handler.piece_priority(piece, 2)
if first_piece <= last_piece:
for piece in range(first_piece,
min(last_piece + 1, first_piece + 2 * self._jump)):
# logger.debug("the highest priority for: {}".format(piece))
self.torrent_handler.piece_priority(piece, 7)
# for mp4 get 512KB end of file
# TODO: bug below
# for piece in range(
# last_piece - int(self.piece_length / 512 * 1024) + 1,
# last_piece):
# logger.debug("the highest priority for (512KB end of file): {}".format(piece))
# self.torrent_handler.piece_priority(piece, 7)
self._update_movies_progress()
if not self._priority_thread_stop.is_set():
if self._priority_timer:
self._priority_timer.cancel()
self._priority_timer = None
self._run_torrent_threads()
def _run_torrent_threads(self):
# logger.debug("run threads for {}".format(self.priority_interval))
if not self._priority_thread_stop.is_set():
if not self._priority_timer:
self._priority_timer = Timer(self.priority_interval,
self._manage_pieces_priority)
self._priority_timer.start()
def _stop_torrent_threads(self):
self._priority_thread_stop.set()
if self._priority_timer:
self._priority_timer.cancel()
def _prioritize_to_none(self):
if not self._prioritized and self.has_torrent_info():
self._prioritized = True
info = self.get_torrent_info()
for piece in range(0, info.num_pieces()):
self.torrent_handler.piece_priority(piece, 0)
def _get_download_dir(self):
path = os.path.join(settings.DOWNLOAD_DIR,
hashlib.md5(self.name.encode()).hexdigest())
try:
os.makedirs(path)
except OSError:
pass
return path
|
mit
| 2,340,498,763,809,516,500 | 35.259649 | 111 | 0.561641 | false |
dhaitz/CalibFW
|
plotting/modules/plot_sandbox.py
|
1
|
75936
|
# -*- coding: utf-8 -*-
"""
plotting sandbox module for merlin.
This module is to be used for testing or development work.
"""
import plotbase
import copy
import plot1d
import getroot
import math
import plotresponse
import plotfractions
import plot2d
import plot_tagging
import fit
import os
def recogen_alpha_ptbins(files, opt):
""" recogen vs alpha as well as Z pT vs alpha in pT bins. """
zptbins = [
"1",
"zpt>30 && zpt<50",
"zpt>50 && zpt<70",
"zpt>70 && zpt<120",
"zpt>120"
]
texts = [
"$\mathrm{inclusive}$",
"$30 < \mathrm{Z} p_\mathrm{T} < 50\ \mathrm{GeV}$",
"$50 < \mathrm{Z} p_\mathrm{T} < 70\ \mathrm{GeV}$",
"$70 < \mathrm{Z} p_\mathrm{T} < 120\ \mathrm{GeV}$",
"$\mathrm{Z}\ p_\mathrm{T} > 120\ \mathrm{GeV}$",
]
    fig, axes = plotbase.newPlot(subplots = len(zptbins) * 2, subplots_X = len(zptbins))
settings = plotbase.getSettings(opt, quantity='recogen_alpha')
for ax1, ax2, selection, text in zip(axes[:(len(axes)/2)], axes[(len(axes)/2):], zptbins, texts):
plot1d.datamcplot("recogen_alpha", files, opt, fig_axes = [fig, ax1],
changes={
'allalpha': True,
'y': [0.99, 1.1],
'subplot': True,
'nbins': 6,
'fit': 'slope',
'x': [0, 0.3],
'text': text,
'selection': [selection],
}
)
plot1d.datamcplot("zpt_alpha", files, opt, fig_axes = [fig, ax2],
changes={
'allalpha': True,
'y': [0, 300],
'subplot': True,
'nbins': 6,
'x': [0, 0.3],
'text': text,
'selection': [selection],
}
)
plotbase.Save(fig, settings)
def corrs(files, opt):
fig, ax = plotbase.newPlot()
settings = plotbase.getSettings(opt, quantity='recogen_genpt')
for quantity, marker, color, label in zip(
['raw/recogen_genpt', 'l1/recogen_genpt', 'l1l2l3/recogen_genpt'],
['o', 'D', '-'],
['black', '#7293cb', '#e1974c'],
['raw', 'L1', 'L1L2L3']
):
plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes={
'algorithm': "",
'markers':[marker],
'colors':[color],
'labels':[label, ""],
'correction':"",
'subplot':True,
'grid': True,
'y': [0.9, 1.5],
'legloc': 'upper right',
'x': [20, 100],
'yname': 'recogen',
'xname':'genpt'
})
settings['filename'] = plotbase.getDefaultFilename('recogen', opt, settings)
plotbase.Save(fig, settings)
def corrbins(files, opt):
fig, ax = plotbase.newPlot()
settings = plotbase.getSettings(opt, quantity='recogen')
for quantity, marker, color, label, n in zip(
['l1l2l3/recogen3040', 'l1l2l3/recogen5080', 'l1l2l3/recogen100'],
['o', 'f', '-'],
['black', '#7293cb', '#e1974c'],
            ['pT 30-40', 'pT 50-80', 'pT >100'],
range(10)
):
plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes={
'algorithm': "",
'markers':[marker],
'colors':[color],
'labels':[label, ""],
'correction':"",
'subplot':True,
'grid': True,
'fitlabel_offset':-0.07*n,
'legloc': 'upper right',
'x': [0, 2],
'xname':'recogen'
})
settings['filename'] = plotbase.getDefaultFilename('recogen-bins', opt, settings)
plotbase.Save(fig, settings)
def zmassFitted(files, opt, changes=None, settings=None):
""" Plots the FITTED Z mass peak position depending on pT, NPV, y."""
quantity = "zmass"
# iterate over raw vs corr electrons
for mode in ['raw', 'corr']:
filenames = ['work/data_ee_%s.root' % mode, 'work/mc_ee_powheg_%s.root' % mode]
files, opt = plotbase.openRootFiles(filenames, opt)
# iterate over quantities
for xq, xbins in zip(
['npv', 'zpt', 'zy'],
[
[a - 0.5 for a, b in opt.npv] + [opt.npv[-1][1] - 0.5],
opt.zbins,
[(i/2.)-2. for i in range(0, 9)],
]
):
# iterate over Z pt (inclusive/low,medium,high)
for ptregion, ptselection, ptstring in zip(["_inclusivept", "_lowpt", "_mediumpt", "_highpt"],
[
"1",
"zpt<60",
"zpt>60 && zpt < 120",
"zpt>120",
],
[
"",
"Z $p_\mathrm{T}$ < 60 GeV",
"60 < Z $p_\mathrm{T}$ < 120 GeV",
"Z $p_\mathrm{T}$ > 120 GeV",
]):
# iterate over electron eta regions
for etaregion, etaselection, etastring in zip(
["_all", "_EBEB", "_EBEE", "_EEEE"],
[
"1",
"abs(eminuseta) < 1.5 && abs(epluseta) < 1.5",
"((abs(eminuseta) < 1.5 && abs(epluseta) > 1.6) || (abs(epluseta) < 1.5 && abs(eminuseta) > 1.6))",
"abs(eminuseta) > 1.6 && abs(epluseta) > 1.6",
],
[
"",
"EB-EB",
"EB-EE & EE-EB",
"EE-EE",
]):
                    # we don't need pt-binned Z pT plots:
                    if xq == 'zpt' and ptselection != "1":
continue
rootobjects, rootobjects2 = [], []
fig = plotbase.plt.figure(figsize=[7, 10])
ax = plotbase.plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax.number = 1
ax2 = plotbase.plt.subplot2grid((3, 1), (2, 0))
ax2.number = 2
fig.add_axes(ax)
fig.add_axes(ax2)
# print the Z pt and electron eta region on the plot
ax.text(0.98, 0.98, ptstring, va='top', ha='right', transform=ax.transAxes)
ax.text(0.98, 0.9, etastring, va='top', ha='right', transform=ax.transAxes)
changes = {
'y': [90.8, 94.8],
'yname': r'$m^{\mathrm{Z}}$ (peak position from Breit-Wigner fit) / GeV',
'legloc': 'upper left',
'title': mode + " electrons",
'labels': ['Data', 'Powheg'],
}
settings = plotbase.getSettings(opt, changes=changes, quantity=quantity + "_" + xq)
# iterate over files
markers = ['o', 'D']
ys, yerrs, xs = [], [], []
for i, f in enumerate(files):
bins = xbins
y, yerr, x = [], [], []
# iterate over bins
for lower, upper in zip(bins[:-1], bins[1:]):
changes = {
'selection': ['(%s > %s && %s < %s) && (%s) && (%s)' % (xq,
lower, xq, upper, ptselection, etaselection)],
'nbins': 40,
'folder': 'zcuts',
'x': [71, 101],
}
local_settings = plotbase.getSettings(opt, changes, None, quantity)
# get the zmass, fit, get the xq distribution; append to lists
rootobjects += [getroot.histofromfile(quantity, f, local_settings, index=i)]
p0, p0err, p1, p1err, p2, p2err, chi2, ndf, conf_intervals = fit.fitline2(rootobjects[-1], breitwigner=True, limits=local_settings['x'])
y += [p1]
yerr += [p1err]
changes['x'] = [lower, upper]
local_settings = plotbase.getSettings(opt, changes, None, quantity)
rootobjects2 += [getroot.histofromfile(xq, f, local_settings, index=i)]
x += [rootobjects2[-1].GetMean()]
# fine line to indicate bin borders
ax.add_line(plotbase.matplotlib.lines.Line2D((lower, upper), (y[-1],y[-1]), color='black', alpha=0.05))
ys.append(y)
yerrs.append(yerr)
xs.append(x)
#plot
ax.errorbar(x, y, yerr, drawstyle='steps-mid', color=settings['colors'][i],
fmt=markers[i], capsize=0, label=settings['labels'][i])
# format and save
if xq == 'zpt':
settings['xlog'] = True
settings['x'] = [30, 1000]
settings['xticks'] = [30, 50, 70, 100, 200, 400, 1000]
plot1d.formatting(ax, settings, opt, [], [])
# calculate ratio values
ratio_y = [d/m for d, m in zip(ys[0], ys[1])]
ratio_yerrs = [math.sqrt((derr/d)**2 + (merr/m)**2)for d, derr, m, merr in zip(ys[0], yerrs[0], ys[1], yerrs[1])]
ratio_x = [0.5 * (d + m) for d, m in zip(xs[0], xs[1])]
#format ratio plot
ax2.errorbar(ratio_x, ratio_y, ratio_yerrs, drawstyle='steps-mid', color='black',
fmt='o', capsize=0, label='ratio')
                    ax2.axhline(1.0)
fig.subplots_adjust(hspace=0.1)
ax.set_xticklabels([])
ax.set_xlabel("")
settings['ratio'] = True
settings['legloc'] = None
settings['xynames'][1] = 'ratio'
plot1d.formatting(ax2, settings, opt, [], [])
ax2.set_ylim(0.99, 1.01)
settings['filename'] = plotbase.getDefaultFilename(quantity + "_" + xq + "_" + mode + ptregion + etaregion, opt, settings)
plotbase.Save(fig, settings)
def zmassEBEE(files, opt):
""" Plot the Z mass depending on where the electrons are reconstructed.
3 bins: EB-EB, EB-EE, EE-EE
"""
selections = [
'abs(eminuseta)<1.5 && abs(epluseta)<1.5',
'(abs(eminuseta)>1.5 && abs(epluseta)<1.5) || abs(eminuseta)<1.5 && abs(epluseta)>1.5',
'abs(eminuseta)>1.5 && abs(epluseta)>1.5',
]
filenames = ['zmass_ebeb', 'zmass_ebee', 'zmass_eeee']
titles = ['Barrel electrons only', 'One electron barrel, one endcap', 'Endcap electrons only']
for selection, filename, title in zip(selections, filenames, titles):
plot1d.plot1dratiosubplot("zmass", files, opt, changes = {
'x': [81, 101],
'selection': [selection, "hlt * (%s)" % selection],
'fit': 'bw',
'nbins': 40,
'filename': filename,
'title': title,
'folder': 'zcuts',
})
def eid(files, opt):
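    """ Electron ID studies: plot the ID discriminators (MVA, trigger MVA, loose/medium/tight) as a function of deltaR(reco, gen). """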
quantity = 'mvaid'
"""changes = {
'x': [0, 1.0001],
#'log': True,
'folder': 'electron_all',
'nbins':50,
'subplot':True,
'markers': ['f'],
}
settings = plotbase.getSettings(opt, quantity=quantity)
fig, ax = plotbase.newPlot()
for c, l, s in zip(['#236BB2', '#E5AD3D'],
['fake', 'true'],
['1', 'deltar < 0.3 && deltar>0']):
changes.update({
'labels': [l],
'colors': [c],
'selection': s,
})
plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes=changes)
settings['filename'] = plotbase.getDefaultFilename(quantity, opt, settings)
plotbase.Save(fig, settings)"""
## id vs deltar
for quantity in ["mvaid", "mvatrigid", "looseid", "mediumid", "tightid"]:
plot1d.datamcplot("%s_deltar" % quantity, files, opt, changes = {
'folder': 'electron_all',
'nbins': 50,
'xynames': ['$\Delta$R(reco, gen)', quantity],
'x': [0, 0.5],
'legloc': None,
})
def plots_2014_07_03(files, opt):
""" Plots for JEC presentation 03.07. """
#### 2D histograms
for obj, x, nbins in zip(['muon', 'jet', 'electron'],
[[-2.5, 2.5], [-5.3, 5.3]]*2,
[400, 1000, 300]):
changes = {
'out': 'out/2014_07_03',
'y': [-3.2, 3.2],
}
changes.update({
'folder': obj + "_all",
'nbins': nbins,
'x':x,
'filename': obj + '_phi_eta',
'xynames': ['%s eta' % obj,
'%s phi' % obj, obj + 's'],
})
        if obj == 'electron':
filenames = ["data_ee_noc", "mc_ee_corr_test"]
else:
filenames = ["data_noc", "mc_rundep_noc"]
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
plot2d.twoD("phi_eta", files, opt, changes = changes)
        if obj != 'electron':
changes.update({
'year': 2011,
'filename': obj + '_phi_eta_2011',
'lumi': 5.1,
'energy': 7,
})
filenames = ["data11_noc"]
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
plot2d.twoD("phi_eta", files, opt, changes = changes)
##### PU Jet ID
filenames = ["dataPUJETID", "data"]
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes = {
'normalize': False,
'ratiosubplot': 'True',
'ratiosubploty': [0.8, 1.2],
'out': 'out/2014_07_03',
'x': [30, 250],
'title': 'Data',
'labels': ['PUJetID applied', 'default'],
}
plot1d.datamcplot('zpt', files, opt, changes=changes)
for typ in ['mpf', 'ptbalance']:
plotresponse.responseratio(files, opt, over='zpt', types=[typ], changes={
'labels': ['PUJetID applied', 'default'],
'out': 'out/2014_07_03',
'x': [30, 1000],
'xlog': True,
})
##### timedep
filenames = ["data", "mc_rundep"]
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes = {
'out': 'out/2014_07_03',
'filename': "timedep",
}
timedep(files, opt, changes=changes)
###### MPF fix
filenames = [
"/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-18_10-41/out.root",
"/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-06_14-26/out.root"
]
files = [getroot.openfile(f) for f in filenames]
plotresponse.responseratio(files, opt, over='zpt', types=['mpf'], changes={
'labels': ['MCRD-fixed', 'MCRD'],
'xlog': True,
'filename': "mpf_zpt-fixed",
'out': 'out/2014_07_03',
'x': [30, 1000],
'xticks': [30, 50, 70, 100, 200, 400, 1000],
})
# mpf slopes
filenames = ["data", "mc_rundep"]
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes = {
'filename': "mpfslopes-fixed",
'labels': ['data', 'MCRD'],
'out': 'out/2014_07_03',
'allalpha': True,
'selection': 'alpha<0.3',
}
mpfslopes(files, opt, changes)
changes.update({
'filename': "mpfslopes",
'labels': ['data', 'MCRD'],
})
filenames = [
'/storage/a/dhaitz/excalibur/artus/data_2014-04-10_21-21/out.root',
'/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-06_14-26/out.root'
]
files = [getroot.openfile(f) for f in filenames]
mpfslopes(files, opt, changes)
# SYNC
os.system("rsync ${EXCALIBUR_BASE}/out/2014_07_03 ekplx26:plots -r")
def timedep(files, opt, changes = None):
""" Plots for the time dependence, requested by Mikko 2014-06-25."""
settings = plotbase.getSettings(opt, quantity="response_run", changes=changes)
fig, ax = plotbase.newPlot()
factor = 2e4
methods = ['mpf', 'ptbalance']
labels = ['MPF', '$p_T$ balance']
for q, c, l, m, in zip(methods,
settings['colors'], labels, settings['markers']):
slopes, serrs, x = [], [], []
for eta1, eta2 in zip(opt.eta[:-1], opt.eta[1:]):
changes = {
'alleta': True,
'allalpha': True,
'selection': 'alpha<0.3 && abs(jet1eta) > %s && abs(jet1eta) < %s' % (eta1, eta2),
'fit': 'slope',
}
rootobject = getroot.histofromfile("%s_run" % q, files[0], settings, changes=changes)
# get fit parameters
slope, serr = fit.fitline2(rootobject)[2:4]
slopes += [slope*factor]
serrs += [serr*factor]
changes['x'] = [0, 6]
x += [getroot.histofromfile("abs(jet1eta)", files[0], settings, changes=changes).GetMean()]
ax.errorbar(x, slopes, serrs, drawstyle='steps-mid', color=c,
fmt='o', capsize=0, label=l)
#formatting stuff
settings['x'] = [0, 5]
plotbase.setAxisLimits(ax, settings)
plotbase.labels(ax, opt, settings)
    plotbase.axislabels(ax, 'Leading jet $\eta$', 'Response vs run: linear fit slope (multiplied with 20 000)', settings=settings)
ax.set_ylim(-0.1, 0.05)
ax.set_xlim(0, 5.25)
ax.grid(True)
ax.set_xticks([float("%1.2f" % eta) for eta in opt.eta])
for label in ax.get_xticklabels():
label.set_rotation(45)
ax.axhline(0.0, color='black', linestyle='--')
    settings['filename'] = "response_run"
plotbase.Save(fig, settings)
def npuplot(files, opt):
""" Plots for the JEC paper that Mikko requested 24.4.: npv and rho in bins of npu."""
settings = plotbase.getSettings(opt, quantity='npv')
settings['x'] = [-0.5, 99.5]
settings['nbins'] = 100
tgraphs = []
for f in files:
if files.index(f) == 0: # flag bad runs in data
runs = "run!=191411 && run!=198049 && run!=198050 && run!=198063 && run!=201727 && run!=203830 && run!=203832 && run!=203833 && run!=203834 && run!=203835 && run!=203987 && run!=203992 && run!=203994 && run!=204100 && run!=204101 && run!=208509"
else:
runs = 1
npuhisto = getroot.histofromfile('nputruth', f, settings)
for i in range(100):
if npuhisto.GetBinContent(i) > 0:
npu = i
tgraph = ROOT.TGraphErrors()
for n in range(npu):
changes = {'selection': 'nputruth>%s && nputruth<%s && %s' % (n-0.5, n+0.5, runs)}
npv = getroot.histofromfile('npv', f, settings, changes=changes).GetMean()
npverr = getroot.histofromfile('npv', f, settings, changes=changes).GetMeanError()
rho = getroot.histofromfile('rho', f, settings, changes=changes).GetMean()
rhoerr = getroot.histofromfile('rho', f, settings, changes=changes).GetMeanError()
tgraph.SetPoint(n, npv, rho)
tgraph.SetPointError(n, npverr, rhoerr)
tgraphs.append(tgraph)
settings['root'] = settings['root'] or settings['filename']
getroot.saveasroot(tgraphs, opt, settings)
def electronupdate(files, opt):
"""Plots for the Zee update 26.06.2014."""
# Reco/gen electron pt vs eta
filenames = ['mc_ee_raw', 'mc_ee_corr']
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes={
'x': [0, 2.5],
'nbins': 25,
'labels': ['raw', 'corrected'],
'markers': ['o', '-'],
'colors': ['maroon', 'blue'],
'folder':'zcuts',
'y': [0.94, 1.06],
'title': 'Madgraph',
'xynames': [
            r"$|\eta_{e^{-}}|$",
r'$\mathrm{e}^{-} p_\mathrm{T}$ Reco/Gen'
]
}
plot1d.datamcplot('eminuspt/geneminuspt_abs(eminuseta)', files, opt, changes=changes)
changes={
'ratiosubplot': True,
'title': 'Madgraph',
'x': [0, 1000],
'log': True,
'labels': ['raw', 'corrected'],
'folder': 'all',
'ratiosubplotfit': 'chi2',
}
plot1d.datamcplot('zpt', files, opt, changes=changes)
#LHE information
fig, ax = plotbase.newPlot()
fig2, ax2 = plotbase.newPlot()
changes ={
'folder':'all',
'x': [-4, 4],
'y': [0, 200000],
'subplot': True,
'nbins':50,
'normalize': False,
'xynames': ['Z rapidity', 'Events'],
'log':True,
}
for q, c, m, l in zip(
['zy', 'genzy', 'lhezy'],
['black', 'lightskyblue', 'FireBrick'],
['o', 'f', '-'],
['RecoZ', 'GenZ', 'LHE-Z'],
):
changes['labels'] = [l]
changes['markers'] = [m]
changes['colors'] = [c]
plot1d.datamcplot(q, files[1:], opt, changes=changes, fig_axes=[fig, ax])
settings = plotbase.getSettings(opt, None, None, 'rapidity')
settings['filename'] = 'rapidity'
plotbase.Save(fig, settings)
# Data-MC comparisons ######################################################
# basic quantities
filenames = ['data_ee_corr', 'mc_ee_corr']
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes = {
'x': [-3, 3],
'y': [-3.2, 3.2],
'folder': 'all',
'nbins': 200,
}
plot2d.twoD('eminusphi_eminuseta', files, opt, changes=changes)
for q, c in zip(['eminuspt', 'eminuseta', 'zy', 'zpt', 'zmass'],
[
{},
{'x': [-2.5, 2.5]},
{},
{'x': [0, 500], 'log':True},
{'x': [80, 102], 'ratiosubploty':[0.9, 1.1]},
]):
changes = {
'labels': ['Data', 'Madgraph'],
'ratiosubplot': True,
'folder':'zcuts',
'nbins': 50,
}
changes.update(c)
plot1d.datamcplot(q, files, opt, changes=changes)
# scale factors
changes = {
'x': [0, 100],
'y': [0, 3],
'z': [0.8, 1.2],
'folder': 'all',
'nbins': 100,
'selection': 'sfminus>0',
'colormap': 'bwr',
}
plot2d.twoD('sfminus_abs(eminuseta)_eminuspt', files[1:], opt, changes=changes)
# zpt in rapidities
for ybin in [[i/2., (i+1)/2.] for i in range(5)]:
changes = {
'x': [0, 600],
'nbins': 30,
'folder':'zcuts',
'title': "%s < $y_Z$ < %s" % tuple(ybin),
'log': 'True',
'ratiosubplot': True,
'selection': 'abs(zy)>%s && abs(zy)<%s' % (ybin[0], ybin[1]),
'filename': ('zpt_rap-%s-%s' % (ybin[0], ybin[1])).replace('.', '_'),
}
plot1d.datamcplot('zpt', files, opt, changes=changes)
# scale factor
changes = {
'labels': ['Madgraph'],
'ratiosubplot': True,
        'xynames':['eminuspt', r"$|\eta_{e^{-}}|$"],
'folder':'all',
'x': [0, 60],
'y': [0, 3],
'colormap': 'bwr',
'z': [0.5, 1],
}
q = 'sfminus_abs(eminuseta)_eminuspt'
plot2d.twoD(q, files[1:], opt, changes=changes)
##############
# Plot for ID acceptance
fig, ax = plotbase.newPlot()
changes ={
'folder':'all',
'x': [0, 150],
'y': [0, 1],
'subplot': True,
'normalize': False,
'legloc': 'lower right',
'xynames': ['eminuspt', 'Acceptance']
}
filenames = ['mc_ee_corr_noid']
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
for q, c, m, l in zip(
['eminusidtight', 'eminusidmedium', 'eminusidloose', 'eminusidveto',
'eminusid'],
['lightskyblue', 'FireBrick', 'green', 'black', 'blue'],
['f', '_', '-', "o", "*"],
['Tight ID', 'Medium ID', 'Loose ID', "Veto ID", "MVA ID"],
):
changes['labels'] = [l]
changes['markers'] = [m]
changes['colors'] = [c]
plot1d.datamcplot("%s_eminuspt" % q, files, opt, changes=changes, fig_axes=[fig, ax])
settings = plotbase.getSettings(opt, None, None, 'id')
settings['filename'] = 'id'
settings['title'] = 'MC'
plotbase.Save(fig, settings)
def mpfslopes(files, opt, changes=None):
""" Plot the slope of a linear fit on MPF vs NPV, in Z pT bins."""
quantity="mpf_npv"
settings = plotbase.getSettings(opt, quantity=quantity, changes=changes)
settings['special_binning'] = True
print opt.zbins
fig, ax = plotbase.newPlot()
for f, c, l, m, in zip(files, settings['colors'], settings['labels'],
settings['markers']):
slopes, serrs, x = [], [], []
# iterate over Z pT bins
for ptlow, pthigh in zip(opt.zbins[:-1], opt.zbins[1:]):
changes = {'selection':'zpt>%s && zpt<%s' % (ptlow, pthigh)}
rootobject = getroot.histofromfile(quantity, f, settings, changes=changes)
# get fit parameters and mean Z pT; append to lists
slope, serr = fit.fitline2(rootobject)[2:4]
slopes += [slope]
serrs += [serr]
x += [getroot.histofromfile("zpt", f, settings, changes=changes).GetMean()]
ax.errorbar(x, slopes, serrs, drawstyle='steps-mid', color=c,
fmt='o', capsize=0, label=l)
#formatting stuff
settings['x'] = [30, 100]
plotbase.setAxisLimits(ax, settings)
plotbase.labels(ax, opt, settings)
ax.set_xscale('log')
settings['xticks'] = opt.zbins
plotbase.axislabels(ax, 'zpt', 'slope from fit on MPF vs NPV', settings=settings)
ax.set_ylim(-0.002, 0.002)
ax.grid(True)
ax.axhline(0.0, color='black', linestyle='--')
plotbase.Save(fig, settings)
def pileup(files, opt):
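    """ MPF response vs. NPV (alpha<0.3), separately for each Z pT bin. """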
for ptlow, pthigh in zip(opt.zbins[:-1], opt.zbins[1:]):
plotresponse.responseratio(files, opt, over='npv', types=['mpf'], changes={
'allalpha':True,
'selection':'alpha<0.3 && zpt>%s && zpt<%s' % (ptlow, pthigh),
'filename': "mpf_npv_%s-%s" % (ptlow, pthigh)
}
)
def emucomparison(files, opt):
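    """ Data/MC ratio of the Gauss-fitted jet response (MPF and pT balance) for Zmumu and Zee events, summarised in one plot. """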
values = []
valueerrs = []
for filenames in [['data', 'mc'], ['data_ee', 'mc_ee']]:
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
for quantity in ['mpf', 'ptbalance']:
settings = plotbase.getSettings(opt, None, None, quantity)
settings['nbins'] = 40
settings['correction'] = 'L1L2L3'
if 'ee' in filenames[0]:
if settings['selection']:
                    settings['selection'] = 'abs(epluseta)<1.0 && abs(eminuseta)<1.0 && %s' % settings['selection']
else:
                    settings['selection'] = 'abs(epluseta)<1.0 && abs(eminuseta)<1.0'
datamc = []
rootobjects = []
fitvalues = []
for f in files:
rootobjects += [getroot.histofromfile(quantity, f, settings)]
p0, p0err, p1, p1err, p2, p2err, chi2, ndf, conf_intervals = fit.fitline2(rootobjects[-1],
gauss=True, limits=[0, 2])
fitvalues += [p1, p1err]
ratio = fitvalues[0] / fitvalues[2]
ratioerr = math.sqrt(fitvalues[1] ** 2 + fitvalues[3] ** 2)
values.append(ratio)
valueerrs.append(ratioerr)
fig, ax = plotbase.newPlot()
ax.errorbar(range(4), values, valueerrs, drawstyle='steps-mid', color='black',
fmt='o', capsize=0,)
ax.set_xticks([0, 1, 2, 3])
ax.set_xticklabels(['Zmm\nMPF', 'Zmm\npT balance', 'Zee\nMPF', 'Zee\npT balance'])
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(0.96, 1.001)
ax.axhline(1.0, color='black', linestyle=':')
ax.set_ylabel('Jet response Data/MC ratio', ha="right", x=1)
plotbase.Save(fig, settings)
def electrons(files, opt):
""" Standard set of plots for the dielectron analysis. """
filenames = ['data_ee', 'mc_ee']
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
base_changes = {
'out': 'out/ee2014',
'folder': 'zcuts', # no additional restrictions on jets
'normalize': False, # no normalizing to check if the lumi reweighting works
'factor': 1., # on the fly lumi reweighting
'efficiency': 1., # no trigger reweighting for electrons
'ratiosubplot': True,
}
# zmass with fit
changes = {
'legloc': 'center right',
'nbins': 50,
'fit': 'gauss'
}
changes.update(base_changes)
plot1d.datamcplot('zmass', files, opt, changes=changes)
#electron quantities
for charge in ['plus', 'minus']:
changes = {
'x': [0, 150],
'nbins': 40,
}
changes.update(base_changes)
plot1d.datamcplot('e%spt' % charge, files, opt, changes=changes)
changes['x'] = [-2.5, 2.5]
plot1d.datamcplot('e%seta' % charge, files, opt, changes=changes)
changes['x'] = None
plot1d.datamcplot('e%sphi' % charge, files, opt, changes=changes)
changes['legloc'] = 'center right'
changes['filename'] = 'zmass_barrel'
changes['selection'] = 'abs(epluseta)<1.0 && abs(eminuseta)<1.0'
changes['title'] = '|eta(e)| < 1.0'
changes['fit'] = 'gauss'
plot1d.datamcplot('zmass', files, opt, changes=changes)
changes['filename'] = 'zmass_endcap'
changes['selection'] = 'abs(epluseta)>1.0 && abs(eminuseta)>1.0'
changes['title'] = '|eta(e)| > 1.0'
changes['fit'] = 'gauss'
plot1d.datamcplot('zmass', files, opt, changes=changes)
# Z pT in rapidity bins
rapbins = ['abs(zy)<1', 'abs(zy)>1 && abs(zy)<2', 'abs(zy)>2 && abs(zy)<3']
raplabels = ['|Y(Z)|<1', '1<|Y(Z)|<2', '2<|Y(Z)|<3']
rapname = ['0zy1', '1zy2', '2zy3']
for rbin, rlabel, rname in zip(rapbins, raplabels, rapname):
changes = {
'selection': rbin,
'filename': 'zpt-%s' % rname,
'x': [30, 750],
'log': True,
'title': rlabel,
'nbins': 40,
}
changes.update(base_changes)
plot1d.datamcplot('zpt', files, opt, changes=changes)
# npv
changes = {
'folder': 'all',
}
changes.update(base_changes)
changes['folder'] = 'all'
plot1d.datamcplot('npv', files, opt, changes=changes)
changes['noweighting'] = True
changes['factor'] = 3503.71 / 30459503 * 1000
changes['filename'] = 'npv_noweights'
plot1d.datamcplot('npv', files, opt, changes=changes)
# z pt and rapidity
changes = {
'nbins': 40,
}
changes.update(base_changes)
plot1d.datamcplot('zy', files, opt, changes=changes)
plot1d.datamcplot('zeta', files, opt, changes=changes)
changes['x'] = [30, 750]
changes['log'] = True
plot1d.datamcplot('zpt', files, opt, changes=changes)
#powheg comparison
filenames = ['data_ee', 'mc_ee', 'mc_ee_powheg']
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes = {
'log': True,
'x': [30, 750],
'nbins': 40,
'filename': 'zpt_mad-pow',
'labels': ['Data', 'Madgraph', 'Powheg'],
}
changes.update(base_changes)
plot1d.datamcplot('zpt', files, opt, changes=changes)
changes = {
'nbins': 40,
'filename': 'zmass_mad-pow',
'labels': ['Data', 'Madgraph', 'Powheg'],
}
changes.update(base_changes)
plot1d.datamcplot('zmass', files, opt, changes=changes)
files = files[::2]
filenames = filenames[::2]
changes = {
'log':True,
'x': [30, 750],
'nbins': 40,
'filename': 'zpt_pow',
'labels':['Data', 'Powheg'],
}
changes.update(base_changes)
    plot1d.datamcplot('zpt', files, opt, changes=changes)
#backgrounds
    filenames = ['data_ee', 'mc_ee', 'background_ee']
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes = {
'log': True,
'x': [30, 750],
'filename': 'zpt_backgrounds',
'labels': ['Data', 'MC', 'Backgrounds'],
'markers': ['o', 'f', 'f'],
'stacked': True,
'ratiosubplot': False,
}
changes.update(base_changes)
changes['ratiosubplot'] = False
plot1d.datamcplot('zpt', files, opt, changes=changes)
changes.pop('x', None)
changes['filename'] = 'zmass_backgrounds'
changes['log'] = False
changes['ratiosubplot'] = False
plot1d.datamcplot('zmass', files, opt, changes=changes)
# sync the plots
import subprocess
subprocess.call(['rsync out/ee2014 dhaitz@ekplx26:plots/ -u -r --progress'], shell=True)
"""
merlin 2D_zmass_zpt --files $DATAEE $ARGS -x 0 50 --nbins 100 -y 80 100 -o $OUT
merlin eemass -o $OUT --files $DATAEE $ARGS --nbins 100 -x 0 120 -C lightskyblue -m f --folder all
merlin eemass -o $OUT --files $DATAEE $ARGS --nbins 100 -x 0 15 --filename eemass_low -C lightskyblue -m f --folder all
merlin 2D_zpt_zy -o $OUT --files $DATAEE $ARGS -y 0 100 --nbins 100
"""
def an(files, opt):
""" Plots for the 2014 Z->mumu JEC AN."""
"""
#MET
for quantity in ['METpt', 'METphi']:
plot1d.datamcplot(quantity, files, opt, changes = {'title': 'CMS preliminary'})
plot1d.datamcplot("npv", files, opt, changes = {'folder': 'all', 'title': 'CMS preliminary'})
for n in ['1', '2']:
for quantity in ['pt', 'eta', 'phi']:
plot1d.datamcplot('mu%s%s' % (n, quantity), files, opt, changes = {'title': 'CMS preliminary'})
if n is '2' and quantity is 'eta':
plot1d.datamcplot('jet%s%s' % (n, quantity), files, opt, changes = {'nbins': 10, 'correction': 'L1L2L3', 'title': 'CMS preliminary'})
else:
plot1d.datamcplot('jet%s%s' % (n, quantity), files, opt, changes = {'correction': 'L1L2L3', 'title': 'CMS preliminary'})
for quantity in ['zpt', 'zeta', 'zy', 'zphi', 'zmass']:
plot1d.datamcplot(quantity, files, opt, changes = {'title': 'CMS preliminary'})
#response stuff
plotresponse.responseratio(files, opt, over='zpt', types=['mpf'],
changes={'y': [0.98, 1.03, 0.96, 1.03], 'x': [0, 400, 0, 400]})
plotresponse.responseratio(files, opt, over='jet1abseta', types=['mpf'],
changes={'y': [0.95, 1.1, 0.93, 1.1]})
plotresponse.responseratio(files, opt, over='npv', types=['mpf'],
changes={'y': [0.95, 1.05, 0.92, 1.03], 'x': [0, 35, 0, 35]})
plotresponse.responseratio(files, opt, over='zpt', types=['ptbalance'],
changes={'y': [0.93, 1.01, 0.96, 1.03], 'x': [0, 400, 0, 400]})
plotresponse.responseratio(files, opt, over='jet1abseta', types=['ptbalance'],
changes={'y': [0.91, 1.01, 0.93, 1.1]})
plotresponse.responseratio(files, opt, over='npv', types=['ptbalance'],
changes={'y': [0.91, 1.01, 0.92, 1.03], 'x': [0, 35, 0, 35]})
"""
for q in ['mpf', 'ptbalance']:
plot1d.datamcplot(q, files, opt, changes={'correction': 'L1L2L3',
'legloc': 'center right',
'nbins': 100,
'fit': 'gauss'})
plotresponse.extrapol(files, opt, changes={'save_individually': True,
'correction': 'L1L2L3'})
"""
plotfractions.fractions(files, opt, over='zpt', changes={'x': [0, 400], 'title': 'CMS preliminary'})
plotfractions.fractions(files, opt, over='jet1abseta', changes = {'title': 'CMS preliminary'})
plotfractions.fractions(files, opt, over='npv', changes = {'title': 'CMS preliminary'})
for changes in [{'rebin':10, 'title':'|$\eta^{\mathrm{jet}}$|<1.3'},
{'alleta':True, 'rebin':10,
'selection':'jet1abseta>2.5 && jet1abseta<2.964',
'title':'2.5<|$\eta^{\mathrm{jet}}$|<2.964'}]:
if 'alleta' in changes:
opt.out += '/ECOT'
opt.user_options['out'] += '/ECOT'
plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes, nbr=6)
plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes, nbr=6)
plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes, nbr=6)
plotresponse.response_run(files, opt, changes=changes)
opt.out = opt.out[:-5]
opt.user_options['out'] = opt.user_options['out'][:-5]
else:
plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes)
plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes)
plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes)
plotresponse.response_run(files, opt, changes=changes)
changes['y'] = [0.84, 1.2]
plot2d.twoD("qgtag_btag", files, opt,
changes = {'title': 'CMS Preliminary', 'nbins':50}
)
plot_tagging.tagging_response(files, opt)
plot_tagging.tagging_response_corrected(files, opt)
"""
## MCONLY
if len(files) > 1:
files = files[1:]
"""
# PF composition as function of mc flavour
flavour_comp(files, opt, changes={'title': 'CMS Simulation','mconly':True})
# response vs flavour
for var in [True, False]:
plotresponse.response_physflavour(files, opt,
changes={'title': 'CMS Simulation','mconly':True},
add_neutrinopt=var,
restrict_neutrals=var,
extrapolation=var)
plotfractions.flavour_composition(files, opt, changes={'title': 'CMS Simulation','mconly':True})
plotfractions.flavour_composition_eta(files, opt, changes={'title': 'CMS Simulation','mconly':True, 'selection': 'zpt>95 && zpt<110'})
changes = {'cutlabel' : 'ptetaalpha',
'labels' : ['Pythia 6 Tune Z2*', 'Herwig++ Tune EE3C'],
'y' : [0.98, 1.05],
'markers' : ['o', 'd'],
'colors' : ['red', 'blue'],
'title' : 'CMS Simulation',
'mconly' : True,
'legloc' : 'lower left',
'filename': 'recogen_physflavour_pythia-herwig'}
files += [getroot.openfile("/storage/a/dhaitz/excalibur/work/mc_herwig/out/closure.root")]
plot1d.datamcplot("recogen_physflavour", files, opt, changes=changes)
"""
def eleven(files, opt):
""" Summary of the plots for the response studies with 2011 rereco. """
runrange = [160000, 183000]
plot1d.datamcplot('npv', files, opt, changes={'rebin': 1})
plot1d.datamcplot('zmass', files, opt, changes={'fit': 'vertical', 'legloc': 'center right'})
plotresponse.extrapol(files, opt)
plotresponse.responseratio(files, opt, over='zpt', types=['mpf'],
changes={'y': [0.98, 1.03, 0.96, 1.03], 'uncertaintyband': True, 'x': [0, 400, 0, 400]})
plotresponse.responseratio(files, opt, over='jet1abseta', types=['mpf'],
changes={'y': [0.95, 1.1, 0.93, 1.1], 'uncertaintyband': True})
plotresponse.responseratio(files, opt, over='npv', types=['mpf'],
changes={'y': [0.95, 1.05, 0.92, 1.03], 'uncertaintyband': True, 'x': [0, 18, 0, 18]})
plotresponse.responseratio(files, opt, over='zpt', types=['ptbalance'],
changes={'y': [0.93, 1.01, 0.96, 1.03], 'x': [0, 400, 0, 400], 'uncertaintyband': True})
plotresponse.responseratio(files, opt, over='jet1abseta', types=['ptbalance'],
changes={'y': [0.91, 1.01, 0.93, 1.1], 'uncertaintyband': True})
plotresponse.responseratio(files, opt, over='npv', types=['ptbalance'],
changes={'y': [0.91, 1.01, 0.92, 1.03], 'x': [0, 18, 0, 18], 'uncertaintyband': True})
plot1d.datamcplot('npv_run', files, opt, changes={'x': runrange,
'y': [0, 15], 'run': True, 'fit': True})
plotfractions.fractions(files, opt, over='zpt', changes={'x': [0, 400]})
plotfractions.fractions(files, opt, over='jet1abseta')
plotfractions.fractions(files, opt, over='npv', changes={'x': [-0.5, 24.5]})
for changes in [{'x': runrange, 'rebin':10, 'title':'|$\eta^{\mathrm{jet}}$|<1.3'},
{'x': runrange, 'alleta':True, 'rebin':10,
'selection':'jet1abseta>2.5 && jet1abseta<2.964',
'title':'2.5<|$\eta^{\mathrm{jet}}$|<2.964'}]:
if 'alleta' in changes:
opt.out += '/ECOT'
opt.user_options['out'] += '/ECOT'
plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes, nbr=6)
plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes, nbr=6)
plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes, nbr=6)
else:
plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes)
plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes)
plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes)
changes['y'] = [0.84, 1.2]
plotresponse.response_run(files, opt, changes=changes)
def rootfile(files, opt):
"""Function for the rootfile sent to the JEC group in early August 2013."""
list_of_quantities = ['ptbalance_alpha', 'mpf_alpha',
'ptbalance', 'mpf', 'zpt', 'npv', 'zmass', 'zpt_alpha', 'npv_alpha',
'ptbalance_zpt', 'mpf_zpt',
'ptbalance_npv', 'mpf_npv',
]
for muon in [["zmumu", "1"], ["zmumu_muoncuts",
"(mupluspt>25 && muminuspt>25 && abs(mupluseta)<1.0 && abs(muminuseta)<1.0)"]]:
for alpha in [[0, "alpha<0.2", "alpha0_2"], [1, "alpha<0.3", "alpha0_3"],
[1, "alpha<0.4", "alpha0_4"]]:
for quantity in list_of_quantities:
changes = {'rebin': 1,
'out': 'out/root/',
'allalpha': True,
'root': "__".join([quantity, alpha[2]]),
'filename': muon[0],
'selection': "&&".join([alpha[1], muon[1]]),
}
if ("_zpt" in quantity) or ("_npv" in quantity):
changes['special_binning'] = True
if "alpha" in quantity:
changes['rebin'] = 10
plot1d.datamcplot(quantity, files, opt, changes=changes)
changes['ratio'] = True
changes['labels'] = ['ratio']
plot1d.datamcplot(quantity, files, opt, changes=changes)
def ineff(files, opt):
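    """ Fraction of events with no resp. exactly two matching partons, as a function of Z pT. """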
settings = plotbase.getSettings(opt, changes=None, settings=None, quantity="flavour_zpt")
fig, ax = plotbase.newPlot()
labels = ["no matching partons", "two matching partons"]
colors = ['red', 'blue']
markers = ['o', 'd']
changes = {'subplot': True,
'lumi': 0,
'xynames': ['zpt', 'physflavourfrac'],
'legloc': 'upper left',
}
for n, l, c, m in zip([0, 2], labels, colors, markers):
quantity = "(nmatchingpartons3==%s)_zpt" % n
changes['labels'] = [l]
changes['colors'] = c
changes['markers'] = m
plot1d.datamcplot(quantity, files, opt, fig_axes=(fig, ax), changes=changes, settings=settings)
settings['filename'] = plotbase.getDefaultFilename("physflavourfrac_zpt", opt, settings)
plotbase.Save(fig, settings['filename'], opt)
def flav(files, opt):
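    """ Per-flavour Z pT histograms in eta bins, for both the algorithmic and the physics flavour definition (written out via the 'root' option). """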
etabins = [0, 1.3, 2.5, 3, 3.2, 5.2]
etastrings = ['0-1_3', '1_3-2_5', '2_5-3', '3-3_2', '3_2-5_2']
flavourdefs = ["algoflavour", "physflavour"]
flavourdefinitions = ["algorithmic", "physics"]
flist = ["(flavour>0&&flavour<4)", "(flavour==1)", "(flavour==2)", "(flavour==3)",
"(flavour==4)", "(flavour==5)", "(flavour==21)", "(flavour==0)"]
q_names = ['uds', 'u', 'd', 's', 'c', 'b', 'gluon', 'unmatched']
changes = {}
############### FLAVOUR NOT 0!!!!!
# barrel:
"""changes['rebin'] = 1
changes['filename']="flavour"
changes['filename']="flavour"
for f_id, quantity in zip(['uds','c','b','gluon'], flist):
changes['root']=f_id
plot1d.datamcplot("%s_zpt" % quantity, files, opt, changes=changes)
"""
for flavourdef, flavourdefinition in zip(flavourdefs, flavourdefinitions):
# iterate over eta bins:
for filename, selection in zip(etastrings, getroot.etacuts(etabins)):
changes['filename'] = "_".join([filename, flavourdefinition])
changes['alleta'] = True
changes['selection'] = "%s && %s" % (selection,
"alpha<0.2")
changes['rebin'] = 1
for f_id, quantity in zip(q_names, flist):
changes['root'] = f_id
plot1d.datamcplot("%s_zpt" % quantity.replace("flavour",
flavourdef), files, opt, changes=changes)
def gif(files, opt):
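    """ One balance-response plot per run range (every 10th run from listofruns.runlist), e.g. to combine into a gif; relies on the listofruns module being importable. """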
local_opt = copy.deepcopy(opt)
runlist = listofruns.runlist[::10]
for run, number in zip(runlist, range(len(runlist))):
local_opt.lumi = (run - 190456) * 19500 / (209465 - 190456)
print
plotbase.plot1d.datamcplot('balresp', files, local_opt,
changes={'var': 'var_RunRange_0to%s' % run}, filename="%03d" % number)
def closure(files, opt):
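    """ Closure test: print the response chain (gen balance, extrapolation factors, reco balance, MPF) step by step. """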
def divide((a, a_err), (b, b_err)):
if (b != 0.0):
R = a / b
else:
R = 0
Rerr = R * math.sqrt((a_err / a) ** 2 + (b_err / b) ** 2)
return R, Rerr
def multiply((a, a_err), (b, b_err)):
R = a * b
Rerr = R * math.sqrt((a_err / a) ** 2 + (b_err / b) ** 2)
return R, Rerr
changes = {}
changes = plotbase.getchanges(opt, changes)
#get extrapol factors with alpha 035
#changes['var']='var_CutSecondLeadingToZPt_0_4'
#changes['correction']='L1L2L3'
balresp = (getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMeanError())
mpfresp = (getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMeanError())
genbal = (getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMeanError())
intercept, ierr, slope, serr, chi2, ndf, conf_intervals = getroot.fitline2(getroot.getobjectfromnick('ptbalance_alpha', files[0], changes, rebin=1))
balresp_extrapol = (intercept, conf_intervals[0])
extrapol_reco_factor = divide(balresp_extrapol, balresp)
intercept2, ierr2, slope2, serr2, chi22, ndf2, conf_intervals2 = getroot.fitline2(getroot.getobjectfromnick('genbalance_genalpha', files[0], changes, rebin=1))
genbal_extrapol = (intercept2, conf_intervals2[0])
extrapol_gen_factor = divide(genbal_extrapol, genbal)
intercept3, ierr3, slope3, serr3, chi23, ndf3, conf_intervals3 = getroot.fitline2(getroot.getobjectfromnick('mpf_alpha', files[0], changes, rebin=1))
mpf_extrapol = (intercept3, conf_intervals3[0])
extrapol_mpf_factor = divide(mpf_extrapol, mpfresp)
#del changes['var']
#del changes['correction']
#other quantities with alpha 02
recogen = (getroot.getobjectfromnick('recogen', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('recogen', files[0], changes, rebin=1).GetMeanError())
zresp = (getroot.getobjectfromnick('zresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('zresp', files[0], changes, rebin=1).GetMeanError())
balresp = (getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMeanError())
mpfresp = (getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMeanError())
mpfresp_raw = (getroot.getobjectfromnick('mpfresp-raw', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp-raw', files[0], changes, rebin=1).GetMeanError())
genbal = (getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMeanError())
balparton = (getroot.getobjectfromnick('balparton', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balparton', files[0], changes, rebin=1).GetMeanError())
partoncorr = divide(balparton, genbal)
format = "%1.4f"
print changes
print ""
print (r"balresp reco %s +- %s" % (format, format)) % balresp
print (r"mpf %s +- %s" % (format, format)) % mpfresp
print (r"balparton %s +- %s" % (format, format)) % balparton
print (r"zresp %s +- %s" % (format, format)) % zresp
print (r"recogen %s +- %s" % (format, format)) % recogen
print (r"extrapolReco_factor %s +- %s" % (format, format)) % extrapol_reco_factor
print (r"extrapolGen_factor %s +- %s" % (format, format)) % extrapol_gen_factor
print (r"extrapolMPF_factor %s +- %s" % (format, format)) % extrapol_mpf_factor
print (r"parton/genjet %s +- %s" % (format, format)) % divide(balparton, genbal)
print ""
print (r"pTgenjet / pTgenZ %s +- %s" % (format, format)) % genbal
genbal = multiply(genbal, extrapol_gen_factor)
print (r"* gen Level extrapolation %s +- %s" % (format, format)) % genbal
#genbal = multiply(genbal, partoncorr)
#print (r"* pTparton/pTgenjet correction %s +- %s" % (format, format) ) % genbal
#genbal = divide(genbal, balparton)
#print (r"* pTparton/pTZ correction %s +- %s" % (format, format) ) % genbal
reco_bal = divide(multiply(genbal, recogen), zresp)
print (r"* GenToReco for Jet and Z %s +- %s" % (format, format)) % reco_bal
print ""
print (r"pTrecojet / pTrecoZ %s +- %s" % (format, format)) % balresp
balresp = multiply(balresp, extrapol_reco_factor)
print (r"* reco Level extrapolation %s +- %s" % (format, format)) % balresp
print ""
print (r"MPF (typeI) %s +- %s" % (format, format)) % mpfresp
#mpfresp = divide(mpfresp, zresp)
#print (r"MPF (GenZ) %s +- %s" % (format, format) ) % mpfresp
mpfresp = multiply(mpfresp, extrapol_mpf_factor)
print (r"MPF (extrapol) %s +- %s" % (format, format)) % mpfresp
print (r"MPF (Raw) %s +- %s" % (format, format)) % mpfresp_raw
def extrapola(files, opt):
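    """ Extrapolation plot: pT balance and MPF response vs. alpha with intercept fits in one figure. """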
fig, ax = plotbase.newPlot()
changes = {}
changes['var'] = "_var_CutSecondLeadingToZPt_0_3"
local_opt = copy.deepcopy(opt)
rebin = 5
if opt.rebin is not None:
rebin = opt.rebin
plot1d.datamcplot('ptbalance_alpha', files, local_opt, legloc='upper center',
changes=changes, rebin=rebin, subplot=True,
subtext="", fig_axes=(fig, ax), fit='intercept', ratio=False)
local_opt.colors = ['red', 'maroon']
plot1d.datamcplot('mpf_alpha', files, local_opt, legloc='upper center',
changes=changes, rebin=rebin, subplot=True, xy_names=['alpha', 'response'],
subtext="", fig_axes=(fig, ax), fit='intercept', ratio=False, fit_offset=-0.1)
file_name = plotbase.getDefaultFilename("extrapolation_", opt, changes)
plotbase.Save(fig, file_name, opt)
# function for comparing old and new corrections
def comparison(datamc, opt):
"""file_names = [
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root'
]"""
colors = ['red', 'blue', 'blue', 'red']
markers = ['*', 'o', 'o', '*']
#labels = [['MC_52xFast', 'data_52xFast'], ['MC_52xOff', 'data_52xOff'], ['MC_53xFast', 'data_53xFast'], ['MC_53xOff', 'data_53xOff']]
rebin = 1
import copy
file_names = [
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root',
]
labels = [['MC_52xFast', 'data_52xFast'], ['MC_53xFast', 'data_53xFast'], ['MC_52xOff', 'data_52xOff'], ['MC_53xOff', 'data_53xOff']]
files = []
for f in file_names:
files += [getroot.openfile(f, opt.verbose)]
local_opt = copy.deepcopy(opt)
local_opt.style = markers
local_opt.colors = colors
quantity = 'L1abs_npv'
# ALL
fig, axes = plotbase.newPlot(subplots=4)
for a, f1, f2, l in zip(axes, files[::2], files[1::2], labels):
local_opt.labels = l
        plot1d.datamcplot(quantity, (f1, f2), local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, a),
rebin=rebin, subplot=True, subtext="")
filename = "L1_all__" + opt.algorithm
plotbase.Save(fig, filename, opt)
"""
#Fastjet vs Offset
fig = plotbase.plt.figure(figsize=(14,7))
axes = [fig.add_subplot(1,2,n) for n in [1,2]]
local_opt.labels = labels[0]
local_opt.colors = ['blue', 'blue']
datamcplot(quantity, (files[0], files[1]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]),
rebin=rebin, subplot=True, subtext="")
local_opt.labels = labels[1]
local_opt.colors = ['red', 'red']
datamcplot(quantity, (files[2], files[3]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]),
rebin=rebin, subplot=True, subtext="")
#53
local_opt.labels = labels[2]
local_opt.colors = ['blue', 'blue']
datamcplot(quantity, (files[4], files[5]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]),
rebin=rebin, subplot=True, subtext="")
local_opt.labels = labels[3]
local_opt.colors = ['red', 'red']
datamcplot(quantity, (files[6], files[7]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]),
rebin=rebin, subplot=True, subtext="")
filename = "L1_Fastjet_vs_Offset__"+opt.algorithm
plotbase.Save(fig, filename, opt)
#52X vs 53X
fig = plotbase.plt.figure(figsize=(14,7))
axes = [fig.add_subplot(1,2,n) for n in [1,2]]
local_opt.labels = labels[0]
local_opt.colors = ['blue', 'blue']
datamcplot(quantity, (files[0], files[1]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]),
rebin=rebin, subplot=True, subtext="")
local_opt.labels = labels[2]
local_opt.colors = ['red', 'red']
datamcplot(quantity, (files[4], files[5]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]),
rebin=rebin, subplot=True, subtext="")
local_opt.labels = labels[1]
local_opt.colors = ['blue', 'blue']
datamcplot(quantity, (files[2], files[3]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]),
rebin=rebin, subplot=True, subtext="")
#
local_opt.labels = labels[3]
local_opt.colors = ['red', 'red']
datamcplot(quantity, (files[6], files[7]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]),
rebin=rebin, subplot=True, subtext="")
filename = "L1_52X_vs_53X__"+opt.algorithm
plotbase.Save(fig, filename, opt)
import plotresponse
file_names = [
'/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root',
]
labels = [['data_52xFast', 'MC_52xFast'], [ 'data_53xFast', 'MC_53xFast'], [ 'data_52xOff', 'MC_52xOff'], ['data_53xOff', 'MC_53xOff']]
files=[]
for f in file_names:
files += [getroot.openfile(f, opt.verbose)]
for over, fit in zip(['zpt', 'jet1eta', 'npv'], [True, False, True]):
fig, axes= plotbase.newPlot(subplots=4)
fig2, axes2= plotbase.newPlot(subplots=4)
for a1, a2, f1, f2, l in zip(axes, axes2, files[::2], files[1::2], labels):
local_opt.labels = l
changes ={}# {'correction':'L1L2L3'}
plotresponse.responseplot((f1, f2), local_opt, ['bal', 'mpf'], over=over, changes=changes, figaxes=(fig,a1),
subplot=True, subtext="")
plotresponse.ratioplot((f1, f2), local_opt, ['bal', 'mpf'], over=over, changes=changes, figaxes=(fig2 ,a2), fit=fit,
subplot=True, subtext="")
filename = "Response_"+over+"_all__"+opt.algorithm
plotbase.Save(fig, filename, opt)
filename = "Ratio_"+over+"_all__"+opt.algorithm
plotbase.Save(fig2, filename, opt)"""
# function for 2d grid plots
"""def twoD_all_grid(quantity, datamc, opt):
pt_thresholds = [12, 16, 20, 24, 28, 32, 36]
var_list = ['var_JetPt_%1.fto%1.f' % (s1, s2) for (s1, s2) in zip(pt_thresholds, [1000, 1000, 1000, 1000, 1000, 1000, 1000])]
var_list_2 = getroot.npvstrings(opt.npv)
fig = plt.figure(figsize=(10.*len(var_list), 7.*len(var_list_2)))
grid = AxesGrid(fig, 111,
nrows_ncols = (len(var_list), len(var_list_2)),
axes_pad = 0.4,
share_all=True,
label_mode = "L",
#aspect = True,
#cbar_pad = 0,
#cbar_location = "right",
#cbar_mode='single',
)
for n1, var1 in enumerate(var_list):
for n2, var2 in enumerate(var_list_2):
change = {'var':var1+"_"+var2}
index = len(var_list_2)*n1 + n2
change['incut']='allevents'
twoD(quantity, datamc, opt, changes=change, fig_axes = [fig, grid[index]], subplot = True, axtitle = change['var'].replace('var_', ''))
for grid_element, var_strings in zip(grid, opt.npv):
text = r"$%s\leq\mathrm{NPV}\leq%s$" % var_strings
grid_element.text(0.5, 5.5, text, ha='center', va='center', size ='40')
for grid_element, pt_threshold in zip(grid[::len(var_list_2)], pt_thresholds):
text = r"$p_\mathrm{T}^\mathrm{Jet1}$"+"\n"+r"$\geq%s\mathrm{GeV}$" % pt_threshold
grid_element.text(-8.7, 0, text, ha='left', va='center', size ='30')
#fig.suptitle("%s leading jet $\eta-\phi$ distribution ($before$ cuts) for %s %s" % (opt.labels[0], opt.algorithm, opt.correction), size='50')
fig.suptitle("%s %s $\eta-\phi$ distribution ($before$ cuts) for %s %s" % (opt.labels[0], quantity[7:-16], opt.algorithm, opt.correction), size='30')
file_name = "grid_"+opt.labels[0]+"_"+quantity +"_"+opt.algorithm + opt.correction
fig.set_figwidth(fig.get_figwidth() * 1.2)
plotbase.Save(fig, file_name, opt, crop=False, pad=1.5)"""
def Fall12(files, opt):
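    """ Response and data/MC ratio plots (pT balance and MPF) vs. Z pT, NPV and jet eta, comparing the Summer12, Fall12 V1 and Fall12 V4 corrections. """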
local_opt = copy.deepcopy(opt)
filelist = [
['/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root']
]
labellist = [['data_Summer12', 'MC_Summer12'], ['data_Fall12V1', 'MC_Fall12V1'], ['data_Fall12V4', 'MC_Fall12V4']]
over = 'zpt'
for over in ['zpt', 'npv', 'jet1eta']:
fig = plotbase.plt.figure(figsize=[21, 14])
fig.suptitle(opt.title, size='xx-large')
for typ, row in zip(['bal', 'mpf'], [0, 4]):
for filenames, labels, col in zip(filelist, labellist, [0, 1, 2]):
ax1 = plotbase.plt.subplot2grid((7, 3), (row, col), rowspan=2)
ax2 = plotbase.plt.subplot2grid((7, 3), (row + 2, col))
fig.add_axes(ax1)
fig.add_axes(ax2)
if over == 'jet1eta' and typ == 'bal':
legloc = 'upper right'
else:
legloc = 'lower left'
local_opt.labels = labels
files = []
for f in filenames:
files += [getroot.openfile(f, opt.verbose)]
plotresponse.responseplot(files, local_opt, [typ], over=over, figaxes=(fig, ax1), legloc=legloc, subplot=True)
plotresponse.ratioplot(files, local_opt, [typ], binborders=True, fit=True, over=over, subplot=True, figaxes=(fig, ax2), ratiosubplot=True)
fig.subplots_adjust(hspace=0.05)
ax1.set_xticks([])
ax1.set_xlabel("")
ax2.set_yticks([1.00, 0.95, 0.90])
if col > 0:
ax1.set_ylabel("")
ax2.set_ylabel("")
title = "" # " Jet Response ($p_T$ balance / MPF) vs. Z $p_T$, $N_{vtx}$ , Jet $\eta$ (" +opt.algorithm+" "+opt.correction+")"
fig.suptitle(title, size='x-large')
file_name = "comparison_ALL_" + over + opt.algorithm + opt.correction
plotbase.Save(fig, file_name, opt)
def factors(files, opt):
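    """ L1 correction factor vs. NPV: one subplot for the V1 and one for the V4 Fall12 JEC, each overlaying data and MC for FastJet and Offset. """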
local_opt = copy.deepcopy(opt)
filelist = [
['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root']
]
labellist = [
['Data FastJet V1', 'MC FastJet V1', 'Data Offset V1', 'MC Offset V1'],
['Data FastJet V4', 'MC FastJet V4', 'Data Offset V4', 'MC Offset V4']]
"""filelistt = [
['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4_L1Offset/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root']
]
labellistt = ['Data FastJet V1', 'Data FastJet V4'], ['MC FastJet V1', 'MC FastJet V4'], ['Data Offset V1', 'Data Offset V4'], ['MC Offset V1','MC Offset V4'
]]
names = ['DataV1', 'MCV1', 'DataV4', 'MCV4' ]"""
files = []
#for sublist in filelist:
# rootfiles = [getroot.openfile(f, opt.verbose) for f in sublist]
# files.append( rootfiles)
for sublist in filelist:
files.append([getroot.openfile(f, opt.verbose) for f in sublist])
fit = None
rebin = 1
# for files, labellist, name in zip(files, labellist, names)
fig, axes = plotbase.newPlot(subplots=2)
quantity = 'L1abs_npv'
local_opt.style = ['o', '*', 'o', '*']
local_opt.labels = labellist[0]
local_opt.colors = ['blue', 'blue', 'red', 'red']
plot1d.datamcplot(quantity, files[0], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[0]), fit=fit,
rebin=rebin, subplot=True, subtext="")
local_opt.labels = labellist[1]
plot1d.datamcplot(quantity, files[1], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[1]), fit=fit,
rebin=rebin, subplot=True, subtext="")
file_name = "L1_comparison_" # +name
plotbase.Save(fig, file_name, opt)
def factors2(files, opt):
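    """ Like factors(), but one figure per sample (data/MC, FastJet/Offset) comparing the V1 and V4 L1 factors vs. NPV side by side, with linear fits. """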
local_opt = copy.deepcopy(opt)
filelist = [
['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4_L1Offset/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root']
]
labellistt = [['data FastJet V1', 'data FastJet V4'], ['MC FastJet V1', 'MC FastJet V4'], ['data Offset V1', 'data Offset V4'], ['MC Offset V1', 'MC Offset V4']
]
names = ['dataV1', 'MCV1', 'dataV4', 'MCV4']
files = []
for sublist in filelist:
rootfiles = [getroot.openfile(f, opt.verbose) for f in sublist]
files.append(rootfiles)
#print files
fit = 'chi2_linear'
rebin = 1
fit_offset = -0.1
for files, labellist, name in zip(files, labellistt, names):
print labellist
fig, axes = plotbase.newPlot(subplots=2)
quantity = 'L1abs_npv'
local_opt.style = ['o', '*', 'o', '*']
local_opt.labels = [labellist[0]]
local_opt.colors = ['blue', 'blue', 'red', 'red']
plot1d.datamcplot(quantity, [files[0]], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[0]), fit=fit,
rebin=rebin, fit_offset=fit_offset, subplot=True, subtext="")
local_opt.labels = [labellist[1]]
plot1d.datamcplot(quantity, [files[1]], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[1]), fit=fit,
rebin=rebin, fit_offset=fit_offset, subplot=True, subtext="")
file_name = "L1_comparison_" + name
plotbase.Save(fig, file_name, opt)
import ROOT
def allpu(files, opt, truth=True):
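    """ Compare the (true or observed) pile-up distribution in data with the raw and reweighted MC distributions, including their ratios. """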
print files
settings = plotbase.getSettings(opt, quantity='npu')
#print settings
print settings['folder']
name = "_".join([settings['folder'], settings['algorithm'] + settings['correction']])
print name, files[1]
name = name.replace("Res", "")
t = files[1].Get(name)
if not t:
        print "no tree", name
exit(1)
# raw wei data weight
if truth:
histos = [getroot.getobject("pileup", files[2])]
else:
histos = [getroot.getobject("pileup;2", files[2])]
histos[-1].Rebin(10)
print histos[-1].GetNbinsX(), "pu2"
histos[0].SetTitle("Data")
if truth:
histos += [ROOT.TH1D("mcraw", "MC", 1600, 0, 80)]
t.Project("mcraw", "nputruth")
else:
histos += [ROOT.TH1D("mcraw", "MC", 80, 0, 80)]
t.Project("mcraw", "npu")
if truth:
histos += [ROOT.TH1D("mcwei", "MC'", 1600, 0, 80)]
t.Project("mcwei", "nputruth", "weight")
else:
histos += [ROOT.TH1D("mcwei", "MC'", 80, 0, 80)]
        t.Project("mcwei", "npu", "weight")
binning = [[0, 1, 2, 3.5, 5], range(45, 80)]
for h in histos:
if h.GetNbinsX() > 1000:
h.Rebin()
if h.GetNbinsX() > 82:
print h.GetNbinsX(), ">82! in", h.GetTitle()
if not truth:
break
print "rebin:", binning
b = binning
if histos.index(h) == 1:
b = binning + [range(5, 46)]
print b
for l in b:
for a, b in zip(l[:-1], l[1:]):
x1 = h.FindBin(a)
x2 = h.FindBin(b)
sumh = sum([h.GetBinContent(i) for i in range(x1, x2)]) / (x2 - x1)
for i in range(x1, x2):
h.SetBinContent(i, sumh)
if truth:
f = histos[1].Integral() / histos[1].Integral(histos[1].FindBin(8), histos[1].FindBin(40))
for i in range(3 + 0 * len(histos)):
#histos[i].Rebin(4)
print i
ff = f / histos[i].Integral(histos[i].FindBin(8), histos[i].FindBin(40))
ff = 1.0 / histos[i].Integral()
histos[i].Scale(ff)
histos += [histos[0].Clone("dataraw")]
histos[-1].SetTitle("Data/MC")
histos[-1].Divide(histos[1])
if len(files) > 3:
histos += [getroot.getobject("pileup", files[3])]
histos[-1].SetTitle("weight")
histos += [histos[2].Clone("rawmc")]
histos[-1].Divide(histos[1])
histos[-1].SetTitle("MC'/MC")
histos += [histos[0].Clone("datamc")]
histos[-1].Divide(histos[2])
histos[-1].SetTitle("Data/MC'")
plots = [getroot.root2histo(h) for h in histos]
fig, ax, ratio = plotbase.newPlot(ratio=True)
fig = plotbase.plt.figure(figsize=[7, 10])
ax = plotbase.plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax.number = 1
ratio = plotbase.plt.subplot2grid((3, 1), (2, 0))
ratio.number = 2
fig.add_axes(ax)
fig.add_axes(ratio)
fig.subplots_adjust(hspace=0.05)
colors = ['black', 'navy', 'red', 'green']
for p, c in zip(plots[:3], colors):
ax.errorbar(p.x, p.y, label=p.title, drawstyle='steps-post', color=c, lw=1.6)
colors[1] = 'gray'
for p, c in zip(plots[3:], colors):
r = ratio.errorbar(p.x, p.y, label=p.title, drawstyle='steps-post', color=c, lw=1.6)
plotbase.labels(ax, opt, settings, settings['subplot'])
plotbase.axislabels(ax, r"$n_\mathrm{PU}", settings['xynames'][1], settings=settings)
xaxistext = r"observed number of pile-up interactions $n_\mathrm{PU}$"
if truth:
xaxistext = xaxistext.replace("observed", "true")
plotbase.axislabels(ratio, xaxistext, "ratio", settings=settings)
print ratio.number, r
plotbase.setAxisLimits(ax, settings)
plotbase.labels(ratio, opt, settings, settings['subplot'])
plotbase.setAxisLimits(ratio, settings)
#handles, labels = ratio.get_legend_handles_labels()
ratio.legend(bbox_to_anchor=[0.8, 1], loc='upper center')
ax.set_xticklabels([])
ax.set_xlabel("")
settings['filename'] = plotbase.getDefaultFilename("npus", opt, settings)
plotbase.Save(fig, settings)
def pu(files, opt):
allpu(files, opt)
def puobserved(files, opt):
allpu(files, opt, False)
|
gpl-2.0
| 1,261,465,762,515,416,000 | 40.024311 | 257 | 0.540758 | false |
baccenfutter/cpassdb
|
cpassdb/protocols/client.py
|
1
|
19784
|
"""cpassdb - Client Protocol Classes"""
__author__ = "Brian Wiborg <baccenfutter@c-base.org>"
__license__ = "GNU/GPLv2"
import os
import sys
import json
import base64
import commands
from twisted.internet import reactor
from twisted.protocols.basic import LineReceiver
class ClientProtocol(LineReceiver):
"""Abstract client protocol base-class
This class serves as an abstract base-class for all other cpassdb client
protocols. It provides common methods for all client protocols.
"""
# This variable holds the request structure.
request = {}
# In case the server encounters an error, that error will be saved into
# this instance attribute.
error = None
def connectionMade(self):
self.send_request(**self.request)
def dataReceived(self, data):
"""Callback function for incoming data.
This function should be called from ancestor functions as it respects
failure responses and handles them accordingly. Additionally, it will
load the received JSON string into a data structure and return that.
So there is always a benefit of calling this method.
:param data: str - The received line of data.
"""
if data.startswith('ERROR: '):
self.error = data
self.exit_code = 1
return self.terminate(data)
try:
return json.loads(data)
except ValueError:
print data
self.terminate("ERROR: Can not decode JSON.")
def connectionLost(self, reason=""):
"""Callback function for lost connections.
For cpassdb clients, a lost connection means that there is no more data
expected from the server, so the reactor should be stopped.
:param reason: str - An optional reason.
"""
if reactor.running:
reactor.stop()
def terminate(self, error=None):
"""Helper function for terminating a connection.
The provided error message is written to stderr.
:param error: str - An optional error message.
"""
if error is not None:
self.error = error
self.exit_code = 1
if error:
sys.stderr.write("{}\n".format(error))
self.transport.loseConnection()
def gracefully_disconnect(self):
"""Helper function for gracefully terminating a connection.
The gracefulness comes from sending a termination request to the server
and having the server terminate the connection.
"""
self.transport.write("{}\n".format(self.sign_request({"type": "BYE"})))
def sign_request(self, request):
"""Help function for request signing.
:param request: struct - Request structure.
:return: str - GPG CLear-Text Armor
"""
request_string = json.dumps(request)
status, armor = commands.getstatusoutput("echo '{}' | gpg --clearsign".format(request_string))
if status:
return self.terminate("ERROR: Can not load private key.")
return armor
def send_request(self, *args, **kwargs):
"""This method must be overloaded by derived classes."""
raise NotImplementedError("This method must be overloaded by derived classes!")
class MessyClientProtocol(ClientProtocol):
"""Abstract base-class for messy client protocols
A messy client protocol is one that leaves behind dirty secrets after
running. So this would be the case for operations such as adding a key to
or removing it from a recipient group. All secrets that are encrypted for
    this particular recipient group no longer match the current state; they are
    still decryptable by the old keys, or not decryptable by the new ones.
"""
# This dict can be used by ancestors as a small state-machine.
state_machine = {
'requested_dirty_secrets': False,
}
def build_request_get_dirty_secrets(self, dirty_secrets):
print "{} secret(s) need to be cycled, requesting.".format(len(dirty_secrets)),
return {
'type': 'GET',
'names': dirty_secrets,
}
def build_request_set_dirty_secrets(self, incoming_dirty_secrets):
request = {
'type': 'SET',
'secrets': [],
}
for secret_name in incoming_dirty_secrets:
secret = json.loads(
commands.getoutput(
"echo '{}' | gpg --decrypt 2>/dev/null".format(
incoming_dirty_secrets[secret_name]['armor']
)
)
)
ttl = incoming_dirty_secrets[secret_name]['metadata']['ttl']
recipients = incoming_dirty_secrets[secret_name]['metadata']['recipients']
secret_object = {
'name': secret_name,
'secret': secret,
'metadata': {
'ttl': ttl,
'recipients': recipients,
}
}
request['secrets'].append(secret_object)
print '.',
print
return request
def get_dirty_secrets(self, dirty_secrets):
request = self.build_request_get_dirty_secrets(dirty_secrets)
self.transport.write("{}\n".format(self.sign_request(request)))
def set_dirty_secrets(self, dirty_secrets):
request = self.build_request_set_dirty_secrets(dirty_secrets)
self.transport.write("{}\n".format(self.sign_request(request)))
def handle_dirty_secret_dialog(self, response):
if self.state_machine['requested_dirty_secrets'] is False:
self.get_dirty_secrets(response)
self.state_machine['requested_dirty_secrets'] = True
elif self.state_machine['requested_dirty_secrets'] is True:
self.set_dirty_secrets(response)
self.state_machine['requested_dirty_secrets'] = None
else:
print "Cycled {} secret(s).".format(len(response))
class SetSecret(ClientProtocol):
"""cpassdb client protocol class for setting a secret."""
def send_request(self, name, secret, ttl, recipients):
"""Send write request to server.
:param name: str - Name of the secret (incl. categories).
:param secret: struct - The secret data structure (usually a dict).
:param ttl: int - Time to live (in days past today).
:param recipients: list - List of all recipients.
"""
request = {
"type": "SET",
"secrets": [{
"name": name,
"secret": secret,
"metadata": {
"ttl": ttl,
"recipients": recipients,
}
}]
}
self.transport.write("{}\n".format(self.sign_request(request)))
def dataReceived(self, data):
"""Callback function for incoming response data.
This protocol will return a list of written secrets upon successful
execution or an error message upon failure.
:param data: str - The incoming line of data.
"""
# Parse incoming data into response object.
response = ClientProtocol.dataReceived(self, data)
if response:
print "Written:", ' '.join(response)
self.gracefully_disconnect()
class GetSecret(ClientProtocol):
"""cpassdb client protocol class for getting secrets."""
# Set this variable to true if you would like the data to be outputted as
# a JSON string. The JSON output will always print the full secret object
# including all metadata.
as_json = False
# Set this value to the integer value of line indentation you desire for
# the JSON output. Defining this attribute only makes sense in combination
# with the as_json class attribute.
indent_json = None
# Set this list to the fields you wish the output the be limited to. Using
# this attribute only works on non-JSON output format.
filter_fields = []
# Store a sorted list of all requested secrets, so they can be outputted in
# the requested order.
requested_secrets = []
def send_request(self, names):
"""Send read request to server.
:param names: list - A list of secret-object names.
"""
request = {
"type": "GET",
"names": names,
}
self.transport.write("{}\n".format(self.sign_request(request)))
def decrypt_secret_armor(self, gpg_armor):
"""Helper function for decrypting a GPG encrypted message armor.
:param gpg_armor: str - The GPG armor.
:return: struct - JSON-loaded secret data structure.
"""
return json.loads(
commands.getoutput(
"echo '{}' | gpg --decrypt 2>/dev/null".format(gpg_armor)
)
)
def dataReceived(self, data):
"""Callback function for incoming data.
This protocol will return a list of secret object data structures
containing all metadata in the following format:
[
{
'name': <secret-object-name>,
'secret': <secret-object-data-struct>,
'metadata': {
'ttl': <ttl>,
'recipients': [<recipient>, ...],
}
},
...
]
:param data: str - The line of incoming data.
"""
# Parse incoming data.
response = ClientProtocol.dataReceived(self, data)
if response:
if self.as_json:
print json.dumps([{
'name': secret_name,
'secret': self.decrypt_secret_armor(response[secret_name]['armor']),
'metadata': response[secret_name]['metadata'],
} for secret_name in response], indent=self.indent_json)
else:
for secret_name in response:
secret = self.decrypt_secret_armor(response[secret_name]['armor'])
if self.filter_fields:
for field in self.filter_fields:
if field in secret:
print secret[field]
else:
print '###', secret_name, '###'
for field in secret:
print field + ':', secret[field]
print
self.gracefully_disconnect()
class DelSecret(ClientProtocol):
"""cpassdb client protocol class for deleting a secret."""
def send_request(self, name):
"""Send delete request to server.
:param name: str - Name of secret to delete (incl. its category)
"""
request = {
"type": "DEL",
"name": name,
}
self.transport.write("{}\n".format(self.sign_request(request)))
def dataReceived(self, data):
"""Callback function for incoming response data.
This protocol will reply with a success string upon successful
execution.
:param data: str - The actual line of incoming data.
"""
response = ClientProtocol.dataReceived(self, data)
if response:
print response
self.gracefully_disconnect()
class ListSecrets(ClientProtocol):
"""cpassdb client protocol class for listing secrets in a given category."""
def send_request(self, category=None):
"""Send list request to server.
:param category: str - Name of category (default: None)
"""
request = {
"type": "LIST",
"path": category,
}
self.transport.write("{}\n".format(self.sign_request(request)))
def dataReceived(self, data):
"""Callback function for incoming response data.
This protocol will reply with a list of secret-object names upon
successful execution.
:param data: str - Actual line of incoming data.
"""
response = ClientProtocol.dataReceived(self, data)
if response:
if isinstance(response, dict):
for d in response['dirs']:
print d + os.path.sep
for secret in response['secrets']:
print secret
elif isinstance(response, list):
for secret in response:
print secret
else:
raise NotImplementedError
self.gracefully_disconnect()
class InstallRootKey(ClientProtocol):
"""cpassdb client protocol class for installing the root-key.
There is a special recipient group that - apart of being a regular
recipient group - mark all cpassdb admins. Admins are allowed to perform
key-management operations such as adding keys, deleting keys and
adding/removing keys from recipient groups.
The cpassdb admin group is always included as recipient to every secret-object,
meaning that cpassdb admins can always decrypt every secret. The admin group is
not stated inside the recipient field in the metadata if not explicitly
defined for that recipient group.
Due to this convention it is not possible to use the cpassdb server before at
least one key-id has been imported and added to the admin group. That's
what this protocol is for. It is really only used once per server, usually.
"""
def send_request(self, pubkey_armor):
"""Send root-key installation request to server.
:param pubkey_armor: str - The GPG armor of the public key.
"""
request = {
'pubkey_armor': pubkey_armor,
}
request_string = json.dumps(request)
self.transport.write("{}\n".format(request_string))
def dataReceived(self, data):
"""Callback function for incoming response data.
This protocol usually replies with a success string upon successful
execution.
:param data: str - Actual line of incoming data.
:return:
"""
response = ClientProtocol.dataReceived(self, data)
print response
self.gracefully_disconnect()
class AddKey(MessyClientProtocol):
"""cpassdb client protocol class for adding a key."""
def send_request(self, pubkey_armor, groups):
"""Send add-key request to server.
:param pubkey_armor: str - GPG armor of public key.
:param groups: list - List of group to add this key-id to.
"""
request = {
'type': 'ADDKEY',
'pubkey_armor': base64.b64encode(pubkey_armor),
'groups': groups,
}
self.transport.write("{}\n".format(self.sign_request(request)))
def dataReceived(self, data):
"""Callback function for incoming response data.
:param data: str - Actual line of incoming data.
"""
response = ClientProtocol.dataReceived(self, data)
if response:
self.handle_dirty_secret_dialog(response)
self.gracefully_disconnect()
class DelKey(MessyClientProtocol):
"""cpassdb client protocol class for deleting a key."""
def send_request(self, pubkey_id):
"""Send delete-key request to server.
:param pubkey_id: str - Key-id of key to delete.
"""
request = {
"type": "DELKEY",
"pubkey_id": pubkey_id,
}
self.transport.write("{}\n".format(self.sign_request(request)))
def dataReceived(self, data):
"""Callback function for incoming response data.
:param data: str - Actual line of incoming data.
:return:
"""
response = ClientProtocol.dataReceived(self, data)
if response:
self.handle_dirty_secret_dialog(response)
self.gracefully_disconnect()
class ListKeys(ClientProtocol):
"""cpassdb client protocol class for listing all keys in the keyring."""
def send_request(self, keyid_length=8):
"""Send list-keys request to server.
:param keyid_length: int - Length of the key-ids (common are 8 or 16).
"""
request = {
"type": "KEYS",
"keyid_length": keyid_length,
}
self.transport.write("{}\n".format(self.sign_request(request)))
def dataReceived(self, data):
"""Callback function for incoming response data.
:param data: str - Actual line of incoming data.
"""
response = ClientProtocol.dataReceived(self, data)
if response:
for key in response:
print key[0], ' '.join(key[1])
self.gracefully_disconnect()
class ListGroups(ClientProtocol):
"""cpassdb client protocol class for listing all recipient groups."""
def send_request(self, pubkey_id=None):
"""Send list-groups request to server.
        If the request is supplied with a key-id, only the groups of that key-id
will be returned.
:param pubkey_id: str - Optional key-id.
"""
request = {
"type": "GROUPS",
"pubkey_id": pubkey_id,
}
self.transport.write('{}\n'.format(self.sign_request(request)))
def dataReceived(self, data):
"""Callback function for incoming reponse data.
:param data: str - Actual line of incoming data.
"""
response = ClientProtocol.dataReceived(self, data)
if response:
for group in response:
print group + ':', ' '.join(response[group])
self.gracefully_disconnect()
class AddGroups(MessyClientProtocol):
"""cpassdb client protocol class for add a key-id to a list of groups.
When the members of a recipient group change, all secrets of that recipient
group must be re-encrypted. If the secrets where not re-encrypted they
would not be readable by the keys they should be in the current state of
the recipient group. This adds extra ping-pong complexity to this protocol
that the other client protocols don't have.
"""
def send_request(self, pubkey_id, groups):
"""Send add-group request to server.
:param pubkey_id: str - Key-id of concern.
:param groups: - List of groups to add this key-id to.
"""
request = {
'type': 'ADDGRP',
'pubkey_id': pubkey_id,
'groups': groups,
}
self.transport.write("{}\n".format(self.sign_request(request)))
def dataReceived(self, data):
"""Callback function for incoming response data.
:param data: str - Actual line of incoming data.
:return:
"""
response = ClientProtocol.dataReceived(self, data)
if response:
self.handle_dirty_secret_dialog(response)
self.gracefully_disconnect()
class DelGroups(AddGroups):
"""cpassdb client protocol class for deleting a key-id from a group."""
def send_request(self, pubkey_id, groups):
"""Send delete-group request to the server.
:param pubkey_id: str - Key-id of concern.
:param groups: - List of groups to free from the given key-id.
"""
request = {
'type': 'DELGRP',
'pubkey_id': pubkey_id,
'groups': groups,
}
self.transport.write("{}\n".format(self.sign_request(request)))
def dataReceived(self, data):
"""Callback function for incoming response data.
        :param data: str - Actual line of incoming data.
"""
AddGroups.dataReceived(self, data)
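# --- Illustrative usage sketch (not part of the original module) ---
# The host, port and factory wiring below are assumptions for demonstration;
# the real cpassdb command-line client may build its connections differently.
if __name__ == '__main__':
    from twisted.internet.protocol import ClientFactory
    class _ListSecretsFactory(ClientFactory):
        """Minimal factory that lists the top-level secrets of a server."""
        def buildProtocol(self, addr):
            proto = ListSecrets()
            # connectionMade() sends self.request, so prime it before connecting.
            proto.request = {'category': None}
            return proto
    # Hypothetical endpoint; replace with the real server address and port.
    reactor.connectTCP('localhost', 9999, _ListSecretsFactory())
    reactor.run()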
|
gpl-2.0
| -7,313,434,773,447,821,000 | 31.863787 | 102 | 0.591336 | false |
nonapod/gzinflatekiller
|
gzinflatekiller.py
|
1
|
3114
|
#!/bin/env python
#:############################################
#: GZINFLATEKILLER
#: by Les Cordell
#:
#: Hunt through files containing a base64
#: GZInflate Command
#:
#: Written on 07/08/2013
#: last modified @ 07/08/2013
#:############################################
import sys, os, re
#: Extensions constant, these are the files that our program will check
EXTS = ['php']
#: Our Patterns constant contains all of our regular expressions that we want to check against and skip
PATTERNS = [re.compile("<\?php eval\(gzinflate\(base64_decode\(\'.*\'\)\)\);\?>"), re.compile('^\r\n')]
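#: Example of a line the first pattern targets (base64 payload is illustrative only):
#:   <?php eval(gzinflate(base64_decode('eJzT0yMAAGTvBe8=')));?>
#: The second pattern matches blank Windows-style (CRLF) lines such injections leave behind.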
def gzInflateKill():
"""
    #: The main function that is run. It checks the argv arguments first;
    #: it requires a directory enclosed in quotes.
"""
dirname = False
#: Check provided directory name
if (len(sys.argv) < 2):
print "You must provide a directory name enclosed in quotes to run this script.\n"
quit()
elif (len(sys.argv) > 2):
print "Too many arguements provided, you must provide a directory for this script to run"
quit()
elif (len(sys.argv) == 2):
#: Store the directory name
dirname = sys.argv[1]
else:
#: If there is an error return false
print "There was an error running this script, please check that you have specified a directory enclosed in quotes."
quit()
#: Open the directory
parseDir(dirname)
quit()
def parseDir(dirname):
"""
#: This is our directory parser, here we parse through every directory until we hit the last
#: feeding the files off to the cleanFile function
"""
if os.path.exists(dirname):
#: If our directory exists then we'll open it and return some files
#: Walk through the directory
for root, dirs, files in os.walk(dirname):
if files: #: If we get any files
for file in files: #: For each file in the list
if file.split('.')[-1] in EXTS: #: Get the extension
thisFile = os.path.join(root, file)
if os.path.isfile(thisFile):
print "cleaning: " + thisFile
cleanFile(thisFile)
            #: Note: os.walk() already descends into subdirectories, so no
            #: explicit recursion over `dirs` is needed here.
def cleanFile(filename):
"""
#: Here we will strip the injection from the php file
"""
newFile = []
#: First open the file for reading and get our new file
with open(filename, 'r') as aFile:
for line in aFile.readlines():
#: For each line check if it matches the injection or the new line
if patternMatch(line):
pass
else: #: Append line to new file if no match
newFile.append(line)
aFile.close() #: close the file
#: Now we open the file for reading
if newFile:
newFile = ''.join(newFile) # : join our new file
with open(filename, 'w+b') as aFile:
aFile.write(newFile)
aFile.close()
def patternMatch(line):
"""
#: We pass lines into this function, check them against our PATTERNS constant
#: if we match any of them, we return a true, otherwise we return false
"""
for pattern in PATTERNS:
if pattern.match(line):
return True
return False
# BEGIN #
if __name__ == '__main__':
gzInflateKill();
|
mit
| -5,594,238,935,680,430,000 | 28.942308 | 118 | 0.659923 | false |
L1NT/django-training-log
|
log/models.py
|
1
|
5458
|
from django.db import models
# Create your models here.
class Sport(models.Model):
"""
don't use models.choices because we want the list to be transactional data
example list: [
'bike',
'run',
'swim',
'measurements',
'yoga',
'weights',
# for multi-sport `Event`s:
'multisport', #EventType.sport
'transition', #Entry.sport
]
"""
sport = models.CharField(max_length=20)
class Meta:
ordering = ['sport']
def __unicode__(self):
return self.sport
def __str__(self):
return self.sport
class Measurements(models.Model):
id = models.AutoField(primary_key=True) #added by default
weight = models.FloatField(blank=True, null=True)
class Equipment(models.Model):
"""
this is for things such as bikes, shoes, wheelsets; i.e. things with a
determinable depreciation cost or maintenance periods
"""
name = models.CharField(max_length=50)
cost = models.DecimalField(blank=True, null=True, max_digits=8, decimal_places=2)
acquired_date = models.DateField()
disposal_date = models.DateField(blank=True, null=True)
disposal_method = models.CharField(blank=True, max_length=7, choices=[
('sold', 'sold'),
('donated', 'donated'),
('retired', 'retired'),# i.e. 'broken'
])
disposal_proceeds = models.DecimalField(blank=True, null=True, max_digits=8, decimal_places=2)
expected_lifespan = models.DurationField(blank=True, null=True)
maintenance_interval = models.DurationField(blank=True, null=True)
def history(self):
return EquipmentMaintenance.objects.filter(equipment=self.id)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
class EquipmentMaintenance(models.Model):
date = models.DateField()
description = models.CharField(max_length=250)
equipment = models.ForeignKey(Equipment)
cost = models.DecimalField(blank=True, null=True, max_digits=8, decimal_places=2)
vendor = models.CharField(max_length=50, default='DIY')
class EventType(models.Model):
"""
examples: '5k', 'Olympic', 'Criterium'
"""
event_type = models.CharField(max_length=20)
sport = models.ForeignKey(Sport)
class Meta:
ordering = ['sport', 'event_type']
def __unicode__(self):
return str(self.sport) + ': ' + self.event_type
def __str__(self):
return str(self.sport) + ': ' + self.event_type
class Event(models.Model):
    name = models.CharField(max_length=35)
    date = models.DateField()  # referenced by __unicode__/__str__ below
location = models.CharField(max_length=50)
event_type = models.ForeignKey(EventType, blank=True, null=True)
bib_number = models.IntegerField(blank=True, null=True)
dnf = models.BooleanField()
finish_overall = models.IntegerField(blank=True, null=True)
finishers_overall = models.IntegerField(blank=True, null=True)
#maybe just use "handicapped" as the age group description??
finish_handicapped = models.IntegerField(blank=True, null=True)
finish_gender = models.IntegerField(blank=True, null=True)
finishers_gender = models.IntegerField(blank=True, null=True)
finish_age_group = models.IntegerField(blank=True, null=True)
finishers_age_group = models.IntegerField(blank=True, null=True)
# category/age_group seem to be mutually-exclusive?
category = models.CharField(max_length=10, blank=True, null=True)
age_group = models.CharField(max_length=10, blank=True)
results_url = models.URLField(blank=True, null=True)
official_time = models.TimeField(blank=True, null=True) #used for total event time (brevets & triathlons)
## TODO: maybe this should be handled by multiple `Entry`s?
# swim_distance = models.FloatField(blank=True)
# bike_distance = models.FloatField(blank=True)
# run_distance = models.FloatField(blank=True)
# swim_time = models.TimeField(blank=True)
# bike_time = models.TimeField(blank=True)
# run_time = models.TimeField(blank=True)
# t1_time = models.TimeField(blank=True)
# t2_time = models.TimeField(blank=True)
def get_absolute_url(self):
return "/events?event=%d" % self.id
def __unicode__(self):
return self.name + ' ['+self.date.strftime('%b %d, %Y')+']'
def __str__(self):
return self.name + ' ['+self.date.strftime('%b %d, %Y')+']'
class Entry(models.Model):
#entry_id:
date = models.DateField()
sport = models.ForeignKey(Sport)
event = models.ForeignKey(Event, blank=True, null=True)
route = models.CharField(max_length=50, blank=True) # routes Model?
notes = models.CharField(max_length=256, blank=True)
equipment = models.ForeignKey(Equipment, blank=True, null=True)
distance = models.FloatField(blank=True, null=True)
time = models.TimeField(blank=True, null=True)
avg_speed = models.FloatField(blank=True, null=True)
max_speed = models.FloatField(blank=True, null=True)
elevation_gain = models.IntegerField(blank=True, null=True)
calories = models.IntegerField(blank=True, null=True)
#pace: models.TimeField(blank=True, default=calc_pace(self.time/self.distance)) #could be calculated...
class Meta:
ordering = ['date', 'id']
def __unicode__(self):
return self.date.strftime('%b %d, %Y') + ' ['+str(self.sport)+']'
def __str__(self):
return self.date.strftime('%b %d, %Y') + ' ['+str(self.sport)+']'
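# Illustrative ORM usage (hypothetical values; run e.g. from a Django shell):
#   import datetime
#   run = Sport.objects.get(sport='run')
#   Entry.objects.create(date=datetime.date.today(), sport=run,
#                        distance=5.0, notes='easy run')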
|
gpl-2.0
| 1,526,296,373,929,901,000 | 35.878378 | 109 | 0.662147 | false |
steven-murray/pydftools
|
setup.py
|
1
|
2408
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
import io
import os
import re
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = [
"scipy",
"numpy>=1.6.2",
"Click>=6.0",
"attrs>=17.0",
"cached_property",
"chainconsumer",
"matplotlib"
# TODO: put package requirements here
]
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8"),
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup_requirements = [
"pytest-runner",
# TODO(steven-murray): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
"pytest",
# TODO: put package test requirements here
]
setup(
name="pydftools",
version=find_version("pydftools", "__init__.py"),
description="A pure-python port of the dftools R package.",
long_description=readme + "\n\n" + history,
author="Steven Murray",
author_email="steven.murray@curtin.edu.au",
url="https://github.com/steven-murray/pydftools",
packages=find_packages(include=["pydftools"]),
entry_points={"console_scripts": ["pydftools=pydftools.cli:main"]},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords="pydftools",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
test_suite="tests",
tests_require=test_requirements,
setup_requires=setup_requirements,
)
|
mit
| 207,462,107,332,436,930 | 27 | 88 | 0.622924 | false |
chaosk/trinitee
|
trinitee/forums/migrations/0001_initial.py
|
1
|
14851
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table('forums_category', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Category'], null=True, blank=True)),
('ordering', self.gf('django.db.models.fields.IntegerField')(default=1)),
))
db.send_create_signal('forums', ['Category'])
# Adding model 'Topic'
db.create_table('forums_topic', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_topics', to=orm['auth.User'])),
('modified_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='modified_topics', null=True, to=orm['auth.User'])),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Category'])),
('is_closed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_sticky', self.gf('django.db.models.fields.BooleanField')(default=False)),
('first_post', self.gf('django.db.models.fields.related.OneToOneField')(related_name='topic_root', unique=True, to=orm['forums.Post'])),
))
db.send_create_signal('forums', ['Topic'])
# Adding model 'Post'
db.create_table('forums_post', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('topic', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Topic'])),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_posts', to=orm['auth.User'])),
('modified_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='modified_posts', null=True, to=orm['auth.User'])),
('show_edits', self.gf('django.db.models.fields.BooleanField')(default=True)),
('content', self.gf('django.db.models.fields.TextField')()),
('content_html', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('forums', ['Post'])
# Adding model 'PostKarma'
db.create_table('forums_postkarma', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('post', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Post'])),
('karma', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('forums', ['PostKarma'])
# Adding unique constraint on 'PostKarma', fields ['user', 'post']
db.create_unique('forums_postkarma', ['user_id', 'post_id'])
# Adding model 'Poll'
db.create_table('forums_poll', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='polls_started', to=orm['auth.User'])),
('expires_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('question', self.gf('django.db.models.fields.CharField')(max_length=255)),
('topic', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Topic'])),
('max_votes', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
))
db.send_create_signal('forums', ['Poll'])
# Adding model 'Choice'
db.create_table('forums_choice', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('poll', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Poll'])),
('choice', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('forums', ['Choice'])
# Adding model 'Vote'
db.create_table('forums_vote', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('poll', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Poll'])),
('choice', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Choice'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('forums', ['Vote'])
# Adding unique constraint on 'Vote', fields ['poll', 'choice', 'user']
db.create_unique('forums_vote', ['poll_id', 'choice_id', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'Vote', fields ['poll', 'choice', 'user']
db.delete_unique('forums_vote', ['poll_id', 'choice_id', 'user_id'])
# Removing unique constraint on 'PostKarma', fields ['user', 'post']
db.delete_unique('forums_postkarma', ['user_id', 'post_id'])
# Deleting model 'Category'
db.delete_table('forums_category')
# Deleting model 'Topic'
db.delete_table('forums_topic')
# Deleting model 'Post'
db.delete_table('forums_post')
# Deleting model 'PostKarma'
db.delete_table('forums_postkarma')
# Deleting model 'Poll'
db.delete_table('forums_poll')
# Deleting model 'Choice'
db.delete_table('forums_choice')
# Deleting model 'Vote'
db.delete_table('forums_vote')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forums.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Category']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forums.choice': {
'Meta': {'object_name': 'Choice'},
'choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Poll']"})
},
'forums.poll': {
'Meta': {'object_name': 'Poll'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polls_started'", 'to': "orm['auth.User']"}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Topic']"})
},
'forums.post': {
'Meta': {'object_name': 'Post'},
'content': ('django.db.models.fields.TextField', [], {}),
'content_html': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_posts'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modified_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'show_edits': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Topic']"})
},
'forums.postkarma': {
'Meta': {'unique_together': "(('user', 'post'),)", 'object_name': 'PostKarma'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Post']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forums.topic': {
'Meta': {'object_name': 'Topic'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Category']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_topics'", 'to': "orm['auth.User']"}),
'first_post': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'topic_root'", 'unique': 'True', 'to': "orm['forums.Post']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modified_topics'", 'null': 'True', 'to': "orm['auth.User']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forums.vote': {
'Meta': {'unique_together': "(('poll', 'choice', 'user'),)", 'object_name': 'Vote'},
'choice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Choice']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Poll']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['forums']
|
bsd-3-clause
| -7,617,905,021,076,581,000 | 64.422907 | 182 | 0.570265 | false |
pgroudas/pants
|
src/python/pants/backend/jvm/tasks/jvm_compile/jvm_compile_strategy.py
|
1
|
8647
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from abc import ABCMeta, abstractmethod
from collections import OrderedDict, defaultdict
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot, get_scm
from pants.base.exceptions import TaskError
from pants.util.dirutil import safe_delete
class JvmCompileStrategy(object):
"""An abstract base strategy for JVM compilation."""
__metaclass__ = ABCMeta
class CompileContext(object):
"""A context for the compilation of a target.
This can be used to differentiate between a partially completed compile in a temporary location
and a finalized compile in its permanent location.
"""
def __init__(self, target, analysis_file, classes_dir, sources):
self.target = target
self.analysis_file = analysis_file
self.classes_dir = classes_dir
self.sources = sources
@property
def _id(self):
return (self.target, self.analysis_file, self.classes_dir)
def __eq__(self, other):
return self._id == other._id
def __hash__(self):
return hash(self._id)
# Common code.
# ------------
@staticmethod
def _analysis_for_target(analysis_dir, target):
return os.path.join(analysis_dir, target.id + '.analysis')
@staticmethod
def _portable_analysis_for_target(analysis_dir, target):
return JvmCompileStrategy._analysis_for_target(analysis_dir, target) + '.portable'
@classmethod
@abstractmethod
def register_options(cls, register, language, supports_concurrent_execution):
"""Registration for strategy-specific options.
The abstract base class does not register any options itself: those are left to JvmCompile.
"""
pass
def __init__(self, context, options, workdir, analysis_tools, language, sources_predicate):
self._language = language
self.context = context
self._analysis_tools = analysis_tools
# Mapping of relevant (as selected by the predicate) sources by target.
self._sources_by_target = None
self._sources_predicate = sources_predicate
# The ivy confs for which we're building.
self._confs = options.confs
self._clear_invalid_analysis = options.clear_invalid_analysis
@abstractmethod
def name(self):
"""A readable, unique name for this strategy."""
pass
@abstractmethod
def invalidation_hints(self, relevant_targets):
"""A tuple of partition_size_hint and locally_changed targets for the given inputs."""
pass
@abstractmethod
def compile_context(self, target):
"""Returns the default/stable compile context for the given target.
Temporary compile contexts are private to the strategy.
"""
pass
@abstractmethod
def compute_classes_by_source(self, compile_contexts):
"""Compute a map of (context->(src->classes)) for the given compile_contexts.
It's possible (although unfortunate) for multiple targets to own the same sources, hence
the top level division. Srcs are relative to buildroot. Classes are absolute paths.
"""
pass
@abstractmethod
def compile_chunk(self,
invalidation_check,
all_targets,
relevant_targets,
invalid_targets,
extra_compile_time_classpath_elements,
compile_vts,
register_vts,
update_artifact_cache_vts_work):
"""Executes compilations for that invalid targets contained in a single language chunk."""
pass
@abstractmethod
def post_process_cached_vts(self, cached_vts):
"""Post processes VTS that have been fetched from the cache."""
pass
@abstractmethod
def compute_resource_mapping(self, compile_contexts):
"""Computes a merged ResourceMapping for the given compile contexts.
Since classes should live in exactly one context, a merged mapping is unambiguous.
"""
pass
def pre_compile(self):
"""Executed once before any compiles."""
pass
def validate_analysis(self, path):
"""Throws a TaskError for invalid analysis files."""
try:
self._analysis_parser.validate_analysis(path)
except Exception as e:
if self._clear_invalid_analysis:
self.context.log.warn("Invalid analysis detected at path {} ... pants will remove these "
"automatically, but\nyou may experience spurious warnings until "
"clean-all is executed.\n{}".format(path, e))
safe_delete(path)
else:
raise TaskError("An internal build directory contains invalid/mismatched analysis: please "
"run `clean-all` if your tools versions changed recently:\n{}".format(e))
def prepare_compile(self, cache_manager, all_targets, relevant_targets):
"""Prepares to compile the given set of targets.
Has the side effects of pruning old analysis, and computing deleted sources.
"""
# Target -> sources (relative to buildroot).
# TODO(benjy): Should sources_by_target be available in all Tasks?
self._sources_by_target = self._compute_sources_by_target(relevant_targets)
def class_name_for_class_file(self, compile_context, class_file_name):
assert class_file_name.endswith(".class")
assert class_file_name.startswith(compile_context.classes_dir)
class_file_name = class_file_name[len(compile_context.classes_dir) + 1:-len(".class")]
return class_file_name.replace("/", ".")
def _compute_sources_by_target(self, targets):
"""Computes and returns a map target->sources (relative to buildroot)."""
def resolve_target_sources(target_sources):
resolved_sources = []
for target in target_sources:
if target.has_sources():
resolved_sources.extend(target.sources_relative_to_buildroot())
return resolved_sources
def calculate_sources(target):
sources = [s for s in target.sources_relative_to_buildroot() if self._sources_predicate(s)]
# TODO: Make this less hacky. Ideally target.java_sources will point to sources, not targets.
if hasattr(target, 'java_sources') and target.java_sources:
sources.extend(resolve_target_sources(target.java_sources))
return sources
return {t: calculate_sources(t) for t in targets}
def _sources_for_targets(self, targets):
"""Returns a cached map of target->sources for the specified targets."""
if self._sources_by_target is None:
raise TaskError('self._sources_by_target not computed yet.')
return {t: self._sources_by_target.get(t, []) for t in targets}
def _sources_for_target(self, target):
"""Returns the cached sources for the given target."""
if self._sources_by_target is None:
raise TaskError('self._sources_by_target not computed yet.')
return self._sources_by_target.get(target, [])
def _find_locally_changed_targets(self, sources_by_target):
"""Finds the targets whose sources have been modified locally.
Returns a list of targets, or None if no SCM is available.
"""
# Compute the src->targets mapping. There should only be one target per source,
# but that's not yet a hard requirement, so the value is a list of targets.
# TODO(benjy): Might this inverse mapping be needed elsewhere too?
targets_by_source = defaultdict(list)
for tgt, srcs in sources_by_target.items():
for src in srcs:
targets_by_source[src].append(tgt)
ret = OrderedSet()
scm = get_scm()
if not scm:
return None
changed_files = scm.changed_files(include_untracked=True, relative_to=get_buildroot())
for f in changed_files:
ret.update(targets_by_source.get(f, []))
return list(ret)
@property
def _analysis_parser(self):
return self._analysis_tools.parser
# Compute any extra compile-time-only classpath elements.
# TODO(benjy): Model compile-time vs. runtime classpaths more explicitly.
# TODO(benjy): Add a pre-execute goal for injecting deps into targets, so e.g.,
# we can inject a dep on the scala runtime library and still have it ivy-resolve.
def _compute_extra_classpath(self, extra_compile_time_classpath_elements):
def extra_compile_classpath_iter():
for conf in self._confs:
for jar in extra_compile_time_classpath_elements:
yield (conf, jar)
return list(extra_compile_classpath_iter())
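# Illustrative skeleton of a concrete strategy (hypothetical; the real subclasses
# in pants implement these abstract methods with considerably more logic):
#
#   class ExampleCompileStrategy(JvmCompileStrategy):
#     def name(self): return 'example'
#     def invalidation_hints(self, relevant_targets): return (0, [])
#     def compile_context(self, target): return self.CompileContext(target, None, None, [])
#     def compute_classes_by_source(self, compile_contexts): return {}
#     def compile_chunk(self, *args, **kwargs): pass
#     def post_process_cached_vts(self, cached_vts): pass
#     def compute_resource_mapping(self, compile_contexts): return {}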
|
apache-2.0
| -8,624,641,274,634,726,000 | 36.925439 | 99 | 0.686481 | false |
ruhan/django-silk-mongoengine
|
silk/profiling/profiler.py
|
1
|
6695
|
import inspect
import logging
import time
import traceback
from django.conf import settings
from django.utils import timezone
import six
from silk.collector import DataCollector
from silk.config import SilkyConfig
from silk.models import _time_taken
Logger = logging.getLogger('silk')
# noinspection PyPep8Naming
class silk_meta_profiler(object):
"""Used in the profiling of Silk itself."""
def __init__(self):
super(silk_meta_profiler, self).__init__()
self.start_time = None
@property
def _should_meta_profile(self):
return SilkyConfig().SILKY_META
def __enter__(self):
if self._should_meta_profile:
self.start_time = timezone.now()
def __exit__(self, exc_type, exc_val, exc_tb):
if self._should_meta_profile:
end_time = timezone.now()
exception_raised = exc_type is not None
if exception_raised:
Logger.error('Exception when performing meta profiling, dumping trace below')
traceback.print_exception(exc_type, exc_val, exc_tb)
request = getattr(DataCollector().local, 'request', None)
if request:
curr = request.meta_time or 0
request.meta_time = curr + _time_taken(self.start_time, end_time)
def __call__(self, target):
if self._should_meta_profile:
def wrapped_target(*args, **kwargs):
request = DataCollector().request
if request:
start_time = timezone.now()
result = target(*args, **kwargs)
end_time = timezone.now()
curr = request.meta_time or 0
request.meta_time = curr + _time_taken(start_time, end_time)
else:
result = target(*args, **kwargs)
return result
return wrapped_target
return target
# noinspection PyPep8Naming
class silk_profile(object):
def __init__(self, name=None, _dynamic=False):
super(silk_profile, self).__init__()
self.name = name
self.profile = None
self._queries_before = None
self._queries_after = None
self._dynamic = _dynamic
def _query_identifiers_from_collector(self):
return [x for x in DataCollector().queries]
def _start_queries(self):
"""record queries that have been executed before profiling began"""
self._queries_before = self._query_identifiers_from_collector()
def _end_queries(self):
"""record queries that have been executed after profiling has finished"""
self._queries_after = self._query_identifiers_from_collector()
def __enter__(self):
if self._silk_installed() and self._should_profile():
with silk_meta_profiler():
self._start_queries()
if not self.name:
raise ValueError('silk_profile used as a context manager must have a name')
frame = inspect.currentframe()
frames = inspect.getouterframes(frame)
outer_frame = frames[1]
path = outer_frame[1]
line_num = outer_frame[2]
request = DataCollector().request
self.profile = {
'name': self.name,
'file_path': path,
'line_num': line_num,
'dynamic': self._dynamic,
'request': request,
'start_time': timezone.now(),
}
else:
Logger.warn('Cannot execute silk_profile as silk is not installed correctly.')
def _finalise_queries(self):
collector = DataCollector()
self._end_queries()
assert self.profile, 'no profile was created'
diff = set(self._queries_after).difference(set(self._queries_before))
self.profile['queries'] = diff
collector.register_profile(self.profile)
# noinspection PyUnusedLocal
def __exit__(self, exc_type, exc_val, exc_tb):
if self._silk_installed() and self._should_profile():
with silk_meta_profiler():
start_time = None
exception_raised = exc_type is not None
self.profile['exception_raised'] = exception_raised
self.profile['end_time'] = timezone.now()
self._finalise_queries()
def _silk_installed(self):
app_installed = 'silk' in settings.INSTALLED_APPS
middleware_installed = 'silk.middleware.SilkyMiddleware' in settings.MIDDLEWARE_CLASSES
return app_installed and middleware_installed
def _should_profile(self):
return DataCollector().request is not None
def __call__(self, target):
if self._silk_installed():
def wrapped_target(*args, **kwargs):
with silk_meta_profiler():
try:
func_code = six.get_function_code(target)
except AttributeError:
raise NotImplementedError('Profile not implemented to decorate type %s' % target.__class__.__name__)
line_num = func_code.co_firstlineno
file_path = func_code.co_filename
func_name = target.__name__
if not self.name:
self.name = func_name
self.profile = {
'func_name': func_name,
'name': self.name,
'file_path': file_path,
'line_num': line_num,
'dynamic': self._dynamic,
'start_time': timezone.now(),
'request': DataCollector().request
}
self._start_queries()
try:
result = target(*args, **kwargs)
except Exception:
self.profile['exception_raised'] = True
raise
finally:
with silk_meta_profiler():
self.profile['end_time'] = timezone.now()
self._finalise_queries()
return result
return wrapped_target
else:
Logger.warn('Cannot execute silk_profile as silk is not installed correctly.')
return target
def distinct_queries(self):
queries = [x for x in self._queries_after if not x in self._queries_before]
return queries
@silk_profile()
def blah():
time.sleep(1)
if __name__ == '__main__':
blah()
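# silk_profile also works as a context manager, in which case a name is required.
# Minimal sketch (the profiled block below is illustrative):
#   with silk_profile(name='slow block'):
#       time.sleep(1)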
|
mit
| -1,680,985,588,628,297,200 | 35.391304 | 124 | 0.542644 | false |
sandz-in/twilio_trello
|
twilio_sms_handler/views.py
|
1
|
1174
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from twilio.twiml.messaging_response import MessagingResponse
# Create your views here.
from twilio_sms_handler.TrelloQuery import TrelloQuery
from twilio_trello.twilio_util import validate_twilio_request
from django.views.decorators.csrf import csrf_exempt
@require_POST
@validate_twilio_request
@csrf_exempt
def sms_view(request):
"""Twilio Messaging URL - receives incoming messages from Twilio"""
# Create a new TwiML response
resp = MessagingResponse()
# <Message> a text back to the person who texted us
text = request.POST['Body']
split_text = text.lower().split(" ")
if len(split_text) < 2:
body = '''1)get boards
2)get lists <board-no>
3)get cards <board-no:list-no>
'''
else:
trello_query = TrelloQuery()
action = '_'.join(split_text[:2])
try:
body = getattr(trello_query, action)(split_text[2:])
        except Exception:
body = "Incorrect input!!"
resp.message(body)
# Return the TwiML
return HttpResponse(resp)
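# Illustrative sketch of the dispatch contract assumed above (hypothetical method
# bodies; the real implementation lives in twilio_sms_handler/TrelloQuery.py):
#   class TrelloQuery(object):
#       def get_boards(self, args):   # "get boards"    -> args == []
#           return "1) Board A\n2) Board B"
#       def get_lists(self, args):    # "get lists 1"   -> args == ['1']
#           return "1) To Do\n2) Done"
#       def get_cards(self, args):    # "get cards 1:2" -> args == ['1:2']
#           return "1) Write report"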
|
mit
| 3,368,238,826,495,782,000 | 29.102564 | 71 | 0.683986 | false |
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractKeztranslationsWordpressCom.py
|
1
|
1210
|
def extractKeztranslationsWordpressCom(item):
'''
Parser for 'keztranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('FOD', 'Quickly Wear the Face of the Devil', 'translated'),
('ABO', 'ABO Cadets', 'translated'),
('dfc', 'The First Dragon Convention', 'translated'),
('ogu', 'My Family’s Omega Has Just Grown Up', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
titlemap = [
('ABO Vol', 'ABO Cadets', 'translated'),
('FOD Chapter', 'Quickly Wear the Face of the Devil', 'translated'),
('FOD Chap', 'Quickly Wear the Face of the Devil', 'translated'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
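# Shape of the feed item this parser expects (values purely illustrative):
#   {'title': 'FOD Chapter 123', 'tags': ['FOD'], ...}
# extractVolChapterFragmentPostfix() and buildReleaseMessageWithType() are
# helpers supplied by WebMirror's feed-parsing framework.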
|
bsd-3-clause
| -7,868,676,419,566,919,000 | 36.78125 | 104 | 0.620861 | false |
dvida/UWO-PA-Python-Course
|
Lecture 3/L3_lecture.py
|
1
|
5075
|
from __future__ import print_function
### READING FILES
file_name = 'data.txt'
# Reading in and parsing file contents
data_list = []
with open(file_name) as f:
    # Skip the header (the first line)
next(f)
for line in f:
# Remove newline char
line = line.replace('\n', '')
# Split the line into a list by a comma
line = line.split(',')
# Parse the line
num = line[0]
name = line[1].strip()
epoch = int(line[2])
elements = list(map(float, line[3:9]))
ref = line[9]
# Add the line to the data list
data_list.append([num, name, epoch, elements, ref])
print(num, name, epoch, elements, ref)
###################################
print(data_list)
# Wile E. Coyote rewrites history...
for line in data_list:
line[1] = 'Coyote'
print(data_list)
# But before we write the data back to disk...
###################################
### STRING FORMATTING
### Note for the lecture:
### C/P and explain how formatting works
# Converting floats to strings
x = 3.14159
print('{:4.2f}'.format(x))
# Signed formatting
print('{:+5.2f}'.format(x))
# Zero padding
print('{:06.2f}'.format(x))
# More decimals
print('{:7.5f}'.format(x))
# More decimal places than the number precision
y = 2.71
print('{:7.5f}'.format(y))
# Less decimal precision, but same size -> left padding
print('{:7.2f}'.format(y))
# Integers (same signed and zero padding rules)
z = 42
print('{:7d}'.format(z))
# Strings
print('{:10}'.format('wile e'))
# Align to the right
print('{:>10}'.format('wile e'))
# Named arguments
print("{a} {b} {c}".format(a=5, b=8, c=10))
###################################
### WRITING FILES
# Writing the data back to the list
new_file_name = 'true_data.txt'
# Open a file for writing (if a file with the same name exists, it will erase its content!)
with open(new_file_name, 'w') as f:
# Write the header
f.write('Num,Name,Epoch,q,e,i,w,Node,Tp,Ref\n')
for line in data_list:
# Composing a string
str_line = ['{:>3}'.format(line[0]), line[1], '{:5d}'.format(line[2])]
        # Convert all elements using the same format
for element in line[3]:
str_line.append('{:.3f}'.format(element))
# Add the reference
str_line.append(line[-1])
print(str_line)
# Convert the list to a comma delimited string
final_line = ','.join(str_line)
# Write the line
f.write(final_line+'\n')
###################################
# Appending to a file
with open(new_file_name, 'a') as f:
f.write('Wile E. was here')
###################################
### PYTHON MODULES
# Python standard library: https://docs.python.org/3/library/
import math
# Sqrt
print(math.sqrt(2))
# Sine
print(math.sin(math.pi))
# Log10
print(math.log10(100))
# Random module
import random
# Random integer in the 1 to 100 range
print(random.randint(1, 100))
# Random float in the 0 to 1 range
print(random.random())
# Shuffle a list
a = [1, 2, 3, 4, 5]
random.shuffle(a)
print(a)
# Sample 10 elements from a list
b = range(1, 100)
print(random.sample(b, 10))
# Sampling a gaussian distribution
for i in range(10):
print(random.gauss(0, 2))
###################################
### Ways of importing modules
# Module alias
import math as m
print(m.sqrt(2))
# Importing individual functions - PREFERED!
from math import sqrt
print(sqrt(2))
# Importing all functions from a module - NOT RECOMMENDED!
from math import *
print(sqrt(2))
print(pi)
###################################
# FILE HANDLING - os library
import os
# Listing the contents of the current directory
print(os.listdir('.'))
# Printing the current directory
print(os.getcwd())
# Changing the current directory one up
os.chdir('..')
print(os.getcwd())
# Directory separator
# DO NOT USE / or \
print(os.sep)
### Making a new directory
# Construct a new path to the directory
new_dir_path = os.path.join(os.getcwd(), 'test')
print(new_dir_path)
# Make new dir if the dir does not exist
if not os.path.exists(new_dir_path):
os.mkdir(new_dir_path)
else:
print('The directory already exists!')
###
# Make an example file in the new directory
file_name = 'top_secret.txt'
file_path = os.path.join(new_dir_path, file_name)
with open(file_path, 'w') as f:
pass
# Delete the file
if os.path.isfile(file_path):
os.remove(file_path)
else:
print('The file does not exist!')
###################################
# FILE HANDLING - shutil library
import shutil
# Make an example file
with open(file_path, 'w') as f:
pass
# Copying files
copy_path = 'unclassified.txt'
shutil.copy2(file_path, copy_path)
# Moving/renaming files
new_name = 'public_release.txt'
shutil.move(copy_path, new_name)
|
mit
| -3,814,201,120,210,156,500 | 17.909804 | 91 | 0.575369 | false |
terna/SLAPP3
|
6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/$$slapp$$/txtxFunctions.py
|
1
|
1888
|
import os
def executeFormula(fIn, fOu, nrow, n, s):
# v=0 #init. not required; it can interfere with the try/except structure
pos = s.find("v")
if pos == -1:
print("missing 'v' in formula, row", nrow,
"\nexecution stopped in error")
fIn.close()
fOu.close()
os.sys.exit(1)
pos = s.find("=")
if pos == -1:
print("missing '=' in formula, row", nrow,
"\nexecution stopped in error")
fIn.close()
fOu.close()
os.sys.exit(1)
try:
        while s[0] == ' ':
            s = s[1:]
pos = s.find('\n') # eliminating spaces after \n (formerly #) if any
if pos != -1:
while s[pos + 1] == ' ':
s = s[:pos + 1] + s[pos + 2:]
# print "[",n, s,"]",
d = dict([('n', n), ('v', 0)])
exec(s, d)
v = d['v']
return str(v)
except BaseException:
print("error in formula, row", nrow, "\nexecution stopped in error")
fIn.close()
fOu.close()
os.sys.exit(1)
def fill(s):
s = list(s)
if s == "":
return s
change = False
s = list(s)
for i in range(len(s)):
if s[i] == '&':
if not change:
change = True
else:
change = False
if s[i] == ' ' and change:
s[i] = '&'
return "".join(s)
def splitUnfill(s):
if s == "":
return s
# print s
s = s.split()
# print s
for i in range(len(s)):
s_tmp = list(s[i])
# print s_tmp, len(s_tmp)
for j in range(len(s_tmp)):
if s_tmp[j] == "&":
s_tmp[j] = ' '
if s_tmp[j] == "#":
s_tmp[j] = '\n' # inserting \n sign
# print s_tmp
s[i] = "".join(s_tmp)
return s
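# Illustrative round trip (editor sketch, not part of the original module):
# fill() protects the blanks inside a '&'-delimited phrase so that a later
# whitespace split keeps the phrase together; splitUnfill() then restores the
# blanks (and turns '#' back into newlines).
if __name__ == '__main__':
    row = 'set label &hello world& 3'
    protected = fill(row)
    print(protected)               # 'set label &hello&world& 3'
    print(splitUnfill(protected))  # ['set', 'label', ' hello world ', '3']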
|
cc0-1.0
| 2,214,278,890,246,831,600 | 21.211765 | 77 | 0.417373 | false |
xyos/horarios
|
horarios/migrations/0004_auto__chg_field_subject_name.py
|
1
|
2406
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Subject.name'
db.alter_column(u'horarios_subject', 'name', self.gf('django.db.models.fields.CharField')(max_length=200))
def backwards(self, orm):
# Changing field 'Subject.name'
db.alter_column(u'horarios_subject', 'name', self.gf('django.db.models.fields.CharField')(max_length=140))
models = {
u'horarios.group': {
'Meta': {'object_name': 'Group'},
'code': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'professions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['horarios.Profession']", 'symmetrical': 'False'}),
'schedule': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['horarios.Subject']"}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['horarios.Teacher']"})
},
u'horarios.profession': {
'Meta': {'object_name': 'Profession'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'horarios.subject': {
'Meta': {'object_name': 'Subject'},
'code': ('django.db.models.fields.IntegerField', [], {}),
'credits': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'stype': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'horarios.teacher': {
'Meta': {'object_name': 'Teacher'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['horarios']
|
mit
| -5,984,127,859,106,971,000 | 47.14 | 146 | 0.566916 | false |
attdona/NAIS
|
pynais/msg.py
|
1
|
2142
|
import struct
import pynais as ns
class Profile:
def __init__(self, uid=None, pwd=None):
self.uid = uid
self.pwd = pwd
def __str__(self):
return "Profile uid: [%s], pwd: [%s]" % (self.uid, self.pwd)
def set_protobuf(self, obj):
obj.uid = self.uid
obj.pwd = self.pwd
def build_from_protobuf(self, obj):
self.uid = obj.uid
self.pwd = obj.pwd
return self
class Config:
""" board configuration items and connection parameters
"""
def __init__(self, network="local", board="", host='localhost', port=1883,
alive_period=None, secure=False):
self.network = network
self.board = board
self.host = host
self.port = port
self.alive_period = alive_period
self.secure = secure
def __str__(self):
return "Config network: [%s], board: [%s], remote: [%s:%d]" % (
self.network, self.board, self.host, self.port)
def set_protobuf(self, obj):
obj.network = self.network
obj.board = self.board
obj.host = self.host
obj.port = self.port
if self.alive_period:
obj.alive_period = self.alive_period
obj.secure = self.secure
def build_from_protobuf(self, obj):
self.network = obj.network
self.host = obj.host
self.board = obj.board
self.port = obj.port
self.alive_period = obj.alive_period
self.secure = obj.secure
return self
class Ack:
""" a message acknowledgement
Args:
id (int): message request identifier (packet.id field value)
"""
def __init__(self, id=None, sts=None):
self.id = id
self.sts = sts
def __str__(self):
return "Ack ([%s] - sts:[%s])" % (ns.msg_type(self.id), self.sts)
def set_protobuf(self, obj):
obj.id = self.id
        if self.sts is not None:
obj.status = self.sts
def build_from_protobuf(self, obj):
self.id = obj.id
if (obj.HasField('status')):
self.sts = obj.status
return self
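# Illustrative round trip (editor sketch, not part of the original module).
# set_protobuf()/build_from_protobuf() simply copy attributes to and from any
# object exposing matching fields; a real generated protobuf message would be
# used in practice, and the stand-in class below only makes the data flow visible.
if __name__ == '__main__':
    class _FakeProfileMessage(object):
        uid = None
        pwd = None
    msg = _FakeProfileMessage()
    Profile(uid='device-1', pwd='secret').set_protobuf(msg)
    print(Profile().build_from_protobuf(msg))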
|
gpl-3.0
| -5,864,915,119,413,480,000 | 25.775 | 78 | 0.548086 | false |
liuslevis/handwrite_dataset_generator
|
3_gen_digit_data_label.py
|
1
|
2918
|
import os
import shutil
DEBUG = False
img_suffix = ['.jpeg','.jpg','.png','.tiff']
def gen_img_unique_file_name(count,total):
assert(count<=total)
name = ''
for i in range(len(str(total)) - len(str(count))):
name+='0'
offset = str(count)
name+=offset
return name
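# e.g. gen_img_unique_file_name(7, 120) -> '007': the index is zero-padded to
# the width of the total image count (editor note, not in the original file).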
def copy_img_file(src_path,save_data_path,filename):
if DEBUG: print src_path,filename
print src_path,filename
if not os.path.isdir(save_data_path):
os.mkdir(save_data_path)
shutil.copyfile(src_path, os.path.join(save_data_path, filename))
def count_img_under_dir(path):
count = 0
for label_paths in os.listdir(path):
label_path = os.path.join(path, label_paths)
if os.path.isdir(label_path) and label_path[-1] >= '0' and label_path[-1] <= '9' :
label = int(label_path[-1])
assert( label >= 0 and label <=9)
for digit_img in os.listdir(label_path):
count+=1
return count
def gen_label_file(dict,save_label_path):
label_list = []
for label in dict.keys():
times = dict.get(label)
print 'digit:',label,' has ',times,' imgs'
label_list+=[label for i in range(times)]
content = ''
for label in label_list:
content += str(label) + '\n'
with open(save_label_path,'w') as f:
f.write(content);
f.close()
print 'gen_label_file:',save_label_path
def main():
save_label_path = './4_dataset/testLabel.txt'
save_data_path = './4_dataset/'
rootDir ='./3_cropped'
dict = {} # store num of each digit labels
total = count_img_under_dir(rootDir)
uid = 0
suffix = ""
print 'total_img:',total
for label_paths in os.listdir(rootDir):
label_path = os.path.join(rootDir, label_paths)
if os.path.isdir(label_path) and label_path[-1] >= '0' and label_path[-1] <= '9' :
label = int(label_path[-1])
if DEBUG: print '--------------label:%d--------------'%label
assert( label >= 0 and label <=9)
for img_path in os.listdir(label_path):
if DEBUG: print img_path
if suffix not in img_suffix:
(filepath,filename)=os.path.split(img_path)
suffix = os.path.splitext(filename)[-1]
if suffix in img_suffix:
count = dict.get(label)
if None == count:
dict.update({label:1})
else:
count += 1
dict.update({label:count})
uid+=1
save_name = gen_img_unique_file_name(uid,total)
copy_img_file(os.path.join(label_path,img_path),save_data_path, save_name+suffix )
print 'database'
gen_label_file(dict,save_label_path)
if __name__ == '__main__':
main()
|
mit
| 667,820,483,972,718,700 | 29.726316 | 102 | 0.536326 | false |
nrz/ylikuutio
|
external/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_alternating_legs_env_randomizer.py
|
2
|
2810
|
"""Randomize the minitaur_gym_alternating_leg_env when reset() is called.
The randomization includes swing_offset, extension_offset of all legs that mimic
bent legs, desired_pitch from user input, battery voltage and motor damping.
"""
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
parentdir = os.path.dirname(os.path.dirname(parentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
import tf.compat.v1 as tf
from pybullet_envs.minitaur.envs import env_randomizer_base
# Absolute range.
NUM_LEGS = 4
BATTERY_VOLTAGE_RANGE = (14.8, 16.8)
MOTOR_VISCOUS_DAMPING_RANGE = (0, 0.01)
class MinitaurAlternatingLegsEnvRandomizer(env_randomizer_base.EnvRandomizerBase):
"""A randomizer that changes the minitaur_gym_alternating_leg_env."""
def __init__(self,
perturb_swing_bound=0.1,
perturb_extension_bound=0.1,
perturb_desired_pitch_bound=0.01):
super(MinitaurAlternatingLegsEnvRandomizer, self).__init__()
self.perturb_swing_bound = perturb_swing_bound
self.perturb_extension_bound = perturb_extension_bound
self.perturb_desired_pitch_bound = perturb_desired_pitch_bound
def randomize_env(self, env):
perturb_magnitude = np.random.uniform(low=-self.perturb_swing_bound,
high=self.perturb_swing_bound,
size=NUM_LEGS)
env.set_swing_offset(perturb_magnitude)
tf.logging.info("swing_offset: {}".format(perturb_magnitude))
perturb_magnitude = np.random.uniform(low=-self.perturb_extension_bound,
high=self.perturb_extension_bound,
size=NUM_LEGS)
env.set_extension_offset(perturb_magnitude)
tf.logging.info("extension_offset: {}".format(perturb_magnitude))
perturb_magnitude = np.random.uniform(low=-self.perturb_desired_pitch_bound,
high=self.perturb_desired_pitch_bound)
env.set_desired_pitch(perturb_magnitude)
tf.logging.info("desired_pitch: {}".format(perturb_magnitude))
randomized_battery_voltage = np.random.uniform(BATTERY_VOLTAGE_RANGE[0],
BATTERY_VOLTAGE_RANGE[1])
env.minitaur.SetBatteryVoltage(randomized_battery_voltage)
tf.logging.info("battery_voltage: {}".format(randomized_battery_voltage))
randomized_motor_damping = np.random.uniform(MOTOR_VISCOUS_DAMPING_RANGE[0],
MOTOR_VISCOUS_DAMPING_RANGE[1])
env.minitaur.SetMotorViscousDamping(randomized_motor_damping)
tf.logging.info("motor_damping: {}".format(randomized_motor_damping))
|
agpl-3.0
| -7,600,583,277,604,451,000 | 45.065574 | 86 | 0.666904 | false |
dreaming-dog/kaldi-long-audio-alignment
|
scripts/classes/entry_manager.py
|
1
|
2096
|
# Copyright 2017 Speech Lab, EE Dept., IITM (Author: Srinivas Venkattaramanujam)
from entry import Entry
class EntryManager:
__statuses__ = ['PENDING','DONE']
def __init__(self):
self.entries=[]
def add_entry(self,entry):
# Problem:
# add new entries to the existing list such that:
# 1) the start and end time of an entry is not the same
# 2) All the words in the range are covered
# 3) If two consecutive entries have the same status, merge
# trivial cases:
# 1) if list is empty, simply add to list
# edge cases:
# 1) While merging, if there is a status change, have to check previous entry, therefore don't do it inplace! remove the last entry, make changes and insert the entry
if(len(self.entries)==0):
self.entries.append(entry)
else:
# assert (last word+1) of previous entry and the first word of current entry match
try:
assert (self.entries[-1].word_end+1)==entry.word_begin
except AssertionError:
print "Words are not continous in ",self.entries[-1]," and ", entry
exit(1)
# check if to be merged. if not, just insert.
if(entry.begin_time!=entry.end_time and self.entries[-1].status!=entry.status and (entry.end_time-entry.begin_time)>=0.1):
self.entries.append(entry)
else:
# merge case
prev_entry=self.entries[-1]
self.entries=self.entries[:-1]
entry=self.__merge__(prev_entry, entry)
return self.add_entry(entry)
def __min_status__(self, status1, status2):
# _list=[EntryManager.__statuses__.index(status1), EntryManager.__statuses__.index(status2)]
# print 'status 1,2', status1, status2
_list=[EntryManager.__statuses__.index(status1), EntryManager.__statuses__.index(status2)]
return EntryManager.__statuses__[min(_list)]
def __merge__(self,prev_entry, entry):
# print 'merge called'
return Entry(prev_entry.begin_time, entry.end_time, self.__min_status__(prev_entry.status, entry.status), prev_entry.word_begin, entry.word_end)
def print_entries(self):
#print the entries
for e in self.entries:
print e.begin_time, e.end_time, e.status, e.word_begin, e.word_end
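# Illustrative usage (editor sketch, not part of the original module).  The
# Entry argument order below is an assumption taken from the __merge__ call
# above: Entry(begin_time, end_time, status, word_begin, word_end).
if __name__ == '__main__':
    manager = EntryManager()
    manager.add_entry(Entry(0.0, 1.5, 'DONE', 0, 4))
    manager.add_entry(Entry(1.5, 2.1, 'PENDING', 5, 7))  # words stay contiguous
    manager.print_entries()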
|
apache-2.0
| -5,431,410,990,582,398,000 | 42.666667 | 169 | 0.694179 | false |
hirochachacha/apython
|
bpython/completion/completers/import_completer.py
|
1
|
9006
|
# The MIT License
#
# Copyright (c) 2009-2011 Andreas Stuehrk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import with_statement
import imp
import itertools
import os
import sys
import warnings
try:
from warnings import catch_warnings
except ImportError:
import contextlib
@contextlib.contextmanager
def catch_warnings():
"""Stripped-down version of `warnings.catch_warnings()`
(available in Py >= 2.6)."""
filters = warnings.filters
warnings.filters = list(filters)
try:
yield
finally:
warnings.filters = filters
from bpython._py3compat import PY3
from six import next
# The cached list of all known modules
modules = dict()
sorted_modules = []
fully_loaded = False
def get_object(cw, line):
if not cw:
cw = ""
tokens = line.split()
completing_from = False
if len(tokens) == 1:
return
if tokens[0] == 'from':
if len(tokens) > 3:
if '.' in cw:
# This will result in a SyntaxError, so do not return
# any matches
return None
completing_from = True
cw = '%s.%s' % (tokens[1], cw)
elif len(tokens) == 3:
if 'import '.startswith(cw):
return None
else:
# Will result in a SyntaxError
return None
match_objects = list()
for name in sorted_modules:
if not (name == cw and name.find('.', len(cw)) == -1):
continue
try:
obj = sys.modules[name]
except:
if modules[name].endswith('.pyc'):
f = modules[name][:-1]
if os.path.isfile(f):
obj = f
else:
obj = None
else:
obj = None
if completing_from:
name = name[len(tokens[1]) + 1:]
try:
obj = getattr(obj, name)
except:
obj = None
match_objects.append(obj)
if completing_from and tokens[1] in sys.modules:
# from x import y -> search for attributes starting with y if
# x is in sys.modules
_, _, cw = cw.rpartition('.')
module = sys.modules[tokens[1]]
names = [name for name in dir(module) if name == cw]
objects = [getattr(module, name) for name in names]
match_objects.extend(objects)
elif len(tokens) == 2:
# from x.y or import x.y -> search for attributes starting
# with y if x is in sys.modules and the attribute is also in
# sys.modules
module_name, _, cw = cw.rpartition('.')
if module_name in sys.modules:
module = sys.modules[module_name]
for name in dir(module):
if name != cw:
continue
submodule_name = '%s.%s' % (module_name, name)
if submodule_name in sys.modules:
match_objects.append(sys.modules[submodule_name])
if not match_objects:
return None
return match_objects[0]
def complete(cw, line):
"""Construct a full list of possibly completions for imports."""
if not cw:
return None
tokens = line.split()
completing_from = False
if tokens[0] == 'from':
if len(tokens) > 3:
if '.' in cw:
# This will result in a SyntaxError, so do not return
# any matches
return None
completing_from = True
cw = '%s.%s' % (tokens[1], cw)
elif len(tokens) == 3:
if 'import '.startswith(cw):
return ['import ']
else:
# Will result in a SyntaxError
return None
matches = list()
for name in sorted_modules:
if not (name.startswith(cw) and name.find('.', len(cw)) == -1):
continue
if completing_from:
name = name[len(tokens[1]) + 1:]
matches.append(name)
if completing_from and tokens[1] in sys.modules:
# from x import y -> search for attributes starting with y if
# x is in sys.modules
_, _, cw = cw.rpartition('.')
module = sys.modules[tokens[1]]
names = [name for name in dir(module) if name.startswith(cw)]
matches.extend(names)
elif len(tokens) == 2:
# from x.y or import x.y -> search for attributes starting
# with y if x is in sys.modules and the attribute is also in
# sys.modules
module_name, _, cw = cw.rpartition('.')
if module_name in sys.modules:
module = sys.modules[module_name]
for name in dir(module):
if not name.startswith(cw):
continue
submodule_name = '%s.%s' % (module_name, name)
if submodule_name in sys.modules:
matches.append(submodule_name)
if not matches:
return []
return matches
def find_modules(path):
"""Find all modules (and packages) for a given directory."""
if not os.path.isdir(path):
# Perhaps a zip file
return
try:
filenames = os.listdir(path)
except EnvironmentError:
filenames = []
for name in filenames:
filename = name
if not any(name.endswith(suffix[0]) for suffix in imp.get_suffixes()):
# Possibly a package
if '.' in name:
continue
elif os.path.isdir(os.path.join(path, name)):
# Unfortunately, CPython just crashes if there is a directory
# which ends with a python extension, so work around.
continue
for suffix in imp.get_suffixes():
if name.endswith(suffix[0]):
name = name[:-len(suffix[0])]
break
if PY3 and name == "badsyntax_pep3120":
# Workaround for issue #166
continue
try:
with catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
fo, pathname, _ = imp.find_module(name, [path])
except (ImportError, IOError, SyntaxError):
continue
except UnicodeEncodeError:
# Happens with Python 3 when there is a filename in some
# invalid encoding
continue
else:
if fo is not None:
fo.close()
else:
# Yay, package
for subname, filename in find_modules(pathname):
if subname != '__init__':
yield '%s.%s' % (name, subname), os.path.join(pathname, filename)
yield name, filename
def find_all_modules(path=None):
"""Return a list with all modules in `path`, which should be a list of
directory names. If path is not given, sys.path will be used."""
global sorted_modules
i = itertools.repeat(None)
if path is None:
d = dict(zip(sys.builtin_module_names, i))
modules.update(d)
path = sys.path
for p in path:
if not p:
p = os.curdir
for module, filename in find_modules(p):
if not PY3 and not isinstance(module, unicode):
try:
module = module.decode(sys.getfilesystemencoding())
except UnicodeDecodeError:
# Not importable anyway, ignore it
continue
modules[module] = os.path.join(p, filename)
sorted_modules = sorted(modules)
yield
def find_coroutine():
global fully_loaded
if fully_loaded:
return None
try:
next(find_iterator)
except StopIteration:
fully_loaded = True
return True
def reload():
"""Refresh the list of known modules."""
modules.clear()
for _ in find_all_modules():
pass
find_iterator = find_all_modules()
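# ---------------------------------------------------------------------------
# Illustrative usage (editor sketch, not part of the original module): fill
# the module cache, then ask for completions.  The results depend entirely on
# the local installation, so the lists in the comments are only examples.
if __name__ == '__main__':
    reload()                                    # populate the module cache
    print(complete('o', 'import o'))            # e.g. ['os', 'operator', ...]
    print(complete('pa', 'from os import pa'))  # e.g. ['path', 'pardir', ...]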
|
mit
| 6,365,923,528,338,857,000 | 31.989011 | 89 | 0.563957 | false |
julzhk/codekata
|
instant_runoff_voting.py
|
1
|
1947
|
from collections import defaultdict, Counter
def runoff(voters):
"""
a function that calculates an election winner from a list of voter selections using an
Instant Runoff Voting algorithm. https://en.wikipedia.org/wiki/Instant-runoff_voting
Each voter selects several candidates in order of preference.
The votes are tallied from the each voter's first choice.
If the first-place candidate has more than half the total votes, they win.
Otherwise, find the candidate who got the least votes and remove them from each person's voting list.
In case of a tie for least, remove all of the tying candidates.
In case of a complete tie between every candidate, return None
Continue until somebody has more than half the votes; they are the winner.
The function takes a list of voter ballots; each ballot will be a list of candidates in descending order of
preference.
Returns the symbol corresponding to the winning candidate.
"""
    removed_candidates = set()
    while True:
        # Each ballot counts towards its highest-ranked remaining candidate.
        this_round_votes = []
        for voter in voters:
            for candidate in voter:
                if candidate not in removed_candidates:
                    this_round_votes.append(candidate)
                    break
        if not this_round_votes:
            # Every candidate has been eliminated: complete tie.
            return None
        tally = Counter(this_round_votes)
        leader, leader_votes = tally.most_common(1)[0]
        if leader_votes > len(this_round_votes) / 2.0:
            return leader
        lowest_vote = min(tally.values())
        knockout_candidates = [candidate for candidate in tally if tally[candidate] == lowest_vote]
        if len(knockout_candidates) == len(tally):
            # Complete tie between every remaining candidate.
            return None
        removed_candidates |= set(knockout_candidates)
voters = [
['c', 'a', 'b', 'd', 'e'],
['b', 'e', 'd', 'c', 'a'],
['b', 'e', 'c', 'a', 'd'],
['d', 'b', 'c', 'a', 'e'],
['c', 'b', 'd', 'a', 'e']
]
assert(runoff(voters) == "b")
|
mit
| -6,354,312,978,943,293,000 | 42.266667 | 113 | 0.659476 | false |
openstack/tempest
|
tempest/api/compute/certificates/test_certificates.py
|
1
|
1501
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest.lib import decorators
CONF = config.CONF
class CertificatesV2TestJSON(base.BaseV2ComputeTest):
"""Test Certificates API"""
@classmethod
def skip_checks(cls):
super(CertificatesV2TestJSON, cls).skip_checks()
if not CONF.compute_feature_enabled.nova_cert:
raise cls.skipException("Nova cert is not available")
@decorators.idempotent_id('c070a441-b08e-447e-a733-905909535b1b')
def test_create_root_certificate(self):
"""Test creating root certificate"""
self.certificates_client.create_certificate()
@decorators.idempotent_id('3ac273d0-92d2-4632-bdfc-afbc21d4606c')
def test_get_root_certificate(self):
"""Test getting root certificate details"""
self.certificates_client.show_certificate('root')
|
apache-2.0
| -5,259,292,156,591,591,000 | 36.525 | 78 | 0.724184 | false |
factorlibre/carrier-delivery
|
delivery_carrier_ups/model/ups_config.py
|
1
|
2330
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015 FactorLibre (http://www.factorlibre.com)
# Hugo Santos <hugo.santos@factorlibre.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
UPS_LABEL_FORMAT = [
('EPL', 'EPL'),
('ZPL', 'ZPL'),
('GIF', 'GIF'),
('STARPL', 'STARPL'),
('SPL', 'SPL')
]
class UPSConfig(models.Model):
_name = 'ups.config'
@api.model
def _ups_weight_uom(self):
return [
('KGS', 'KGS'),
('LBS', 'LBS')
]
@api.model
def _ups_dimension_uom(self):
return [
('CM', 'CM'),
('IN', 'IN')
]
@api.model
def _ups_label_file_format(self):
return UPS_LABEL_FORMAT
name = fields.Char('UPS Config Name', required=True)
is_test = fields.Boolean('Is a test?')
username = fields.Char('UPS Username', required=True)
password = fields.Char('UPS Password', required=True)
access_license = fields.Char('UPS Access license', required=True)
shipper_number = fields.Char('UPS Shipper number', required=True)
weight_uom = fields.Selection('_ups_weight_uom', required=True,
default="KGS")
dimension_uom = fields.Selection('_ups_dimension_uom', required=True,
default='CM')
label_file_format = fields.Selection('_ups_label_file_format',
required=True, default='EPL')
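# Example (editor sketch, not part of the original module): a configuration
# record could be created from any model with ORM access, e.g.
#   self.env['ups.config'].create({
#       'name': 'UPS main account',
#       'username': 'user', 'password': 'secret',
#       'access_license': 'XXXX', 'shipper_number': '123456',
#   })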
|
agpl-3.0
| 545,612,102,985,848,500 | 34.30303 | 78 | 0.566953 | false |
forio/julia-studio
|
tests/system/suite_SCOM/tst_SCOM04/test.py
|
1
|
1991
|
source("../../shared/qtcreator.py")
source("../../shared/suites_qtta.py")
# entry of test
def main():
# expected error texts - for different compilers
expectedErrorAlternatives = ["'SyntaxError' was not declared in this scope",
"'SyntaxError' : undeclared identifier"]
startApplication("qtcreator" + SettingsPath)
# create qt quick application
createNewQtQuickApplication(tempDir(), "SampleApp")
# create syntax error in cpp file
doubleClickItem(":Qt Creator_Utils::NavigationTreeView", "SampleApp.Sources.main\\.cpp", 5, 5, 0, Qt.LeftButton)
if not appendToLine(waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget"), "viewer.showExpanded();", "SyntaxError"):
invokeMenuItem("File", "Exit")
return
# save all
invokeMenuItem("File", "Save All")
# build it - on all (except Qt 4.7.0 (would fail)) build configurations
availableConfigs = iterateBuildConfigs(1, 0)
if not availableConfigs:
test.fatal("Haven't found a suitable Qt version (anything except Qt 4.7.0) - leaving without building.")
for config in availableConfigs:
selectBuildConfig(1, 0, config)
# try to compile
test.log("Testing build configuration: " + config)
clickButton(waitForObject(":*Qt Creator.Build Project_Core::Internal::FancyToolButton"))
# wait until build finished
waitForSignal("{type='ProjectExplorer::BuildManager' unnamed='1'}", "buildQueueFinished(bool)")
# open issues list view
ensureChecked(waitForObject(":Qt Creator_Issues_Core::Internal::OutputPaneToggleButton"))
issuesView = waitForObject(":Qt Creator.Issues_QListView")
# verify that error is properly reported
test.verify(checkSyntaxError(issuesView, expectedErrorAlternatives, False),
"Verifying cpp syntax error while building simple qt quick application.")
# exit qt creator
invokeMenuItem("File", "Exit")
|
gpl-3.0
| -5,123,714,353,026,638,000 | 52.810811 | 132 | 0.684581 | false |
wenhuchen/ETHZ-Bootstrapped-Captioning
|
visual-concepts/coco/PythonAPI/pycocotools/coco.py
|
1
|
16953
|
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary.
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each function can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
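# Typical usage (editor sketch; the annotation path below is a placeholder):
#   coco = COCO('annotations/instances_val2014.json')
#   catIds = coco.getCatIds(catNms=['person'])
#   imgIds = coco.getImgIds(catIds=catIds)
#   anns = coco.loadAnns(coco.getAnnIds(imgIds=imgIds[0], catIds=catIds))
#   coco.showAnns(anns)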
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import urllib
import copy
import itertools
import mask
import os
from collections import defaultdict
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print 'loading annotations into memory...'
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, "annotation file format %s not supported"%(type(dataset))
print 'Done (t=%0.2fs)'%(time.time()- tic)
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
anns,cats,imgs = dict(),dict(),dict()
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception("datasetType not supported")
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print ann['caption']
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print 'Loading and preparing results... '
tic = time.time()
if type(resFile) == str or type(resFile) == unicode:
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
assert type(anns) == list, 'results in not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = mask.area([ann['segmentation']])[0]
if not 'bbox' in ann:
ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print 'DONE (t=%0.2fs)'%(time.time()- tic)
res.dataset['annotations'] = anns
res.createIndex()
return res
def download( self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print 'Please specify target directory'
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urllib.urlretrieve(img['coco_url'], fname)
print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic)
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print("Converting ndarray to lists...")
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print("%d/%d" % (i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
|
bsd-3-clause
| -9,197,075,660,975,878,000 | 42.358056 | 128 | 0.54775 | false |