| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| string (lengths 6-947k) | string (lengths 5-100) | string (lengths 4-231) | string (1 class) | string (15 classes) | int64 (6-947k) | float64 (0-0.34) |
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@contact: sinotradition@gmail.com
@copyright: License according to the project license.
'''
NAME='guimao33'
SPELL='guǐmǎo'
CN='癸卯'
SEQ='40'
if __name__ == '__main__':
pass
| sinotradition/sinoera | sinoera/ganzhi/guimao33.py | Python | apache-2.0 | 231 | 0.031111 |
from .Commerce import Commerce
from .Transaction import Transaction
| lexotero/python-redsys | redsys/__init__.py | Python | mit | 68 | 0 |
import wpilib
import math
class SharpIR2Y0A02:
'''
Sharp IR sensor GP2Y0A02YK0F
Long distance sensor: 20cm to 150cm
Output is in centimeters
    Distance (cm) is approximated by 62.28 * v**-1.092, where v is the measured voltage
'''
def __init__(self,num):
self.distance = wpilib.AnalogInput(num)
def getDistance(self):
'''Returns distance in centimeters'''
# Don't allow zero/negative values
v = max(self.distance.getVoltage(), 0.00001)
d = 62.28*math.pow(v, -1.092)
# Constrain output
return max(min(d, 145.0), 22.5)
def getVoltage(self):
return self.distance.getVoltage()
class SharpIRGP2Y0A41SK0F:
'''
Sharp IR sensor GP2Y0A41SK0F
Short distance sensor: 4cm to 40cm
    Output is in centimeters
'''
#short Distance
def __init__(self,num):
self.distance = wpilib.AnalogInput(num)
def getDistance(self):
'''Returns distance in centimeters'''
# Don't allow zero/negative values
v = max(self.distance.getVoltage(), 0.00001)
d = 12.84*math.pow(v, -0.9824)
# Constrain output
return max(min(d, 25), 4.0)
def getVoltage(self):
return self.distance.getVoltage()
class CombinedSensor:
def __init__(self, longDist, longOff, shortDist, shortOff):
self.longDistance = longDist
self.shortDistance = shortDist
self.longOff = longOff
self.shortOff = shortOff
def getDistance(self):
long = self.longDistance.getDistance()
short = self.shortDistance.getDistance()
#if short < 25:
# return short - self.shortOff
#else:
return max(long - self.longOff, 0)
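# Editor's note: a minimal, robot-free sketch of the power-law conversion the
# docstrings above describe (d = 62.28 * v**-1.092 for the long sensor),
# usable off-robot for sanity checks. The sample voltages are illustrative,
# not measured values.
def _approx_distance_cm(voltage, coeff=62.28, exponent=-1.092):
    """Convert a Sharp IR output voltage to centimeters (long sensor curve)."""
    voltage = max(voltage, 0.00001)  # same zero/negative guard as above
    return coeff * math.pow(voltage, exponent)

if __name__ == '__main__':
    for v in (0.5, 1.0, 2.0):  # hypothetical readings in volts
        print('%.2f V -> %.1f cm' % (v, _approx_distance_cm(v)))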
| frc1418/2015-robot | robot/common/distance_sensors.py | Python | apache-2.0 | 1,861 | 0.014508 |
"""Detect zmq version"""
#
# Copyright (C) PyZMQ Developers
#
# This file is part of pyzmq, copied and adapted from h5py.
# h5py source used under the New BSD license
#
# h5py: <http://code.google.com/p/h5py/>
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#
#
# Adapted for use in pycapnp from pyzmq. See https://github.com/zeromq/pyzmq
# for original project.
import shutil
import sys
import os
import logging
import platform
from distutils import ccompiler
from distutils.ccompiler import get_default_compiler
import tempfile
from .misc import get_compiler, get_output_error
from .patch import patch_lib_paths
pjoin = os.path.join
#
# Utility functions (adapted from h5py: http://h5py.googlecode.com)
#
def test_compilation(cfile, compiler=None, **compiler_attrs):
"""Test simple compilation with given settings"""
cc = get_compiler(compiler, **compiler_attrs)
efile, _ = os.path.splitext(cfile)
cpreargs = lpreargs = []
if sys.platform == 'darwin':
# use appropriate arch for compiler
if platform.architecture()[0] == '32bit':
if platform.processor() == 'powerpc':
cpu = 'ppc'
else:
cpu = 'i386'
cpreargs = ['-arch', cpu]
lpreargs = ['-arch', cpu, '-undefined', 'dynamic_lookup']
else:
# allow for missing UB arch, since it will still work:
lpreargs = ['-undefined', 'dynamic_lookup']
if sys.platform == 'sunos5':
if platform.architecture()[0] == '32bit':
lpreargs = ['-m32']
else:
lpreargs = ['-m64']
extra_compile_args = compiler_attrs.get('extra_compile_args', [])
if os.name != 'nt':
extra_compile_args += ['--std=c++14']
extra_link_args = compiler_attrs.get('extra_link_args', [])
if cc.compiler_type == 'msvc':
extra_link_args += ['/MANIFEST']
objs = cc.compile([cfile], extra_preargs=cpreargs, extra_postargs=extra_compile_args)
cc.link_executable(objs, efile, extra_preargs=lpreargs, extra_postargs=extra_link_args)
return efile
def detect_version(basedir, compiler=None, **compiler_attrs):
"""Compile, link & execute a test program, in empty directory `basedir`.
The C compiler will be updated with any keywords given via setattr.
Parameters
----------
basedir : path
The location where the test program will be compiled and run
compiler : str
The distutils compiler key (e.g. 'unix', 'msvc', or 'mingw32')
**compiler_attrs : dict
Any extra compiler attributes, which will be set via ``setattr(cc)``.
Returns
-------
A dict of properties for zmq compilation, with the following two keys:
vers : tuple
The ZMQ version as a tuple of ints, e.g. (2,2,0)
settings : dict
The compiler options used to compile the test function, e.g. `include_dirs`,
`library_dirs`, `libs`, etc.
"""
if compiler is None:
compiler = get_default_compiler()
cfile = pjoin(basedir, 'vers.cpp')
shutil.copy(pjoin(os.path.dirname(__file__), 'vers.cpp'), cfile)
# check if we need to link against Realtime Extensions library
if sys.platform.startswith('linux'):
cc = ccompiler.new_compiler(compiler=compiler)
cc.output_dir = basedir
if not cc.has_function('timer_create'):
if 'libraries' not in compiler_attrs:
compiler_attrs['libraries'] = []
compiler_attrs['libraries'].append('rt')
cc = get_compiler(compiler=compiler, **compiler_attrs)
efile = test_compilation(cfile, compiler=cc)
patch_lib_paths(efile, cc.library_dirs)
rc, so, se = get_output_error([efile])
if rc:
msg = "Error running version detection script:\n%s\n%s" % (so, se)
logging.error(msg)
raise IOError(msg)
handlers = {'vers': lambda val: tuple(int(v) for v in val.split('.'))}
props = {}
for line in (x for x in so.split('\n') if x):
key, val = line.split(':')
props[key] = handlers[key](val)
return props
def test_build(**compiler_attrs):
"""do a test build of libcapnp"""
tmp_dir = tempfile.mkdtemp()
# line()
# info("Configure: Autodetecting Cap'n Proto settings...")
# info(" Custom Cap'n Proto dir: %s" % prefix)
try:
detected = detect_version(tmp_dir, None, **compiler_attrs)
finally:
erase_dir(tmp_dir)
# info(" Cap'n Proto version detected: %s" % v_str(detected['vers']))
return detected
def erase_dir(path):
"""Erase directory"""
try:
shutil.rmtree(path)
except Exception:
pass
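# Editor's note: a minimal sketch of how this module might be driven from a
# setup script, assuming it is importable as `buildutils.detect` (the path in
# this repo); the extra include/library dirs are purely illustrative.
#
#     from buildutils import detect
#     props = detect.test_build(
#         include_dirs=['/usr/local/include'],   # hypothetical
#         library_dirs=['/usr/local/lib'],       # hypothetical
#     )
#     print("Cap'n Proto version:", '.'.join(map(str, props['vers'])))
#
# `props['vers']` is the version tuple documented in detect_version above.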
| SymbiFlow/pycapnp | buildutils/detect.py | Python | bsd-2-clause | 4,766 | 0.000629 |
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import os
import json
import pickle
import argparse
from PIL import Image
import numpy as np
from utils import Vocabulary
class CocoDataset(data.Dataset):
    def __init__(self, root, anns, vocab, mode='train', transform=None):
self.root = root
self.anns = json.load(open(anns))
self.vocab = pickle.load(open(vocab, 'rb'))
self.transform = transform
self.data = [ann for ann in self.anns if ann['split'] == mode]
def __getitem__(self, index):
data = self.data
vocab = self.vocab
# load image
path = os.path.join(self.root, data[index]['file_path'])
img = Image.open(path).convert('RGB')
if self.transform is not None:
img = self.transform(img)
# load caption
cap = data[index]['final_caption']
caption = []
caption.append(vocab('<start>'))
caption.extend([vocab(word) for word in cap])
caption.append(vocab('<end>'))
target = torch.IntTensor(caption)
return img, target, data[index]['imgid']
def __len__(self):
return len(self.data)
def collate_fn(data):
# sort the data in descending order
data.sort(key=lambda x: len(x[1]), reverse=True)
images, captions, imgids = zip(*data)
# merge images (from tuple of 3D tensor to 4D tensor).
images = torch.stack(images, 0)
# merge captions (from tuple of 1D tensor to 2D tensor).
lengths = [len(cap) for cap in captions]
targets = torch.zeros(len(captions), max(lengths)).long()
for i, cap in enumerate(captions):
end = lengths[i]
targets[i, :end] = cap[:end]
return images, targets, lengths, imgids
def get_loader(opt, mode='train', shuffle=True, num_workers=1, transform=None):
coco = CocoDataset(root=opt.root_dir,
anns=opt.data_json,
vocab=opt.vocab_path,
mode=mode,
transform=transform)
data_loader = torch.utils.data.DataLoader(dataset=coco,
batch_size=opt.batch_size,
shuffle=shuffle,
num_workers=num_workers,
collate_fn=collate_fn)
return data_loader
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', type=str, default='/home/myunggi/Research/show-and-tell', help="root directory of the project")
parser.add_argument('--data_json', type=str, default='data/data.json', help='input data list which includes captions and image information')
parser.add_argument('--vocab_path', type=str, default='data/vocab.pkl', help='vocabulary wrapper')
parser.add_argument('--crop_size', type=int, default=224, help='image crop size')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
args = parser.parse_args()
transform = transforms.Compose([
transforms.RandomCrop(args.crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
data_loader = get_loader(args, transform=transform)
total_iter = len(data_loader)
    for i, (img, target, lengths, imgids) in enumerate(data_loader):
print('done')
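# Editor's note: a minimal sketch of what collate_fn does to a batch, using
# dummy tensors (values illustrative). Captions of unequal length are padded
# with zeros into one LongTensor, longest caption first.
#
#     fake_img = torch.zeros(3, 224, 224)
#     batch = [(fake_img, torch.IntTensor([1, 4, 2]), 'a'),
#              (fake_img, torch.IntTensor([1, 4, 2, 9, 2]), 'b')]
#     images, targets, lengths, imgids = collate_fn(batch)
#     # images: (2, 3, 224, 224); targets: (2, 5) with row 'a' zero-padded;
#     # lengths == [5, 3]; imgids == ('b', 'a') after the descending sort.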
| incredible-vision/show-and-tell | data_loader.py | Python | mit | 3,497 | 0.003432 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
'accountlinker.thirdpartyaccount': {
'Meta': {'unique_together': "(('type', 'username'),)", 'object_name': 'ThirdPartyAccount'},
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oauth_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'oauth_refresh_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'auth.announcement': {
'Meta': {'ordering': "['-created']", 'object_name': 'Announcement'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'auth.awards': {
'Meta': {'object_name': 'Awards'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'})
},
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'can_send_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'notify_by_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_by_message': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Partner']", 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.emailconfirmation': {
'Meta': {'object_name': 'EmailConfirmation'},
'confirmation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sent': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.logintoken': {
'Meta': {'object_name': 'LoginToken'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'login_token'", 'unique': 'True', 'to': "orm['auth.CustomUser']"})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'auth.userlanguage': {
'Meta': {'unique_together': "(['user', 'language'],)", 'object_name': 'UserLanguage'},
'follow_requests': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'proficiency': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user', 'status'),)", 'object_name': 'Application'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.partner': {
'Meta': {'object_name': 'Partner'},
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'managed_partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.CustomUser']"}),
'can_request_paid_captions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'teams.team': {
'Meta': {'ordering': "['name']", 'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auth_provider_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'max_tasks_per_member': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'notify_interval': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '1'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'teams'", 'null': 'True', 'to': "orm['teams.Partner']"}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'subtitle_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_assign_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_expiration': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'translate_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_members'", 'to': "orm['auth.CustomUser']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'null': 'True', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videos.Video']", 'unique': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'meta_1_content': ('videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_1_type': ('videos.metadata.MetadataTypeField', [], {'null': 'True', 'blank': 'True'}),
'meta_2_content': ('videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_2_type': ('videos.metadata.MetadataTypeField', [], {'null': 'True', 'blank': 'True'}),
'meta_3_content': ('videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_3_type': ('videos.metadata.MetadataTypeField', [], {'null': 'True', 'blank': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
'primary_audio_language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
}
}
complete_apps = ['auth']
| norayr/unisubs | apps/auth/migrations/0032_remove_thumb_options.py | Python | agpl-3.0 | 21,604 | 0.008193 |
# -*- encoding: utf-8 -*-
import mock
import os
from shutil import rmtree
from tempfile import mkdtemp
from django.test import TestCase
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test.utils import override_settings
from django.template.base import TemplateDoesNotExist
from paperclip.models import Attachment
from geotrek.common.models import Organism, FileType
from geotrek.common.parsers import ExcelParser, AttachmentParserMixin
class OrganismParser(ExcelParser):
model = Organism
fields = {'organism': 'nOm'}
class OrganismEidParser(ExcelParser):
model = Organism
fields = {'organism': 'nOm'}
eid = 'organism'
class AttachmentParser(AttachmentParserMixin, OrganismEidParser):
non_fields = {'attachments': 'photo'}
class ParserTests(TestCase):
def test_bad_parser_class(self):
with self.assertRaises(CommandError) as cm:
call_command('import', 'geotrek.common.DoesNotExist', '', verbosity=0)
self.assertEqual(unicode(cm.exception), u"Failed to import parser class 'geotrek.common.DoesNotExist'")
def test_bad_filename(self):
with self.assertRaises(CommandError) as cm:
call_command('import', 'geotrek.common.tests.test_parsers.OrganismParser', 'find_me/I_am_not_there.shp', verbosity=0)
self.assertEqual(unicode(cm.exception), u"File does not exists at: find_me/I_am_not_there.shp")
def test_create(self):
filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
call_command('import', 'geotrek.common.tests.test_parsers.OrganismParser', filename, verbosity=0)
self.assertEqual(Organism.objects.count(), 1)
organism = Organism.objects.get()
self.assertEqual(organism.organism, u"Comité Théodule")
def test_duplicate_without_eid(self):
filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
call_command('import', 'geotrek.common.tests.test_parsers.OrganismParser', filename, verbosity=0)
call_command('import', 'geotrek.common.tests.test_parsers.OrganismParser', filename, verbosity=0)
self.assertEqual(Organism.objects.count(), 2)
def test_unmodified_with_eid(self):
filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
call_command('import', 'geotrek.common.tests.test_parsers.OrganismEidParser', filename, verbosity=0)
call_command('import', 'geotrek.common.tests.test_parsers.OrganismEidParser', filename, verbosity=0)
self.assertEqual(Organism.objects.count(), 1)
def test_updated_with_eid(self):
filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
filename2 = os.path.join(os.path.dirname(__file__), 'data', 'organism2.xls')
call_command('import', 'geotrek.common.tests.test_parsers.OrganismEidParser', filename, verbosity=0)
call_command('import', 'geotrek.common.tests.test_parsers.OrganismEidParser', filename2, verbosity=0)
self.assertEqual(Organism.objects.count(), 2)
organisms = Organism.objects.order_by('pk')
self.assertEqual(organisms[0].organism, u"Comité Théodule")
self.assertEqual(organisms[1].organism, u"Comité Hippolyte")
def test_report_format_text(self):
parser = OrganismParser()
self.assertRegexpMatches(parser.report(), '0/0 lines imported.')
self.assertNotRegexpMatches(parser.report(), '<div id=\"collapse-\$celery_id\" class=\"collapse\">')
def test_report_format_html(self):
parser = OrganismParser()
self.assertRegexpMatches(parser.report(output_format='html'), '<div id=\"collapse-\$celery_id\" class=\"collapse\">')
def test_report_format_bad(self):
parser = OrganismParser()
with self.assertRaises(TemplateDoesNotExist):
parser.report(output_format='toto')
@override_settings(MEDIA_ROOT=mkdtemp('geotrek_test'))
class AttachmentParserTests(TestCase):
def setUp(self):
self.filetype = FileType.objects.create(type=u"Photographie")
def tearDown(self):
rmtree(settings.MEDIA_ROOT)
@mock.patch('requests.get')
def test_attachment(self, mocked):
mocked.return_value.status_code = 200
mocked.return_value.content = ''
filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
call_command('import', 'geotrek.common.tests.test_parsers.AttachmentParser', filename, verbosity=0)
organism = Organism.objects.get()
attachment = Attachment.objects.get()
self.assertEqual(attachment.content_object, organism)
self.assertEqual(attachment.attachment_file.name, 'paperclip/common_organism/{pk}/titi.png'.format(pk=organism.pk))
self.assertEqual(attachment.filetype, self.filetype)
@mock.patch('requests.get')
def test_attachment_not_updated(self, mocked):
mocked.return_value.status_code = 200
mocked.return_value.content = ''
filename = os.path.join(os.path.dirname(__file__), 'data', 'organism.xls')
call_command('import', 'geotrek.common.tests.test_parsers.AttachmentParser', filename, verbosity=0)
call_command('import', 'geotrek.common.tests.test_parsers.AttachmentParser', filename, verbosity=0)
self.assertEqual(mocked.call_count, 1)
self.assertEqual(Attachment.objects.count(), 1)
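# Editor's note: the tests above drive the importer through call_command; a
# hypothetical equivalent from a shell (standard Django management syntax,
# paths illustrative) would be:
#
#     python manage.py import geotrek.common.tests.test_parsers.OrganismEidParser \
#         geotrek/common/tests/data/organism.xls --verbosity 0
#
# The `eid` attribute on OrganismEidParser is what lets a second import update
# rows in place instead of duplicating them, as the eid tests above verify.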
| johan--/Geotrek | geotrek/common/tests/test_parsers.py | Python | bsd-2-clause | 5,465 | 0.004762 |
"""Main command module that starts the different threads."""
from ws4py.server.cherrypyserver import WebSocketPlugin
from ws4py.server.cherrypyserver import WebSocketTool
import argparse
import cherrypy
import datetime
import logging
import os
import serial
import signal
import subprocess
import sys
import threading
import time
from control.command import Command
from control.driver import Driver, STEERING_GPIO_PIN, STEERING_NEUTRAL_US, THROTTLE_GPIO_PIN, THROTTLE_NEUTRAL_US
from control.simple_waypoint_generator import SimpleWaypointGenerator
from control.chase_waypoint_generator import ChaseWaypointGenerator
from control.extension_waypoint_generator import ExtensionWaypointGenerator
from control.sup800f import switch_to_nmea_mode
from control.sup800f_telemetry import Sup800fTelemetry
from control.telemetry import Telemetry
from control.telemetry_dumper import TelemetryDumper
from control.web_telemetry.status_app import StatusApp as WebTelemetryStatusApp
from messaging import config
from messaging.async_logger import AsyncLogger, AsyncLoggerReceiver
from messaging.message_consumer import consume_messages
from messaging.message_producer import MessageProducer
from monitor.status_app import StatusApp as MonitorApp
from monitor.web_socket_logging_handler import WebSocketLoggingHandler
# pylint: disable=global-statement
# pylint: disable=broad-except
def override_imports_for_non_rpi():
"""Overrides modules that only work on the Raspberry Pi. Importing RPIO
(used in button) on a non Raspberry Pi raises a SystemError, so for testing
on other systems, just ignore it.
"""
class Dummy(object): # pylint: disable=missing-docstring,too-few-public-methods
def __getattr__(self, attr):
return lambda *arg, **kwarg: time.sleep(0.01)
# pylint: disable=invalid-name
global Button
Button = lambda *arg: Dummy()
serial.Serial = lambda *arg: Dummy()
global Driver
Driver = lambda *arg: Dummy()
global Sup800fTelemetry
Sup800fTelemetry = lambda *arg: Dummy()
global switch_to_nmea_mode
switch_to_nmea_mode = lambda *arg: Dummy()
# Ignore messages
drop = lambda message: None
drop2 = lambda: consume_messages(config.COMMAND_FORWARDED_EXCHANGE, drop)
thread = threading.Thread(target=drop2)
thread.name = config.COMMAND_FORWARDED_EXCHANGE
thread.start()
try:
from control.button import Button
except SystemError:
print('Disabling button because not running on Raspberry Pi')
override_imports_for_non_rpi()
THREADS = []
POPEN = None
DRIVER = None
EMIT_INITIALIZED = False
class CherryPyServer(threading.Thread):
"""Runs the various web apps in a thread."""
def __init__(self, port, address, telemetry, waypoint_generator):
super(CherryPyServer, self).__init__()
self.name = self.__class__.__name__
# Web monitor
        monitor_config = MonitorApp.get_config(os.path.abspath(os.getcwd()))
        status_app = cherrypy.tree.mount(
            MonitorApp(telemetry, waypoint_generator, port),
            '/',
            monitor_config
        )
cherrypy.config.update({
'server.socket_host': address,
'server.socket_port': port,
'server.ssl_module': 'builtin',
'server.ssl_certificate': 'control/web_telemetry/cert.pem',
'server.ssl_private_key': 'control/web_telemetry/key.pem',
'engine.autoreload.on': False,
})
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
# Web telemetry
        web_telemetry_config = WebTelemetryStatusApp.get_config(os.path.abspath(os.getcwd()))
        web_telemetry_app = cherrypy.tree.mount(
            WebTelemetryStatusApp(telemetry, port),
            '/telemetry',
            web_telemetry_config
        )
# OMG, shut up CherryPy, nobody cares about your problems
for app in (status_app, web_telemetry_app, cherrypy):
app.log.access_log.setLevel(logging.ERROR)
app.log.error_log.setLevel(logging.ERROR)
def run(self):
"""Runs the thread and server in a thread."""
cherrypy.engine.start()
@staticmethod
def kill():
"""Stops the thread and server."""
cherrypy.engine.exit()
def terminate(signal_number, stack_frame): # pylint: disable=unused-argument
"""Terminates the program. Used when a signal is received."""
print(
'Received signal {signal_number}, quitting'.format(
signal_number=signal_number
)
)
if POPEN is not None and POPEN.poll() is None:
print('Killing image capture')
try:
POPEN.kill()
except OSError:
pass
DRIVER.drive(0.0, 0.0)
time.sleep(0.2)
try:
with open('/dev/pi-blaster', 'w') as blaster:
time.sleep(0.1)
blaster.write(
'{pin}={throttle}\n'.format(
pin=THROTTLE_GPIO_PIN,
throttle=THROTTLE_NEUTRAL_US
)
)
time.sleep(0.1)
blaster.write(
'{pin}={steering}\n'.format(
pin=STEERING_GPIO_PIN,
steering=STEERING_NEUTRAL_US
)
)
time.sleep(0.1)
except IOError:
pass
for socket in os.listdir(os.sep.join(('.', 'messaging', 'sockets'))):
MessageProducer(socket).kill()
time.sleep(0.1)
for thread in THREADS:
thread.kill()
thread.join()
# Some threads should still be active
expected = set(('MainThread', '_TimeoutMonitor'))
actives = set((thread.name for thread in threading.enumerate()))
if not (actives <= expected):
print('Trying to exit while {} threads are still active!'.format(
threading.active_count()
))
for thread in threading.enumerate():
print(thread.name)
sys.exit(0)
def get_configuration(value, default):
"""Returns a system configuration value."""
if value in os.environ:
return os.environ[value]
return default
def start_threads(
waypoint_generator,
logger,
web_socket_handler,
max_throttle,
kml_file_name,
):
"""Runs everything."""
logger.info('Creating Telemetry')
telemetry = Telemetry(kml_file_name)
telemetry_dumper = TelemetryDumper(
telemetry,
waypoint_generator,
web_socket_handler
)
logger.info('Done creating Telemetry')
global DRIVER
DRIVER = Driver(telemetry)
DRIVER.set_max_throttle(max_throttle)
logger.info('Setting SUP800F to NMEA mode')
serial_ = serial.Serial('/dev/ttyAMA0', 115200)
serial_.setTimeout(1.0)
for _ in range(10):
serial_.readline()
try:
switch_to_nmea_mode(serial_)
except: # pylint: disable=W0702
logger.error('Unable to set mode')
for _ in range(10):
serial_.readline()
logger.info('Done')
# The following objects must be created in order, because of message
# exchange dependencies:
# sup800f_telemetry: reads from command forwarded
# command: reads from command, writes to command forwarded
# button: writes to command
# cherry_py_server: writes to command
# TODO(2016-08-21) Have something better than sleeps to work around race
# conditions
logger.info('Creating threads')
sup800f_telemetry = Sup800fTelemetry(serial_)
time.sleep(0.5)
command = Command(telemetry, DRIVER, waypoint_generator)
time.sleep(0.5)
button = Button()
port = int(get_configuration('PORT', 8080))
address = get_configuration('ADDRESS', '0.0.0.0')
cherry_py_server = CherryPyServer(
port,
address,
telemetry,
waypoint_generator
)
time.sleep(0.5)
global THREADS
THREADS += (
button,
cherry_py_server,
command,
sup800f_telemetry,
telemetry_dumper,
)
for thread in THREADS:
thread.start()
logger.info('Started all threads')
# Use a fake timeout so that the main thread can still receive signals
sup800f_telemetry.join(100000000000)
# Once we get here, sup800f_telemetry has died and there's no point in
# continuing because we're not receiving telemetry messages any more, so
# stop the command module
command.stop()
command.join(100000000000)
cherry_py_server.kill()
cherry_py_server.join(100000000000)
button.kill()
button.join(100000000000)
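# Editor's note: the huge join() timeouts above are a known CPython idiom; a
# bare Thread.join() can block the main thread in a way that delays signal
# delivery, while join(timeout) polls and lets handlers such as terminate()
# run. A minimal standalone sketch of the same pattern (task name hypothetical):
#
#     worker = threading.Thread(target=some_long_task)
#     worker.start()
#     while worker.is_alive():
#         worker.join(1.0)  # wake up periodically so SIGINT can be handled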
def make_parser():
"""Builds and returns an argument parser."""
parser = argparse.ArgumentParser(
description='Command and control software for the Sparkfun AVC.'
)
now = datetime.datetime.now()
parser.add_argument(
'-l',
'--log',
dest='log',
help='The file to log to.',
default=(
'/data/sparkfun-{date}.log'.format(
date=datetime.datetime.strftime(
now,
'%Y-%m-%d_%H-%M-%S'
)
)
),
type=str
)
parser.add_argument(
'--video',
dest='video',
help='The video file name.',
default=(
'/data/video-{date}.h264'.format(
date=datetime.datetime.strftime(
now,
'%Y-%m-%d_%H-%M-%S'
)
)
),
type=str
)
parser.add_argument(
'-v',
'--verbose',
dest='verbose',
help='Increase output.',
action='store_true'
)
parser.add_argument(
'-k',
'--kml',
dest='kml_file',
help='The KML file from which to load waypoints.',
default=None,
type=str,
)
parser.add_argument(
'--max-throttle',
dest='max_throttle',
help='The max throttle to drive at.',
default=1.0,
type=float,
)
parser.add_argument(
'--chase',
dest='chase',
help='Use a chase waypoint generator.',
action='store_true'
)
return parser
def main():
"""Sets up logging, signal handling, etc. and starts the threads."""
signal.signal(signal.SIGINT, terminate)
parser = make_parser()
args = parser.parse_args()
#try:
# global POPEN
# POPEN = subprocess.Popen((
# 'raspivid', '-o', args.video, '-w', '1024', '-h', '576', '-b', '6000000', '-t', '300000'
# ))
#except Exception:
# logging.warning('Unable to save video')
concrete_logger = logging.Logger('sparkfun')
concrete_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s:%(levelname)s %(message)s'
)
file_handler = None
try:
if os.path.exists(args.log):
os.remove(args.log)
file_handler = logging.FileHandler(args.log)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.DEBUG)
concrete_logger.addHandler(file_handler)
try:
            last_log_path = os.path.dirname(args.log) + os.sep + 'last-log.txt'
            with open(last_log_path, 'a') as last_log:
                last_log.write(args.log + '\n')
except Exception as exc:
print('Unable to save last log information: {}'.format(exc))
except Exception as exception:
logging.warning('Could not create file log: ' + str(exception))
stdout_handler = logging.StreamHandler(sys.stdout)
if args.verbose:
stdout_handler.setLevel(logging.DEBUG)
else:
stdout_handler.setLevel(logging.INFO)
stdout_handler.setFormatter(formatter)
concrete_logger.addHandler(stdout_handler)
async_logger = AsyncLoggerReceiver(concrete_logger)
# We need to start async_logger now so that other people can log to it
async_logger.start()
time.sleep(0.1)
THREADS.append(async_logger)
web_socket_handler = WebSocketLoggingHandler()
web_socket_handler.setLevel(logging.INFO)
web_socket_handler.setFormatter(formatter)
concrete_logger.addHandler(web_socket_handler)
logger = AsyncLogger()
if sys.version_info.major < 3:
logger.warn(
'Python 2 is not officially supported, use at your own risk'
)
kml_file = args.kml_file
if kml_file is None:
logger.info(
'Setting waypoints to Solid State Depot for testing'
)
kml_file = 'solid-state-depot.kml'
if args.chase:
waypoint_generator = ChaseWaypointGenerator(
SimpleWaypointGenerator.get_waypoints_from_file_name(
kml_file
)
)
else:
waypoint_generator = ExtensionWaypointGenerator(
SimpleWaypointGenerator.get_waypoints_from_file_name(
kml_file
)
)
logger.debug('Calling start_threads')
start_threads(
waypoint_generator,
logger,
web_socket_handler,
args.max_throttle,
kml_file,
)
if __name__ == '__main__':
main()
| bskari/sparkfun-avc | main.py | Python | mit | 13,115 | 0.001067 |
"Add a movie to Plex."
import sys
from pathlib import Path
from argparse import ArgumentParser
from tkinter import filedialog, messagebox, simpledialog, Tk, Frame, Label
from tkinter.ttk import Combobox, Button
class FeaturettePicker:
FEATURETTES = {
"Behind the Scenes": "behindthescenes",
"Deleted Scenes": "deleted",
"Featurette": "featurette",
"Interview": "interview",
"Scene": "scene",
"Short": "short",
"Trailer": "trailer",
"Other": "other",
}
def __init__(self, parent, file):
self.file = Path(file)
self.label = Label(parent, text=self.file.name, justify='left')
        # can also position using "grid" instead of "pack", but not both
self.label.pack(fill="x", padx=5, pady=5)
self.parent = parent
self.parent.bind("<Return>", self.ok)
self.parent.bind("<Escape>", self.cancel)
self.box = Frame(parent)
self.ok_button = Button(self.box, text="Add", command=self.ok, default='active')
self.ok_button.pack(padx=5, pady=5, side='right')
self.cancel_button = Button(self.box, text="Cancel", command=self.cancel)
self.cancel_button.pack(padx=5, pady=5, side='right')
self.combo = Combobox(parent, values=[*self.FEATURETTES])
self.combo.pack(fill="x", padx=5, pady=5)
self.box.pack()
self.result = None
def run(self):
self.parent.mainloop()
return self.result
def cancel(self):
try:
self.parent.withdraw()
finally:
self.parent.quit()
def ok(self):
self.result = self.combo.get()
self.cancel()
def get_parser():
parser = ArgumentParser()
parser.add_argument('infile')
return parser
def main(infile):
try:
tkr = Tk()
tkr.withdraw()
src = Path(infile)
root = Path(src.anchor)
outname = simpledialog\
.askstring("Movie Name", f"Selected: {src}.\n\nName of movie (and year in parens):",
initialvalue=src.name, parent=tkr) or sys.exit()
dst = root/'media-library'/'movies'/outname
ext = simpledialog\
.askstring("Extension",
"File extension (If you want to specify a variant, do so here by prepending "
"it to the extension, e.g. ` - [OldVersion].mp4`):",
initialvalue=src.suffix, parent=tkr)
out = dst/(outname+ext)
files = []
while messagebox.askyesno("Special Features", "Add more files as special features?"):
for f in filedialog.askopenfilenames(parent=tkr, initialdir="."):
fsrc = Path(f)
ftype = FeaturettePicker(Tk(), fsrc).run()
if ftype is None:
return
name = simpledialog.askstring("Featurette Name", "Featurette Name:",
initialvalue=fsrc.name[:-len(fsrc.suffix)], parent=tkr)
if name is None:
return
fext = simpledialog.askstring("Extension",
f"File extension for {fsrc.name}:",
initialvalue=fsrc.suffix, parent=tkr)
fdst = dst/f"{name}-{FeaturettePicker.FEATURETTES[ftype]}{fext}"
files.append((fsrc, fdst))
msg = ("\n\nFeatures:\n\n"+"\n".join(f"{s} -> {d.name}" for (s, d) in files)+"\n\n") if files else ""
if not messagebox.askokcancel("Proceed?",
f"Ready to link {src.name} -> {out}. {msg}Proceed?",
parent=tkr):
return
dst.mkdir(exist_ok=True)
for s, d in [(src, out)] + files:
if d.exists():
overwrite = messagebox.askyesnocancel("Overwrite?", f"{d} exists. Overwrite?")
if overwrite is None:
return
if overwrite:
d.unlink()
else:
continue
            # Path.link_to (added in 3.8, removed in 3.12 in favor of
            # Path.hardlink_to) creates a hard link at d pointing to s.
            s.link_to(d)
messagebox.showinfo("Success", "Success.")
except Exception as e:
messagebox.showerror("Fatal Error", f"Fatal error: {e}")
raise
if __name__ == "__main__":
main(get_parser().parse_args().infile)
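# Editor's note: a hypothetical invocation, assuming a GUI-capable Python
# 3.8-3.11 interpreter (Path.link_to, used above, exists in that range):
#
#     python plexmovie.py "D:\downloads\Some Movie (2020).mkv"
#
# The remaining choices (movie name, extension, special features) are then
# collected through the tkinter dialogs defined above.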
| stefco/dotfiles | winscripts/plexmovie.py | Python | mit | 4,386 | 0.003648 |
#!/usr/bin/env python3
import rainbow
import hashlib
import string
import time
import random
"""SHA-256 hash function
Precondition: Input plaintext as string
Postcondition: Returns hash as string
"""
def sha256(plaintext):
return hashlib.sha256(bytes(plaintext, 'utf-8')).hexdigest()
"""Returns a reduction function which generates an n-digit lowercase password from a hash
"""
def reduce_lower(n):
"""Reduction function
Precondition: hash is H(previousPlaintext)
Postcondition: returns randomly distributed n-digit lowercase plaintext password
"""
def result(hash, col):
plaintextKey = (int(hash[:9], 16) ^ col) % (26 ** n)
plaintext = ""
for _ in range(n):
plaintext += string.ascii_lowercase[plaintextKey % 26]
plaintextKey //= 26
return plaintext
return result
"""Returns a function which generates a random n-digit lowercase password
"""
def gen_lower(n):
def result():
password = ""
for _ in range(n):
password += random.choice(string.ascii_lowercase)
return password
return result
"""Precondition: Input a function which generates a random password, or input no arguments to generate a random password
Postcondition: Cracks H(password) and prints elapsed time
"""
def test(table, hash_function, gen_password_function, password=""):
if password == "":
password = gen_password_function()
print("Cracking password: {0}\nH(password): {1}".format(password, hash_function(password)))
cracked = table.crack(hash_function(password))
if cracked:
print("Success! Password: {0}".format(cracked))
return True
else:
print("Unsuccessful :(")
return False
# Tests random passwords multiple times and prints success rate and average crack time.
def bulk_test(table, hash_function, gen_password_function, numTests):
start = time.time()
numSuccess = 0
for i in range(numTests):
print("\nTest {0} of {1}".format(i + 1, numTests))
numSuccess += test(table, hash_function, gen_password_function)
print("""\n{0} out of {1} random hashes were successful!\n
Average time per hash (including failures): {2} secs.""" \
.format(numSuccess, numTests, (time.time() - start) / numTests))
table = rainbow.RainbowTable(sha256, reduce_lower(4), gen_lower(4))
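# Editor's note: a minimal sketch of the hash -> reduce -> hash chains the
# reduction function above enables (chain length and start value illustrative).
def _demo_chain(start='abcd', length=5):
    reduce_fn = reduce_lower(4)
    plaintext = start
    for col in range(length):
        digest = sha256(plaintext)            # H(plaintext)
        plaintext = reduce_fn(digest, col)    # map hash back to a password
    return plaintext  # the chain's endpoint, as stored in a rainbow table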
| clu8/RainbowTable | crack.py | Python | mit | 2,202 | 0.024069 |
#!/usr/bin/env python3
# NOTE: this example requires PyAudio because it uses the Microphone class
import speech_recognition as sr
# this is called from the background thread
def callback(recognizer, audio):
# received audio data, now we'll recognize it using Google Speech Recognition
try:
# for testing purposes, we're just using the default API key
# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
# instead of `r.recognize_google(audio)`
# print("Google Speech Recognition thinks you said " + recognizer.recognize_google(audio))
r.recognize_google(audio, key="")
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
r = sr.Recognizer()
m = sr.Microphone()
with m as source:
r.adjust_for_ambient_noise(source) # we only need to calibrate once, before we start listening
# start listening in the background (note that we don't have to do this inside a `with` statement)
stop_listening = r.listen_in_background(m, callback)
# `stop_listening` is now a function that, when called, stops background listening
# do some other computation for 5 seconds, then stop listening and keep doing other computations
import time
for _ in range(50): time.sleep(0.1) # we're still listening even though the main thread is doing other things
stop_listening() # calling this function requests that the background listener stop listening
while True: time.sleep(0.1)
| xe1gyq/GiekIs | examples/au.py | Python | apache-2.0 | 1,636 | 0.011002 |
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module to provide APIs for authorization checking for VMDK ops.
"""
import logging
import auth_data
import sqlite3
import convert
import auth_data_const
import volume_kv as kv
import threadutils
# All supported vmdk commands
CMD_CREATE = 'create'
CMD_REMOVE = 'remove'
CMD_ATTACH = 'attach'
CMD_DETACH = 'detach'
SIZE = 'size'
# thread local storage in this module namespace
thread_local = threadutils.get_local_storage()
def get_auth_mgr():
""" Get a connection to auth DB. """
global thread_local
if not hasattr(thread_local, '_auth_mgr'):
thread_local._auth_mgr = auth_data.AuthorizationDataManager()
thread_local._auth_mgr.connect()
return thread_local._auth_mgr
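# Editor's note: the thread-local caching above exists because sqlite3
# connections may, by default, only be used from the thread that created
# them. A minimal standalone sketch of the same pattern (db path illustrative):
#
#     import sqlite3, threading
#     _local = threading.local()
#     def get_conn(path='/tmp/auth.db'):
#         if not hasattr(_local, 'conn'):
#             _local.conn = sqlite3.connect(path)  # one connection per thread
#         return _local.conn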
def get_tenant(vm_uuid):
""" Get tenant which owns this VM by querying the auth DB. """
_auth_mgr = get_auth_mgr()
try:
cur = _auth_mgr.conn.execute(
"SELECT tenant_id FROM vms WHERE vm_id = ?",
(vm_uuid, )
)
result = cur.fetchone()
except sqlite3.Error as e:
logging.error("Error %s when querying from vms table for vm_id %s", e, vm_uuid)
return str(e), None, None
    tenant_uuid = None
    tenant_name = None
    if result:
        tenant_uuid = result[0]
        logging.debug("get tenant vm_uuid=%s tenant_id=%s", vm_uuid, tenant_uuid)
try:
cur = _auth_mgr.conn.execute(
"SELECT name FROM tenants WHERE id = ?",
(tenant_uuid, )
)
result = cur.fetchone()
except sqlite3.Error as e:
logging.error("Error %s when querying from tenants table for tenant_id %s",
e, tenant_uuid)
return str(e), None, None
if result:
tenant_name = result[0]
logging.debug("Found tenant_uuid %s, tenant_name %s", tenant_uuid, tenant_name)
return None, tenant_uuid, tenant_name
def get_privileges(tenant_uuid, datastore):
""" Return privileges for given (tenant_uuid, datastore) pair by
querying the auth DB.
"""
_auth_mgr = get_auth_mgr()
privileges = []
logging.debug("get_privileges tenant_uuid=%s datastore=%s", tenant_uuid, datastore)
try:
cur = _auth_mgr.conn.execute(
"SELECT * FROM privileges WHERE tenant_id = ? and datastore = ?",
(tenant_uuid, datastore)
)
privileges = cur.fetchone()
except sqlite3.Error as e:
logging.error("Error %s when querying privileges table for tenant_id %s and datastore %s",
e, tenant_uuid, datastore)
return str(e), None
return None, privileges
def has_privilege(privileges, type):
""" Check the privileges has the specific type of privilege set. """
if not privileges:
return False
logging.debug("%s=%d", type, privileges[type])
return privileges[type]
def get_vol_size(opts):
""" get volume size. """
    if not opts or SIZE not in opts:
logging.warning("Volume size not specified")
return kv.DEFAULT_DISK_SIZE
return opts[SIZE].upper()
def check_max_volume_size(opts, privileges):
""" Check whether the size of the volume to be created exceeds
the max volume size specified in the privileges.
"""
if privileges:
vol_size_in_MB = convert.convert_to_MB(get_vol_size(opts))
max_vol_size_in_MB = privileges[auth_data_const.COL_MAX_VOLUME_SIZE]
logging.debug("vol_size_in_MB=%d max_vol_size_in_MB=%d",
vol_size_in_MB, max_vol_size_in_MB)
# if max_vol_size_in_MB which read from DB is 0, which means
# no max_vol_size limit, function should return True
if max_vol_size_in_MB == 0:
return True
return vol_size_in_MB <= max_vol_size_in_MB
else:
# no privileges
return True
def get_total_storage_used(tenant_uuid, datastore):
""" Return total storage used by (tenant_uuid, datastore)
by querying auth DB.
"""
_auth_mgr = get_auth_mgr()
total_storage_used = 0
try:
cur = _auth_mgr.conn.execute(
"SELECT SUM(volume_size) FROM volumes WHERE tenant_id = ? and datastore = ?",
(tenant_uuid, datastore)
)
except sqlite3.Error as e:
logging.error("Error %s when querying storage table for tenant_id %s and datastore %s",
e, tenant_uuid, datastore)
return str(e), total_storage_used
result = cur.fetchone()
if result:
if result[0]:
total_storage_used = result[0]
logging.debug("total storage used for (tenant %s datastore %s) is %s MB", tenant_uuid,
datastore, total_storage_used)
return None, total_storage_used
def check_usage_quota(opts, tenant_uuid, datastore, privileges):
""" Check if the volume can be created without violating the quota. """
if privileges:
vol_size_in_MB = convert.convert_to_MB(get_vol_size(opts))
error_info, total_storage_used = get_total_storage_used(tenant_uuid, datastore)
if error_info:
# cannot get the total_storage_used, to be safe, return False
return False
usage_quota = privileges[auth_data_const.COL_USAGE_QUOTA]
# if usage_quota which read from DB is 0, which means
# no usage_quota, function should return True
if usage_quota == 0:
return True
return vol_size_in_MB + total_storage_used <= usage_quota
else:
# no privileges
return True
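# Editor's note: a worked example (numbers illustrative) of the quota check
# above: with usage_quota = 2048 MB and total_storage_used = 1536 MB, a new
# 512 MB volume passes (1536 + 512 <= 2048), while a 513 MB volume fails.
# A usage_quota of 0 always passes, since 0 is treated as "no quota".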
def check_privileges_for_command(cmd, opts, tenant_uuid, datastore, privileges):
"""
Check whether the (tenant_uuid, datastore) has the privileges to run
the given command.
"""
result = None
cmd_need_mount_privilege = [CMD_ATTACH, CMD_DETACH]
if cmd in cmd_need_mount_privilege:
if not has_privilege(privileges, auth_data_const.COL_MOUNT_VOLUME):
result = "No mount privilege"
if cmd == CMD_CREATE:
if not has_privilege(privileges, auth_data_const.COL_CREATE_VOLUME):
result = "No create privilege"
if not check_max_volume_size(opts, privileges):
result = "volume size exceeds the max volume size limit"
if not check_usage_quota(opts, tenant_uuid, datastore, privileges):
result = "The total volume size exceeds the usage quota"
if cmd == CMD_REMOVE:
if not has_privilege(privileges, auth_data_const.COL_DELETE_VOLUME):
result = "No delete privilege"
return result
def tables_exist():
""" Check tables needed for authorization exist or not. """
_auth_mgr = get_auth_mgr()
try:
cur = _auth_mgr.conn.execute("SELECT name FROM sqlite_master WHERE type = 'table' and name = 'tenants';")
result = cur.fetchall()
except sqlite3.Error as e:
logging.error("Error %s when checking whether table tenants exists or not", e)
return str(e), False
if not result:
error_info = "table tenants does not exist"
logging.error(error_info)
return error_info, False
try:
cur = _auth_mgr.conn.execute("SELECT name FROM sqlite_master WHERE type = 'table' and name = 'vms';")
result = cur.fetchall()
except sqlite3.Error as e:
logging.error("Error %s when checking whether table vms exists or not", e)
return str(e), False
if not result:
error_info = "table vms does not exist"
logging.error(error_info)
return error_info, False
try:
cur = _auth_mgr.conn.execute("SELECT name FROM sqlite_master WHERE type = 'table' and name = 'privileges';")
result = cur.fetchall()
except sqlite3.Error as e:
logging.error("Error %s when checking whether table privileges exists or not", e)
return str(e), False
if not result:
error_info = "table privileges does not exist"
logging.error(error_info)
return error_info, False
try:
cur = _auth_mgr.conn.execute("SELECT name FROM sqlite_master WHERE type = 'table' and name = 'volumes';")
result = cur.fetchall()
except sqlite3.Error as e:
logging.error("Error %s when checking whether table volumes exists or not", e)
return str(e), False
if not result:
error_info = "table volumes does not exist"
logging.error(error_info)
return error_info, False
return None, True
def authorize(vm_uuid, datastore, cmd, opts):
""" Check whether the command can be run on this VM.
Return value: result, tenant_uuid, tenant_name
- result: return None if the command can be run on this VM, otherwise, return
corresponding error message
- tenant_uuid: If the VM belongs to a tenant, return tenant_uuid, otherwise, return
None
- tenant_name: If the VM belongs to a tenant, return tenant_name, otherwise, return
None
"""
logging.debug("Authorize: vm_uuid=%s", vm_uuid)
logging.debug("Authorize: datastore=%s", datastore)
logging.debug("Authorize: cmd=%s", cmd)
logging.debug("Authorize: opt=%s", opts)
try:
get_auth_mgr()
except auth_data.DbConnectionError as e:
error_info = "Failed to connect auth DB({0})".format(e)
return error_info, None, None
    # If table "tenants", "vms", "privileges" or "volumes" does not exist,
    # the auth check cannot proceed
    error_info, exist = tables_exist()
    if not exist:
        error_info = "Required tables in auth DB do not exist"
        logging.error(error_info)
        return error_info, None, None
error_info, tenant_uuid, tenant_name = get_tenant(vm_uuid)
if error_info:
return error_info, None, None
if not tenant_uuid:
# This VM does not associate any tenant, don't need auth check
logging.debug("VM %s does not belong to any tenant", vm_uuid)
return None, None, None
else:
error_info, privileges = get_privileges(tenant_uuid, datastore)
if error_info:
return error_info, None, None
result = check_privileges_for_command(cmd, opts, tenant_uuid, datastore, privileges)
if not result:
logging.info("cmd %s with opts %s on tenant_uuid %s datastore %s is allowed to execute",
cmd, opts, tenant_uuid, datastore)
return result, tenant_uuid, tenant_name
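# Illustrative call (a sketch; the VM UUID, datastore name and size below are
# hypothetical):
#
#   error_info, tenant_uuid, tenant_name = authorize(
#       vm_uuid, "datastore1", CMD_CREATE, {SIZE: "2gb"})
#   if error_info:
#       ...  # deny the request and report error_info to the caller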
def add_volume_to_volumes_table(tenant_uuid, datastore, vol_name, vol_size_in_MB):
""" Insert volume to volumes table. """
_auth_mgr = get_auth_mgr()
logging.debug("add to volumes table(%s %s %s %s)", tenant_uuid, datastore,
vol_name, vol_size_in_MB)
try:
_auth_mgr.conn.execute(
"INSERT INTO volumes(tenant_id, datastore, volume_name, volume_size) VALUES (?, ?, ?, ?)",
(tenant_uuid, datastore, vol_name, vol_size_in_MB)
)
_auth_mgr.conn.commit()
except sqlite3.Error as e:
logging.error("Error %s when insert into volumes table for tenant_id %s and datastore %s",
e, tenant_uuid, datastore)
return str(e)
return None
def get_row_from_tenants_table(conn, tenant_uuid):
""" Get a row from tenants table for a given tenant """
try:
cur = conn.execute(
"SELECT * FROM tenants WHERE id=?",
(tenant_uuid,)
)
except sqlite3.Error as e:
logging.error("Error: %s when querying tenants table for tenant %s",
e, tenant_uuid)
return str(e), None
result = cur.fetchone()
return None, result
def get_row_from_vms_table(conn, tenant_uuid):
""" Get rows from vms table for a given tenant """
try:
cur = conn.execute(
"SELECT * FROM vms WHERE tenant_id=?",
(tenant_uuid,)
)
except sqlite3.Error as e:
logging.error("Error: %s when querying vms table for tenant %s", e, tenant_uuid)
return str(e), None
result = cur.fetchall()
return None, result
def get_row_from_privileges_table(conn, tenant_uuid):
""" Get rows from privileges table for a given tenant """
try:
cur = conn.execute(
"SELECT * FROM privileges WHERE tenant_id=?",
(tenant_uuid,)
)
except sqlite3.Error as e:
logging.error("Error: %s when querying privileges table for tenant %s", e, tenant_uuid)
return str(e), None
result = cur.fetchall()
return None, result
|
BaluDontu/docker-volume-vsphere
|
esx_service/utils/auth.py
|
Python
|
apache-2.0
| 13,215 | 0.003405 |
import unittest
from fractions import Fraction as F
from abcesac.music import *
class KeyTestCase(unittest.TestCase):
def test_get_notes(self):
got = Key('C').get_notes()
want = ['C','D','E','F','G','A','B']
self.assertEquals(got, want)
got = Key('D').get_notes()
want = ['D','E','F#','G','A','B','C#']
self.assertEquals(got, want)
got = Key('E').get_notes()
want = ['E','F#','G#','A','B','C#','D#']
self.assertEquals(got, want)
got = Key('Eb').get_notes()
want = ['Eb','F','G','Ab','Bb','C','D']
self.assertEquals(got, want)
def test_interval(self):
got = Key('C').interval('C', 5)
self.assertEquals(got, 'A')
got = Key('C').interval('B', 5)
self.assertEquals(got, 'G')
got = Key('C').interval('B', 6)
self.assertEquals(got, 'A')
got = Key('G#').interval('B', 6)
self.assertEquals(got, 'A#')
def test_length_value(self):
got = length_value(F(1,8))
self.assertEquals(got, (F(1,8),0))
got = length_value(F(2,8))
self.assertEquals(got, (F(1,4),0))
got = length_value(F(3,8))
self.assertEquals(got, (F(1,4),1))
got = length_value(F(4,8))
self.assertEquals(got, (F(1,2),0))
got = length_value(F(5,8))
self.assertEquals(got, (F(1,2),1))
got = length_value(F(6,8))
self.assertEquals(got, (F(1,2),1))
got = length_value(F(7,8))
self.assertEquals(got, (F(1,2),2))
got = length_value(F(8,8))
self.assertEquals(got, (F(1,1),0))
got = length_value(F(9,8))
self.assertEquals(got, (F(1,1),1))
got = length_value(F(10,8))
self.assertEquals(got, (F(1,1),1))
got = length_value(F(16,8))
self.assertEquals(got, (F(2,1),0))
def test_tuplets(self):
tuplet = Tuplet(3, 2)
tuplet.add_note(Note(name='C', length=F(1,8)))
tuplet.add_note(Note(name='C', length=F(1,8)))
tuplet.add_note(Note(name='C', length=F(1,8)))
self.assertEquals(tuplet.length, F(2,8))
tuplet = Tuplet(3, 2)
tuplet.add_note(Note(name='C', length=F(1,8)))
tuplet.add_note(Note(name='C', length=F(1,8)))
tuplet.add_note(Note(name='C', length=F(1,16)))
tuplet.add_note(Note(name='C', length=F(1,16)))
self.assertEquals(tuplet.length, F(2,8))
tuplet = Tuplet(5, 3)
tuplet.add_note(Note(name='C', length=F(1,8)))
tuplet.add_note(Note(name='C', length=F(1,8)))
tuplet.add_note(Note(name='C', length=F(1,8)))
tuplet.add_note(Note(name='C', length=F(1,8)))
tuplet.add_note(Note(name='C', length=F(1,16)))
tuplet.add_note(Note(name='C', length=F(1,16)))
self.assertEquals(tuplet.length, F(3,8))
tuplet = Tuplet(7, 3)
tuplet.add_note(Note(name='C', length=F(1,16)))
tuplet.add_note(Note(name='C', length=F(1,16)))
tuplet.add_note(Note(name='C', length=F(1,16)))
tuplet.add_note(Note(name='C', length=F(1,16)))
tuplet.add_note(Note(name='C', length=F(1,16)))
tuplet.add_note(Note(name='C', length=F(1,16)))
tuplet.add_note(Note(name='C', length=F(1,16)))
self.assertEquals(tuplet.length, F(3,16))
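    # Tuplet arithmetic behind the assertions above: Tuplet(n, m) plays n
    # notes in the time of m, so summed note lengths scale by m/n; e.g. seven
    # 1/16 notes in the time of three give 7 * 1/16 * 3/7 = 3/16.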
def test_modes(self):
got = Key('C').mode_scale('major')
want = ['C', 'D', 'E', 'F', 'G', 'A', 'B']
self.assertEquals(got, want)
got = Key('C').mode_scale('ionian')
want = ['C', 'D', 'E', 'F', 'G', 'A', 'B']
self.assertEquals(got, want)
got = Key('C').mode_scale('minor')
want = ['C', 'D', 'Eb', 'F', 'G', 'Ab', 'Bb']
self.assertEquals(got, want)
got = Key('E').mode_scale('minor')
want = ['E', 'F#', 'G', 'A', 'B', 'C', 'D']
self.assertEquals(got, want)
got = Key('C').mode_scale('dorian')
want = ['C', 'D', 'Eb', 'F', 'G', 'A', 'Bb']
self.assertEquals(got, want)
got = Key('C').mode_scale('phrygian')
want = ['C', 'Db', 'Eb', 'F', 'G', 'Ab', 'Bb']
self.assertEquals(got, want)
got = Key('C').mode_scale('lydian')
want = ['C', 'D', 'E', 'F#', 'G', 'A', 'B']
self.assertEquals(got, want)
got = Key('C').mode_scale('mixolydian')
want = ['C', 'D', 'E', 'F', 'G', 'A', 'Bb']
self.assertEquals(got, want)
got = Key('C').mode_scale('aeolian')
want = ['C', 'D', 'Eb', 'F', 'G', 'Ab', 'Bb']
self.assertEquals(got, want)
got = Key('C').mode_scale('locrian')
want = ['C', 'Db', 'Eb', 'F', 'Gb', 'Ab', 'Bb']
self.assertEquals(got, want)
got = Key('D').mode_scale('major')
want = ['D', 'E', 'F#', 'G', 'A', 'B', 'C#']
self.assertEquals(got, want)
if __name__ == '__main__':
unittest.main()
|
google-code/abc2esac
|
abcesac/tests/music.py
|
Python
|
gpl-3.0
| 4,909 | 0.017315 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2019, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Container
########################
The :class:`~exa.core.container.Container` class is the primary object for
data processing, analysis, and visualization. In brief, containers are composed
of data objects whose contents are used for 2D and 3D visualization. Containers
also provide some content management and data relationship features.
See Also:
For a description of data objects see :mod:`~exa.core.numerical`.
"""
import os
import numpy as np
import pandas as pd
import networkx as nx
from sys import getsizeof
from copy import deepcopy
from collections import defaultdict
from .numerical import check_key, Field, Series, DataFrame
from exa.util.utility import convert_bytes
from exa.util import mpl
import matplotlib.pyplot as plt
class Container(object):
"""
Container class responsible for all features related to data management.
"""
_getter_prefix = 'compute'
_cardinal = None # Name of the cardinal data table
def copy(self, name=None, description=None, meta=None):
"""
Create a copy of the current object (may alter the container's name,
description, and update the metadata if needed).
"""
cls = self.__class__
kwargs = self._rel(copy=True)
kwargs.update(self._data(copy=True))
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if meta is not None:
kwargs['meta'] = meta
return cls(**kwargs)
def concat(self, *args, **kwargs):
"""
Concatenate any number of container objects with the current object into
a single container object.
See Also:
For argument description, see :func:`~exa.core.container.concat`.
"""
raise NotImplementedError()
def slice_naive(self, key):
"""
Naively slice each data object in the container by the object's index.
Args:
            key: Int, slice, or list by which to extract the "sub"-container
Returns:
sub: Sub container of the same format with a view of the data
Warning:
To ensure that a new container is created, use the copy method.
.. code-block:: Python
mycontainer[slice].copy()
"""
kwargs = {'name': self.name, 'description': self.description, 'meta': self.meta}
for name, data in self._data().items():
k = name[1:] if name.startswith('_') else name
kwargs[k] = data.slice_naive(key)
return self.__class__(**kwargs)
def slice_cardinal(self, key):
"""
Slice the container according to its (primary) cardinal axis.
The "cardinal" axis can have any name so long as the name matches a
data object attached to the container. The index name for this object
should also match the value of the cardinal axis.
The algorithm builds a network graph representing the data relationships
(including information about the type of relationship) and then traverses
the edge tree (starting from the cardinal table). Each subsequent child
object in the tree is sliced based on its relationship with its parent.
Note:
Breadth first traversal is performed.
Warning:
This function does not make a copy (if possible): to ensure a new
object is created (a copy) use :func:`~exa.core.container.Container.copy`
after slicing.
.. code-block:: Python
myslice = mycontainer[::2].copy()
See Also:
For data network generation, see :func:`~exa.core.container.Container.network`.
For information about relationships between data objects see
:mod:`~exa.core.numerical`.
"""
if self._cardinal:
cls = self.__class__
key = check_key(self[self._cardinal], key, cardinal=True)
g = self.network(fig=False)
kwargs = {self._cardinal: self[self._cardinal].ix[key], 'name': self.name,
'description': self.description, 'meta': self.meta}
# Next traverse, breadth first, all data objects
for parent, child in nx.bfs_edges(g, self._cardinal):
if child in kwargs:
continue
typ = g.edge_types[(parent, child)]
if self._cardinal in self[child].columns and hasattr(self[child], 'slice_cardinal'):
kwargs[child] = self[child].slice_cardinal(key)
elif typ == 'index-index':
# Select from the child on the parent's index (the parent is
# in the kwargs already).
kwargs[child] = self[child].ix[kwargs[parent].index.values]
elif typ == 'index-column':
# Select from the child where the column (of the same name as
# the parent) is in the parent's index values
cdf = self[child]
kwargs[child] = cdf[cdf[parent].isin(kwargs[parent].index.values)]
elif typ == 'column-index':
# Select from the child where the child's index is in the
                    # column of the parent. Note that this relationship may
                    # span multiple suffixed columns (e.g. "atom0", "atom1").
cdf = self[child]
cin = cdf.index.name
cols = [col for col in kwargs[parent] if cin == col or (cin == col[:-1] and col[-1].isdigit())]
index = kwargs[parent][cols].stack().astype(np.int64).values
kwargs[child] = cdf[cdf.index.isin(index)]
return cls(**kwargs)
def cardinal_groupby(self):
"""
Create an instance of this class for every step in the cardinal dimension.
"""
if self._cardinal:
g = self.network(fig=False)
cardinal_indexes = self[self._cardinal].index.values
selfs = {}
cls = self.__class__
for cardinal_index in cardinal_indexes:
kwargs = {self._cardinal: self[self._cardinal].ix[[cardinal_index]]}
                for parent, child in nx.bfs_edges(g, self._cardinal):
if child in kwargs:
continue
typ = g.edge_types[(parent, child)]
if self._cardinal in self[child].columns and hasattr(self[child], 'slice_cardinal'):
                        kwargs[child] = self[child].slice_cardinal(cardinal_index)
elif typ == 'index-index':
# Select from the child on the parent's index (the parent is
# in the kwargs already).
kwargs[child] = self[child].ix[kwargs[parent].index.values]
elif typ == 'index-column':
# Select from the child where the column (of the same name as
# the parent) is in the parent's index values
cdf = self[child]
kwargs[child] = cdf[cdf[parent].isin(kwargs[parent].index.values)]
elif typ == 'column-index':
# Select from the child where the child's index is in the
                        # column of the parent. Note that this relationship may
                        # span multiple suffixed columns (e.g. "atom0", "atom1").
cdf = self[child]
cin = cdf.index.name
cols = [col for col in kwargs[parent] if cin == col or (cin == col[:-1] and col[-1].isdigit())]
index = kwargs[parent][cols].stack().astype(np.int64).values
kwargs[child] = cdf[cdf.index.isin(index)]
selfs[cardinal_index] = cls(**kwargs)
return selfs
def info(self):
"""
Display information about the container's data objects (note that info
on metadata and visualization objects is also provided).
Note:
Sizes are reported in bytes.
"""
names = []
types = []
sizes = []
names.append('WIDGET')
types.append('-')
s = 0
sizes.append(s)
names.append('METADATA')
types.append('-')
s = 0
for obj in self._rel().values():
s += getsizeof(obj)
sizes.append(s)
for name, obj in self._data().items():
names.append(name[1:] if name.startswith('_') else name)
types.append('.'.join((obj.__module__, obj.__class__.__name__)))
if isinstance(obj, pd.Series):
sizes.append(obj.memory_usage())
else:
sizes.append(obj.memory_usage().sum())
inf = pd.DataFrame.from_dict({'object': names, 'type': types, 'size': sizes})
inf.set_index('object', inplace=True)
return inf.sort_index()
def memory_usage(self, string=False):
"""
Get the memory usage estimate of the container.
Args:
string (bool): Human readable string (default false)
See Also:
:func:`~exa.core.container.Container.info`
"""
if string:
n = getsizeof(self)
return ' '.join((str(s) for s in convert_bytes(n)))
return self.info()['size']
def network(self, figsize=(14, 9), fig=True):
"""
Display information about the container's object relationships.
Nodes correspond to data objects. The size of the node corresponds
to the size of the table in memory. The color of the node corresponds
to its fundamental data type. Nodes are labeled by their container
name; class information is listed below. The color of the connections
correspond to the type of relationship; either an index of one table
corresponds to a column in another table or the two tables share an
index.
Args:
figsize (tuple): Tuple containing figure dimensions
fig (bool): Generate the figure (default true)
Returns:
graph: Network graph object containing data relationships
"""
conn_types = ['index-index', 'index-column']
conn_colors = mpl.sns.color_palette('viridis', len(conn_types))
conn = dict(zip(conn_types, conn_colors))
def get_node_type_color(obj):
"""Gets the color of a node based on the node's (sub)type."""
cols = mpl.sns.color_palette('viridis', len(conn_types))
for col in cols:
if isinstance(obj, (pd.DataFrame, pd.Series, pd.SparseSeries, pd.SparseDataFrame)):
typ = type(obj)
return '.'.join((typ.__module__, typ.__name__)), col
return 'other', 'gray'
def legend(items, name, loc, ax):
"""Legend creation helper function."""
proxies = []
descriptions = []
for label, color in items:
if label == 'column-index':
continue
if name == 'Data Type':
line = mpl.sns.mpl.lines.Line2D([], [], linestyle='none', color=color, marker='o')
else:
line = mpl.sns.mpl.lines.Line2D([], [], linestyle='-', color=color)
proxies.append(line)
descriptions.append(label)
lgnd = ax.legend(proxies, descriptions, title=name, loc=loc, frameon=True)
lgnd_frame = lgnd.get_frame()
lgnd_frame.set_facecolor('white')
lgnd_frame.set_edgecolor('black')
return lgnd, ax
info = self.info()
info = info[info['type'] != '-']
info['size'] *= 13000/info['size'].max()
info['size'] += 2000
node_size_dict = info['size'].to_dict() # Can pull all nodes from keys
node_class_name_dict = info['type'].to_dict()
node_type_dict = {} # Values are tuple of "underlying" type and color
node_conn_dict = {} # Values are tuple of connection type and color
items = self._data().items()
for k0, v0 in items:
n0 = k0[1:] if k0.startswith('_') else k0
node_type_dict[n0] = get_node_type_color(v0)
for k1, v1 in items:
if v0 is v1:
continue
n1 = k1[1:] if k1.startswith('_') else k1
for name in v0.index.names: # Check the index of data object 0 against the index
if name is None: # and columns of data object 1
continue
if name in v1.index.names:
contyp = 'index-index'
node_conn_dict[(n0, n1)] = (contyp, conn[contyp])
node_conn_dict[(n1, n0)] = (contyp, conn[contyp])
for col in v1.columns:
# Catches index "atom", column "atom1"; does not catch atom10
if name == col or (name == col[:-1] and col[-1].isdigit()):
contyp = 'index-column'
node_conn_dict[(n0, n1)] = (contyp, conn[contyp])
node_conn_dict[(n1, n0)] = ('column-index', conn[contyp])
g = nx.Graph()
g.add_nodes_from(node_size_dict.keys())
g.add_edges_from(node_conn_dict.keys())
node_sizes = [node_size_dict[node] for node in g.nodes()]
node_labels = {node: ' {}\n({})'.format(node, node_class_name_dict[node]) for node in g.nodes()}
node_colors = [node_type_dict[node][1] for node in g.nodes()]
edge_colors = [node_conn_dict[edge][1] for edge in g.edges()]
# Build the figure and legends
if fig:
fig, ax = plt.subplots(1, figsize=figsize)
ax.axis('off')
pos = nx.spring_layout(g)
nx.draw_networkx_nodes(g, pos=pos, ax=ax, alpha=0.7, node_size=node_sizes,
node_color=node_colors)
nx.draw_networkx_labels(g, pos=pos, labels=node_labels, font_size=17,
font_weight='bold', ax=ax)
nx.draw_networkx_edges(g, pos=pos, edge_color=edge_colors, width=2, ax=ax)
l1, ax = legend(set(node_conn_dict.values()), 'Connection', (1, 0), ax)
_, ax = legend(set(node_type_dict.values()), 'Data Type', (1, 0.3), ax)
fig.gca().add_artist(l1)
        g.edge_types = {edge: value[0] for edge, value in node_conn_dict.items()}  # Attach connection information to the network graph
return g
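    # Illustrative usage (a sketch; assumes `box` is a populated container):
    #   g = box.network()            # draw the figure and return the graph
    #   g = box.network(fig=False)   # graph only, as slice_cardinal uses it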
def save(self, path=None, complevel=1, complib='zlib'):
"""
Save the container as an HDF5 archive.
Args:
path (str): Path where to save the container
"""
if path is None:
path = self.hexuid + '.hdf5'
elif os.path.isdir(path):
path += os.sep + self.hexuid + '.hdf5'
elif not (path.endswith('.hdf5') or path.endswith('.hdf')):
raise ValueError('File path must have a ".hdf5" or ".hdf" extension.')
with pd.HDFStore(path, 'w', complevel=complevel, complib=complib) as store:
store['kwargs'] = pd.Series()
store.get_storer('kwargs').attrs.metadata = self._rel()
fc = 0 # Field counter (see special handling of fields below)
for name, data in self._data().items():
if hasattr(data, '_revert_categories'):
data._revert_categories()
name = name[1:] if name.startswith('_') else name
if isinstance(data, Field): # Fields are handled separately
fname = 'FIELD{}_'.format(fc) + name + '/'
store[fname + 'data'] = pd.DataFrame(data)
for i, field in enumerate(data.field_values):
ffname = fname + 'values' + str(i)
if isinstance(field, pd.Series):
store[ffname] = pd.Series(field)
else:
store[ffname] = pd.DataFrame(field)
fc += 1
elif isinstance(data, Series):
s = pd.Series(data)
if isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):
s = s.astype('O')
store[name] = s
elif isinstance(data, DataFrame):
store[name] = pd.DataFrame(data)
                elif isinstance(data, pd.SparseSeries):
s = pd.SparseSeries(data)
if isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):
s = s.astype('O')
store[name] = s
                elif isinstance(data, pd.SparseDataFrame):
store[name] = pd.SparseDataFrame(data)
else:
if hasattr(data, 'dtype') and isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):
data = data.astype('O')
else:
for col in data:
if isinstance(data[col].dtype, pd.types.dtypes.CategoricalDtype):
data[col] = data[col].astype('O')
store[name] = data
if hasattr(data, '_set_categories'):
data._set_categories()
def to_hdf(self, *args, **kwargs):
"""Alias of :func:`~exa.core.container.Container`."""
self.save(*args, **kwargs)
@classmethod
def load(cls, pkid_or_path=None):
"""
Load a container object from a persistent location or file path.
Args:
pkid_or_path: Integer pkid corresponding to the container table or file path
Returns:
container: The saved container object
"""
path = pkid_or_path
if isinstance(path, (int, np.int32, np.int64)):
raise NotImplementedError('Lookup via CMS not implemented.')
elif not os.path.isfile(path):
raise FileNotFoundError('File {} not found.'.format(path))
kwargs = {}
fields = defaultdict(dict)
with pd.HDFStore(path) as store:
for key in store.keys():
if 'kwargs' in key:
kwargs.update(store.get_storer(key).attrs.metadata)
elif "FIELD" in key:
name, dname = "_".join(key.split("_")[1:]).split("/")
dname = dname.replace('values', '')
fields[name][dname] = store[key]
else:
name = str(key[1:])
kwargs[name] = store[key]
for name, field_data in fields.items():
fps = field_data.pop('data')
kwargs[name] = Field(fps, field_values=[field_data[str(arr)] for arr in
sorted(map(int, field_data.keys()))])
return cls(**kwargs)
@classmethod
def from_hdf(cls, *args, **kwargs):
"""Alias for :func:`~exa.core.container.Container`."""
return cls.load(*args, **kwargs)
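    # Round-trip sketch (the path below is hypothetical):
    #   box.save('/tmp/box.hdf5')
    #   box2 = Container.load('/tmp/box.hdf5')  # or Container.from_hdf(...)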
def _rel(self, copy=False):
"""
Get descriptive kwargs of the container (e.g. name, description, meta).
"""
rel = {}
for key, obj in vars(self).items():
if not isinstance(obj, (pd.Series, pd.DataFrame, pd.SparseSeries, pd.SparseDataFrame)) and not key.startswith('_'):
if copy and 'id' not in key:
rel[key] = deepcopy(obj)
else:
rel[key] = obj
return rel
def _data(self, copy=False):
"""
Get data kwargs of the container (i.e. dataframe and series objects).
"""
data = {}
for key, obj in vars(self).items():
if isinstance(obj, (pd.Series, pd.DataFrame, pd.SparseSeries, pd.SparseDataFrame)):
if copy:
data[key] = obj.copy(deep=True)
else:
data[key] = obj
return data
def __delitem__(self, key):
if key in vars(self):
del self.__dict__[key]
def __sizeof__(self):
"""Note that this function must return a Python integer."""
return int(self.info()['size'].sum())
def __getitem__(self, key):
if isinstance(key, str):
return getattr(self, key)
elif isinstance(key, (int, slice, list)) and self._cardinal is None:
return self.slice_naive(key)
elif isinstance(key, (int, slice, list)) and self._cardinal is not None:
return self.slice_cardinal(key)
raise KeyError()
def __init__(self, name=None, description=None, meta=None, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
self.name = name
self.description = description
self.meta = meta
class TypedMeta(type):
"""
This metaclass creates statically typed class attributes using the property
framework.
.. code-block:: Python
class TestMeta(TypedMeta):
attr1 = (int, float)
attr2 = DataFrame
class TestClass(metaclass=TestMeta):
def __init__(self, attr1, attr2):
self.attr1 = attr1
self.attr2 = attr2
The above code dynamically creates code that looks like the following:
.. code-block:: Python
class TestClass:
@property
def attr1(self):
return self._attr1
@attr1.setter
def attr1(self, obj):
if not isinstance(obj, (int, float)):
                raise TypeError('attr1 must be int or float')
self._attr1 = obj
@attr1.deleter
def attr1(self):
del self._attr1
@property
def attr2(self):
return self._attr2
@attr2.setter
def attr2(self, obj):
if not isinstance(obj, DataFrame):
raise TypeError('attr2 must be DataFrame')
self._attr2 = obj
@attr2.deleter
def attr2(self):
del self._attr2
def __init__(self, attr1, attr2):
self.attr1 = attr1
self.attr2 = attr2
"""
@staticmethod
def create_property(name, ptype):
"""
Creates a custom property with a getter that performs computing
functionality (if available) and raise a type error if setting
with the wrong type.
Note:
By default, the setter attempts to convert the object to the
correct type; a type error is raised if this fails.
"""
pname = '_' + name
def getter(self):
            # This will be where the data is stored (e.g. self._name)
# This is the default property "getter" for container data objects.
# If the property value is None, this function will check for a
# convenience method with the signature, self.compute_name() and call
# it prior to returning the property value.
if not hasattr(self, pname) and hasattr(self, '{}{}'.format(self._getter_prefix, pname)):
self['{}{}'.format(self._getter_prefix, pname)]()
if not hasattr(self, pname):
raise AttributeError('Please compute or set {} first.'.format(name))
return getattr(self, pname)
def setter(self, obj):
# This is the default property "setter" for container data objects.
# Prior to setting a property value, this function checks that the
# object's type is correct.
if not isinstance(obj, ptype):
try:
obj = ptype(obj)
except Exception:
raise TypeError('Must be able to convert object {0} to {1} (or must be of type {1})'.format(name, ptype))
setattr(self, pname, obj)
def deleter(self):
# Deletes the property's value.
del self[pname]
return property(getter, setter, deleter)
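    # Note: because the setter above coerces first, assigning any object
    # convertible to ptype succeeds; TypeError is raised only when the
    # conversion itself fails.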
def __new__(mcs, name, bases, clsdict):
"""
Modification of the class definition occurs here; we iterate over all
statically typed attributes and attach their property (see
:func:`~exa.container.TypedMeta.create_property`) definition, returning
the new class definition.
"""
for k, v in vars(mcs).items():
if isinstance(v, type) and not k.startswith('_'):
clsdict[k] = mcs.create_property(k, v)
return super(TypedMeta, mcs).__new__(mcs, name, bases, clsdict)
|
tjduigna/exa
|
exa/core/container.py
|
Python
|
apache-2.0
| 24,987 | 0.002441 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceEndpointPolicyDefinitionsOperations:
"""ServiceEndpointPolicyDefinitionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified ServiceEndpoint policy definitions.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
definition.
:type service_endpoint_policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
service_endpoint_policy_definition_name=service_endpoint_policy_definition_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
**kwargs: Any
) -> "_models.ServiceEndpointPolicyDefinition":
"""Get the specified service endpoint policy definitions from service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
        :param service_endpoint_policy_definition_name: The name of the service endpoint policy
         definition.
:type service_endpoint_policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicyDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.ServiceEndpointPolicyDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
service_endpoint_policy_definitions: "_models.ServiceEndpointPolicyDefinition",
**kwargs: Any
) -> "_models.ServiceEndpointPolicyDefinition":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(service_endpoint_policy_definitions, 'ServiceEndpointPolicyDefinition')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
service_endpoint_policy_definitions: "_models.ServiceEndpointPolicyDefinition",
**kwargs: Any
) -> AsyncLROPoller["_models.ServiceEndpointPolicyDefinition"]:
"""Creates or updates a service endpoint policy definition in the specified service endpoint
policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
        :param service_endpoint_policy_definition_name: The name of the service endpoint policy
         definition.
:type service_endpoint_policy_definition_name: str
:param service_endpoint_policy_definitions: Parameters supplied to the create or update service
endpoint policy operation.
:type service_endpoint_policy_definitions: ~azure.mgmt.network.v2019_07_01.models.ServiceEndpointPolicyDefinition
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServiceEndpointPolicyDefinition or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.ServiceEndpointPolicyDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
service_endpoint_policy_definition_name=service_endpoint_policy_definition_name,
service_endpoint_policy_definitions=service_endpoint_policy_definitions,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ServiceEndpointPolicyDefinitionListResult"]:
"""Gets all service endpoint policy definitions in a service end point policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyDefinitionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.ServiceEndpointPolicyDefinitionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinitionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinitionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions'} # type: ignore
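# Illustrative usage (a sketch; assumes an authenticated
# NetworkManagementClient instance `client`; the resource group, policy and
# definition names below are hypothetical):
#
#   ops = client.service_endpoint_policy_definitions
#   poller = await ops.begin_delete("my-rg", "my-policy", "my-definition")
#   await poller.result()
#   async for definition in ops.list_by_resource_group("my-rg", "my-policy"):
#       print(definition.name)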
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_service_endpoint_policy_definitions_operations.py
|
Python
|
mit
| 24,059 | 0.005362 |
from contextlib import ContextDecorator
from unittest import mock
import httpx
import pytest
from util.working_directory import working_directory
from .http import pull_http
class MockedHttpxStreamResponse(ContextDecorator):
"""
    VCR does not like recording HTTPX stream requests, so mock it.
"""
def __init__(self, method, url, **kwargs):
self.response = httpx.get(url)
def __getattr__(self, attr):
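        # Delegate all other attribute access to the real, non-streaming response.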
return getattr(self.response, attr)
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
return self
@pytest.mark.vcr
@mock.patch("httpx.stream", MockedHttpxStreamResponse)
def test_extension_from_mimetype(tempdir):
with working_directory(tempdir.path):
files = pull_http({"url": "https://httpbin.org/get"})
assert files["get.json"]["mimetype"] == "application/json"
files = pull_http({"url": "https://httpbin.org/image/png"}, path="image")
assert files["image.png"]["mimetype"] == "image/png"
files = pull_http({"url": "https://httpbin.org/html"}, path="content")
assert files["content.html"]["mimetype"] == "text/html"
files = pull_http({"url": "https://httpbin.org/html"}, path="foo.bar")
assert files["foo.bar"]["mimetype"] is None
# For some reason the status code does not work with VCR recording
def test_status_codes(tempdir):
with pytest.raises(RuntimeError) as excinfo:
pull_http({"url": "https://httpbin.org/status/404"})
assert "Error when fetching https://httpbin.org/status/404: 404" in str(
excinfo.value
)
|
stencila/hub
|
worker/jobs/pull/http_test.py
|
Python
|
apache-2.0
| 1,623 | 0.000616 |
#!/usr/bin/env python3
# https://docs.python.org/3/library/modulefinder.html
from modulefinder import ModuleFinder
finder = ModuleFinder()
finder.run_script('graph1.py')
print('Loaded modules:')
for name, mod in finder.modules.items():
print('%s: ' % name, end='')
print(','.join(list(mod.globalnames.keys())[:3]))
print('-'*50)
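# finder.badmodules maps each module name that could not be imported to the
# modules that attempted to import it.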
print('Modules not imported:')
print('\n'.join(finder.badmodules.keys()))
|
jtraver/dev
|
python3/graphics/modulefinder1.py
|
Python
|
mit
| 416 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013 Camptocamp SA (http://www.camptocamp.com)
# @author Nicolas Bessi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
import time
class AccountStatementFromInvoiceLines(orm.TransientModel):
_inherit = "account.statement.from.invoice.lines"
def populate_statement(self, cr, uid, ids, context=None):
"""Taken from account voucher as no hook is available. No function
no refactoring, just trimming the part that generates voucher"""
if context is None:
context = {}
statement_id = context.get('statement_id', False)
if not statement_id:
return {'type': 'ir.actions.act_window_close'}
data = self.read(cr, uid, ids, context=context)[0]
line_ids = data['line_ids']
if not line_ids:
return {'type': 'ir.actions.act_window_close'}
line_obj = self.pool['account.move.line']
statement_obj = self.pool['account.bank.statement']
statement_line_obj = self.pool['account.bank.statement.line']
currency_obj = self.pool['res.currency']
line_date = time.strftime(DEFAULT_SERVER_DATE_FORMAT)
statement = statement_obj.browse(
cr, uid, statement_id, context=context)
# for each selected move lines
for line in line_obj.browse(cr, uid, line_ids, context=context):
ctx = context.copy()
# take the date for computation of currency => use payment date
ctx['date'] = line_date
amount = 0.0
if line.debit > 0:
amount = line.debit
elif line.credit > 0:
amount = -line.credit
if line.amount_currency:
amount = currency_obj.compute(
cr, uid, line.currency_id.id, statement.currency.id,
line.amount_currency, context=ctx)
elif (line.invoice and
line.invoice.currency_id.id != statement.currency.id):
amount = currency_obj.compute(
cr, uid, line.invoice.currency_id.id, statement.currency.id,
amount, context=ctx)
context.update({'move_line_ids': [line.id],
'invoice_id': line.invoice.id})
s_type = 'general'
if line.journal_id.type in ('sale', 'sale_refund'):
s_type = 'customer'
            elif line.journal_id.type in ('purchase', 'purchase_refund'):
s_type = 'supplier'
vals = self._prepare_statement_line_vals(
cr, uid, line, s_type, statement_id, amount, context=context)
statement_line_obj.create(cr, uid, vals, context=context)
return {'type': 'ir.actions.act_window_close'}
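    # _prepare_statement_line_vals below is the extension hook: inheriting
    # modules can override it to customize the generated statement line values.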
def _prepare_statement_line_vals(self, cr, uid, move_line, s_type,
statement_id, amount, context=None):
return {'name': move_line.name or '?',
'amount': amount,
'type': s_type,
'partner_id': move_line.partner_id.id,
'account_id': move_line.account_id.id,
'statement_id': statement_id,
'ref': move_line.ref,
'voucher_id': False,
'date': time.strftime('%Y-%m-%d'),
}
class AccountPaymentPopulateStatement(orm.TransientModel):
_inherit = "account.payment.populate.statement"
def populate_statement(self, cr, uid, ids, context=None):
"""Taken from payment addon as no hook is vailable. No function
no refactoring, just trimming the part that generates voucher"""
line_obj = self.pool['payment.line']
statement_obj = self.pool['account.bank.statement']
statement_line_obj = self.pool['account.bank.statement.line']
currency_obj = self.pool['res.currency']
if context is None:
context = {}
data = self.read(cr, uid, ids, [], context=context)[0]
line_ids = data['lines']
if not line_ids:
return {'type': 'ir.actions.act_window_close'}
statement = statement_obj.browse(
cr, uid, context['active_id'], context=context)
for line in line_obj.browse(cr, uid, line_ids, context=context):
ctx = context.copy()
            # Formerly based on value_date, but that field no longer exists
ctx['date'] = line.ml_maturity_date
amount = currency_obj.compute(
cr, uid, line.currency.id, statement.currency.id,
line.amount_currency, context=ctx)
if not line.move_line_id.id:
continue
context.update({'move_line_ids': [line.move_line_id.id]})
vals = self._prepare_statement_line_vals(
cr, uid, line, -amount, statement, context=context)
st_line_id = statement_line_obj.create(cr, uid, vals,
context=context)
line_obj.write(
cr, uid, [line.id], {'bank_statement_line_id': st_line_id})
return {'type': 'ir.actions.act_window_close'}
def _prepare_statement_line_vals(self, cr, uid, payment_line, amount,
statement, context=None):
return {
'name': payment_line.order_id.reference or '?',
'amount': amount,
'type': 'supplier',
'partner_id': payment_line.partner_id.id,
'account_id': payment_line.move_line_id.account_id.id,
'statement_id': statement.id,
'ref': payment_line.communication,
'date': (payment_line.date or payment_line.ml_maturity_date or
statement.date)
}
|
akretion/bank-statement-reconcile
|
__unported__/statement_voucher_killer/voucher.py
|
Python
|
agpl-3.0
| 6,659 | 0.00015 |
from pygame.sprite import DirtySprite
from pygame import draw
class BaseWidget(DirtySprite):
"""clase base para todos los widgets"""
focusable = True
# si no es focusable, no se le llaman focusin y focusout
# (por ejemplo, un contenedor, una etiqueta de texto)
hasFocus = False
# indica si el widget está en foco o no.
enabled = True
# un widget con enabled==False no recibe ningun evento
nombre = ''
# identifica al widget en el renderer
hasMouseOver = False
# indica si el widget tuvo el mouse encima o no, por el onMouseOut
opciones = None
# las opciones con las que se inicializo
setFocus_onIn = False
# if True: Renderer.setFocus se dispara onMouseIn también.
KeyCombination = ''
layer = 0
rect = None
x, y = 0, 0
def __init__(self, parent=None, **opciones):
if parent is not None:
self.parent = parent
self.layer = self.parent.layer + 1
self.opciones = opciones
super().__init__()
def on_focus_in(self):
self.hasFocus = True
def on_focus_out(self):
self.hasFocus = False
def on_mouse_down(self, mousedata):
pass
def on_mouse_up(self, mousedata):
pass
def on_mouse_over(self):
pass
def on_mouse_in(self):
self.hasMouseOver = True
def on_mouse_out(self):
self.hasMouseOver = False
def on_key_down(self, keydata):
pass
def on_key_up(self, keydata):
pass
def on_destruction(self):
        # this function is called when the widget is removed from the renderer.
pass
@staticmethod
def _biselar(imagen, color_luz, color_sombra):
w, h = imagen.get_size()
draw.line(imagen, color_sombra, (0, h - 2), (w - 1, h - 2), 2)
draw.line(imagen, color_sombra, (w - 2, h - 2), (w - 2, 0), 2)
draw.lines(imagen, color_luz, 0, [(w - 2, 0), (0, 0), (0, h - 4)], 2)
return imagen
def reubicar_en_ventana(self, dx=0, dy=0):
self.rect.move_ip(dx, dy)
self.x += dx
self.y += dy
self.dirty = 1
def __repr__(self):
return self.nombre
def is_visible(self):
return self._visible
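# Hedged usage sketch (illustrative, not part of the project): a concrete
# widget subclasses BaseWidget and overrides only the event hooks it needs.
class ExampleButton(BaseWidget):
    nombre = 'example_button'
    def on_mouse_down(self, mousedata):
        # mark the sprite dirty so the renderer redraws it after a click
        self.dirty = 1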
|
zenieldanaku/DyDCreature_Editor
|
azoe/widgets/basewidget.py
|
Python
|
mit
| 2,314 | 0 |
from django.conf import settings
from django.http import HttpResponse
def index(request):
return HttpResponse("Hello")
|
rochacbruno/dynaconf
|
example/django_pure/polls/views.py
|
Python
|
mit
| 125 | 0 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import decimal
import os
import time
import pytest
from django.core.urlresolvers import reverse
from shuup.testing.browser_utils import (
click_element, wait_until_appeared, wait_until_condition
)
from shuup.testing.factories import (
create_order_with_product, get_default_product, get_default_shop,
get_default_supplier
)
from shuup.testing.utils import initialize_admin_browser_test
from shuup.utils.i18n import format_money
pytestmark = pytest.mark.skipif(os.environ.get("SHUUP_BROWSER_TESTS", "0") != "1", reason="No browser tests run.")
@pytest.mark.browser
@pytest.mark.django_db
def test_refunds(browser, admin_user, live_server, settings):
order = create_order_with_product(
get_default_product(), get_default_supplier(), 10, decimal.Decimal("10"), n_lines=10,
shop=get_default_shop())
order2 = create_order_with_product(
get_default_product(), get_default_supplier(), 10, decimal.Decimal("10"), n_lines=10,
shop=get_default_shop())
order2.create_payment(order2.taxful_total_price)
initialize_admin_browser_test(browser, live_server, settings)
_test_toolbar_visibility(browser, live_server, order)
_test_create_full_refund(browser, live_server, order)
_test_refund_view(browser, live_server, order2)
def _check_create_refund_link(browser, order, present):
url = reverse("shuup_admin:order.create-refund", kwargs={"pk": order.pk})
wait_until_condition(browser, lambda x: (len(x.find_by_css("a[href='%s']" % url)) > 0) == present)
def _test_toolbar_visibility(browser, live_server, order):
url = reverse("shuup_admin:order.detail", kwargs={"pk": order.pk})
browser.visit("%s%s" % (live_server, url))
wait_until_appeared(browser, "#order_details")
_check_create_refund_link(browser, order, False)
order.create_payment(order.taxful_total_price)
browser.reload()
wait_until_appeared(browser, "#order_details")
_check_create_refund_link(browser, order, True)
def _test_create_full_refund(browser, live_server, order):
url = reverse("shuup_admin:order.create-refund", kwargs={"pk": order.pk})
browser.visit("%s%s" % (live_server, url))
wait_until_condition(browser, lambda x: x.is_text_present("Refunded: %s" % format_money(order.shop.create_price("0.00"))))
wait_until_condition(browser, lambda x: x.is_text_present("Remaining: %s" % format_money(order.taxful_total_price)))
url = reverse("shuup_admin:order.create-full-refund", kwargs={"pk": order.pk})
click_element(browser, "a[href='%s']" % url)
wait_until_condition(browser, lambda x: x.is_text_present("Refund Amount: %s" % format_money(order.taxful_total_price)))
click_element(browser, "#create-full-refund")
wait_until_appeared(browser, "#order_details")
_check_create_refund_link(browser, order, False)
order.refresh_from_db()
assert not order.taxful_total_price
assert order.is_paid()
assert order.is_fully_shipped()
def _test_refund_view(browser, live_server, order):
url = reverse("shuup_admin:order.create-refund", kwargs={"pk": order.pk})
browser.visit("%s%s" % (live_server, url))
wait_until_condition(browser, lambda x: x.is_text_present("Refunded: %s" % format_money(order.shop.create_price("0.00"))))
assert len(browser.find_by_css("#id_form-0-line_number option")) == 12 # blank + arbitrary amount + num lines
click_element(browser, "#select2-id_form-0-line_number-container")
wait_until_appeared(browser, "input.select2-search__field")
browser.execute_script('$($(".select2-results__option")[1]).trigger({type: "mouseup"})') # select arbitrary amount
wait_until_condition(browser, lambda x: len(x.find_by_css("#id_form-0-text")))
wait_until_condition(browser, lambda x: len(x.find_by_css("#id_form-0-amount")))
browser.find_by_css("#id_form-0-text").first.value = "test"
browser.find_by_css("#id_form-0-amount").first.value = "900"
click_element(browser, "#add-refund")
click_element(browser, "#select2-id_form-1-line_number-container")
wait_until_appeared(browser, "input.select2-search__field")
browser.execute_script('$($(".select2-results__option")[2]).trigger({type: "mouseup"})') # select first line
browser.find_by_css("#id_form-1-amount").first.value == "100"
browser.find_by_css("#id_form-1-quantity").first.value == "10"
click_element(browser, "button[form='create_refund']")
_check_create_refund_link(browser, order, True) # can still refund quantity
order.refresh_from_db()
assert not order.taxful_total_price
assert order.is_paid()
assert not order.is_fully_shipped()
|
suutari/shoop
|
shuup_tests/browser/admin/test_refunds.py
|
Python
|
agpl-3.0
| 4,879 | 0.003689 |
"""Source code used for the talk:
http://www.slideshare.net/MarcGarcia11/cart-not-only-classification-and-regression-trees
"""
# data
import pandas as pd
data = {'age': [38, 49, 27, 19, 54, 29, 19, 42, 34, 64,
19, 62, 27, 77, 55, 41, 56, 32, 59, 35],
'distance': [6169.98, 7598.87, 3276.07, 1570.43, 951.76, 139.97, 4476.89,
8958.77, 1336.44, 6138.85, 2298.68, 1167.92, 676.30, 736.85,
1326.52, 712.13, 3083.07, 1382.64, 2267.55, 2844.18],
'attended': [False, False, False, True, True, True, False, True, True, True,
False, True, True, True, False, True, True, True, True, False]}
df = pd.DataFrame(data)
# base_plot
from bokeh.plotting import figure, show
def base_plot(df):
p = figure(title='Event attendance',
plot_width=900,
plot_height=400)
p.xaxis.axis_label = 'Distance'
p.yaxis.axis_label = 'Age'
p.circle(df[df.attended]['distance'],
df[df.attended]['age'],
color='red',
legend='Attended',
fill_alpha=0.2,
size=10)
p.circle(df[~df.attended]['distance'],
df[~df.attended]['age'],
color='blue',
legend="Didn't attend",
fill_alpha=0.2,
size=10)
return p
_ = show(base_plot(df))
# tree_to_nodes
from collections import namedtuple
from itertools import starmap
def tree_to_nodes(dtree):
nodes = starmap(namedtuple('Node', 'feature,threshold,left,right'),
zip(map(lambda x: {0: 'age', 1: 'distance'}.get(x),
dtree.tree_.feature),
dtree.tree_.threshold,
dtree.tree_.children_left,
dtree.tree_.children_right))
return list(nodes)
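# Hedged glue code (not part of the original talk source): tree_to_nodes
# expects a fitted sklearn DecisionTreeClassifier whose feature order is
# ['age', 'distance'], matching the {0: 'age', 1: 'distance'} map above.
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier(max_depth=4, random_state=0)
dtree.fit(df[['age', 'distance']], df['attended'])
nodes = tree_to_nodes(dtree)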
# cart_plot
from collections import namedtuple, deque
from functools import partial
class NodeRanges(namedtuple('NodeRanges', 'node,max_x,min_x,max_y,min_y')):
pass
def cart_plot(df, dtree, limit=None):
nodes = tree_to_nodes(dtree)
    plot = base_plot(df)
add_line = partial(plot.line, line_color='black', line_width=2)
stack = deque()
stack.append(NodeRanges(node=nodes[0],
max_x=df['distance'].max(),
min_x=df['distance'].min(),
max_y=df['age'].max(),
min_y=df['age'].min()))
count = 1
while len(stack):
node, max_x, min_x, max_y, min_y = stack.pop()
feature, threshold, left, right = node
if feature == 'distance':
add_line(x=[threshold, threshold],
y=[min_y, max_y])
elif feature == 'age':
add_line(x=[min_x, max_x],
y=[threshold, threshold])
else:
continue
stack.append(NodeRanges(node=nodes[left],
max_x=threshold if feature == 'distance' else max_x,
min_x=min_x,
max_y=threshold if feature == 'age' else max_y,
min_y=min_y))
stack.append(NodeRanges(node=nodes[right],
max_x=max_x,
min_x=threshold if feature == 'distance' else min_x,
max_y=max_y,
min_y=threshold if feature == 'age' else min_y))
if limit is not None and count >= limit:
break
else:
count += 1
show(plot)
# decision_tree_model
def decision_tree_model(age, distance):
if distance >= 2283.11:
if age >= 40.00:
if distance >= 6868.86:
if distance >= 8278.82:
return True
else:
return False
else:
return True
else:
return False
else:
if age >= 54.50:
if age >= 57.00:
return True
else:
return False
else:
return True
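# Quick sanity checks of the hand-coded tree against two rows of the toy
# `data` dict defined at the top (indices 0 and 4).
assert decision_tree_model(age=38, distance=6169.98) is False
assert decision_tree_model(age=54, distance=951.76) is True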
# entropy
import math
def entropy(a, b):
    total = a + b
    # a pure (or empty) node carries no information: lim p->0 of p*log2(p) = 0
    if total == 0 or a == 0 or b == 0:
        return 0.0
    prob_a = float(a) / total
    prob_b = float(b) / total
    return - prob_a * math.log(prob_a, 2) - prob_b * math.log(prob_b, 2)
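# Worked values: a 50/50 split carries the maximal 1 bit of entropy, a 9/1
# split is much purer, and a fully pure node contributes nothing.
assert abs(entropy(5, 5) - 1.0) < 1e-9
assert entropy(9, 1) < entropy(5, 5)
assert entropy(10, 0) == 0.0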
# get_best_split
def get_best_split(x, y, class_a_value=True, class_b_value=False):
    best_split = None
    best_entropy = 1.
    for feature in x.columns.values:
        column = x[feature]
        # candidate thresholds are the observed feature values
        # (a Series has no iterrows(); iterate its unique values instead)
        for value in column.unique():
            a = (y[column < value] == class_a_value).sum()
            b = (y[column < value] == class_b_value).sum()
            left_weight = float(a + b) / len(y.index)
            left_entropy = entropy(a, b)
            a = (y[column >= value] == class_a_value).sum()
            b = (y[column >= value] == class_b_value).sum()
            right_weight = float(a + b) / len(y.index)
            right_entropy = entropy(a, b)
            split_entropy = (left_weight * left_entropy +
                             right_weight * right_entropy)
            if split_entropy < best_entropy:
                best_split = (feature, value)
                best_entropy = split_entropy
    return best_split
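# Hedged example on the toy data: the greedy search returns the (feature,
# threshold) pair with the lowest weighted split entropy; with the boolean
# `attended` labels the default class values above apply.
first_split = get_best_split(df[['age', 'distance']], df['attended'])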
# train_decision_tree
def train_decision_tree(x, y):
feature, value = get_best_split(x, y)
x_left, y_left = x[x[feature] < value], y[x[feature] < value]
if len(y_left.unique()) > 1:
left_node = train_decision_tree(x_left, y_left)
else:
left_node = None
x_right, y_right = x[x[feature] >= value], y[x[feature] >= value]
if len(y_right.unique()) > 1:
right_node = train_decision_tree(x_right, y_right)
else:
right_node = None
return (feature, value, left_node, right_node)
|
datapythonista/datapythonista.github.io
|
docs/cart_talk.py
|
Python
|
apache-2.0
| 5,718 | 0.003498 |
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import unicode_literals
import formatter
import io
import sys
import time
import portage
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage.output import xtermTitle
from _emerge.getloadavg import getloadavg
if sys.hexversion >= 0x3000000:
basestring = str
class JobStatusDisplay(object):
_bound_properties = ("curval", "failed", "running")
# Don't update the display unless at least this much
# time has passed, in units of seconds.
_min_display_latency = 2
_default_term_codes = {
'cr' : '\r',
'el' : '\x1b[K',
'nel' : '\n',
}
_termcap_name_map = {
'carriage_return' : 'cr',
'clr_eol' : 'el',
'newline' : 'nel',
}
def __init__(self, quiet=False, xterm_titles=True):
object.__setattr__(self, "quiet", quiet)
object.__setattr__(self, "xterm_titles", xterm_titles)
object.__setattr__(self, "maxval", 0)
object.__setattr__(self, "merges", 0)
object.__setattr__(self, "_changed", False)
object.__setattr__(self, "_displayed", False)
object.__setattr__(self, "_last_display_time", 0)
self.reset()
isatty = os.environ.get('TERM') != 'dumb' and \
hasattr(self.out, 'isatty') and \
self.out.isatty()
object.__setattr__(self, "_isatty", isatty)
if not isatty or not self._init_term():
term_codes = {}
for k, capname in self._termcap_name_map.items():
term_codes[k] = self._default_term_codes[capname]
object.__setattr__(self, "_term_codes", term_codes)
encoding = sys.getdefaultencoding()
for k, v in self._term_codes.items():
if not isinstance(v, basestring):
self._term_codes[k] = v.decode(encoding, 'replace')
if self._isatty:
width = portage.output.get_term_size()[1]
else:
width = 80
self._set_width(width)
def _set_width(self, width):
if width == getattr(self, 'width', None):
return
if width <= 0 or width > 80:
width = 80
object.__setattr__(self, "width", width)
object.__setattr__(self, "_jobs_column_width", width - 32)
@property
def out(self):
"""Use a lazy reference to sys.stdout, in case the API consumer has
temporarily overridden stdout."""
return sys.stdout
def _write(self, s):
# avoid potential UnicodeEncodeError
s = _unicode_encode(s,
encoding=_encodings['stdio'], errors='backslashreplace')
out = self.out
if sys.hexversion >= 0x3000000:
out = out.buffer
out.write(s)
out.flush()
def _init_term(self):
"""
Initialize term control codes.
@rtype: bool
@return: True if term codes were successfully initialized,
False otherwise.
"""
term_type = os.environ.get("TERM", "").strip()
if not term_type:
return False
tigetstr = None
try:
import curses
try:
curses.setupterm(term_type, self.out.fileno())
tigetstr = curses.tigetstr
except curses.error:
pass
except ImportError:
pass
if tigetstr is None:
return False
term_codes = {}
for k, capname in self._termcap_name_map.items():
# Use _native_string for PyPy compat (bug #470258).
code = tigetstr(portage._native_string(capname))
if code is None:
code = self._default_term_codes[capname]
term_codes[k] = code
object.__setattr__(self, "_term_codes", term_codes)
return True
def _format_msg(self, msg):
return ">>> %s" % msg
def _erase(self):
self._write(
self._term_codes['carriage_return'] + \
self._term_codes['clr_eol'])
self._displayed = False
def _display(self, line):
self._write(line)
self._displayed = True
def _update(self, msg):
if not self._isatty:
self._write(self._format_msg(msg) + self._term_codes['newline'])
self._displayed = True
return
if self._displayed:
self._erase()
self._display(self._format_msg(msg))
def displayMessage(self, msg):
was_displayed = self._displayed
if self._isatty and self._displayed:
self._erase()
self._write(self._format_msg(msg) + self._term_codes['newline'])
self._displayed = False
if was_displayed:
self._changed = True
self.display()
def reset(self):
self.maxval = 0
self.merges = 0
for name in self._bound_properties:
object.__setattr__(self, name, 0)
if self._displayed:
self._write(self._term_codes['newline'])
self._displayed = False
def __setattr__(self, name, value):
old_value = getattr(self, name)
if value == old_value:
return
object.__setattr__(self, name, value)
if name in self._bound_properties:
self._property_change(name, old_value, value)
def _property_change(self, name, old_value, new_value):
self._changed = True
self.display()
def _load_avg_str(self):
try:
avg = getloadavg()
except OSError:
return 'unknown'
max_avg = max(avg)
if max_avg < 10:
digits = 2
elif max_avg < 100:
digits = 1
else:
digits = 0
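		# e.g. a load of 0.53 prints with 2 decimals, 12.3 with 1, 123 with 0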
return ", ".join(("%%.%df" % digits ) % x for x in avg)
def display(self):
"""
Display status on stdout, but only if something has
changed since the last call. This always returns True,
for continuous scheduling via timeout_add.
"""
if self.quiet:
return True
current_time = time.time()
time_delta = current_time - self._last_display_time
if self._displayed and \
not self._changed:
if not self._isatty:
return True
if time_delta < self._min_display_latency:
return True
self._last_display_time = current_time
self._changed = False
self._display_status()
return True
def _display_status(self):
# Don't use len(self._completed_tasks) here since that also
# can include uninstall tasks.
curval_str = "%s" % (self.curval,)
maxval_str = "%s" % (self.maxval,)
running_str = "%s" % (self.running,)
failed_str = "%s" % (self.failed,)
load_avg_str = self._load_avg_str()
color_output = io.StringIO()
plain_output = io.StringIO()
style_file = portage.output.ConsoleStyleFile(color_output)
style_file.write_listener = plain_output
style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
style_writer.style_listener = style_file.new_styles
f = formatter.AbstractFormatter(style_writer)
number_style = "INFORM"
f.add_literal_data("Jobs: ")
f.push_style(number_style)
f.add_literal_data(curval_str)
f.pop_style()
f.add_literal_data(" of ")
f.push_style(number_style)
f.add_literal_data(maxval_str)
f.pop_style()
f.add_literal_data(" complete")
if self.running:
f.add_literal_data(", ")
f.push_style(number_style)
f.add_literal_data(running_str)
f.pop_style()
f.add_literal_data(" running")
if self.failed:
f.add_literal_data(", ")
f.push_style(number_style)
f.add_literal_data(failed_str)
f.pop_style()
f.add_literal_data(" failed")
padding = self._jobs_column_width - len(plain_output.getvalue())
if padding > 0:
f.add_literal_data(padding * " ")
f.add_literal_data("Load avg: ")
f.add_literal_data(load_avg_str)
# Truncate to fit width, to avoid making the terminal scroll if the
# line overflows (happens when the load average is large).
plain_output = plain_output.getvalue()
if self._isatty and len(plain_output) > self.width:
# Use plain_output here since it's easier to truncate
# properly than the color output which contains console
# color codes.
self._update(plain_output[:self.width])
else:
self._update(color_output.getvalue())
if self.xterm_titles:
# If the HOSTNAME variable is exported, include it
# in the xterm title, just like emergelog() does.
# See bug #390699.
title_str = " ".join(plain_output.split())
hostname = os.environ.get("HOSTNAME")
if hostname is not None:
title_str = "%s: %s" % (hostname, title_str)
xtermTitle(title_str)
|
nullishzero/Portage
|
pym/_emerge/JobStatusDisplay.py
|
Python
|
gpl-2.0
| 7,763 | 0.031302 |
#!/usr/bin/env python3
from mutagen.mp3 import MP3
import sys
if len(sys.argv) < 2:
print('error: didn\'t pass enough arguments')
print('usage: ./bitrate.py <file name>')
print('usage: find the bitrate of an mp3 file')
exit(1)
f = MP3(sys.argv[1])
print('bitrate: %s' % (f.info.bitrate / 1000))
|
lehmacdj/.dotfiles
|
bin/bitrate.py
|
Python
|
gpl-3.0
| 314 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
],
options={
},
bases=(models.Model,),
),
]
|
joshsmith2/superlists
|
lists/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 492 | 0.002033 |
from rstem import led_matrix, accel
import RPi.GPIO as GPIO
import random
import time
import sys
# notify of progress
print("P50")
sys.stdout.flush()
# set up led matrix
#led_matrix.init_grid(2,2)
led_matrix.init_matrices([(0,8),(8,8),(8,0),(0,0)])
# set up accelerometer
accel.init(1)
# notify of progress
print("P60")
sys.stdout.flush()
# set up buttons
A = 4
B = 17
UP = 25
DOWN = 24
LEFT = 23
RIGHT = 18
START = 27
SELECT = 22
# accelerometer threshold
THRESHOLD = 3
class State(object):
PLAYING, IDLE, SCORE, EXIT = range(4)
# starting variables
state = State.IDLE
field = None
title = led_matrix.LEDText("ASPIRIN - Press A to use accelerometer or B to use buttons")
# notify of progress
print("P90")
sys.stdout.flush()
class Direction(object):
LEFT, RIGHT, UP, DOWN = range(4)
class Apple(object):
def __init__(self, position):
self.position = position
def draw(self):
led_matrix.point(*self.position)
class Striker(object):
def __init__(self, start_pos, direction):
self.position = start_pos # starting position of the striker
self.direction = direction
def draw(self):
led_matrix.point(*self.position, color=3)
def move(self):
# check if the striker hit the wall and needs to bounce back
if self.direction == Direction.LEFT and self.position[0] == 0:
self.direction = Direction.RIGHT
elif self.direction == Direction.RIGHT and self.position[0] == led_matrix.width()-1:
self.direction = Direction.LEFT
elif self.direction == Direction.DOWN and self.position[1] == 0:
self.direction = Direction.UP
elif self.direction == Direction.UP and self.position[1] == led_matrix.height()-1:
self.direction = Direction.DOWN
if self.direction == Direction.LEFT:
self.position = (self.position[0]-1, self.position[1])
elif self.direction == Direction.RIGHT:
self.position = (self.position[0]+1, self.position[1])
elif self.direction == Direction.DOWN:
self.position = (self.position[0], self.position[1]-1)
elif self.direction == Direction.UP:
self.position = (self.position[0], self.position[1]+1)
class Player(object):
def __init__(self, position=None, accel=False):
# set position to be center of screen if position is not given
if position is None:
self.position = (int(led_matrix.width()/2), int(led_matrix.height()/2))
else:
self.position = position
        self.accel = accel  # True if controls are the accelerometer, False if controls are buttons
def draw(self):
led_matrix.point(*self.position, color=8)
def move(self, direction):
if direction == Direction.UP:
if self.position[1] < led_matrix.height()-1:
self.position = (self.position[0], self.position[1]+1)
elif direction == Direction.DOWN:
if self.position[1] > 0:
self.position = (self.position[0], self.position[1]-1)
elif direction == Direction.LEFT:
if self.position[0] > 0:
self.position = (self.position[0]-1, self.position[1])
elif direction == Direction.RIGHT:
if self.position[0] < led_matrix.width()-1:
self.position = (self.position[0]+1, self.position[1])
else:
raise ValueError("Invalid direction given.")
class Field(object):
def __init__(self, player):
self.player = player
empty_strikers = set()
# initialize empty strikers
for x_pos in range(led_matrix.width()):
empty_strikers.add(Striker((x_pos, 0), Direction.UP))
for y_pos in range(led_matrix.height()):
empty_strikers.add(Striker((0, y_pos), Direction.RIGHT))
self.empty_strikers = empty_strikers # strikers not used yet
self.strikers = set() # active strikers
self.apple = None
def draw(self):
self.player.draw()
self.apple.draw()
# strikers = self.horizontal_strikers.union(self.vertical_strikers)
for striker in self.strikers:
striker.draw()
def player_collided_with_apple(self):
return self.player.position == self.apple.position
def player_collided_with_striker(self):
# strikers = self.horizontal_strikers.union(self.vertical_strikers)
for striker in self.strikers:
if self.player.position == striker.position:
return True
return False
def new_apple(self):
# set up list of x and y choices
x_pos = list(range(led_matrix.width()))
y_pos = list(range(led_matrix.height()))
        # remove the player's row and column so the apple never shares
        # either coordinate with the player
del x_pos[self.player.position[0]]
del y_pos[self.player.position[1]]
self.apple = Apple((random.choice(x_pos), random.choice(y_pos)))
def add_striker(self):
if len(self.empty_strikers) == 0:
return False # no more strikers to make, you win!!
new_striker = random.choice(list(self.empty_strikers))
self.strikers.add(new_striker)
self.empty_strikers.remove(new_striker)
return True
# set up buttons
GPIO.setmode(GPIO.BCM)
def button_handler(channel):
global state
global field
if channel in [START, SELECT]:
state = State.EXIT
elif state in [State.IDLE, State.SCORE] and channel in [A, B]:
# Reset field and player to start a new game
player = Player(accel=(channel == A))
field = None
field = Field(player)
field.new_apple() # add the first apple
state = State.PLAYING
# elif state == State.PLAYING and (not field.player.accel) and channel in [UP, DOWN, LEFT, RIGHT]:
# if channel == UP:
# field.player.move(Direction.UP)
# elif channel == DOWN:
# field.player.move(Direction.DOWN)
# elif channel == LEFT:
# field.player.move(Direction.LEFT)
# elif channel == RIGHT:
# field.player.move(Direction.RIGHT)
for button in [UP, DOWN, LEFT, RIGHT, START, A, B, SELECT]:
GPIO.setup(button, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(button, GPIO.FALLING, callback=button_handler, bouncetime=100)
# notify of progress
print("P100")
sys.stdout.flush()
# notify menu we are ready for the led matrix
print("READY")
sys.stdout.flush()
# FSM =======
while True:
if state == State.PLAYING:
led_matrix.erase()
        # move player with accelerometer, otherwise poll the buttons
if field.player.accel:
angles = accel.angles()
# "Simple" lowpass filter for velocity data
x = angles[0]
y = angles[1]
# alpha = 0.2
# velocity = 0.0
# x_diff = velocity*alpha + (angles[0]*2*8/90)*(1 - alpha)
# y_diff = velocity*alpha + (angles[1]*2*8/90)*(1 - alpha)
if x > THRESHOLD:
field.player.move(Direction.RIGHT)
elif x < -THRESHOLD:
field.player.move(Direction.LEFT)
if y > THRESHOLD:
field.player.move(Direction.DOWN)
elif y < -THRESHOLD:
field.player.move(Direction.UP)
else:
if GPIO.input(UP) == 0:
field.player.move(Direction.UP)
if GPIO.input(DOWN) == 0:
field.player.move(Direction.DOWN)
if GPIO.input(LEFT) == 0:
field.player.move(Direction.LEFT)
if GPIO.input(RIGHT) == 0:
field.player.move(Direction.RIGHT)
# move the strikers
for striker in field.strikers:
striker.move()
# draw all the objects on the field
field.draw()
led_matrix.show()
# check for collisions
if field.player_collided_with_striker():
state = State.SCORE
elif field.player_collided_with_apple():
field.new_apple()
ret = field.add_striker()
if ret == False:
state = State.SCORE
time.sleep(.1)
elif state == State.IDLE:
x = led_matrix.width()
while x > -title.width:
# break if state has changed, (don't wait for scroll to finish)
if state != State.IDLE:
break
led_matrix.erase()
led_matrix.sprite(title, (x, led_matrix.height()/2 - (title.height/2)))
led_matrix.show()
x -= 1
time.sleep(.05)
elif state == State.SCORE:
led_matrix.erase()
led_matrix.text(str(len(field.strikers)))
# led_matrix.text(str(len(field.horizontal_strikers) + len(field.vertical_strikers)))
led_matrix.show()
elif state == State.EXIT:
GPIO.cleanup()
led_matrix.cleanup()
sys.exit(0)
else:
raise ValueError("Invalid State")
|
scottsilverlabs/raspberrystem
|
rstem/projects/led_matrix_games/aspirin.py
|
Python
|
apache-2.0
| 9,366 | 0.009075 |
import sqlite3 as sql
from flask.json import jsonify
from flask import current_app
def total_entries():
    with sql.connect("names.db") as con:
        cur = con.cursor()
        entries = cur.execute("SELECT count(*) FROM names").fetchone()
        # fetchone() returns a 1-tuple such as (42,); index it directly
        # instead of slicing its string representation
        return '{}\n'.format(entries[0])
def select_entries_by_name(name):
    # check the cache before touching the database; the key must include
    # the name, otherwise every name would share a single cached result
    cache_key = 'entries:{}'.format(name)
    cached = current_app.cache.get(cache_key)
    if cached:
        return cached
    with sql.connect("names.db") as con:
        cur = con.cursor()
        # parameterized query instead of string formatting (avoids SQL injection)
        query = cur.execute(
            "SELECT id, year, gender, count FROM names WHERE name = ?", (name,))
        result = [{'id': row[0], 'year': row[1], 'gender': row[2],
                   'count': row[3]} for row in query.fetchall()]
    current_app.cache.set(cache_key, result, timeout=180)
    return result
    #return jsonify({'Entries for %s' % name: entries})
def insert_name(name, year, gender, count):
    with sql.connect("names.db") as con:
        cur = con.cursor()
        try:
            # parameterized insert (avoids SQL injection)
            cur.execute("INSERT INTO names (name,year,gender,count) "
                        "VALUES (?,?,?,?)", (name, year, gender, count))
            con.commit()
            new_id = cur.lastrowid
            return str(new_id)
        except Exception as e:
            print e
            return 'The baby is already present in the DataBase.'
def first_and_last(name):
    with sql.connect("names.db") as con:
        cur = con.cursor()
        last = cur.execute("SELECT MAX(year) FROM names WHERE name=?",
                           (name,)).fetchone()
        first = cur.execute("SELECT MIN(year) FROM names WHERE name=?",
                            (name,)).fetchone()
        # fetchone() returns 1-tuples; index them instead of slicing a repr
        return 'Last year is: %s \nFirst year is: %s' % (last[0], first[0])
|
carlitos26/RESTful-Web-service
|
browser-version/app/modules.py
|
Python
|
gpl-3.0
| 1,874 | 0.012807 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
DAG designed to test a PythonOperator that calls a functool.partial
"""
import functools
import logging
from datetime import datetime
from airflow.models import DAG
from airflow.operators.python import PythonOperator
DEFAULT_DATE = datetime(2016, 1, 1)
default_args = dict(start_date=DEFAULT_DATE, owner='airflow')
class CallableClass:
"""
Class that is callable.
"""
def __call__(self):
"""A __call__ method """
def a_function(_, __):
"""A function with two args """
partial_function = functools.partial(a_function, arg_x=1)
class_instance = CallableClass()
logging.info('class_instance type: %s', type(class_instance))
dag = DAG(dag_id='test_task_view_type_check', default_args=default_args)
dag_task1 = PythonOperator(
task_id='test_dagrun_functool_partial',
dag=dag,
python_callable=partial_function,
)
dag_task2 = PythonOperator(
task_id='test_dagrun_instance',
dag=dag,
python_callable=class_instance,
)
|
airbnb/airflow
|
tests/dags/test_task_view_type_check.py
|
Python
|
apache-2.0
| 1,768 | 0 |
from . import data_reduce
import numpy as np
from . import readout_classifier
class single_shot_readout:
"""
Single shot readout class
Args:
adc (Instrument): a device that measures a complex vector for each readout trigger (an ADC)
		prepare_seqs (list of pulses.sequence): a list of control pulse sequences, one per state; the list index identifies the state.
		ro_seq (pulses.sequence): a sequence of control pulses that is used to generate the readout pulse of the DAC.
pulse_generator (pulses.pulse_generator): pulse generator used to concatenate and set waveform sequences on the DAC.
ro_delay_seq (pulses.sequence): Sequence used to align the DAC and ADC (readout delay compensation)
adc_measurement_name (str): name of measurement on ADC
"""
def __init__(self, adc, prepare_seqs, ro_seq, pulse_generator, ro_delay_seq = None, _readout_classifier = None, adc_measurement_name='Voltage'):
self.adc = adc
self.ro_seq = ro_seq
self.prepare_seqs = prepare_seqs
self.ro_delay_seq = ro_delay_seq
self.pulse_generator = pulse_generator
self.repeat_samples = 2
self.save_last_samples = False
self.train_test_split = 0.8
self.measurement_name = ''
# self.dump_measured_samples = False
self.measure_avg_samples = True
#self.measure_cov_samples = False
self.measure_hists = True
self.measure_feature_w_threshold = True
#self.measure_features = True
#self.cutoff_start = 0
if not _readout_classifier:
self.readout_classifier = readout_classifier.linear_classifier()
else:
self.readout_classifier = _readout_classifier
self.adc_measurement_name = adc_measurement_name
self.filter_binary = {'get_points':lambda: (self.adc.get_points()[adc_measurement_name][0],),
'get_dtype': lambda: int,
'get_opts': lambda: {},
'filter': self.filter_binary_func}
# def measure_delay(self, ro_channel):
# import matplotlib.pyplot as plt
# from scipy.signal import resample
# self.pulse_generator.set_seq(self.ro_delay_seq)
# first_nonzero = int(np.nonzero(np.abs(self.pulse_generator.channels[ro_channel].get_waveform()))[0][0]/self.pulse_generator.channels[ro_channel].get_clock()*self.adc.get_clock())
# ro_dac_waveform = self.pulse_generator.channels[ro_channel].awg_I.get_waveform(channel=self.pulse_generator.channels[ro_channel].awg_ch_I)+\
# 1j*self.pulse_generator.channels[ro_channel].awg_Q.get_waveform(channel=self.pulse_generator.channels[ro_channel].awg_ch_Q)
# ro_dac_waveform = resample(ro_dac_waveform, num=int(len(ro_dac_waveform)/self.pulse_generator.channels[ro_channel].get_clock()*self.adc.get_clock()))
# ro_adc_waveform = np.mean(self.adc.measure()['Voltage'], axis=0)
# ro_dac_waveform = ro_dac_waveform - np.mean(ro_dac_waveform)
# ro_adc_waveform = ro_adc_waveform - np.mean(ro_adc_waveform)
# xc = np.abs(np.correlate(ro_dac_waveform, ro_adc_waveform, 'same'))
# xc_max = np.argmax(xc)
# delay = int((xc_max - first_nonzero)/2)
# #plt.figure('delay')
# #plt.plot(ro_dac_waveform[first_nonzero:])
# #plt.plot(ro_adc_waveform[delay:])
# #plt.plot(ro_adc_waveform)
# #print ('Measured delay is {} samples'.format(delay), first_nonzero, xc_max)
# return delay
def calibrate(self):
X = []
y = []
for class_id, prepare_seq in enumerate(self.prepare_seqs):
for i in range(self.repeat_samples):
# pulse sequence to prepare state
self.pulse_generator.set_seq(prepare_seq+self.ro_seq)
measurement = self.adc.measure()
if type(self.adc_measurement_name) is list:
					raise NotImplementedError('Multiqubit readout not implemented')  # needs a multiqubit readout implementation
else:
X.append(measurement[self.adc_measurement_name])
y.extend([class_id]*len(self.adc.get_points()[self.adc_measurement_name][0][1]))
X = np.reshape(X, (-1, len(self.adc.get_points()[self.adc_measurement_name][-1][1]))) # last dimension is the feature dimension
y = np.asarray(y)
# if self.dump_measured_samples or self.save_last_samples:
# self.calib_X = X#np.reshape(X, (len(self.prepare_seqs), -1, len(self.adc.get_points()[self.adc_measurement_name][-1][1])))
# self.calib_y = y
scores = readout_classifier.evaluate_classifier(self.readout_classifier, X, y)
self.readout_classifier.fit(X, y)
self.scores = scores
self.confusion_matrix = readout_classifier.confusion_matrix(y, self.readout_classifier.predict(X))
def get_opts(self):
opts = {}
scores = {score_name:{'log':False} for score_name in readout_classifier.readout_classifier_scores}
opts.update(scores)
if self.measure_avg_samples:
avg_samples = {'avg_sample'+str(_class):{'log':False} for _class in self.readout_classifier.class_list}
#features = {'feature'+str(_class):{'log':False} for _class in self.readout_classifier.class_list}
opts.update(avg_samples)
#meas.update(features)
if self.measure_hists:
#hists = {'hists':{'log':Fas}}
opts['hists'] = {'log':False}
opts['proba_points'] = {'log':False}
if self.measure_feature_w_threshold:
opts['feature'] = {'log':False}
opts['threshold'] = {'log':False}
return opts
def measure(self):
self.calibrate()
meas = {}
# if self.dump_measured_samples:
# self.dump_samples(name=self.measurement_name)
meas.update(self.scores)
if self.measure_avg_samples:
avg_samples = {'avg_sample'+str(_class):self.readout_classifier.class_averages[_class] for _class in self.readout_classifier.class_list}
#features = {'feature'+str(_class):self.readout_classifier.class_features[_class] for _class in self.readout_classifier.class_list}
meas.update(avg_samples)
#meas.update(features)
if self.measure_hists:
meas['hists'] = self.readout_classifier.hists
meas['proba_points'] = self.readout_classifier.proba_points
if self.measure_feature_w_threshold:
meas['feature'] = self.readout_classifier.feature
meas['threshold'] = self.readout_classifier.threshold
return meas
def get_points(self):
points = {}
scores = {score_name:[] for score_name in readout_classifier.readout_classifier_scores}
points.update(scores)
if self.measure_avg_samples:
avg_samples = {'avg_sample'+str(_class):[('Time',np.arange(self.adc.get_nop())/self.adc.get_clock(), 's')] for _class in self.readout_classifier.class_list}
#features = {'feature'+str(_class):[('Time',np.arange(self.adc.get_nop())/self.adc.get_clock(), 's')] for _class in self.readout_classifier.class_list}
points.update(avg_samples)
#points.update(features)
if self.measure_hists:
points['hists'] = [('class', self.readout_classifier.class_list, ''), ('bin', np.arange(self.readout_classifier.nbins), '')]
points['proba_points'] = [('bin', np.arange(self.readout_classifier.nbins), '')]
if self.measure_feature_w_threshold:
points['feature'] = [('Time',np.arange(self.adc.get_nop())/self.adc.get_clock(), 's')]
points['threshold'] = []
return points
def get_dtype(self):
dtypes = {}
scores = {score_name:float for score_name in readout_classifier.readout_classifier_scores}
dtypes.update(scores)
if self.measure_avg_samples:
avg_samples = {'avg_sample'+str(_class):self.adc.get_dtype()[self.adc_measurement_name] for _class in self.readout_classifier.class_list}
features = {'feature'+str(_class):self.adc.get_dtype()[self.adc_measurement_name] for _class in self.readout_classifier.class_list}
dtypes.update(avg_samples)
dtypes.update(features)
if self.measure_hists:
dtypes['hists'] = float
dtypes['proba_points'] = float
if self.measure_feature_w_threshold:
dtypes['feature'] = np.complex
dtypes['threshold'] = float
return dtypes
# def dump_samples(self, name):
# from .save_pkl import save_pkl
# header = {'type':'Readout classification X', 'name':name}
# measurement = {'Readout classification X':(['Sample ID', 'time'],
# [np.arange(self.calib_X.shape[0]), np.arange(self.calib_X.shape[1])/self.adc.get_clock()],
# self.calib_X),
# 'Readout classification y':(['Sample ID'],
# [np.arange(self.calib_X.shape[0])],
# self.calib_y)}
# save_pkl(header, measurement, plot=False)
def filter_binary_func(self, x):
return self.readout_classifier.predict(x[self.adc_measurement_name])
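# Hedged usage sketch (instrument and sequence names are illustrative, not
# from this repository):
# ro = single_shot_readout(adc, [prepare_ground, prepare_excited], ro_seq,
#                          pulse_generator)
# results = ro.measure()  # calibrates, fits the classifier, returns scores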
|
ooovector/qtlab_replacement
|
single_shot_readout.py
|
Python
|
gpl-3.0
| 8,239 | 0.028766 |
#!/usr/bin/python
import fileinput
import string
import sys
import os
ar = 'ar'
fortran_compiler = 'ftn'
fortran_opt_flags = '-O3'
fortran_link_flags = '-O1'
c_compiler = 'cc'
c_opt_flags = '-O3'
src_dir = './src/'
obj_dir = './obj/'
exe_dir = './exe/'
lib_name = 'tce_sort_f77_basic.a'
count = '100'
rank = '30'
ranks = [rank,rank,rank,rank]
size = int(ranks[0])*int(ranks[1])*int(ranks[2])*int(ranks[3])
sizechar = str(size)
def perm(l):
sz = len(l)
if sz <= 1:
return [l]
return [p[:i]+[l[0]]+p[i:] for i in xrange(sz) for p in perm(l[1:])]
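# e.g. perm(['1', '2']) -> [['1', '2'], ['2', '1']]; below it enumerates all
# 24 orderings of the four tensor indices.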
indices = ['4','3','2','1']
#all_permutations = [indices]
#transpose_list = [indices]
#loop_list = [indices]
all_permutations = perm(indices)
transpose_list = perm(indices)
loop_list = perm(indices)
print fortran_compiler+' '+fortran_opt_flags+' -c tce_sort_hirata.F'
os.system(fortran_compiler+' '+fortran_opt_flags+' -c tce_sort_hirata.F')
os.system('ar -r '+lib_name+' tce_sort_hirata.o')
print fortran_compiler+' '+fortran_opt_flags+' -c glass_correct.F'
os.system(fortran_compiler+' '+fortran_opt_flags+' -c glass_correct.F')
os.system('ar -r '+lib_name+' glass_correct.o')
print c_compiler+' '+c_opt_flags+' -c tce_sort_4kg.c'
os.system(c_compiler+' '+c_opt_flags+' -c tce_sort_4kg.c')
os.system('ar -r '+lib_name+' tce_sort_4kg.o')
print c_compiler+' '+c_opt_flags+' -c tce_sort_4kg_4321.c'
os.system(c_compiler+' '+c_opt_flags+' -c tce_sort_4kg_4321.c')
os.system('ar -r '+lib_name+' tce_sort_4kg_4321.o')
for transpose_order in transpose_list:
dummy = 0
A = transpose_order[0]
B = transpose_order[1]
C = transpose_order[2]
D = transpose_order[3]
driver_name = 'transpose_'+A+B+C+D
print driver_name
source_name = driver_name+'_driver.F'
lst_name = driver_name+'_driver.lst'
source_file = open(source_name,'w')
source_file.write(' PROGRAM ARRAYTEST\n')
source_file.write('#include "mpif.h"\n')
source_file.write(' REAL*8 before('+ranks[0]+','+ranks[0]+','+ranks[0]+','+ranks[0]+')\n')
source_file.write(' REAL*8 after_jeff('+sizechar+')\n')
source_file.write(' REAL*8 after_hirata('+sizechar+')\n')
source_file.write(' REAL*8 after_glass('+sizechar+')\n')
source_file.write(' REAL*8 factor\n')
source_file.write(' REAL*8 Tstart,Tfinish,Thirata,Tglass,Tjeff\n')
source_file.write(' REAL*8 Tspeedup,Tbest\n')
source_file.write(' INTEGER*4 i,j,k,l\n')
source_file.write(' INTEGER*4 aSize(4)\n')
source_file.write(' INTEGER*4 perm(4)\n')
source_file.write(' INTEGER*4 fastest(4)\n')
source_file.write(' INTEGER ierror\n')
source_file.write(' LOGICAL glass_correct\n')
source_file.write(' EXTERNAL glass_correct\n')
source_file.write(' call mpi_init(ierror)\n')
source_file.write(' aSize(1) = '+ranks[0]+'\n')
source_file.write(' aSize(2) = '+ranks[1]+'\n')
source_file.write(' aSize(3) = '+ranks[2]+'\n')
source_file.write(' aSize(4) = '+ranks[3]+'\n')
source_file.write(' perm(1) = '+A+'\n')
source_file.write(' perm(2) = '+B+'\n')
source_file.write(' perm(3) = '+C+'\n')
source_file.write(' perm(4) = '+D+'\n')
source_file.write(' DO 70 i = 1, '+ranks[0]+'\n')
source_file.write(' DO 60 j = 1, '+ranks[1]+'\n')
source_file.write(' DO 50 k = 1, '+ranks[2]+'\n')
source_file.write(' DO 40 l = 1, '+ranks[3]+'\n')
source_file.write(' before(i,j,k,l) = l + k*10 + j*100 + i*1000\n')
source_file.write('40 CONTINUE\n')
source_file.write('50 CONTINUE\n')
source_file.write('60 CONTINUE\n')
source_file.write('70 CONTINUE\n')
source_file.write(' factor = 1.0\n')
source_file.write(' Tbest=999999.0\n')
source_file.write(' Tstart=0.0\n')
source_file.write(' Tfinish=0.0\n')
source_file.write(' CALL CPU_TIME(Tstart)\n')
source_file.write(' DO 30 i = 1, '+count+'\n')
source_file.write(' CALL tce_sort_4(before, after_hirata,\n')
source_file.write(' & aSize(1), aSize(2), aSize(3), aSize(4),\n')
source_file.write(' & perm(1), perm(2), perm(3), perm(4), factor)\n')
source_file.write('30 CONTINUE\n')
source_file.write(' CALL CPU_TIME(Tfinish)\n')
source_file.write(' Thirata=(Tfinish-Tstart)\n')
source_file.write(' Tstart=0.0\n')
source_file.write(' Tfinish=0.0\n')
source_file.write(' Tstart=rtc()\n')
source_file.write(' IF( ((perm(1).eq.4).and.(perm(2).eq.3)).and.\n')
source_file.write(' & ((perm(3).eq.2).and.(perm(4).eq.1)) ) THEN\n')
source_file.write(' CALL CPU_TIME(Tstart)\n')
source_file.write(' DO 31 i = 1, '+count+'\n')
source_file.write(' CALL tce_sort_4kg_4321_(before, after_glass,\n')
source_file.write(' & aSize(1), aSize(2), aSize(3), aSize(4),\n')
source_file.write(' & factor)\n')
source_file.write('31 CONTINUE\n')
source_file.write(' CALL CPU_TIME(Tfinish)\n')
source_file.write(' ELSEIF(glass_correct(perm(1), perm(2), perm(3), perm(4))) THEN\n')
source_file.write(' CALL CPU_TIME(Tstart)\n')
source_file.write(' DO 32 i = 1, '+count+'\n')
source_file.write(' CALL tce_sort_4kg_(before, after_glass,\n')
source_file.write(' & aSize(1), aSize(2), aSize(3), aSize(4),\n')
source_file.write(' & perm(1), perm(2), perm(3), perm(4), factor)\n')
source_file.write('32 CONTINUE\n')
source_file.write(' CALL CPU_TIME(Tfinish)\n')
source_file.write(' ENDIF\n')
#source_file.write(' Tfinish=rtc()\n')
source_file.write(' Tglass=(Tfinish-Tstart)\n')
source_file.write(' IF(glass_correct(perm(1), perm(2), perm(3), perm(4))) THEN\n')
#source_file.write(' PRINT*," i after_glass(i)\n')
#source_file.write(' & after_hirata(i)"\n')
source_file.write(' DO 33 i = 1, '+sizechar+'\n')
source_file.write(' IF (after_glass(i).ne.after_hirata(i)) THEN\n')
source_file.write(' PRINT*,"glass error ",i,after_glass(i),after_hirata(i)\n')
source_file.write(' ENDIF\n')
source_file.write('33 CONTINUE\n')
source_file.write(' ENDIF\n')
source_file.write(' write(6,*) "TESTING TRANPOSE TYPE '+A+B+C+D+'"\n')
source_file.write(' write(6,*) "==================="\n')
source_file.write(' write(6,*) "The compilation flags were:"\n')
for option in range(0,len(fortran_opt_flags.split())):
source_file.write(' write(6,*) "'+fortran_opt_flags.split()[option]+'"\n')
source_file.write(' write(6,*) "==================="\n')
source_file.write(' write(6,*) "Hirata Reference = ",Thirata,"seconds"\n')
source_file.write(' IF(glass_correct(perm(1), perm(2), perm(3), perm(4))) THEN\n')
source_file.write(' write(6,*) "KGlass Reference = ",Tglass,"seconds"\n')
source_file.write(' ENDIF\n')
source_file.write(' write(6,1001) "Algorithm","Jeff","Speedup","Best","Best Speedup"\n')
for loop_order in loop_list:
dummy = dummy+1
a = loop_order[0]
b = loop_order[1]
c = loop_order[2]
d = loop_order[3]
subroutine_name = 'trans_'+A+B+C+D+'_loop_'+a+b+c+d+'_'
source_file.write(' Tstart=0.0\n')
source_file.write(' Tfinish=0.0\n')
source_file.write(' CALL CPU_TIME(Tstart)\n')
source_file.write(' DO '+str(100+dummy)+' i = 1, '+count+'\n')
source_file.write(' CALL '+subroutine_name+'(before, after_jeff,\n')
source_file.write(' & aSize(1), aSize(2), aSize(3), aSize(4),\n')
source_file.write(' & factor)\n')
source_file.write(str(100+dummy)+' CONTINUE\n')
source_file.write(' CALL CPU_TIME(Tfinish)\n')
source_file.write(' Tjeff=(Tfinish-Tstart)\n')
source_file.write(' Tspeedup=Thirata/Tjeff\n')
source_file.write(' Tbest=min(Tjeff,Tbest)\n')
source_file.write(' if(Tjeff.eq.Tbest) then \n')
source_file.write(' fastest(1)='+a+'\n')
source_file.write(' fastest(2)='+b+'\n')
source_file.write(' fastest(3)='+c+'\n')
source_file.write(' fastest(4)='+d+'\n')
source_file.write(' endif\n')
# source_file.write(' goto 911\n') ########################
if 0 < dummy < 10:
nice_dummy=' '+str(dummy)
if 9 < dummy < 100:
nice_dummy=' '+str(dummy)
if 99 < dummy < 999:
nice_dummy=''+str(dummy)
#source_file.write(' PRINT*,"Loop '+a+b+c+d+' ",Tjeff,Tspeedup\n')
source_file.write(' write(6,1100) "'+nice_dummy+' Loop '+a+b+c+d+' ",\n')
source_file.write(' & Tjeff,Tspeedup,Tbest,Thirata/Tbest\n')
#source_file.write(' DO '+str(500+dummy)+' i = 1, '+sizechar+'\n')
#source_file.write(' IF (after_jeff(i).ne.after_hirata(i)) THEN\n')
#source_file.write(' write(6,*),"transpose is wrong for element = ",i\n')
#source_file.write(' ENDIF\n')
#source_file.write(str(500+dummy)+' CONTINUE\n')
#source_file.write(' PRINT*," i, after_jeff(i),after_hirata(i)"\n')
source_file.write(' DO '+str(500+dummy)+' i = 1, '+sizechar+'\n')
source_file.write(' IF (after_jeff(i).ne.after_hirata(i)) THEN\n')
source_file.write(' PRINT*,"jeff error ",i,after_jeff(i),after_hirata(i)\n')
source_file.write(' ENDIF\n')
source_file.write(str(500+dummy)+' CONTINUE\n')
source_file.write(' write(6,1020) "The best loop order for '+A+B+C+D+' is:",\n')
source_file.write(' & fastest(1),fastest(2),fastest(3),fastest(4)\n')
source_file.write(' write(6,1030) "The best time is:",Tbest\n')
source_file.write(' write(6,1030) "The best speedup is:",Thirata/Tbest\n')
source_file.write(' call mpi_finalize(ierror)\n')
source_file.write(' STOP\n')
source_file.write(' 1001 format(1x,a13,a12,a15,a9,a18)\n')
source_file.write(' 1020 format(1x,a30,8x,4i1)\n')
source_file.write(' 1030 format(1x,a30,1f12.5)\n')
source_file.write(' 1100 format(1x,a16,4f12.5)\n')
source_file.write(' 911 continue\n')
source_file.write(' END\n')
source_file.close()
print fortran_compiler+' '+fortran_link_flags+' '+' '+source_name+' '+lib_name+' '+' -o '+exe_dir+driver_name+'.x'
os.system(fortran_compiler+' '+fortran_link_flags+' '+' '+source_name+' '+lib_name+' -o '+exe_dir+driver_name+'.x')
os.system('mv '+source_name+' '+src_dir)
|
jeffhammond/spaghetty
|
branches/old/python/archive/build_executables_basic.py
|
Python
|
bsd-2-clause
| 11,373 | 0.006419 |
import unittest
from models import heliosat
import numpy as np
from netcdf import netcdf as nc
from datetime import datetime
import os
import glob
class TestPerformance(unittest.TestCase):
def setUp(self):
# os.system('rm -rf static.nc temporal_cache products')
os.system('rm -rf temporal_cache products/estimated')
os.system('rm -rf temporal_cache')
os.system('cp -rf data_argentina mock_data')
self.files = glob.glob('mock_data/goes13.*.BAND_01.nc')
def tearDown(self):
os.system('rm -rf mock_data')
def test_main(self):
begin = datetime.now()
heliosat.workwith('mock_data/goes13.2015.*.BAND_01.nc', 32)
end = datetime.now()
elapsed = (end - begin).total_seconds()
first, last = min(self.files), max(self.files)
to_dt = heliosat.to_datetime
processed = (to_dt(last) - to_dt(first)).total_seconds()
processed_days = processed / 3600. / 24
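        # scale from the mock subset (86x180 pixels over `processed_days`
        # days) up to the full 2245x3515 grid over a 30-day month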
scale_shapes = (2245. / 86) * (3515. / 180) * (30. / processed_days)
estimated = elapsed * scale_shapes / 3600.
print "Scaling total time to %.2f hours." % estimated
print "Efficiency achieved: %.2f%%" % (3.5 / estimated * 100.)
if __name__ == '__main__':
    unittest.main()
|
ahMarrone/solar_radiation_model
|
tests/performance_test.py
|
Python
|
mit
| 1,278 | 0 |
import datetime
from typing import Optional, TypedDict
from backend.common.sitevars.sitevar import Sitevar
class WebConfig(TypedDict):
travis_job: str
tbaClient_endpoints_sha: str
current_commit: str
deploy_time: str
endpoints_sha: str
commit_time: str
class AndroidConfig(TypedDict):
min_app_version: int
latest_app_version: int
class IOSConfig(TypedDict):
min_app_version: int
latest_app_version: int
class ContentType(TypedDict):
current_season: int
max_season: int
web: Optional[WebConfig]
android: Optional[AndroidConfig]
ios: Optional[IOSConfig]
class ApiStatus(Sitevar[ContentType]):
@staticmethod
def key() -> str:
return "apistatus"
@staticmethod
def description() -> str:
return "For setting max year, min app versions, etc."
@staticmethod
def default_value() -> ContentType:
current_year = datetime.datetime.now().year
return ContentType(
current_season=current_year,
max_season=current_year,
web=None,
android=None,
ios=None,
)
@classmethod
def status(cls) -> ContentType:
return cls.get()
|
the-blue-alliance/the-blue-alliance
|
src/backend/common/sitevars/apistatus.py
|
Python
|
mit
| 1,216 | 0 |
from .max import max
from pyramda.private.asserts import assert_equal
def max_test():
assert_equal(max([1, 3, 4, 2]), 4)
|
jackfirth/pyramda
|
pyramda/relation/max_test.py
|
Python
|
mit
| 127 | 0 |
"""
Script for selecting a good number of basis functions.
Too many or too few basis functions will introduce numerical error.
True solution must be known.
Run the program several times, varying the value of the -N option.
There may be a way to improve on this brute force method.
"""
# To allow __main__ in subdirectory
import sys
sys.path.append(sys.path[0] + '/..')
import argparse
import numpy as np
import ps.ps
import io_util
import problems
import problems.boundary
import copy
from multiprocessing import Pool
parser = argparse.ArgumentParser()
io_util.add_arguments(parser, ('problem', 'N'))
args = parser.parse_args()
problem = problems.problem_dict[args.problem]()
boundary = problems.boundary.OuterSine(problem.R)
problem.boundary = boundary
# Options to pass to the solver
options = {
'problem': problem,
'N': args.N,
'scheme_order': 4,
}
meta_options = {
'procedure_name': 'optimize_basis',
}
io_util.print_options(options, meta_options)
def my_print(t):
print('n_circle={} n_radius={} error={}'.format(*t))
def worker(t):
options['n_circle'] = t[0]
options['n_radius'] = t[1]
my_solver = ps.ps.PizzaSolver(options)
result = my_solver.run()
t = (t[0], t[1], result.error)
my_print(t)
return t
all_options = []
# Tweak the following ranges as needed
for n_circle in range(30, 100, 5):
for n_radius in range(17, n_circle, 4):
all_options.append((n_circle, n_radius))
with Pool(4) as p:
results = p.map(worker, all_options)
min_error = float('inf')
for t in results:
if t[2] < min_error:
min_error = t[2]
min_t = t
print()
my_print(min_t)
|
srmagura/potential
|
scripts/optimize_basis.py
|
Python
|
gpl-3.0
| 1,662 | 0.006619 |
from .model import SVC
|
ljwolf/spvcm
|
spvcm/svc/__init__.py
|
Python
|
mit
| 23 | 0 |
import os
import socket
import itertools
import sys
import time
import re
import random
import logging
import traceback
import json
from datetime import datetime, timedelta
from pprint import pprint
from airflow import DAG
from airflow.contrib.hooks import SSHHook
from airflow.operators import PythonOperator
from airflow.operators import BashOperator
from airflow.operators import BranchPythonOperator
from airflow.operators import TriggerDagRunOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks import RedisHook
from airflow.models import Variable
#################################################################DAG CONFIG####################################################################################
default_args = {
'owner': 'wireless',
'depends_on_past': False,
    'start_date': datetime(2017, 3, 30, 13, 0),
'email': ['vipulsharma144@gmail.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=1),
'provide_context': True,
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
PARENT_DAG_NAME = "SYNC"
main_etl_dag=DAG(dag_id=PARENT_DAG_NAME, default_args=default_args, schedule_interval='@once')
SQLhook=MySqlHook(mysql_conn_id='application_db')
redis_hook_2 = RedisHook(redis_conn_id="redis_hook_2")
#################################################################FUCTIONS####################################################################################
def get_host_ip_mapping():
path = Variable.get("hosts_mk_path")
try:
host_var = load_file(path)
ipaddresses = host_var.get('ipaddresses')
return ipaddresses
except IOError:
logging.error("File Name not correct")
return None
except Exception:
logging.error("Please check the HostMK file exists on the path provided ")
return None
def load_file(file_path):
#Reset the global vars
host_vars = {
"all_hosts": [],
"ipaddresses": {},
"host_attributes": {},
"host_contactgroups": [],
}
try:
execfile(file_path, host_vars, host_vars)
del host_vars['__builtins__']
except IOError, e:
pass
return host_vars
def process_host_mk():
path = Variable.get("hosts_mk_path")
hosts = {}
site_mapping = {}
all_site_mapping =[]
all_list = []
device_dict = {}
start = 0
tech_wise_device_site_mapping = {}
try:
text_file = open(path, "r")
except IOError:
logging.error("File Name not correct")
return "notify"
except Exception:
logging.error("Please check the HostMK file exists on the path provided ")
return "notify"
lines = text_file.readlines()
host_ip_mapping = get_host_ip_mapping()
for line in lines:
if "all_hosts" in line:
start = 1
if start == 1:
hosts["hostname"] = line.split("|")[0]
hosts["device_type"] = line.split("|")[1]
site_mapping["hostname"] = line.split("|")[0].strip().strip("'")
site_mapping['site'] = line.split("site:")[1].split("|")[0].strip()
site_mapping['device_type'] = line.split("|")[1].strip()
all_list.append(hosts.copy())
all_site_mapping.append(site_mapping.copy())
if ']\n' in line:
start = 0
all_list[0]['hostname'] = all_list[0].get("hostname").strip('all_hosts += [\'')
all_site_mapping[0] ['hostname'] = all_site_mapping[0].get("hostname").strip('all_hosts += [\'')
break
print "LEN of ALL LIST is %s"%(len(all_list))
if len(all_list) > 1:
for device in all_list:
device_dict[device.get("hostname").strip().strip("'")] = device.get("device_type").strip()
Variable.set("hostmk.dict",str(device_dict))
for site_mapping in all_site_mapping:
if site_mapping.get('device_type') not in tech_wise_device_site_mapping.keys():
tech_wise_device_site_mapping[site_mapping.get('device_type')] = {site_mapping.get('site'):[{"hostname":site_mapping.get('hostname'),"ip_address":host_ip_mapping.get(site_mapping.get('hostname'))}]}
else:
if site_mapping.get('site') not in tech_wise_device_site_mapping.get(site_mapping.get('device_type')).keys():
tech_wise_device_site_mapping.get(site_mapping.get('device_type'))[site_mapping.get('site')] = [{"hostname":site_mapping.get('hostname'),"ip_address":host_ip_mapping.get(site_mapping.get('hostname'))}]
else:
tech_wise_device_site_mapping.get(site_mapping.get('device_type')).get(site_mapping.get('site')).append({"hostname":site_mapping.get('hostname'),"ip_address":host_ip_mapping.get(site_mapping.get('hostname'))})
Variable.set("hostmk.dict.site_mapping",str(tech_wise_device_site_mapping))
count = 0
for x in tech_wise_device_site_mapping:
for y in tech_wise_device_site_mapping.get(x):
                count = count+len(tech_wise_device_site_mapping.get(x).get(y))
print "COUNT : %s"%(count)
return 0
else:
return -4
def dict_rows(cur):
desc = cur.description
return [
dict(zip([col[0] for col in desc], row))
for row in cur.fetchall()
]
def execute_query(query):
conn = SQLhook.get_conn()
cursor = conn.cursor()
cursor.execute(query)
data = dict_rows(cursor)
cursor.close()
return data
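# Example (illustrative): execute_query("SELECT service, devicetype FROM x")
# returns one dict per row, e.g. {'service': ..., 'devicetype': ...}.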
def createDict(data):
    #TODO: there are 3 levels of criticality; handle all of them (service_critical, critical, dtype_critical)
rules = {}
ping_rule_dict = {}
    operator_name_with_operator_in = eval(Variable.get("special_operator_services"))  # operator names for which we wish to apply the IN operator
service_name_with_operator_in = []
for operator_name in operator_name_with_operator_in:
service_name = "_".join(operator_name.split("_")[:-1])
service_name_with_operator_in.append(service_name)
for device in data:
service_name = device.get('service')
device_type = device.get('devicetype')
if device.get('dtype_ds_warning') and device.get('dtype_ds_critical'):
device['critical'] = device.get('dtype_ds_critical')
device['warning'] = device.get('dtype_ds_warning')
elif device.get('service_warning') and device.get('service_critical'):
device['critical'] = device.get('service_critical')
device['warning'] = device.get('service_warning')
if service_name == 'radwin_uas' and device['critical'] == "":
continue
if service_name:
name = str(service_name)
rules[name] = {}
if device.get('critical'):
rules[name]={"Severity1":["critical",{'name': str(name)+"_critical", 'operator': 'greater_than' if ("_rssi" not in name) and ("_uas" not in name) else "less_than_equal_to", 'value': device.get('critical') or device.get('dtype_ds_critical')}]}
else:
rules[name]={"Severity1":["critical",{'name': str(name)+"_critical", 'operator': 'greater_than', 'value': ''}]}
if device.get('warning'):
rules[name].update({"Severity2":["warning",{'name': str(name)+"_warning", 'operator': 'greater_than' if ("_rssi" not in name) and ("_uas" not in name) else "less_than_equal_to" , 'value': device.get('warning') or device.get('dtype_ds_warning')}]})
else:
rules[name].update({"Severity2":["warning",{'name': str(name)+"_warning", 'operator': 'greater_than', 'value': ''}]})
if device_type not in ping_rule_dict:
if device.get('ping_pl_critical') and device.get('ping_pl_warning') and device.get('ping_rta_critical') and device.get('ping_rta_warning'):
ping_rule_dict[device_type] = {
'ping_pl_critical' : device.get('ping_pl_critical'),
'ping_pl_warning': device.get('ping_pl_warning') ,
'ping_rta_critical': device.get('ping_rta_critical'),
'ping_rta_warning': device.get('ping_rta_warning')
}
for device_type in ping_rule_dict:
if ping_rule_dict.get(device_type).get('ping_pl_critical'):
rules[device_type+"_pl"]={}
rules[device_type+"_pl"].update({"Severity1":["critical",{'name': device_type+"_pl_critical", 'operator': 'greater_than', 'value': float(ping_rule_dict.get(device_type).get('ping_pl_critical')) or ''}]})
if ping_rule_dict.get(device_type).get('ping_pl_warning'):
rules[device_type+"_pl"].update({"Severity2":["warning",{'name': device_type+"_pl_warning", 'operator': 'greater_than', 'value': float(ping_rule_dict.get(device_type).get('ping_pl_warning')) or ''}]})
rules[device_type+"_pl"].update({"Severity3":["up",{'name': device_type+"_pl_up", 'operator': 'less_than', 'value': float(ping_rule_dict.get(device_type).get('ping_pl_warning')) or ''},'AND',{'name': device_type+"_pl_up", 'operator': 'greater_than_equal_to', 'value': 0}]})
rules[device_type+"_pl"].update({"Severity4":["down",{'name': device_type+"_pl_down", 'operator': 'equal_to', 'value': 100}]})
if ping_rule_dict.get(device_type).get('ping_rta_critical'):
rules[device_type+"_rta"] = {}
rules[device_type+"_rta"].update({"Severity1":["critical",{'name': device_type+"_rta_critical", 'operator': 'greater_than', 'value': float(ping_rule_dict.get(device_type).get('ping_rta_critical')) or ''}]})
if ping_rule_dict.get(device_type).get('ping_rta_warning'):
rules[device_type+"_rta"].update({"Severity2":["warning",{'name': device_type+"_rta_warning", 'operator': 'greater_than', 'value': float(ping_rule_dict.get(device_type).get('ping_rta_warning')) or ''}]})
rules[device_type+"_rta"].update({"Severity3":["up",{'name': device_type+"_rta_up", 'operator': 'less_than', 'value': float(ping_rule_dict.get(device_type).get('ping_rta_warning'))},'AND',{'name': device_type+"_rta_up", 'operator': 'greater_than', 'value': 0 }]})
#TODO: This should be a separate module to prevent re-looping over rules
for rule in rules:
if rule in set(service_name_with_operator_in):
#service_name = "_".join(rule.split("_")[0:4])
service_rules = rules.get(rule)
for i in range(1,len(service_rules)+1):
severities = service_rules.get("Severity%s"%i)
for x in range(1,len(severities),2):
if severities[x].get("name") in operator_name_with_operator_in.keys():
severities[x]["operator"] = operator_name_with_operator_in.get(severities[x].get("name"))
return rules
def process_kpi_rules(all_services_dict):
#TODO: Update this code for both ul_issue and other KPIs
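# Map each KPI service to its rule definition; formulas containing "round"
# are treated as plain expressions rather than functions.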
kpi_rule_dict = {}
formula_mapper = eval(Variable.get('kpi_rule_function_mapper'))
for service in formula_mapper:
is_Function = "round" not in formula_mapper.get(service)
kpi_rule_dict[service] = {
"name":service,
"isFunction":is_Function,
"formula":formula_mapper.get(service),
"isarray":[False,False],
"service":service,
"arraylocations":0
}
print kpi_rule_dict
return kpi_rule_dict
def generate_service_rules():
service_threshold_query = Variable.get('q_get_thresholds')
#creating Severity Rules
data = execute_query(service_threshold_query)
rules_dict = createDict(data)
Variable.set("rules",str(rules_dict))
#can only be done if generate_service_rules is completed and there is a rule Variable in Airflow Variables
def generate_kpi_rules():
service_rules = eval(Variable.get('rules'))
processed_kpi_rules = process_kpi_rules(service_rules)
#Variable.set("kpi_rules",str(processed_kpi_rules))
def generate_kpi_prev_states():
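# Seed the per-technology UL-issue previous-state keys in Redis from the
# current all_devices_state snapshot.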
ul_tech = eval(Variable.get('ul_issue_kpi_technologies'))
old_pl_data = redis_hook_2.get("all_devices_state")
all_device_type_age_dict = {}
for techs_bs in ul_tech:
redis_hook_2.set("kpi_ul_prev_state_%s"%(ul_tech.get(techs_bs)),old_pl_data)
redis_hook_2.set("kpi_ul_prev_state_%s"%(techs_bs),old_pl_data)
def generate_backhaul_inventory_for_util():
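# Build a per-device mapping of backhaul port names to capacities from the
# inventory tables and cache it in Redis under "backhaul_capacities".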
backhaul_inventory_data_query="""
select
device_device.ip_address,
device_device.device_name,
device_devicetype.name,
device_device.mac_address,
device_devicetype.agent_tag,
site_instance_siteinstance.name,
device_device.device_alias,
device_devicetechnology.name as techno_name,
group_concat(service_servicedatasource.name separator '$$') as port_name,
group_concat(inventory_basestation.bh_port_name separator '$$') as port_alias,
group_concat(inventory_basestation.bh_capacity separator '$$') as port_wise_capacity
from device_device
inner join
(device_devicetechnology, device_devicetype,
machine_machine, site_instance_siteinstance)
on
(
device_devicetype.id = device_device.device_type and
device_devicetechnology.id = device_device.device_technology and
machine_machine.id = device_device.machine_id and
site_instance_siteinstance.id = device_device.site_instance_id
)
inner join
(inventory_backhaul)
on
(device_device.id = inventory_backhaul.bh_configured_on_id OR device_device.id = inventory_backhaul.aggregator_id OR
device_device.id = inventory_backhaul.pop_id OR
device_device.id = inventory_backhaul.bh_switch_id OR
device_device.id = inventory_backhaul.pe_ip_id)
left join
(inventory_basestation)
on
(inventory_backhaul.id = inventory_basestation.backhaul_id)
left join
(service_servicedatasource)
on
(inventory_basestation.bh_port_name = service_servicedatasource.alias)
where
device_device.is_deleted=0 and
device_device.host_state <> 'Disable'
and
device_devicetype.name in ('Cisco','Juniper','RiCi', 'PINE','Huawei','PE')
group by device_device.ip_address;
"""
backhaul_data = execute_query(backhaul_inventory_data_query)
bh_cap_mappng = {}
for device in backhaul_data:
dev_name = device.get('device_name')
bh_cap_mappng[device.get('device_name')] = {
'port_name' : device.get('port_name').split("$$") if device.get('port_name') else None,
'port_wise_capacity': device.get('port_wise_capacity').split("$$") if device.get('port_wise_capacity') else None,
'ip_address':device.get('ip_address'),
'port_alias':device.get('port_alias').split("$$") if device.get('port_alias') else None,
'capacity': {}
}
if device.get('port_name') and device.get('port_wise_capacity'):
for index,port in enumerate(bh_cap_mappng.get(device.get('device_name')).get('port_name')):
#print index,bh_cap_mappng.get(device.get('device_name')).get('port_wise_capacity'),device.get('port_name')
try:
port_capacity = bh_cap_mappng.get(device.get('device_name')).get('port_wise_capacity')[index]
except IndexError:
try:
port_capacity = bh_cap_mappng.get(device.get('device_name')).get('port_wise_capacity')[index-1]
except Exception:
port_capacity = bh_cap_mappng.get(device.get('device_name')).get('port_wise_capacity')[0]
except Exception:
port_capacity = bh_cap_mappng.get(device.get('device_name')).get('port_wise_capacity')[0]
bh_cap_mappng.get(device.get('device_name')).get('capacity').update({port:port_capacity})
for key_dic in ['port_alias']:
if bh_cap_mappng.get(dev_name).get(key_dic) and len(bh_cap_mappng.get(dev_name).get(key_dic)) > 1:
new_ports = []
for index_m,port_v in enumerate(set(bh_cap_mappng.get(dev_name).get(key_dic))):
if ',' in port_v:
def_ports = port_v.split(',')
new_ports.extend(def_ports)
for port in def_ports:
try:
bh_cap_mappng.get(device.get('device_name')).get('capacity').update({port:bh_cap_mappng.get(device.get('device_name')).get('port_wise_capacity')[index_m]})
except Exception:
# fall back to the previous index when index_m is out of range (mirrors the handling below)
bh_cap_mappng.get(device.get('device_name')).get('capacity').update({port:bh_cap_mappng.get(device.get('device_name')).get('port_wise_capacity')[index_m-1]})
else:
new_ports.append(port_v)
try:
bh_cap_mappng.get(device.get('device_name')).get('capacity').update({port_v:bh_cap_mappng.get(device.get('device_name')).get('port_wise_capacity')[index_m]})
except Exception:
bh_cap_mappng.get(device.get('device_name')).get('capacity').update({port_v:bh_cap_mappng.get(device.get('device_name')).get('port_wise_capacity')[index_m-1]})
bh_cap_mappng.get(dev_name)[key_dic] = new_ports
print "Setting redis Key backhaul_capacities with backhaul capacities "
#for dev in bh_cap_mappng:
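# hardcoded capacity override for device 11389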
bh_cap_mappng.get('11389').get('capacity').update({'GigabitEthernet0_0_24':'44'})
print bh_cap_mappng.get('11389').get('capacity')
redis_hook_2.set("backhaul_capacities",str(bh_cap_mappng))
print "Successfully Created Key: backhaul_capacities in Redis. "
def generate_basestation_inventory_for_util():
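# Cache each basestation's QoS bandwidth and IP address in Redis under
# "basestation_capacities".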
basestation_inventory_data_query="""
select
DISTINCT(device_device.ip_address),
device_device.device_name,
device_devicetype.name,
device_device.mac_address,
device_device.ip_address,
device_devicetype.agent_tag,
inventory_sector.name,
site_instance_siteinstance.name,
device_device.device_alias,
device_devicetechnology.name as techno_name,
inventory_circuit.qos_bandwidth as QoS_BW
from device_device
inner join
(device_devicetechnology, device_devicemodel, device_devicetype, machine_machine, site_instance_siteinstance, inventory_sector)
on
(
device_devicetype.id = device_device.device_type and
device_devicetechnology.id = device_device.device_technology and
device_devicemodel.id = device_device.device_model and
machine_machine.id = device_device.machine_id and
site_instance_siteinstance.id = device_device.site_instance_id and
inventory_sector.sector_configured_on_id = device_device.id
)
left join (inventory_circuit)
on (
inventory_sector.id = inventory_circuit.sector_id
)
where device_device.is_deleted=0
and
device_device.host_state <> 'Disable'
and
device_devicetechnology.name in ('WiMAX', 'P2P', 'PMP')
and
device_devicetype.name in ('Radwin2KBS', 'CanopyPM100AP', 'CanopySM100AP', 'StarmaxIDU', 'Radwin5KBS','Cambium450iAP');
"""
basestation_data = execute_query(basestation_inventory_data_query)
bh_cap_mappng = {}
for device in basestation_data:
bh_cap_mappng[device.get('device_name')] = {
'qos_bandwidth': device.get('QoS_BW') if device.get('QoS_BW') else None,
'ip_address':device.get('ip_address'),
}
print "Setting redis Key basestation_capacities with basestation capacities "
#for dev in bh_cap_mappng:
# print bh_cap_mappng.get(dev).get('capacity')
redis_hook_2.set("basestation_capacities",str(bh_cap_mappng))
print "Successfully Created Key: basestation_capacities in Redis. "
##################################################################TASKS#########################################################################3
create_devicetype_mapping_task = PythonOperator(
task_id="generate_host_devicetype_mapping",
provide_context=False,
python_callable=process_host_mk,
#params={"redis_hook_2":redis_hook_2},
dag=main_etl_dag)
create_severity_rules_task = PythonOperator(
task_id="generate_service_rules",
provide_context=False,
python_callable=generate_service_rules,
#params={"redis_hook_2":redis_hook_2},
dag=main_etl_dag)
create_kpi_rules_task = PythonOperator(
task_id="generate_kpi_rules",
provide_context=False,
python_callable=generate_kpi_rules,
#params={"redis_hook_2":redis_hook_2},
dag=main_etl_dag)
create_kpi_prev_states = PythonOperator(
task_id="generate_kpi_previous_states",
provide_context=False,
python_callable=generate_kpi_prev_states,
#params={"redis_hook_2":redis_hook_2},
dag=main_etl_dag)
generate_backhaul_data = PythonOperator(
task_id="generate_backhaul_inventory",
provide_context=False,
python_callable=generate_backhaul_inventory_for_util,
#params={"table":"nocout_24_09_14"},
dag=main_etl_dag)
generate_basestation_data = PythonOperator(
task_id="generate_basestation_inventory",
provide_context=False,
python_callable=generate_basestation_inventory_for_util,
#params={"table":"nocout_24_09_14"},
dag=main_etl_dag)
##################################################################END#########################################################################3
|
vipul-tm/DAG
|
dags-ttpl/sync.py
|
Python
|
bsd-3-clause
| 19,607 | 0.033304 |
#!/Users/abhisheksamdaria/GitHub/pstHealth/venv/bin/python2.7
from __future__ import print_function
import base64
import os
import sys
if __name__ == "__main__":
# create font data chunk for embedding
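# The script prints Python source that embeds the .pil font metrics and the
# .pbm bitmap as base64 literals, for pasting into test code.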
font = "Tests/images/courB08"
print(" f._load_pilfont_data(")
print(" # %s" % os.path.basename(font))
print(" BytesIO(base64.decodestring(b'''")
base64.encode(open(font + ".pil", "rb"), sys.stdout)
print("''')), Image.open(BytesIO(base64.decodestring(b'''")
base64.encode(open(font + ".pbm", "rb"), sys.stdout)
print("'''))))")
# End of file
|
samabhi/pstHealth
|
venv/bin/createfontdatachunk.py
|
Python
|
mit
| 600 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Event.photo'
db.add_column('events_event', 'photo',
self.gf('django.db.models.fields.files.ImageField')(default='', max_length=200, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Event.photo'
db.delete_column('events_event', 'photo')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.day': {
'Meta': {'object_name': 'Day'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'events.event': {
'Meta': {'object_name': 'Event'},
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaboration_events'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['participant.Participant']"}),
'days': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['events.Day']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'end_time': ('django.db.models.fields.TimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['maps.Location']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'participant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'own_events'", 'to': "orm['participant.Participant']"}),
'photo': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'start_time': ('django.db.models.fields.TimeField', [], {})
},
'maps.location': {
'Meta': {'unique_together': "(('user', 'name'),)", 'object_name': 'Location'},
'area': ('django.contrib.gis.db.models.fields.PolygonField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marker': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'location_set'", 'to': "orm['auth.User']"})
},
'participant.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'participant.participant': {
'Meta': {'object_name': 'Participant'},
'approved_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['participant.Category']", 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '2000'})
}
}
complete_apps = ['events']
|
e-Luminate/eluminate_web
|
eluminate_web/apps/events/migrations/0006_auto__add_field_event_photo.py
|
Python
|
gpl-3.0
| 7,343 | 0.007899 |
import settings
import mysql.connector
from domain.domain import Article
from domain.domain import Project
from domain.domain import User
from domain.domain import Tag
import service.database as db
# Article management
class ArticleService:
# Query the most recently published articles
def query_most_published_article(self):
conn = db.get_connection()
sql = "".join(["select a.id as id,a.author_id as author_id,",
"u.name as author_name,a.title as title,a.content as content,a.create_time as create_time,",
"a.publish_time as publish_time,a.last_update_time as last_update_time",
" from article as a left join user as u on a.author_id=u.id",
" order by a.publish_time desc limit 0,%(page_size)s"])
cursor = conn.cursor()
cursor.execute(sql, {"page_size": settings.app_settings["page_size"]})
articles = None
for (id, author_id, author_name, title,
content, create_time, publish_time, last_update_time) in cursor:
if (not articles):
articles = []
article = Article()
articles.append(article)
article.id = id
if (author_id):
u = User()
article.author = u
u.id = author_id
u.name = author_name
article.title = title
article.content = content
article.create_time = create_time
article.publish_time = publish_time
article.last_update_time = last_update_time
cursor.close()
conn.close()
return articles
# Query the list of articles by tag
def query_article_by_tag(self, tag_id):
if (not tag_id):
return None
_tag_id = None
try:
_tag_id = int(tag_id)
except ValueError:
return None
sql = "".join(["select a.id as id,a.author_id as author_id,u.name as author_name",
",a.title as title,a.create_time as create_time,a.publish_time as publish_time",
",a.last_update_time as last_update_time",
" from article as a left join user as u on a.author_id=u.id",
" where a.publish_time is not null and a.id in (select article_id from article_tag where tag_id=%(tag_id)s)"])
conn = db.get_connection()
cursor = conn.cursor()
cursor.execute(sql, {"tag_id": _tag_id})
articles = None
for (id, author_id, author_name, title, create_time, publish_time, last_update_time) in cursor:
if (not articles):
articles = []
a = Article()
articles.append(a)
a.id = id
a.title = title
a.create_time = create_time
a.publish_time = publish_time
a.last_update_time = last_update_time
if (author_id):
u = User()
a.author = u
u.id = author_id
u.name = author_name
cursor.close()
conn.close()
return articles
# Find an article by its ID
def find(self, article_id):
conn = db.get_connection()
sql = "".join(["select a.id as id,a.author_id as author_id,",
"u.name as author_name,a.title as title,a.content as content,a.create_time as create_time,",
"a.publish_time as publish_time,a.last_update_time as last_update_time",
" from article as a left join user as u on a.author_id=u.id",
" where a.id=%(article_id)s"])
cursor = conn.cursor()
cursor.execute(sql, {"article_id": article_id})
article = None
for (id, author_id, author_name, title, content, create_time, publish_time, last_update_time) in cursor:
if (not article):
article = Article()
article.id = id
article.title = title
article.content = content
article.create_time = create_time
article.publish_time = publish_time
article.last_update_time = last_update_time
if (author_id):
u = User()
article.author = u
u.id = author_id
u.name = author_name
cursor.close()
conn.close()
return article
# Add a new article
def add(self, article):
# TODO: implement persistence for new articles; stub currently returns 1
return 1
# Tag management
class TagService:
def list_all(self):
conn = db.get_connection()
if (not conn):
return None
sql = "".join(["select t.id as id, t.name as name, t.author_id as author_id, u.name as author_name",
",t.create_time as create_time,t.last_update_time as last_update_time",
" from tag as t left join user as u on t.author_id=u.id order by t.create_time desc"])
cursor = conn.cursor()
cursor.execute(sql)
tags = None
for (id, name, author_id, author_name, create_time, last_update_time) in cursor:
if (not tags):
tags = []
t = Tag()
tags.append(t)
t.id = id
t.name = name
t.create_time = create_time
t.last_update_time = last_update_time
if (author_id):
u = User()
t.author = u
u.id = author_id
u.name = author_name
cursor.close()
conn.close()
return tags
article_service = ArticleService()
tag_service = TagService()
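# Example usage (a sketch; assumes a configured service.database connection
# and existing data — the article id below is illustrative):
#   articles = article_service.query_most_published_article()
#   article = article_service.find(42)
#   tags = tag_service.list_all()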
|
hwangsyin/cbrc-devteam-blog
|
service/service.py
|
Python
|
apache-2.0
| 5,715 | 0.006211 |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import datetime
from cms.api import create_page, publish_page, add_plugin
from cms.exceptions import PluginAlreadyRegistered, PluginNotRegistered
from cms.models import Page, Placeholder
from cms.models.pluginmodel import CMSPlugin, PluginModelBase
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.plugins.utils import get_plugins_for_page
from cms.plugins.file.models import File
from cms.plugins.inherit.models import InheritPagePlaceholder
from cms.plugins.link.forms import LinkForm
from cms.plugins.link.models import Link
from cms.plugins.picture.models import Picture
from cms.plugins.text.models import Text
from cms.plugins.text.utils import (plugin_tags_to_id_list, plugin_tags_to_admin_html)
from cms.plugins.twitter.models import TwitterRecentEntries
from cms.test_utils.project.pluginapp.models import Article, Section
from cms.test_utils.project.pluginapp.plugins.manytomany_rel.models import (
ArticlePluginModel)
from cms.test_utils.testcases import CMSTestCase, URL_CMS_PAGE, URL_CMS_PLUGIN_MOVE, \
URL_CMS_PAGE_ADD, URL_CMS_PLUGIN_ADD, URL_CMS_PLUGIN_EDIT, URL_CMS_PAGE_CHANGE, URL_CMS_PLUGIN_REMOVE, \
URL_CMS_PLUGIN_HISTORY_EDIT
from cms.sitemaps.cms_sitemap import CMSSitemap
from cms.test_utils.util.context_managers import SettingsOverride
from cms.utils.copy_plugins import copy_plugins_to
from django.utils import timezone
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management import call_command
from django.forms.widgets import Media
from django.test.testcases import TestCase
import os
class DumbFixturePlugin(CMSPluginBase):
model = CMSPlugin
name = "Dumb Test Plugin. It does nothing."
render_template = ""
admin_preview = False
allow_children = True
def render(self, context, instance, placeholder):
return context
class PluginsTestBaseCase(CMSTestCase):
def setUp(self):
self.super_user = User(username="test", is_staff=True, is_active=True, is_superuser=True)
self.super_user.set_password("test")
self.super_user.save()
self.slave = User(username="slave", is_staff=True, is_active=True, is_superuser=False)
self.slave.set_password("slave")
self.slave.save()
self.FIRST_LANG = settings.LANGUAGES[0][0]
self.SECOND_LANG = settings.LANGUAGES[1][0]
self._login_context = self.login_user_context(self.super_user)
self._login_context.__enter__()
def tearDown(self):
self._login_context.__exit__(None, None, None)
def approve_page(self, page):
response = self.client.get(URL_CMS_PAGE + "%d/approve/" % page.pk)
self.assertRedirects(response, URL_CMS_PAGE)
# reload page
return self.reload_page(page)
def get_request(self, *args, **kwargs):
request = super(PluginsTestBaseCase, self).get_request(*args, **kwargs)
request.placeholder_media = Media()
return request
class PluginsTestCase(PluginsTestBaseCase):
def _create_text_plugin_on_page(self, page):
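# POST to the plugin-add endpoint and return the pk of the created plugin.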
plugin_data = {
'plugin_type': "TextPlugin",
'language': settings.LANGUAGES[0][0],
'placeholder': page.placeholders.get(slot="body").pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEquals(response.status_code, 200)
created_plugin_id = int(response.content)
self.assertEquals(created_plugin_id, CMSPlugin.objects.all()[0].pk)
return created_plugin_id
def _edit_text_plugin(self, plugin_id, text):
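# Open the plugin change form, save the new body and return the Text instance.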
edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id)
response = self.client.get(edit_url)
self.assertEquals(response.status_code, 200)
data = {
"body": text
}
response = self.client.post(edit_url, data)
self.assertEquals(response.status_code, 200)
txt = Text.objects.get(pk=plugin_id)
return txt
def test_add_edit_plugin(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
created_plugin_id = self._create_text_plugin_on_page(page)
# now edit the plugin
txt = self._edit_text_plugin(created_plugin_id, "Hello World")
self.assertEquals("Hello World", txt.body)
# edit body, but click cancel button
data = {
"body": "Hello World!!",
"_cancel": True,
}
edit_url = '%s%d/' % (URL_CMS_PLUGIN_EDIT, created_plugin_id)
response = self.client.post(edit_url, data)
self.assertEquals(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEquals("Hello World", txt.body)
def test_plugin_history_view(self):
"""
Test plugin history view
"""
import reversion
page_data = self.get_new_page_data()
# two versions created by simply creating the page
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
page_id = int(page.pk)
# page version 3
created_plugin_id = self._create_text_plugin_on_page(page)
# page version 4
txt = self._edit_text_plugin(created_plugin_id, "Hello Foo")
self.assertEquals("Hello Foo", txt.body)
# page version 5
txt = self._edit_text_plugin(created_plugin_id, "Hello Bar")
self.assertEquals("Hello Bar", txt.body)
versions = [v.pk for v in reversed(reversion.get_for_object(page))]
history_url = '%s%d/' % (
URL_CMS_PLUGIN_HISTORY_EDIT % (page_id, versions[-2]),
created_plugin_id)
response = self.client.get(history_url)
self.assertEquals(response.status_code, 200)
self.assertIn('Hello Foo', response.content)
def test_plugin_order(self):
"""
Test that plugin position is saved after creation
"""
page_en = create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=True, in_navigation=True)
ph_en = page_en.placeholders.get(slot="col_left")
# We check created objects and objects from the DB to be sure the position value
# has been saved correctly
text_plugin_1 = add_plugin(ph_en, "TextPlugin", "en", body="I'm the first")
text_plugin_2 = add_plugin(ph_en, "TextPlugin", "en", body="I'm the second")
db_plugin_1 = CMSPlugin.objects.get(pk=text_plugin_1.pk)
db_plugin_2 = CMSPlugin.objects.get(pk=text_plugin_2.pk)
with SettingsOverride(CMS_PERMISSION=False):
self.assertEqual(text_plugin_1.position, 1)
self.assertEqual(db_plugin_1.position, 1)
self.assertEqual(text_plugin_2.position, 2)
self.assertEqual(db_plugin_2.position, 2)
## Finally we render the placeholder to test the actual content
rendered_placeholder = ph_en.render(self.get_context(page_en.get_absolute_url()), None)
self.assertEquals(rendered_placeholder, "I'm the firstI'm the second")
def test_add_cancel_plugin(self):
"""
Test that you can cancel a new plugin before editing and
that the plugin is removed.
"""
# add a new text plugin
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
plugin_data = {
'plugin_type': "TextPlugin",
'language': settings.LANGUAGES[0][0],
'placeholder': page.placeholders.get(slot="body").pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEquals(response.status_code, 200)
self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
# now click cancel instead of editing
edit_url = URL_CMS_PLUGIN_EDIT + response.content + "/"
response = self.client.get(edit_url)
self.assertEquals(response.status_code, 200)
data = {
"body": "Hello World",
"_cancel": True,
}
response = self.client.post(edit_url, data)
self.assertEquals(response.status_code, 200)
self.assertEquals(0, Text.objects.count())
def test_add_text_plugin_empty_tag(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
plugin_data = {
'plugin_type': "TextPlugin",
'language': settings.LANGUAGES[0][0],
'placeholder': page.placeholders.get(slot="body").pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEquals(response.status_code, 200)
self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + response.content + "/"
response = self.client.get(edit_url)
self.assertEquals(response.status_code, 200)
data = {
"body": '<div class="someclass"></div><p>foo</p>'
}
response = self.client.post(edit_url, data)
self.assertEquals(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEquals('<div class="someclass"></div><p>foo</p>', txt.body)
def test_add_text_plugin_html_sanitizer(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
plugin_data = {
'plugin_type': "TextPlugin",
'language': settings.LANGUAGES[0][0],
'placeholder': page.placeholders.get(slot="body").pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEquals(response.status_code, 200)
self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + response.content + "/"
response = self.client.get(edit_url)
self.assertEquals(response.status_code, 200)
data = {
"body": '<script>var bar="hacked"</script>'
}
response = self.client.post(edit_url, data)
self.assertEquals(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEquals('<script>var bar="hacked"</script>', txt.body)
def test_copy_plugins(self):
"""
Test that copying plugins works as expected.
"""
# create some objects
page_en = create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
page_de = create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
ph_en = page_en.placeholders.get(slot="body")
ph_de = page_de.placeholders.get(slot="body")
# add the text plugin
text_plugin_en = add_plugin(ph_en, "TextPlugin", "en", body="Hello World")
self.assertEquals(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)
# add a *nested* link plugin
link_plugin_en = add_plugin(ph_en, "LinkPlugin", "en", target=text_plugin_en,
name="A Link", url="https://www.django-cms.org")
# adding a child above means the parent plugin must be reloaded here.
text_plugin_en = self.reload(text_plugin_en)
# check the relations
self.assertEquals(text_plugin_en.get_children().count(), 1)
self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
# just sanity check that so far everything went well
self.assertEqual(CMSPlugin.objects.count(), 2)
# copy the plugins to the german placeholder
copy_plugins_to(ph_en.get_plugins(), ph_de, 'de')
self.assertEqual(ph_de.cmsplugin_set.filter(parent=None).count(), 1)
text_plugin_de = ph_de.cmsplugin_set.get(parent=None).get_plugin_instance()[0]
self.assertEqual(text_plugin_de.get_children().count(), 1)
link_plugin_de = text_plugin_de.get_children().get().get_plugin_instance()[0]
# check we have twice as many plugins as before
self.assertEqual(CMSPlugin.objects.count(), 4)
# check language plugins
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), 2)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 2)
text_plugin_en = self.reload(text_plugin_en)
link_plugin_en = self.reload(link_plugin_en)
# check the relations in english didn't change
self.assertEquals(text_plugin_en.get_children().count(), 1)
self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
self.assertEqual(link_plugin_de.name, link_plugin_en.name)
self.assertEqual(link_plugin_de.url, link_plugin_en.url)
self.assertEqual(text_plugin_de.body, text_plugin_en.body)
def test_remove_plugin_before_published(self):
"""
When removing a draft plugin we would expect the public copy of the plugin to also be removed
"""
# add a page
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
# add a plugin
plugin_data = {
'plugin_type': "TextPlugin",
'language': settings.LANGUAGES[0][0],
'placeholder': page.placeholders.get(slot="body").pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEquals(response.status_code, 200)
self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
# there should be only 1 plugin
self.assertEquals(CMSPlugin.objects.all().count(), 1)
# delete the plugin
plugin_data = {
'plugin_id': int(response.content)
}
remove_url = URL_CMS_PLUGIN_REMOVE
response = self.client.post(remove_url, plugin_data)
self.assertEquals(response.status_code, 200)
# there should be no plugins
self.assertEquals(0, CMSPlugin.objects.all().count())
def test_remove_plugin_after_published(self):
# add a page
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
# add a plugin
plugin_data = {
'plugin_type': "TextPlugin",
'language': settings.LANGUAGES[0][0],
'placeholder': page.placeholders.get(slot="body").pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
plugin_id = int(response.content)
self.assertEquals(response.status_code, 200)
self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
# there should be only 1 plugin
self.assertEquals(CMSPlugin.objects.all().count(), 1)
self.assertEquals(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count(), 1)
# publish page
response = self.client.post(URL_CMS_PAGE + "%d/change-status/" % page.pk, {1: 1})
self.assertEqual(response.status_code, 200)
self.assertEquals(Page.objects.count(), 2)
# there should now be two plugins - 1 draft, 1 public
self.assertEquals(CMSPlugin.objects.all().count(), 2)
# delete the plugin
plugin_data = {
'plugin_id': plugin_id
}
remove_url = URL_CMS_PLUGIN_REMOVE
response = self.client.post(remove_url, plugin_data)
self.assertEquals(response.status_code, 200)
# there should be no plugins
self.assertEquals(CMSPlugin.objects.all().count(), 1)
self.assertEquals(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=False).count(), 1)
def test_remove_plugin_not_associated_to_page(self):
"""
Test case for PlaceholderField
"""
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
# add a plugin
plugin_data = {
'plugin_type': "TextPlugin",
'language': settings.LANGUAGES[0][0],
'placeholder': page.placeholders.get(slot="body").pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEquals(response.status_code, 200)
self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
# there should be only 1 plugin
self.assertEquals(CMSPlugin.objects.all().count(), 1)
ph = Placeholder(slot="subplugin")
ph.save()
plugin_data = {
'plugin_type': "TextPlugin",
'language': settings.LANGUAGES[0][0],
'placeholder': ph.pk,
'parent': int(response.content)
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# no longer allowed for security reasons
self.assertEqual(response.status_code, 404)
def test_register_plugin_twice_should_raise(self):
number_of_plugins_before = len(plugin_pool.get_all_plugins())
# The first time we register the plugin is should work
plugin_pool.register_plugin(DumbFixturePlugin)
# Let's add it a second time. We should catch an exception
raised = False
try:
plugin_pool.register_plugin(DumbFixturePlugin)
except PluginAlreadyRegistered:
raised = True
self.assertTrue(raised)
# Let's also unregister the plugin now, and assert it's not in the
# pool anymore
plugin_pool.unregister_plugin(DumbFixturePlugin)
# Let's make sure we have the same number of plugins as before:
number_of_plugins_after = len(plugin_pool.get_all_plugins())
self.assertEqual(number_of_plugins_before, number_of_plugins_after)
def test_unregister_non_existing_plugin_should_raise(self):
number_of_plugins_before = len(plugin_pool.get_all_plugins())
raised = False
try:
# There should not be such a plugin registered if the other tests
# don't leak plugins
plugin_pool.unregister_plugin(DumbFixturePlugin)
except PluginNotRegistered:
raised = True
self.assertTrue(raised)
# Let's count, to make sure we didn't remove a plugin accidentally.
number_of_plugins_after = len(plugin_pool.get_all_plugins())
self.assertEqual(number_of_plugins_before, number_of_plugins_after)
def test_inheritplugin_media(self):
"""
Test case for InheritPagePlaceholder
"""
inheritfrompage = create_page('page to inherit from',
'nav_playground.html',
'en')
body = inheritfrompage.placeholders.get(slot="body")
plugin = TwitterRecentEntries(
plugin_type='TwitterRecentEntriesPlugin',
placeholder=body,
position=1,
language=settings.LANGUAGE_CODE,
twitter_user='djangocms',
)
plugin.insert_at(None, position='last-child', save=True)
inheritfrompage.publish()
page = create_page('inherit from page',
'nav_playground.html',
'en',
published=True)
inherited_body = page.placeholders.get(slot="body")
inherit_plugin = InheritPagePlaceholder(
plugin_type='InheritPagePlaceholderPlugin',
placeholder=inherited_body,
position=1,
language=settings.LANGUAGE_CODE,
from_page=inheritfrompage,
from_language=settings.LANGUAGE_CODE)
inherit_plugin.insert_at(None, position='last-child', save=True)
page.publish()
self.client.logout()
response = self.client.get(page.get_absolute_url())
self.assertTrue('%scms/js/libs/jquery.tweet.js' % settings.STATIC_URL in response.content, response.content)
def test_inherit_plugin_with_empty_plugin(self):
inheritfrompage = create_page('page to inherit from',
'nav_playground.html',
'en', published=True)
body = inheritfrompage.placeholders.get(slot="body")
empty_plugin = CMSPlugin(
plugin_type='TextPlugin', # create an empty plugin
placeholder=body,
position=1,
language='en',
)
empty_plugin.insert_at(None, position='last-child', save=True)
other_page = create_page('other page', 'nav_playground.html', 'en', published=True)
inherited_body = other_page.placeholders.get(slot="body")
inherit_plugin = InheritPagePlaceholder(
plugin_type='InheritPagePlaceholderPlugin',
placeholder=inherited_body,
position=1,
language='en',
from_page=inheritfrompage,
from_language='en'
)
inherit_plugin.insert_at(None, position='last-child', save=True)
add_plugin(inherited_body, "TextPlugin", "en", body="foobar")
# this should not fail, even if there is an empty plugin
rendered = inherited_body.render(context=self.get_context(other_page.get_absolute_url()), width=200)
self.assertIn("foobar", rendered)
def test_render_textplugin(self):
# Setup
page = create_page("render test", "nav_playground.html", "en")
ph = page.placeholders.get(slot="body")
text_plugin = add_plugin(ph, "TextPlugin", "en", body="Hello World")
link_plugins = []
for i in range(0, 10):
link_plugins.append(add_plugin(ph, "LinkPlugin", "en",
target=text_plugin,
name="A Link %d" % i,
url="http://django-cms.org"))
text_plugin.text.body += '<img src="/static/cms/images/plugins/link.png" alt="Link - %s" id="plugin_obj_%d" title="Link - %s" />' % (
link_plugins[-1].name,
link_plugins[-1].pk,
link_plugins[-1].name,
)
text_plugin.save()
txt = text_plugin.text
ph = Placeholder.objects.get(pk=ph.pk)
with self.assertNumQueries(2):
# 1 query for the CMSPlugin objects,
# 1 query for each type of child object (1 in this case, all are Link plugins)
txt.body = plugin_tags_to_admin_html(
'\n'.join(["{{ plugin_object %d }}" % l.cmsplugin_ptr_id
for l in link_plugins]))
txt.save()
text_plugin = self.reload(text_plugin)
with self.assertNumQueries(2):
rendered = text_plugin.render_plugin(placeholder=ph)
for i in range(0, 10):
self.assertTrue('A Link %d' % i in rendered)
def test_copy_textplugin(self):
"""
Test that copying of textplugins replaces references to copied plugins
"""
page = create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
plugin_base = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin_base.insert_at(None, position='last-child', save=False)
plugin = Text(body='')
plugin_base.set_base_attr(plugin)
plugin.save()
plugin_ref_1_base = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin_ref_1_base.insert_at(plugin_base, position='last-child', save=False)
plugin_ref_1 = Text(body='')
plugin_ref_1_base.set_base_attr(plugin_ref_1)
plugin_ref_1.save()
plugin_ref_2_base = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=2,
language=self.FIRST_LANG)
plugin_ref_2_base.insert_at(plugin_base, position='last-child', save=False)
plugin_ref_2 = Text(body='')
plugin_ref_2_base.set_base_attr(plugin_ref_2)
plugin_ref_2.save()
plugin.body = plugin_tags_to_admin_html(
' {{ plugin_object %s }} {{ plugin_object %s }} ' % (str(plugin_ref_1.pk), str(plugin_ref_2.pk)))
plugin.save()
page_data = self.get_new_page_data()
#create 2nd language page
page_data.update({
'language': self.SECOND_LANG,
'title': "%s %s" % (page.get_title(), self.SECOND_LANG),
})
response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + "?language=%s" % self.SECOND_LANG, page_data)
self.assertRedirects(response, URL_CMS_PAGE)
self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)
self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)
self.assertEquals(CMSPlugin.objects.count(), 3)
self.assertEquals(Page.objects.all().count(), 1)
copy_data = {
'placeholder': placeholder.pk,
'language': self.SECOND_LANG,
'copy_from': self.FIRST_LANG,
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEquals(response.status_code, 200)
self.assertEqual(response.content.count('<li '), 3)
# assert copy success
self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)
self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 3)
self.assertEquals(CMSPlugin.objects.count(), 6)
plugins = list(Text.objects.all())
new_plugin = plugins[-1]
idlist = sorted(plugin_tags_to_id_list(new_plugin.body))
expected = sorted([plugins[3].pk, plugins[4].pk])
self.assertEquals(idlist, expected)
def test_empty_plugin_is_ignored(self):
page = create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
plugin = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin.insert_at(None, position='last-child', save=True)
# this should not raise any errors, but just ignore the empty plugin
out = placeholder.render(self.get_context(), width=300)
self.assertFalse(len(out))
self.assertFalse(len(placeholder._en_plugins_cache))
def test_editing_plugin_changes_page_modification_time_in_sitemap(self):
now = timezone.now()
one_day_ago = now - datetime.timedelta(days=1)
page = create_page("page", "nav_playground.html", "en", published=True, publication_date=now)
page.creation_date = one_day_ago
page.changed_date = one_day_ago
plugin_id = self._create_text_plugin_on_page(page)
plugin = self._edit_text_plugin(plugin_id, "fnord")
actual_last_modification_time = CMSSitemap().lastmod(page)
self.assertEqual(plugin.changed_date - datetime.timedelta(microseconds=plugin.changed_date.microsecond),
actual_last_modification_time - datetime.timedelta(
microseconds=actual_last_modification_time.microsecond))
def test_moving_plugin_to_different_placeholder(self):
plugin_pool.register_plugin(DumbFixturePlugin)
page = create_page("page", "nav_playground.html", "en", published=True)
plugin_data = {
'plugin_type': 'DumbFixturePlugin',
'language': settings.LANGUAGES[0][0],
'placeholder': page.placeholders.get(slot='body').pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD % page.pk, plugin_data)
self.assertEquals(response.status_code, 200)
plugin_data['parent_id'] = int(response.content)
del plugin_data['placeholder']
response = self.client.post(URL_CMS_PLUGIN_ADD % page.pk, plugin_data)
self.assertEquals(response.status_code, 200)
post = {
'plugin_id': int(response.content),
'placeholder': 'right-column',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE % page.pk, post)
self.assertEquals(response.status_code, 200)
from cms.plugins.utils import build_plugin_tree
build_plugin_tree(page.placeholders.get(slot='right-column').get_plugins_list())
plugin_pool.unregister_plugin(DumbFixturePlugin)
def test_get_plugins_for_page(self):
page_en = create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=True, in_navigation=True)
ph_en = page_en.placeholders.get(slot="col_left")
text_plugin_1 = add_plugin(ph_en, "TextPlugin", "en", body="I'm inside an existing placeholder.")
# This placeholder is not in the template.
ph_en_not_used = page_en.placeholders.create(slot="not_used")
text_plugin_2 = add_plugin(ph_en_not_used, "TextPlugin", "en", body="I'm inside a non-existent placeholder.")
page_plugins = get_plugins_for_page(None, page_en, page_en.get_title_obj_attribute('language'))
db_text_plugin_1 = page_plugins.get(pk=text_plugin_1.pk)
self.assertRaises(CMSPlugin.DoesNotExist, page_plugins.get, pk=text_plugin_2.pk)
self.assertEquals(db_text_plugin_1.pk, text_plugin_1.pk)
def test_is_last_in_placeholder(self):
"""
Tests that child plugins don't affect the is_last_in_placeholder plugin method.
"""
page_en = create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=True, in_navigation=True)
ph_en = page_en.placeholders.get(slot="col_left")
text_plugin_1 = add_plugin(ph_en, "TextPlugin", "en", body="I'm the first")
text_plugin_2 = add_plugin(ph_en, "TextPlugin", "en", body="I'm the second")
inner_text_plugin_1 = add_plugin(ph_en, "TextPlugin", "en", body="I'm the first child of text_plugin_1")
text_plugin_1.cmsplugin_set.add(inner_text_plugin_1)
self.assertEquals(text_plugin_2.is_last_in_placeholder(), True)
class FileSystemPluginTests(PluginsTestBaseCase):
def setUp(self):
super(FileSystemPluginTests, self).setUp()
call_command('collectstatic', interactive=False, verbosity=0, link=True)
def tearDown(self):
for directory in [settings.STATIC_ROOT, settings.MEDIA_ROOT]:
for root, dirs, files in os.walk(directory, topdown=False):
# We need to walk() the directory tree since rmdir() cannot
# remove non-empty directories...
for name in files:
# Start by killing all files we walked
os.remove(os.path.join(root, name))
for name in dirs:
# Now all directories we walked...
os.rmdir(os.path.join(root, name))
super(FileSystemPluginTests, self).tearDown()
def test_fileplugin_icon_uppercase(self):
page = create_page('testpage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot="body")
plugin = File(
plugin_type='FilePlugin',
placeholder=body,
position=1,
language=settings.LANGUAGE_CODE,
)
plugin.file.save("UPPERCASE.JPG", SimpleUploadedFile("UPPERCASE.jpg", "content"), False)
plugin.insert_at(None, position='last-child', save=True)
self.assertNotEquals(plugin.get_icon_url().find('jpg'), -1)
class PluginManyToManyTestCase(PluginsTestBaseCase):
def setUp(self):
self.super_user = User(username="test", is_staff=True, is_active=True, is_superuser=True)
self.super_user.set_password("test")
self.super_user.save()
self.slave = User(username="slave", is_staff=True, is_active=True, is_superuser=False)
self.slave.set_password("slave")
self.slave.save()
self._login_context = self.login_user_context(self.super_user)
self._login_context.__enter__()
# create 3 sections
self.sections = []
self.section_pks = []
for i in range(3):
section = Section.objects.create(name="section %s" % i)
self.sections.append(section)
self.section_pks.append(section.pk)
self.section_count = len(self.sections)
# create 10 articles by section
for section in self.sections:
for j in range(10):
Article.objects.create(
title="article %s" % j,
section=section
)
self.FIRST_LANG = settings.LANGUAGES[0][0]
self.SECOND_LANG = settings.LANGUAGES[1][0]
def test_add_plugin_with_m2m(self):
# add a new text plugin
self.assertEqual(ArticlePluginModel.objects.count(), 0)
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
placeholder = page.placeholders.get(slot="body")
plugin_data = {
'plugin_type': "ArticlePlugin",
'language': self.FIRST_LANG,
'placeholder': placeholder.pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEquals(response.status_code, 200)
self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + response.content + "/"
response = self.client.get(edit_url)
self.assertEquals(response.status_code, 200)
data = {
'title': "Articles Plugin 1",
"sections": self.section_pks
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(ArticlePluginModel.objects.count(), 1)
plugin = ArticlePluginModel.objects.all()[0]
self.assertEquals(self.section_count, plugin.sections.count())
def test_add_plugin_with_m2m_and_publisher(self):
self.assertEqual(ArticlePluginModel.objects.count(), 0)
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
self.assertEqual(response.status_code, 302)
page = Page.objects.all()[0]
placeholder = page.placeholders.get(slot="body")
# add a plugin
plugin_data = {
'plugin_type': "ArticlePlugin",
'language': self.FIRST_LANG,
'placeholder': placeholder.pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEquals(response.status_code, 200)
self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
# there should be only 1 plugin
self.assertEquals(1, CMSPlugin.objects.all().count())
articles_plugin_pk = int(response.content)
self.assertEquals(articles_plugin_pk, CMSPlugin.objects.all()[0].pk)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + response.content + "/"
data = {
'title': "Articles Plugin 1",
'sections': self.section_pks
}
response = self.client.post(edit_url, data)
self.assertEquals(response.status_code, 200)
self.assertEquals(1, ArticlePluginModel.objects.count())
articles_plugin = ArticlePluginModel.objects.all()[0]
self.assertEquals(u'Articles Plugin 1', articles_plugin.title)
self.assertEquals(self.section_count, articles_plugin.sections.count())
# check publish box
page = publish_page(page, self.super_user)
# there should now be two plugins - 1 draft, 1 public
self.assertEquals(2, CMSPlugin.objects.all().count())
self.assertEquals(2, ArticlePluginModel.objects.all().count())
db_counts = [plugin.sections.count() for plugin in ArticlePluginModel.objects.all()]
expected = [self.section_count for i in range(len(db_counts))]
self.assertEqual(expected, db_counts)
def test_copy_plugin_with_m2m(self):
page = create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
plugin = ArticlePluginModel(
plugin_type='ArticlePlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin.insert_at(None, position='last-child', save=True)
edit_url = URL_CMS_PLUGIN_EDIT + str(plugin.pk) + "/"
data = {
'title': "Articles Plugin 1",
"sections": self.section_pks
}
response = self.client.post(edit_url, data)
self.assertEquals(response.status_code, 200)
self.assertEqual(ArticlePluginModel.objects.count(), 1)
self.assertEqual(ArticlePluginModel.objects.all()[0].sections.count(), self.section_count)
page_data = self.get_new_page_data()
#create 2nd language page
page_data.update({
'language': self.SECOND_LANG,
'title': "%s %s" % (page.get_title(), self.SECOND_LANG),
})
response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + "?language=%s" % self.SECOND_LANG, page_data)
self.assertRedirects(response, URL_CMS_PAGE)
self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 1)
self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)
self.assertEquals(CMSPlugin.objects.count(), 1)
self.assertEquals(Page.objects.all().count(), 1)
copy_data = {
'placeholder': placeholder.pk,
'language': self.SECOND_LANG,
'copy_from': self.FIRST_LANG,
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEquals(response.status_code, 200)
self.assertEqual(response.content.count('<li '), 1)
# assert copy success
self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 1)
self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 1)
self.assertEquals(CMSPlugin.objects.count(), 2)
db_counts = [plugin.sections.count() for plugin in ArticlePluginModel.objects.all()]
expected = [self.section_count for i in range(len(db_counts))]
self.assertEqual(expected, db_counts)
class PluginsMetaOptionsTests(TestCase):
''' TestCase set for ensuring that bugs like #992 are caught '''
# these plugins are inlined because, due to the nature of the #992
# ticket, we cannot actually import a single file with all the
# plugin variants in, because that calls __new__, at which point the
    # error with the db_table splitting occurs.
def test_meta_options_as_defaults(self):
''' handling when a CMSPlugin meta options are computed defaults '''
# this plugin relies on the base CMSPlugin and Model classes to
# decide what the app_label and db_table should be
class TestPlugin(CMSPlugin):
pass
plugin = TestPlugin()
self.assertEqual(plugin._meta.db_table, 'cmsplugin_testplugin')
self.assertEqual(plugin._meta.app_label, 'tests') # because it's inlined
def test_meta_options_as_declared_defaults(self):
''' handling when a CMSPlugin meta options are declared as per defaults '''
# here, we declare the db_table and app_label explicitly, but to the same
# values as would be computed, thus making sure it's not a problem to
# supply options.
class TestPlugin2(CMSPlugin):
class Meta:
db_table = 'cmsplugin_testplugin2'
app_label = 'tests'
plugin = TestPlugin2()
self.assertEqual(plugin._meta.db_table, 'cmsplugin_testplugin2')
self.assertEqual(plugin._meta.app_label, 'tests') # because it's inlined
def test_meta_options_custom_app_label(self):
''' make sure customised meta options on CMSPlugins don't break things '''
class TestPlugin3(CMSPlugin):
class Meta:
app_label = 'one_thing'
plugin = TestPlugin3()
self.assertEqual(plugin._meta.db_table, 'cmsplugin_testplugin3') # because it's inlined
self.assertEqual(plugin._meta.app_label, 'one_thing')
def test_meta_options_custom_db_table(self):
''' make sure custom database table names are OK. '''
class TestPlugin4(CMSPlugin):
class Meta:
db_table = 'or_another'
plugin = TestPlugin4()
self.assertEqual(plugin._meta.db_table, 'or_another')
self.assertEqual(plugin._meta.app_label, 'tests') # because it's inlined
def test_meta_options_custom_both(self):
''' We should be able to customise app_label and db_table together '''
class TestPlugin5(CMSPlugin):
class Meta:
app_label = 'one_thing'
db_table = 'or_another'
plugin = TestPlugin5()
self.assertEqual(plugin._meta.db_table, 'or_another')
self.assertEqual(plugin._meta.app_label, 'one_thing')
class LinkPluginTestCase(PluginsTestBaseCase):
def test_does_not_verify_existance_of_url(self):
form = LinkForm(
{'name': 'Linkname', 'url': 'http://www.nonexistant.test'})
self.assertTrue(form.is_valid())
def test_opens_in_same_window_by_default(self):
"""Could not figure out how to render this plugin
Checking only for the values in the model"""
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test'})
link = form.save()
self.assertEquals(link.target, '')
def test_open_in_blank_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_blank'})
link = form.save()
self.assertEquals(link.target, '_blank')
def test_open_in_parent_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_parent'})
link = form.save()
self.assertEquals(link.target, '_parent')
def test_open_in_top_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_top'})
link = form.save()
self.assertEquals(link.target, '_top')
def test_open_in_nothing_else(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': 'artificial'})
self.assertFalse(form.is_valid())
class NoDatabasePluginTests(TestCase):
def test_render_meta_is_unique(self):
text = Text()
link = Link()
self.assertNotEqual(id(text._render_meta), id(link._render_meta))
def test_render_meta_does_not_leak(self):
text = Text()
link = Link()
text._render_meta.text_enabled = False
link._render_meta.text_enabled = False
self.assertFalse(text._render_meta.text_enabled)
self.assertFalse(link._render_meta.text_enabled)
link._render_meta.text_enabled = True
self.assertFalse(text._render_meta.text_enabled)
self.assertTrue(link._render_meta.text_enabled)
def test_db_table_hack(self):
# TODO: Django tests seem to leak models from test methods, somehow
# we should clear django.db.models.loading.app_cache in tearDown.
plugin_class = PluginModelBase('TestPlugin', (CMSPlugin,), {'__module__': 'cms.tests.plugins'})
self.assertEqual(plugin_class._meta.db_table, 'cmsplugin_testplugin')
def test_db_table_hack_with_mixin(self):
class LeftMixin: pass
class RightMixin: pass
plugin_class = PluginModelBase('TestPlugin2', (LeftMixin, CMSPlugin, RightMixin),
{'__module__': 'cms.tests.plugins'})
self.assertEqual(plugin_class._meta.db_table, 'cmsplugin_testplugin2')
class PicturePluginTests(PluginsTestBaseCase):
def test_link_or_page(self):
"""Test a validator: you can enter a url or a page_link, but not both."""
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
picture = Picture(url="test")
# Note: don't call full_clean as it will check ALL fields - including
# the image, which we haven't defined. Call clean() instead which
# just validates the url and page_link fields.
picture.clean()
picture.page_link = page
picture.url = None
picture.clean()
picture.url = "test"
self.assertRaises(ValidationError, picture.clean)
class SimplePluginTests(TestCase):
def test_simple_naming(self):
class MyPlugin(CMSPluginBase):
render_template = 'base.html'
self.assertEqual(MyPlugin.name, 'My Plugin')
def test_simple_context(self):
class MyPlugin(CMSPluginBase):
render_template = 'base.html'
plugin = MyPlugin(ArticlePluginModel, admin.site)
context = {}
out_context = plugin.render(context, 1, 2)
self.assertEqual(out_context['instance'], 1)
self.assertEqual(out_context['placeholder'], 2)
self.assertIs(out_context, context)
|
mpetyx/palmdrop
|
venv/lib/python2.7/site-packages/cms/tests/plugins.py
|
Python
|
apache-2.0
| 46,414 | 0.001982 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from nova import objects
from nova.objects import base as obj_base
from nova.scheduler.filters import numa_topology_filter
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.scheduler import fakes
from nova.virt import hardware
class TestNUMATopologyFilter(test.NoDBTestCase):
def setUp(self):
super(TestNUMATopologyFilter, self).setUp()
self.filt_cls = numa_topology_filter.NUMATopologyFilter()
def test_numa_topology_filter_pass(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = None
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_fail_fit(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([2]), memory=512),
objects.InstanceNUMACell(id=2, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_fail_memory(self):
self.flags(ram_allocation_ratio=1)
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
memory=1024),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_fail_cpu(self):
self.flags(cpu_allocation_ratio=1)
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4, 5]),
memory=512)])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_pass_set_limit(self):
self.flags(cpu_allocation_ratio=21)
self.flags(ram_allocation_ratio=1.3)
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
limits_topology = hardware.VirtNUMALimitTopology.from_json(
host.limits['numa_topology'])
self.assertEqual(limits_topology.cells[0].cpu_limit, 42)
self.assertEqual(limits_topology.cells[1].cpu_limit, 42)
self.assertEqual(limits_topology.cells[0].memory_limit, 665)
self.assertEqual(limits_topology.cells[1].memory_limit, 665)
|
orbitfp7/nova
|
nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
|
Python
|
apache-2.0
| 7,379 | 0.000678 |
#!/usr/bin/env python
from os.path import dirname, join
import plyer
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
curdir = dirname(__file__)
packages = [
'plyer',
'plyer.platforms',
'plyer.platforms.linux',
'plyer.platforms.android',
'plyer.platforms.win',
'plyer.platforms.win.libs',
'plyer.platforms.ios',
'plyer.platforms.macosx',
]
setup(
name='plyer',
version=plyer.__version__,
    description='Platform-independent wrapper for platform-dependent APIs',
long_description=open(join(curdir, 'README.rst')).read(),
author='Kivy team',
author_email='mat@kivy.org',
url='https://plyer.readthedocs.org/en/latest/',
packages=packages,
package_data={'': ['LICENSE', 'README.rst']},
package_dir={'plyer': 'plyer'},
include_package_data=True,
license=open(join(curdir, 'LICENSE')).read(),
zip_safe=False,
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
),
)
|
inclement/plyer
|
setup.py
|
Python
|
mit
| 1,374 | 0 |
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Description: File system resilience testing application
# Author: Hubert Kario <hubert@kario.pl>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Copyright (c) 2015 Hubert Kario. All rights reserved.
#
# This copyrighted material is made available to anyone wishing
# to use, modify, copy, or redistribute it subject to the terms
# and conditions of the GNU General Public License version 2.
#
# This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""Handling of NBD requests."""
import struct
from .constants import Magic, RequestType
from ..compat import compat_str
class Error(Exception):
"""Exception describing what went wrong."""
def __repr__(self):
"""Format exception."""
return "request.{0}".format(super(Error, self).__repr__())
class NBDRequest(object):
"""Representation of single NBD protocol request."""
def __init__(self, req_type, handle, data_from, data_length, data=None):
"""Make a NBD protocol request object."""
self.req_type = req_type
self.handle = handle
self.data_from = data_from
self.data_length = data_length
self.data = data
def __eq__(self, other):
"""Check if the other object is equal to this object."""
return (isinstance(other, self.__class__) and
self.__dict__ == other.__dict__)
def __ne__(self, other):
"""Check if the other object is different from this object."""
return not self.__eq__(other)
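# A usage sketch (not from the original module): build a write request for
# four zero bytes at offset 0. In this implementation NBD_CMD_WRITE is the
# only command whose payload travels together with the request header:
#
#     req = NBDRequest(RequestType.NBD_CMD_WRITE, handle=1, data_from=0,
#                      data_length=4, data=b"\x00\x00\x00\x00")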
def recvexactly(sock, size, flags=0):
"""recv exactly size bytes from socket."""
buff = bytearray(size)
view = memoryview(buff)
pos = 0
while pos < size:
read = sock.recv_into(view[pos:], size - pos, flags)
if not read:
raise Error("Incomplete read, expected {0}, read {1}"
                        .format(size, pos))
pos += read
return buff
class NBDRequestSocket(object):
"""Handle requests on NBD socket."""
request_fmt = ">IIQQI"
request_length = struct.calcsize(request_fmt)
def __init__(self, sock):
"""Initialize the socket wrapper."""
self.sock = sock
def recv(self):
"""Receive a single request from socket and return it."""
data = recvexactly(self.sock, self.request_length)
assert len(data) == self.request_length
data = compat_str(data)
result_tuple = struct.unpack(self.request_fmt, data)
magic, req_type, handle, data_from, data_length = result_tuple
if magic != Magic.NBD_REQUEST_MAGIC:
raise Error("Request magic invalid: {0}".format(magic))
if req_type != RequestType.NBD_CMD_WRITE:
return NBDRequest(req_type, handle, data_from, data_length)
payload = recvexactly(self.sock, data_length)
return NBDRequest(req_type, handle, data_from, data_length, payload)
def send(self, request):
"""Send a single request through socket."""
data = struct.pack(self.request_fmt,
Magic.NBD_REQUEST_MAGIC,
request.req_type,
request.handle,
request.data_from,
request.data_length)
if request.req_type == RequestType.NBD_CMD_WRITE:
data = data + request.data
self.sock.sendall(data)
|
tomato42/fsresck
|
fsresck/nbd/request.py
|
Python
|
gpl-2.0
| 3,965 | 0 |
#!/usr/bin/env python3
# -----------------------------------------------------------------------------
# Copyright (C) British Crown (Met Office) & Contributors.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""A pre-commit hook on a Rosie Subversion repository.
Ensure that commits conform to the rules of Rosie.
"""
from fnmatch import fnmatch
import re
import shlex
import sys
import traceback
import metomi.rose
from metomi.rose.config import ConfigSyntaxError
from metomi.rose.macro import (
add_meta_paths,
get_reports_as_text,
load_meta_config,
)
from metomi.rose.macros import DefaultValidators
from metomi.rose.opt_parse import RoseOptionParser
from metomi.rose.popen import RosePopenError
from metomi.rose.reporter import Reporter
from metomi.rose.resource import ResourceLocator
from metomi.rose.scheme_handler import SchemeHandlersManager
from metomi.rosie.svn_hook import (
BadChange,
BadChanges,
InfoFileError,
RosieSvnHook,
)
class RosieSvnPreCommitHook(RosieSvnHook):
"""A pre-commit hook on a Rosie Subversion repository.
Ensure that commits conform to the rules of Rosie.
"""
IGNORES = "svnperms.conf"
RE_ID_NAMES = [r"[Ra-z]", r"[Oa-z]", r"[S\d]", r"[I\d]", r"[E\d]"]
TRUNK_KNOWN_KEYS_FILE = "trunk/rosie-keys"
def __init__(self, event_handler=None, popen=None):
super(RosieSvnPreCommitHook, self).__init__(event_handler, popen)
self.usertools_manager = SchemeHandlersManager(
[self.path], "rosie.usertools", ["verify_users"]
)
def _get_access_info(self, info_node):
"""Return (owner, access_list) from "info_node"."""
owner = info_node.get_value(["owner"])
access_list = info_node.get_value(["access-list"], "").split()
access_list.sort()
return owner, access_list
def _verify_users(
self, status, path, txn_owner, txn_access_list, bad_changes
):
"""Check txn_owner and txn_access_list.
For any invalid users, append to bad_changes and return True.
"""
# The owner and names in access list must be real users
conf = ResourceLocator.default().get_conf()
user_tool_name = conf.get_value(["rosa-svn", "user-tool"])
if not user_tool_name:
return False
user_tool = self.usertools_manager.get_handler(user_tool_name)
txn_users = set([txn_owner] + txn_access_list)
txn_users.discard("*")
bad_users = user_tool.verify_users(txn_users)
for bad_user in bad_users:
if txn_owner == bad_user:
bad_change = BadChange(
status, path, BadChange.USER, "owner=" + bad_user
)
bad_changes.append(bad_change)
if bad_user in txn_access_list:
bad_change = BadChange(
status, path, BadChange.USER, "access-list=" + bad_user
)
bad_changes.append(bad_change)
return bool(bad_users)
def run(self, repos, txn):
"""Apply the rule engine on transaction "txn" to repository "repos"."""
changes = set() # set([(status, path), ...])
for line in self._svnlook("changed", "-t", txn, repos).splitlines():
status, path = line.split(None, 1)
changes.add((status, path))
bad_changes = []
author = None
super_users = None
rev_info_map = {}
txn_info_map = {}
conf = ResourceLocator.default().get_conf()
ignores_str = conf.get_value(["rosa-svn", "ignores"], self.IGNORES)
ignores = shlex.split(ignores_str)
for status, path in sorted(changes):
if any(fnmatch(path, ignore) for ignore in ignores):
continue
names = path.split("/", self.LEN_ID + 1)
tail = None
if not names[-1]:
tail = names.pop()
# Directories above the suites must match the ID patterns
is_bad = False
for name, pattern in zip(names, self.RE_ID_NAMES):
if not re.compile(r"\A" + pattern + r"\Z").match(name):
is_bad = True
break
if is_bad:
msg = "Directories above the suites must match the ID patterns"
bad_changes.append(BadChange(status, path, content=msg))
continue
# At levels above the suites, can only add directories
if len(names) < self.LEN_ID:
if status[0] != self.ST_ADDED:
msg = (
"At levels above the suites, "
"can only add directories"
)
bad_changes.append(BadChange(status, path, content=msg))
continue
# Cannot have a file at the branch level
if len(names) == self.LEN_ID + 1 and tail is None:
msg = "Cannot have a file at the branch level"
bad_changes.append(BadChange(status, path, content=msg))
continue
# New suite should have an info file
if len(names) == self.LEN_ID and status == self.ST_ADDED:
if (self.ST_ADDED, path + "trunk/") not in changes:
bad_changes.append(
BadChange(status, path, BadChange.NO_TRUNK)
)
continue
path_trunk_info_file = path + self.TRUNK_INFO_FILE
if (self.ST_ADDED, path_trunk_info_file) not in changes and (
self.ST_UPDATED,
path_trunk_info_file,
) not in changes:
bad_changes.append(
BadChange(status, path, BadChange.NO_INFO)
)
continue
sid = "".join(names[0 : self.LEN_ID])
branch = names[self.LEN_ID] if len(names) > self.LEN_ID else None
path_head = "/".join(sid) + "/"
path_tail = path[len(path_head) :]
is_meta_suite = sid == "ROSIE"
if status != self.ST_DELETED:
# Check info file
if sid not in txn_info_map:
try:
txn_info_map[sid] = self._load_info(
repos, sid, branch=branch, transaction=txn
)
err = None
except ConfigSyntaxError as exc:
err = InfoFileError(InfoFileError.VALUE, exc)
except RosePopenError as exc:
err = InfoFileError(InfoFileError.NO_INFO, exc.stderr)
if err:
bad_changes.append(err)
txn_info_map[sid] = err
continue
# Suite must have an owner
txn_owner, txn_access_list = self._get_access_info(
txn_info_map[sid]
)
if not txn_owner:
bad_changes.append(
InfoFileError(InfoFileError.NO_OWNER)
)
continue
# No need to check other non-trunk changes
if branch and branch != "trunk":
continue
# For meta suite, make sure keys in keys file can be parsed
if is_meta_suite and path_tail == self.TRUNK_KNOWN_KEYS_FILE:
out = self._svnlook("cat", "-t", txn, repos, path)
try:
shlex.split(out)
except ValueError:
bad_changes.append(
BadChange(status, path, BadChange.VALUE)
)
continue
# User IDs of owner and access list must be real
if (
status != self.ST_DELETED
and path_tail == self.TRUNK_INFO_FILE
and not isinstance(txn_info_map[sid], InfoFileError)
):
txn_owner, txn_access_list = self._get_access_info(
txn_info_map[sid]
)
if self._verify_users(
status, path, txn_owner, txn_access_list, bad_changes
):
continue
reports = DefaultValidators().validate(
txn_info_map[sid],
load_meta_config(
txn_info_map[sid],
config_type=metomi.rose.INFO_CONFIG_NAME,
),
)
if reports:
reports_str = get_reports_as_text({None: reports}, path)
bad_changes.append(
BadChange(status, path, BadChange.VALUE, reports_str)
)
continue
# Can only remove trunk information file with suite
if status == self.ST_DELETED and path_tail == self.TRUNK_INFO_FILE:
if (self.ST_DELETED, path_head) not in changes:
bad_changes.append(
BadChange(status, path, BadChange.NO_INFO)
)
continue
# Can only remove trunk with suite
# (Don't allow replacing trunk with a copy from elsewhere, either)
if status == self.ST_DELETED and path_tail == "trunk/":
if (self.ST_DELETED, path_head) not in changes:
bad_changes.append(
BadChange(status, path, BadChange.NO_TRUNK)
)
continue
# New suite trunk: ignore the rest
if (self.ST_ADDED, path_head + "trunk/") in changes:
continue
# See whether author has permission to make changes
if author is None:
author = self._svnlook("author", "-t", txn, repos).strip()
if super_users is None:
super_users = []
for s_key in ["rosa-svn", "rosa-svn-pre-commit"]:
value = conf.get_value([s_key, "super-users"])
if value is not None:
super_users = shlex.split(value)
break
if sid not in rev_info_map:
rev_info_map[sid] = self._load_info(repos, sid, branch=branch)
owner, access_list = self._get_access_info(rev_info_map[sid])
admin_users = super_users + [owner]
# Only admin users can remove the suite
if author not in admin_users and not path_tail:
msg = "Only the suite owner can remove the suite"
bad_changes.append(BadChange(status, path, content=msg))
continue
# Admin users and those in access list can modify everything in
# trunk apart from specific entries in the trunk info file
if "*" in access_list or author in admin_users + access_list:
if path_tail != self.TRUNK_INFO_FILE:
continue
else:
msg = "User not in access list"
bad_changes.append(BadChange(status, path, content=msg))
continue
# Only the admin users can change owner and access list
if owner == txn_owner and access_list == txn_access_list:
continue
if author not in admin_users:
if owner != txn_owner:
bad_changes.append(
BadChange(
status, path, BadChange.PERM, "owner=" + txn_owner
)
)
else: # access list
bad_change = BadChange(
status,
path,
BadChange.PERM,
"access-list=" + " ".join(txn_access_list),
)
bad_changes.append(bad_change)
continue
if bad_changes:
raise BadChanges(bad_changes)
__call__ = run
def main():
"""Implement "rosa svn-pre-commit"."""
add_meta_paths()
opt_parser = RoseOptionParser()
opts, args = opt_parser.parse_args()
repos, txn = args
report = Reporter(opts.verbosity - opts.quietness)
hook = RosieSvnPreCommitHook(report)
try:
hook(repos, txn)
except Exception as exc:
report(exc)
if opts.debug_mode:
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
main()
|
metomi/rose
|
metomi/rosie/svn_pre_commit.py
|
Python
|
gpl-3.0
| 13,444 | 0.000149 |
# encoding: utf-8
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .Rackspace import Rackspace as delegate_class
|
redhat-imaging/imagefactory
|
imagefactory_plugins/Rackspace/__init__.py
|
Python
|
apache-2.0
| 668 | 0 |
# From: https://gist.github.com/nathan-hoad/8966377
import os
import asyncio
import sys
from asyncio.streams import StreamWriter, FlowControlMixin
reader, writer = None, None
@asyncio.coroutine
def stdio(loop=None):
if loop is None:
loop = asyncio.get_event_loop()
reader = asyncio.StreamReader()
reader_protocol = asyncio.StreamReaderProtocol(reader)
writer_transport, writer_protocol = yield from loop.connect_write_pipe(FlowControlMixin, os.fdopen(0, 'wb'))
writer = StreamWriter(writer_transport, writer_protocol, None, loop)
yield from loop.connect_read_pipe(lambda: reader_protocol, sys.stdin)
return reader, writer
@asyncio.coroutine
def async_input(message):
if isinstance(message, str):
message = message.encode('utf8')
global reader, writer
if (reader, writer) == (None, None):
reader, writer = yield from stdio()
writer.write(message)
yield from writer.drain()
line = yield from reader.readline()
return line.decode('utf8').replace('\r', '').replace('\n', '')
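# Minimal usage sketch (assumes the pre-3.5 asyncio API that the decorators
# above target):
#
#     loop = asyncio.get_event_loop()
#     answer = loop.run_until_complete(async_input('name? '))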
|
dpdani/tBB
|
tBB/async_stdio.py
|
Python
|
gpl-3.0
| 1,064 | 0.00094 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django import forms
from django.core.validators import validate_email, ValidationError
from slugify import slugify
from django.utils.translation import ugettext as _
from modeltranslation.forms import TranslationModelForm
from django.contrib.auth import get_user_model
from geonode.groups.models import GroupProfile
class GroupForm(TranslationModelForm):
slug = forms.SlugField(
max_length=20,
help_text=_("a short version of the name consisting only of letters, numbers, underscores and hyphens."),
widget=forms.HiddenInput,
required=False)
def clean_slug(self):
if GroupProfile.objects.filter(
slug__iexact=self.cleaned_data["slug"]).count() > 0:
raise forms.ValidationError(
_("A group already exists with that slug."))
return self.cleaned_data["slug"].lower()
def clean_title(self):
if GroupProfile.objects.filter(
title__iexact=self.cleaned_data["title"]).count() > 0:
raise forms.ValidationError(
_("A group already exists with that name."))
return self.cleaned_data["title"]
def clean(self):
cleaned_data = self.cleaned_data
name = cleaned_data.get("title")
slug = slugify(name)
cleaned_data["slug"] = slug
return cleaned_data
class Meta:
model = GroupProfile
exclude = ['group']
class GroupUpdateForm(forms.ModelForm):
    def clean_title(self):
        if GroupProfile.objects.filter(
                title__iexact=self.cleaned_data["title"]).count() > 0:
            if self.cleaned_data["title"] == self.instance.title:
pass # same instance
else:
raise forms.ValidationError(
_("A group already exists with that name."))
return self.cleaned_data["title"]
class Meta:
model = GroupProfile
exclude = ['group']
class GroupMemberForm(forms.Form):
role = forms.ChoiceField(choices=[
("member", "Member"),
("manager", "Manager"),
])
user_identifiers = forms.CharField(
widget=forms.TextInput(
attrs={
'class': 'user-select'}))
def clean_user_identifiers(self):
value = self.cleaned_data["user_identifiers"]
new_members, errors = [], []
for ui in value.split(","):
ui = ui.strip()
try:
validate_email(ui)
try:
new_members.append(get_user_model().objects.get(email=ui))
except get_user_model().DoesNotExist:
new_members.append(ui)
except ValidationError:
try:
new_members.append(
get_user_model().objects.get(
username=ui))
except get_user_model().DoesNotExist:
errors.append(ui)
if errors:
message = (
"The following are not valid email addresses or "
"usernames: %s; not added to the group" %
", ".join(errors))
raise forms.ValidationError(message)
return new_members
class GroupInviteForm(forms.Form):
invite_role = forms.ChoiceField(label="Role", choices=[
("member", "Member"),
("manager", "Manager"),
])
invite_user_identifiers = forms.CharField(
label="E-mail addresses list",
widget=forms.Textarea)
    def clean_invite_user_identifiers(self):
value = self.cleaned_data["invite_user_identifiers"]
invitees, errors = [], []
for ui in value.split(","):
ui = ui.strip()
try:
validate_email(ui)
try:
invitees.append(get_user_model().objects.get(email=ui))
except get_user_model().DoesNotExist:
invitees.append(ui)
except ValidationError:
try:
invitees.append(get_user_model().objects.get(username=ui))
except get_user_model().DoesNotExist:
errors.append(ui)
if errors:
message = (
"The following are not valid email addresses or "
"usernames: %s; no invitations sent" %
", ".join(errors))
raise forms.ValidationError(message)
return invitees
|
ingenieroariel/geonode
|
geonode/groups/forms.py
|
Python
|
gpl-3.0
| 5,314 | 0.000188 |
# -*- coding: utf-8 -*-
"""
irc/server.py
Copyright © 2009 Ferry Boender
Copyright © 2012 Jason R. Coombs
This server has basic support for:
* Connecting
* Channels
* Nicknames
* Public/private messages
It is MISSING support for notably:
* Server linking
* Modes (user and channel)
* Proper error reporting
* Basically everything else
It is mostly useful as a testing tool or perhaps for building something like a
private proxy on. Do NOT use it in any kind of production code or anything that
will ever be connected to by the public.
"""
#
# Very simple hacky ugly IRC server.
#
# Todo:
# - Encode format for each message and reply with events.codes['needmoreparams']
# - starting server when already started doesn't work properly. PID file is not changed, no error message is displayed.
# - Delete channel if last user leaves.
# - [ERROR] <socket.error instance at 0x7f9f203dfb90> (better error msg required)
# - Empty channels are left behind
# - No Op assigned when new channel is created.
# - User can /join multiple times (doesn't add more to channel, does say 'joined')
# - PING timeouts
# - Allow all numerical commands.
# - Users can send commands to channels they are not in (PART)
# Not Todo (Won't be supported)
# - Server linking.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function, absolute_import
import argparse
import logging
import socket
import select
import re
from . import client
from . import _py2_compat
from . import logging as log_util
from . import events
from . import buffer
SRV_WELCOME = "Welcome to %s v%s, the ugliest IRC server in the world." % (
__name__, client.VERSION)
log = logging.getLogger(__name__)
class IRCError(Exception):
"""
Exception thrown by IRC command handlers to notify client of a server/client error.
"""
def __init__(self, code, value):
self.code = code
self.value = value
def __str__(self):
return repr(self.value)
@classmethod
def from_name(cls, name, value):
return cls(events.codes[name], value)
class IRCChannel(object):
"""
Object representing an IRC channel.
"""
def __init__(self, name, topic='No topic'):
self.name = name
self.topic_by = 'Unknown'
self.topic = topic
self.clients = set()
class IRCClient(_py2_compat.socketserver.BaseRequestHandler):
"""
IRC client connect and command handling. Client connection is handled by
the `handle` method which sets up a two-way communication with the client.
It then handles commands sent by the client by dispatching them to the
handle_ methods.
"""
class Disconnect(BaseException): pass
def __init__(self, request, client_address, server):
self.user = None
self.host = client_address # Client's hostname / ip.
self.realname = None # Client's real name
self.nick = None # Client's currently registered nickname
self.send_queue = [] # Messages to send to client (strings)
self.channels = {} # Channels the client is in
_py2_compat.socketserver.BaseRequestHandler.__init__(self, request,
client_address, server)
def handle(self):
log.info('Client connected: %s', self.client_ident())
self.buffer = buffer.LineBuffer()
try:
while True:
self._handle_one()
except self.Disconnect:
self.request.close()
def _handle_one(self):
"""
Handle one read/write cycle.
"""
ready_to_read, ready_to_write, in_error = select.select(
[self.request], [self.request], [self.request], 0.1)
if in_error:
raise self.Disconnect()
# Write any commands to the client
while self.send_queue and ready_to_write:
msg = self.send_queue.pop(0)
self._send(msg)
# See if the client has any commands for us.
if ready_to_read:
self._handle_incoming()
def _handle_incoming(self):
try:
data = self.request.recv(1024)
except Exception:
raise self.Disconnect()
if not data:
raise self.Disconnect()
self.buffer.feed(data)
for line in self.buffer:
self._handle_line(line)
def _handle_line(self, line):
try:
log.debug('from %s: %s' % (self.client_ident(), line))
command, sep, params = line.partition(' ')
handler = getattr(self, 'handle_%s' % command.lower(), None)
if not handler:
log.info('No handler for command: %s. '
'Full line: %s' % (command, line))
raise IRCError.from_name('unknowncommand',
'%s :Unknown command' % command)
response = handler(params)
except AttributeError as e:
log.error(_py2_compat.str(e))
raise
except IRCError as e:
response = ':%s %s %s' % (self.server.servername, e.code, e.value)
log.error(response)
except Exception as e:
response = ':%s ERROR %r' % (self.server.servername, e)
log.error(response)
raise
if response:
self._send(response)
def _send(self, msg):
log.debug('to %s: %s', self.client_ident(), msg)
self.request.send(msg + '\r\n')
def handle_nick(self, params):
"""
Handle the initial setting of the user's nickname and nick changes.
"""
nick = params
# Valid nickname?
        if re.search(r"[^a-zA-Z0-9\-\[\]'`^{}_]", nick):
raise IRCError.from_name('erroneusnickname', ':%s' % nick)
if self.server.clients.get(nick, None) == self:
# Already registered to user
return
if nick in self.server.clients:
# Someone else is using the nick
raise IRCError.from_name('nicknameinuse', 'NICK :%s' % (nick))
if not self.nick:
# New connection and nick is available; register and send welcome
# and MOTD.
self.nick = nick
self.server.clients[nick] = self
response = ':%s %s %s :%s' % (self.server.servername,
events.codes['welcome'], self.nick, SRV_WELCOME)
self.send_queue.append(response)
response = ':%s 376 %s :End of MOTD command.' % (
self.server.servername, self.nick)
self.send_queue.append(response)
return
# Nick is available. Change the nick.
message = ':%s NICK :%s' % (self.client_ident(), nick)
self.server.clients.pop(self.nick)
self.nick = nick
self.server.clients[self.nick] = self
# Send a notification of the nick change to all the clients in the
# channels the client is in.
for channel in self.channels.values():
self._send_to_others(message, channel)
# Send a notification of the nick change to the client itself
return message
def handle_user(self, params):
"""
Handle the USER command which identifies the user to the server.
"""
params = params.split(' ', 3)
if len(params) != 4:
raise IRCError.from_name('needmoreparams',
'USER :Not enough parameters')
user, mode, unused, realname = params
self.user = user
self.mode = mode
self.realname = realname
return ''
def handle_ping(self, params):
"""
Handle client PING requests to keep the connection alive.
"""
response = ':%s PONG :%s' % (self.server.servername, self.server.servername)
return response
def handle_join(self, params):
"""
Handle the JOINing of a user to a channel. Valid channel names start
with a # and consist of a-z, A-Z, 0-9 and/or '_'.
"""
channel_names = params.split(' ', 1)[0] # Ignore keys
for channel_name in channel_names.split(','):
r_channel_name = channel_name.strip()
# Valid channel name?
if not re.match('^#([a-zA-Z0-9_])+$', r_channel_name):
raise IRCError.from_name('nosuchchannel',
'%s :No such channel' % r_channel_name)
# Add user to the channel (create new channel if not exists)
channel = self.server.channels.setdefault(r_channel_name, IRCChannel(r_channel_name))
channel.clients.add(self)
# Add channel to user's channel list
self.channels[channel.name] = channel
# Send the topic
response_join = ':%s TOPIC %s :%s' % (channel.topic_by, channel.name, channel.topic)
self.send_queue.append(response_join)
# Send join message to everybody in the channel, including yourself and
# send user list of the channel back to the user.
response_join = ':%s JOIN :%s' % (self.client_ident(), r_channel_name)
for client in channel.clients:
client.send_queue.append(response_join)
nicks = [client.nick for client in channel.clients]
response_userlist = ':%s 353 %s = %s :%s' % (self.server.servername, self.nick, channel.name, ' '.join(nicks))
self.send_queue.append(response_userlist)
response = ':%s 366 %s %s :End of /NAMES list' % (self.server.servername, self.nick, channel.name)
self.send_queue.append(response)
def handle_privmsg(self, params):
"""
Handle sending a private message to a user or channel.
"""
target, sep, msg = params.partition(' ')
if not msg:
raise IRCError.from_name('needmoreparams',
'PRIVMSG :Not enough parameters')
message = ':%s PRIVMSG %s %s' % (self.client_ident(), target, msg)
if target.startswith('#') or target.startswith('$'):
# Message to channel. Check if the channel exists.
channel = self.server.channels.get(target)
if not channel:
raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % target)
if not channel.name in self.channels:
# The user isn't in the channel.
raise IRCError.from_name('cannotsendtochan',
'%s :Cannot send to channel' % channel.name)
self._send_to_others(message, channel)
else:
# Message to user
client = self.server.clients.get(target, None)
if not client:
raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % target)
client.send_queue.append(message)
def _send_to_others(self, message, channel):
"""
Send the message to all clients in the specified channel except for
self.
"""
other_clients = [client for client in channel.clients
if not client == self]
for client in other_clients:
client.send_queue.append(message)
def handle_topic(self, params):
"""
Handle a topic command.
"""
channel_name, sep, topic = params.partition(' ')
channel = self.server.channels.get(channel_name)
if not channel:
raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % channel_name)
if not channel.name in self.channels:
# The user isn't in the channel.
raise IRCError.from_name('cannotsendtochan',
'%s :Cannot send to channel' % channel.name)
if topic:
channel.topic = topic.lstrip(':')
channel.topic_by = self.nick
message = ':%s TOPIC %s :%s' % (self.client_ident(), channel_name,
channel.topic)
return message
def handle_part(self, params):
"""
Handle a client parting from channel(s).
"""
for pchannel in params.split(','):
if pchannel.strip() in self.server.channels:
# Send message to all clients in all channels user is in, and
# remove the user from the channels.
channel = self.server.channels.get(pchannel.strip())
response = ':%s PART :%s' % (self.client_ident(), pchannel)
if channel:
for client in channel.clients:
client.send_queue.append(response)
channel.clients.remove(self)
self.channels.pop(pchannel)
else:
response = ':%s 403 %s :%s' % (self.server.servername, pchannel, pchannel)
self.send_queue.append(response)
def handle_quit(self, params):
"""
Handle the client breaking off the connection with a QUIT command.
"""
response = ':%s QUIT :%s' % (self.client_ident(), params.lstrip(':'))
# Send quit message to all clients in all channels user is in, and
# remove the user from the channels.
for channel in self.channels.values():
for client in channel.clients:
client.send_queue.append(response)
channel.clients.remove(self)
def handle_dump(self, params):
"""
Dump internal server information for debugging purposes.
"""
print("Clients:", self.server.clients)
for client in self.server.clients.values():
print(" ", client)
for channel in client.channels.values():
print(" ", channel.name)
print("Channels:", self.server.channels)
for channel in self.server.channels.values():
print(" ", channel.name, channel)
for client in channel.clients:
print(" ", client.nick, client)
def client_ident(self):
"""
Return the client identifier as included in many command replies.
"""
return client.NickMask.from_params(self.nick, self.user,
self.server.servername)
def finish(self):
"""
The client conection is finished. Do some cleanup to ensure that the
client doesn't linger around in any channel or the client list, in case
the client didn't properly close the connection with PART and QUIT.
"""
log.info('Client disconnected: %s', self.client_ident())
response = ':%s QUIT :EOF from client' % self.client_ident()
for channel in self.channels.values():
if self in channel.clients:
# Client is gone without properly QUITing or PARTing this
# channel.
for client in channel.clients:
client.send_queue.append(response)
channel.clients.remove(self)
self.server.clients.pop(self.nick)
log.info('Connection finished: %s', self.client_ident())
def __repr__(self):
"""
Return a user-readable description of the client
"""
return '<%s %s!%s@%s (%s)>' % (
self.__class__.__name__,
self.nick,
self.user,
self.host[0],
self.realname,
)
class IRCServer(_py2_compat.socketserver.ThreadingMixIn,
_py2_compat.socketserver.TCPServer):
daemon_threads = True
allow_reuse_address = True
channels = {}
"Existing channels (IRCChannel instances) by channel name"
clients = {}
"Connected clients (IRCClient instances) by nick name"
def __init__(self, *args, **kwargs):
self.servername = 'localhost'
self.channels = {}
self.clients = {}
_py2_compat.socketserver.TCPServer.__init__(self, *args, **kwargs)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--address", dest="listen_address",
default='127.0.0.1', help="IP on which to listen")
parser.add_argument("-p", "--port", dest="listen_port", default=6667,
type=int, help="Port on which to listen")
log_util.add_arguments(parser)
return parser.parse_args()
def main():
options = get_args()
log_util.setup(options)
log.info("Starting irc.server")
#
# Start server
#
try:
bind_address = options.listen_address, options.listen_port
ircserver = IRCServer(bind_address, IRCClient)
log.info('Listening on {listen_address}:{listen_port}'.format(
**vars(options)))
ircserver.serve_forever()
except socket.error as e:
log.error(repr(e))
raise SystemExit(-2)
if __name__ == "__main__":
main()
|
sim0629/irc
|
irc/server.py
|
Python
|
lgpl-2.1
| 17,734 | 0.002425 |
import random
import time
import datetime
from consts import *
__all__ = ['gen_valid_id', 'gen_list_page', 'log']
def gen_valid_id(collection):
def gen_id():
_id = ''
for i in range(4):
_id += random.choice('0123456789')
return _id
id = gen_id()
while collection.find_one({'id': id}):
id = gen_id()
return id
def gen_list_page(collection, status, page=1):
page = int(page)
left = (page - 1) * 15
right = left + 15
all = collection.find({'status': status}).sort([('id', 1)])
max_page = int((all.count()-1) / 15) + 1 if all.count() else 0
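    # e.g. 31 matching entries -> max_page == 3; page 3 lists only entry #31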
if page > max_page:
return PAGE_NOT_EXIST
elif page < 1:
return ARGS_INCORRECT
header = '===== {0}/{1} =====\n'.format(page, max_page)
selected = all[left:right]
return header + '\n'.join([
'{id} {title} ({comment})'.format(**i) for i in selected])
def log(m):
with open('log', 'a') as f:
if m.type == 'text': exp=m.content
elif m.type == 'image': exp=m.img
elif m.type == 'link': exp=';'.join([m.title, m.description, m.url])
else: exp=str(dict(m))
f.write(LOG.format(datetime.datetime.fromtimestamp(
time.time()).strftime('%Y-%m-%d %H:%M:%S'), m.source, m.type, exp))
def add_key(key, value):
from pymongo import MongoClient
collection = MongoClient()['SongsDistributor']['collection']
for i in ('checked', 'pending'):
collection.update_many({'status': i}, {'$set': {key: value}})
print('ok')
|
oyiadin/Songs-Distributor
|
utils.py
|
Python
|
mit
| 1,548 | 0.00646 |
# -----------------------------------------------------------------------------
#
# This file is the copyrighted property of Tableau Software and is protected
# by registered patents and other applicable U.S. and international laws and
# regulations.
#
# Unlicensed use of the contents of this file is prohibited. Please refer to
# the NOTICES.txt file for further details.
#
# -----------------------------------------------------------------------------
from ctypes import *
from . import Libs
class TableauException(Exception):
def __init__(self, errorCode, message):
Exception.__init__(self, message)
self.errorCode = errorCode
self.message = message
def __str__(self):
return 'TableauException ({0}): {1}'.format(self.errorCode, self.message)
def GetLastErrorMessage():
common_lib = Libs.LoadLibs().load_lib('Common')
common_lib.TabGetLastErrorMessage.argtypes = []
common_lib.TabGetLastErrorMessage.restype = c_wchar_p
return wstring_at(common_lib.TabGetLastErrorMessage())
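# Typical use (sketch, not part of the SDK): surface the last error after a
# failed call, where `result` stands for a hypothetical non-zero return code:
#
#     raise TableauException(result, GetLastErrorMessage())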
|
corystreet/pyOdbcToTde
|
Static/TableauSDK-9100.15.0828.1711/tableausdk/Exceptions.py
|
Python
|
gpl-2.0
| 1,040 | 0.003846 |
# Copyright (C) 2011 One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import logging
from gi.repository import GConf
from gi.repository import Gst
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
DEFAULT_PITCH = 0
DEFAULT_RATE = 0
_speech_manager = None
class SpeechManager(GObject.GObject):
__gtype_name__ = 'SpeechManager'
__gsignals__ = {
'play': (GObject.SignalFlags.RUN_FIRST, None, []),
'pause': (GObject.SignalFlags.RUN_FIRST, None, []),
'stop': (GObject.SignalFlags.RUN_FIRST, None, [])
}
MIN_PITCH = -100
MAX_PITCH = 100
MIN_RATE = -100
MAX_RATE = 100
def __init__(self, **kwargs):
GObject.GObject.__init__(self, **kwargs)
self._player = _GstSpeechPlayer()
self._player.connect('play', self._update_state, 'play')
self._player.connect('stop', self._update_state, 'stop')
self._player.connect('pause', self._update_state, 'pause')
self._voice_name = self._player.get_default_voice()
self._pitch = DEFAULT_PITCH
self._rate = DEFAULT_RATE
self._is_playing = False
self._is_paused = False
self.restore()
def _update_state(self, player, signal):
self._is_playing = (signal == 'play')
self._is_paused = (signal == 'pause')
self.emit(signal)
def get_is_playing(self):
return self._is_playing
is_playing = GObject.property(type=bool, getter=get_is_playing,
setter=None, default=False)
def get_is_paused(self):
return self._is_paused
is_paused = GObject.property(type=bool, getter=get_is_paused,
setter=None, default=False)
def get_pitch(self):
return self._pitch
def get_rate(self):
return self._rate
def set_pitch(self, pitch):
self._pitch = pitch
self.save()
def set_rate(self, rate):
self._rate = rate
self.save()
def say_text(self, text):
if text:
self._player.speak(self._pitch, self._rate, self._voice_name, text)
def say_selected_text(self):
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)
clipboard.request_text(self.__primary_selection_cb, None)
def pause(self):
self._player.pause_sound_device()
def restart(self):
self._player.restart_sound_device()
def stop(self):
self._player.stop_sound_device()
def __primary_selection_cb(self, clipboard, text, user_data):
self.say_text(text)
def save(self):
client = GConf.Client.get_default()
client.set_int('/desktop/sugar/speech/pitch', self._pitch)
client.set_int('/desktop/sugar/speech/rate', self._rate)
logging.debug('saving speech configuration pitch %s rate %s',
self._pitch, self._rate)
def restore(self):
client = GConf.Client.get_default()
self._pitch = client.get_int('/desktop/sugar/speech/pitch')
self._rate = client.get_int('/desktop/sugar/speech/rate')
logging.debug('loading speech configuration pitch %s rate %s',
self._pitch, self._rate)
class _GstSpeechPlayer(GObject.GObject):
__gsignals__ = {
'play': (GObject.SignalFlags.RUN_FIRST, None, []),
'pause': (GObject.SignalFlags.RUN_FIRST, None, []),
'stop': (GObject.SignalFlags.RUN_FIRST, None, [])
}
def __init__(self):
GObject.GObject.__init__(self)
self._pipeline = None
def restart_sound_device(self):
if self._pipeline is None:
logging.debug('Trying to restart not initialized sound device')
return
self._pipeline.set_state(Gst.State.PLAYING)
self.emit('play')
def pause_sound_device(self):
if self._pipeline is None:
return
self._pipeline.set_state(Gst.State.PAUSED)
self.emit('pause')
def stop_sound_device(self):
if self._pipeline is None:
return
self._pipeline.set_state(Gst.State.NULL)
self.emit('stop')
def make_pipeline(self, command):
if self._pipeline is not None:
self.stop_sound_device()
del self._pipeline
self._pipeline = Gst.parse_launch(command)
bus = self._pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', self.__pipe_message_cb)
def __pipe_message_cb(self, bus, message):
if message.type == Gst.MessageType.EOS:
self._pipeline.set_state(Gst.State.NULL)
self.emit('stop')
elif message.type == Gst.MessageType.ERROR:
self._pipeline.set_state(Gst.State.NULL)
self.emit('stop')
def speak(self, pitch, rate, voice_name, text):
# TODO workaround for http://bugs.sugarlabs.org/ticket/1801
if not [i for i in text if i.isalnum()]:
return
self.make_pipeline('espeak name=espeak ! autoaudiosink')
src = self._pipeline.get_by_name('espeak')
src.props.text = text
src.props.pitch = pitch
src.props.rate = rate
src.props.voice = voice_name
src.props.track = 2 # track for marks
self.restart_sound_device()
def get_all_voices(self):
all_voices = {}
for voice in Gst.ElementFactory.make('espeak', None).props.voices:
name, language, dialect = voice
if dialect != 'none':
all_voices[language + '_' + dialect] = name
else:
all_voices[language] = name
return all_voices
def get_default_voice(self):
"""Try to figure out the default voice, from the current locale ($LANG)
Fall back to espeak's voice called Default."""
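        # e.g. LANG=es_AR.UTF-8 -> language_location 'es_ar', language 'es';
        # not 'es_es', so the lookup falls back to the latin voice 'es_la'.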
voices = self.get_all_voices()
locale = os.environ.get('LANG', '')
language_location = locale.split('.', 1)[0].lower()
language = language_location.split('_')[0]
# if the language is es but not es_es default to es_la (latin voice)
if language == 'es' and language_location != 'es_es':
language_location = 'es_la'
best = voices.get(language_location) or voices.get(language) \
or 'default'
logging.debug('Best voice for LANG %s seems to be %s',
locale, best)
return best
def get_speech_manager():
global _speech_manager
if _speech_manager is None:
_speech_manager = SpeechManager()
return _speech_manager
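# Usage sketch (assumes a running GLib/GTK main loop for the GStreamer bus):
#
#     manager = get_speech_manager()
#     manager.say_text('hello world')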
|
gusDuarte/sugar
|
src/jarabe/model/speech.py
|
Python
|
gpl-2.0
| 7,214 | 0.000832 |
"""
PyLEMS API module.
:author: Gautham Ganapathy
:organization: LEMS (https://github.com/organizations/LEMS)
"""
from lems.model.fundamental import *
from lems.model.structure import *
from lems.model.dynamics import *
from lems.model.simulation import *
from lems.model.component import *
from lems.model.model import Model
|
LEMS/pylems
|
lems/api.py
|
Python
|
lgpl-3.0
| 328 | 0 |
import argparse, collections, configparser, json, math, mysql.connector as sql, praw, os, requests, sys, time
from datetime import datetime
from pprint import pprint
from mysql.connector import errorcode
from requests import HTTPError
from requests import ConnectionError
from fcntl import flock, LOCK_EX, LOCK_NB
# Print strings in verbose mode
def verbose(info) :
if args.verbose:
printUTF8(info)
def printUTF8(info) :
print(info.encode('ascii', 'replace').decode())
# Connect to MySQL using config entries
def connect() :
db_params = {
'user' : config["MySQL"]["user"],
'password' : config["MySQL"]["password"],
'host' : config["MySQL"]["host"],
'port' : int(config["MySQL"]["port"]),
'database' : config["MySQL"]['database'],
'charset' : 'utf8',
'collation' : 'utf8_general_ci',
'buffered' : True
}
return sql.connect(**db_params)
# Get all jobs from the database
def getJobs(conn) :
cursor = conn.cursor()
query = ("SELECT job_id, zombie_head, state, query, description, submission_cooldown_seconds \
FROM job \
WHERE job.state > 0 AND zombie_head = %s \
ORDER BY job_id")
cursor.execute(query,[args.head])
return cursor
# Perform search
def search(r, query) :
# Attempt to reach Reddit
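    # with exponential backoff: sleeps 1s, 2s, then 4s after each failed try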
attempt = 1
while attempt <= 3 :
try :
submissions = list(r.search(query, limit=None))
return submissions
except (ConnectionError, HTTPError) as err :
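            # Exponential backoff between retries: sleep 1s, then 2s, then 4s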
sleep_time = 2**(attempt - 1)
verbose("Connection attempt " + str(attempt) + " failed. "
"Sleeping for " + str(sleep_time) + " second(s).")
time.sleep(sleep_time)
attempt = attempt + 1
print("***** Error: Unable to query Reddit. Terminating.")
sys.exit(1)
# Replace 'MoreComments object'
def getComments(comment) :
attempt = 1
while attempt <= 3 :
try :
comments = comment.comments(update=False)
return comments
except (ConnectionError, HTTPError) as err :
sleep_time = 2**(attempt - 1)
verbose("Connection attempt " + str(attempt) + " failed. "
"Sleeping for " + str(sleep_time) + " second(s).")
time.sleep(sleep_time)
attempt = attempt + 1
except (AttributeError, TypeError) :
return None
return None
# Add a submission to the DB
def addSubmission(conn, job_id, submission) :
cursor = conn.cursor()
query = "REPLACE INTO submission (job_id, submission_id, subreddit_id, " \
"subreddit, title, author, url, permalink, thumbnail, name, selftext, " \
"over_18, is_self, created_utc, num_comments, ups, downs, score) VALUES " \
"(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) "
values = [
job_id,
submission.id,
submission.subreddit_id,
submission.subreddit.display_name,
submission.title,
submission.author.name,
submission.url,
submission.permalink,
submission.thumbnail,
submission.name,
submission.selftext,
submission.over_18,
submission.is_self,
datetime.fromtimestamp(submission.created_utc).strftime('%Y-%m-%d %H:%M:%S'),
submission.num_comments,
submission.ups,
submission.downs,
submission.score
]
try :
cursor.execute(query, values)
conn.commit()
return True
except sql.Error as err :
verbose("")
verbose(">>>> Warning: Could not add Submission: " + str(err))
verbose(" Query: " + cursor.statement)
return False
finally :
cursor.close()
# Add an entry to the submission score history
def addSubmissionScoreHistory(conn, job_id, submission) :
cursor = conn.cursor()
query = "INSERT INTO submission_score_history (job_id, submission_id, timestamp, ups, " \
"downs, score) VALUES (%s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE job_id=job_id"
values = [
job_id,
submission.id,
datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
submission.ups,
submission.downs,
submission.score
]
try :
cursor.execute(query, values)
conn.commit()
except sql.Error as err :
verbose("")
verbose(">>>> Warning: Could not add Submission score history: " + str(err))
verbose(" Query: " + cursor.statement)
finally :
cursor.close()
# Get the submission's last run time
def getSubmissionRunTime(conn, job_id, submission_id) :
cursor = conn.cursor()
query = "SELECT last_run FROM submission WHERE job_id=%s AND submission_id=%s LIMIT 1"
values = [
job_id,
submission_id
]
try :
cursor.execute(query, values)
for(last_run) in cursor :
if (last_run[0] is not None) :
return last_run[0]
return -1
except sql.Error as err :
verbose(">>>> Warning: Could not get the submission last run time: " + str(err))
verbose(" Query: " + cursor.statement)
finally:
cursor.close()
# Update the submission's last run time
def updateSubmissionRunTime(conn, job_id, submission_id) :
cursor = conn.cursor()
query = "UPDATE submission SET last_run=%s WHERE job_id=%s AND submission_id=%s"
values = [
datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
job_id,
submission_id
]
try :
cursor.execute(query, values)
conn.commit()
except sql.Error as err :
verbose(">>>> Warning: Could not update submission run time: " + str(err))
verbose(" Query: " + cursor.statement)
finally:
cursor.close()
# Add a comment to the DB
def addComment(conn, job_id, submission_id, comment) :
cursor = conn.cursor()
query = "REPLACE INTO comment (job_id, submission_id, comment_id, " \
"parent_id, author, body, created_utc, ups, downs) VALUES " \
"(%s, %s, %s, %s, %s, %s, %s, %s, %s) "
values = [
job_id,
submission_id,
comment.id,
comment.parent_id,
None if comment.author is None else comment.author.name,
comment.body,
datetime.fromtimestamp(comment.created_utc).strftime('%Y-%m-%d %H:%M:%S'),
comment.ups,
comment.downs
]
try :
cursor.execute(query, values)
conn.commit()
return True
except sql.Error as err :
verbose("")
verbose(">>>> Warning: Could not add Comment: " + str(err))
verbose(" Query: " + cursor.statement)
return False
finally :
cursor.close()
# Add an entry to the comment score history
def addCommentScoreHistory(conn, job_id, comment) :
cursor = conn.cursor()
query = "INSERT INTO comment_score_history (job_id, comment_id, timestamp, ups, " \
"downs) VALUES (%s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE job_id=job_id"
values = [
job_id,
comment.id,
datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
comment.ups,
comment.downs
]
try :
cursor.execute(query, values)
conn.commit()
except sql.Error as err :
verbose("")
verbose(">>>> Warning: Could not add Submission score history: " + str(err))
verbose(" Query: " + cursor.statement)
finally :
cursor.close()
# Add an entry into the job history table
def addJobHistory(conn, job_id, success, total_results = 0) :
    return  # NOTE: this early return disables history logging; everything below is unreachable
cursor = conn.cursor()
query = "INSERT INTO job_history (job_id, timestamp, status, total_results) " \
"VALUES(%s, %s, %s, %s, %s)"
values = [
job_id,
datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
"success" if success else "failure",
total_results
]
try :
cursor.execute(query, values)
conn.commit()
except sql.Error as err :
verbose(">>>> Warning: Could not add job_history entry: " + str(err))
verbose(" Query: " + cursor.statement)
finally:
cursor.close()
# Update the stored job's last run time and total results
def updateJobStats(conn, job_id, total_results) :
cursor = conn.cursor()
query = "UPDATE job SET last_count=%s, last_run=%s WHERE job_id=%s"
values = [
total_results,
datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
job_id
]
try :
cursor.execute(query, values)
conn.commit()
except sql.Error as err :
verbose(">>>> Warning: Could not update job: " + str(err))
verbose(" Query: " + cursor.statement)
finally:
cursor.close()
# Walk the entire comment tree iteratively, expanding MoreComments placeholders
def parseCommentTree(conn, job_id, submission_id, comment) :
global submission_count, submission_total, comment_count, comment_total
queue = collections.deque()
queue.append(comment)
while len(queue) > 0:
        node = queue.popleft()
        if isinstance(node, praw.objects.MoreComments) :
            more_comments = getComments(node)
            if more_comments is not None :
                queue.extendleft(more_comments)
        else :
            success = addComment(conn, job_id, submission_id, node)
            if success :
                comment_count = comment_count + 1
                # Show status logging
                if args.verbose :
                    sys.stdout.write("\rProgress: Submission: {}/{}, Comment: {}/{}".format(submission_count, submission_total, comment_count, comment_total))
                addCommentScoreHistory(conn, job_id, node)
            queue.extend(node.replies)
# Main function
if __name__ == '__main__' :
# Handle command line arguments
parser = argparse.ArgumentParser(description="A Reddit variation of TwitterGoggles")
parser.add_argument('head', type=int, help="Specify the head #")
    parser.add_argument('-v','--verbose', default=False, action="store_true", help="Show additional logs")
parser.add_argument('-d','--delay', type=int, default=0, help="Delay execution by DELAY seconds")
args = parser.parse_args()
# Handle config settings
config = configparser.ConfigParser()
script_dir = os.path.dirname(__file__)
config_file = os.path.join(script_dir, 'config/settings.cfg')
config.read(config_file)
# Handle file locking
lock = open(config["Misc"]["lockfile"], 'a')
try :
flock(lock, LOCK_EX | LOCK_NB)
except IOError :
print("Unable to lock file", config["Misc"]["lockfile"] + ".","Terminating.")
sys.exit(1)
# Display startup info
print("vvvvv Start:", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
verbose("Verbose Mode: Enabled")
print("Head:", args.head)
print("Delay:", args.delay)
if (args.delay > 0) :
time.sleep(args.delay)
print("Connecting to database...")
try :
run_total_count = 0
conn = connect()
print("Connected")
# Get all of the jobs for this head
jobs = getJobs(conn)
if not jobs.rowcount :
print("\nUnable to find any jobs to run. Please make sure there are entries in the 'job'"
+ " table, that their 'zombie_head' value matches {}, and the 'state' value is greater"
+ " than 0.\n".format(args.head))
# Initialize the Reddit wrapper
r = praw.Reddit(user_agent = config["Reddit"]["user-agent"])
# Iterate over all of the jobs found
for (job_id, zombie_head, state, query, description, submission_cooldown_seconds) in jobs :
printUTF8("+++++ Job ID:" + str(job_id) + "\tQuery:" + query + "\tDescription:" + description)
submissions = search(r, query)
submission_count = 0
submission_total = len(submissions)
for submission in submissions :
last_run = getSubmissionRunTime(conn, job_id, submission.id)
if (last_run != -1 and (datetime.now() - last_run).total_seconds() < submission_cooldown_seconds) :
print("Skipping submission id", submission.id, "because it has been parsed in the past", submission_cooldown_seconds, "second(s).")
submission_count = submission_count + 1
continue
comment_count = 0
# Insert the submission in the DB
success = addSubmission(conn, job_id, submission)
submission_count = submission_count + 1
comment_total = submission.num_comments
if success :
addSubmissionScoreHistory(conn, job_id, submission)
for comment in submission.comments :
parseCommentTree(conn, job_id, submission.id, comment)
updateSubmissionRunTime(conn, job_id, submission.id)
addJobHistory(conn, job_id, True, submission_total)
updateJobStats(conn, job_id, submission_total)
verbose("")
print("Total Results:", submission_total)
run_total_count = run_total_count + submission_total
except sql.Error as err :
print(err)
print("Terminating.")
sys.exit(1)
else :
conn.close()
finally :
print("$$$$$ Run total count: " + str(run_total_count))
print("^^^^^ Stop:", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
|
pmaconi/RedditGoggles
|
reddit-goggles.py
|
Python
|
mit
| 11,792 | 0.037907 |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'', include('social.apps.django_app.urls', namespace='social')),
# API
url(r'^usernames/(?P<username>\w+)/$','api.views.username_availability',name='username_availability'),
url(r'^users/social-data/(?P<backend>\w+)/$','api.views.fetch_social_data',name='fetch_social_data'),
url(r'^users/refresh-social-data/(?P<backend>\w+)/$','api.views.refresh_social_data',name='refresh_social_data'),
url(r'^users/save-data/(?P<resumeId>\d+)/$','api.views.save_data'),
url(r'^users/get-data/(?P<resumeId>\d+)/$','api.views.get_resume_data'),
url(r'^user/get-all-cv/$','dashboard.views.get_all_resumes'),
url(r'^user/dashboard/$','dashboard.views.show_dashboard'),
url(r'^user/create-new-cv/$','dashboard.views.create_new_resume'),
# test api
url(r'^fb-graph-test/$','api.views.fb_graph_test'),
url(r'^github-api-test/$','api.views.github_api_test'),
url(r'^linkedin-api-test/$','api.views.linkedin_api_test'),
    # forgot password implementation
url(r'^forgot-password/$','api.views.password_reset_middleware', name='forgot_password'),
url(r'^users/password/reset/$', 'django.contrib.auth.views.password_reset',
{'post_reset_redirect' : '/users/password/reset/done/'}),
url(r'^users/password/reset/done/$', 'django.contrib.auth.views.password_reset_done'),
url(r'^users/password/reset/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', 'django.contrib.auth.views.password_reset_confirm',
{'post_reset_redirect' : '/users/password/done/'}),
url(r'^users/password/done/$', 'django.contrib.auth.views.password_reset_complete'),
url(r'^$', 'api.views.home',name='home'),
url(r'^signup/(?P<backend>[^/]+)/$', 'api.views.signup', name='signup'),
url(r'^signup/$' , RedirectView.as_view(url='/signup/username/')),
url(r'^email-sent/', 'api.views.validation_sent'),
url(r'^resumizr-login/(?P<backend>[^/]+)/$', 'api.views.username_login', name='username_login'),
url(r'^login/$','api.views.login', name='login'),
url(r'^logout/$','api.views.logout', name='logout'),
url(r'^app/$','api.views.app',name='app'),
url(r'^admin/', include(admin.site.urls)),
url(r'^generate/cvform/(?P<resumeNum>\d+)/$','api.views.generateForm', name='generateform'),
url(r'^write/cv_to_pdf/$','pdfconvertor.views.writepdf', name='writepdf'),
url(r'^preview/cv/$','api.views.previewCv', name='preview'),
url(r'^landing_page/','api.views.landing_page', name='landing_page'),
)
#development media server
if settings.DEBUG:
urlpatterns += patterns(
'django.views.static',
(r'media/(?P<path>.*)',
'serve',
{'document_root': settings.MEDIA_ROOT}), )
|
psych0der/resumizr
|
resumizr/urls.py
|
Python
|
mit
| 3,010 | 0.01495 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import six
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
class Slider( GafferUI.Widget ) :
ValueChangedReason = IECore.Enum.create( "Invalid", "SetValues", "Click", "IndexAdded", "IndexRemoved", "DragBegin", "DragMove", "DragEnd", "Increment" )
# The min and max arguments define the numeric values at the ends of the slider.
# By default, values outside this range will be clamped, but hardMin and hardMax
# may be specified to move the point at which the clamping happens outside of the
# slider itself.
#
# A single slider may show more than one value. Multiple values may be specified
# by passing a list to the `values` argument, or calling `setValues()` after
# construction.
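	# Illustrative usage (the values below are assumed, not taken from the
	# original documentation) :
	#
	#   slider = Slider( values = [ 0.25, 0.75 ], min = 0, max = 1, hardMin = -1, hardMax = 2 )
	#   slider.setValues( [ 0.1, 0.9 ] )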
def __init__( self, values=0.5, min=0, max=1, hardMin=None, hardMax=None, **kw ) :
if "value" in kw :
# Backwards compatibility with old `value` argument
assert( values == 0.5 )
values = kw["value"]
del kw["value"]
GafferUI.Widget.__init__( self, _Widget(), **kw )
self.__min = min
self.__max = max
self.__hardMin = hardMin if hardMin is not None else self.__min
self.__hardMax = hardMax if hardMax is not None else self.__max
self.__selectedIndex = None
self.__sizeEditable = False
self.__minimumSize = 1
self.__increment = None
self.__snapIncrement = None
self.__hoverPositionVisible = False
self.__hoverEvent = None # The mouseMove event that gives us hover status
self.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ), scoped = False )
self.mouseMoveSignal().connect( Gaffer.WeakMethod( self.__mouseMove ), scoped = False )
self.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ), scoped = False )
self.dragBeginSignal().connect( Gaffer.WeakMethod( self.__dragBegin ), scoped = False )
self.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ), scoped = False )
self.dragMoveSignal().connect( Gaffer.WeakMethod( self.__dragMove ), scoped = False )
self.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ), scoped = False )
self.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ), scoped = False )
self.__values = []
if isinstance( values, ( six.integer_types, float ) ) :
self.__setValuesInternal( [ values ], self.ValueChangedReason.SetValues )
else :
self.__setValuesInternal( values, self.ValueChangedReason.SetValues )
## Convenience function to call setValues( [ value ] )
def setValue( self, value ) :
self.setValues( [ value ] )
## Convenience function returning getValues()[0] if there
# is only one value, and raising ValueError if not.
def getValue( self ) :
if len( self.__values ) != 1 :
raise ValueError
return self.__values[0]
def setValues( self, values ) :
self.__setValuesInternal( values, self.ValueChangedReason.SetValues )
def getValues( self ) :
return self.__values
## A signal emitted whenever a value has been changed. Slots should
# have the signature slot( Slider, ValueChangedReason ).
def valueChangedSignal( self ) :
try :
return self.__valueChangedSignal
		except AttributeError :
self.__valueChangedSignal = Gaffer.Signals.Signal2()
return self.__valueChangedSignal
## Returns True if a user would expect the specified sequence
# of changes to be merged into one undoable event.
@classmethod
def changesShouldBeMerged( cls, firstReason, secondReason ) :
if type( firstReason ) != type( secondReason ) :
return False
return ( firstReason, secondReason ) in (
# click and drag
( cls.ValueChangedReason.Click, cls.ValueChangedReason.DragBegin ),
( cls.ValueChangedReason.DragBegin, cls.ValueChangedReason.DragMove ),
( cls.ValueChangedReason.DragMove, cls.ValueChangedReason.DragMove ),
( cls.ValueChangedReason.DragMove, cls.ValueChangedReason.DragEnd ),
# increment
( cls.ValueChangedReason.Increment, cls.ValueChangedReason.Increment ),
)
def setRange( self, min, max, hardMin=None, hardMax=None ) :
if hardMin is None :
hardMin = min
if hardMax is None :
hardMax = max
if min==self.__min and max==self.__max and hardMin==self.__hardMin and hardMax==self.__hardMax :
return
self.__min = min
self.__max = max
self.__hardMin = hardMin
self.__hardMax = hardMax
self.__setValuesInternal( self.__values, self.ValueChangedReason.Invalid ) # reclamps the values to the range if necessary
self._qtWidget().update()
def getRange( self ) :
return self.__min, self.__max, self.__hardMin, self.__hardMax
def indexRemovedSignal( self ) :
signal = getattr( self, "_indexRemovedSignal", None )
if signal is None :
signal = GafferUI.WidgetEventSignal()
self._indexRemovedSignal = signal
return signal
def setSelectedIndex( self, index ) :
if self.__selectedIndex == index :
return
if index is not None :
if not len( self.__values ) or index < 0 or index >= len( self.__values ) :
raise IndexError
self.__selectedIndex = index
self._qtWidget().update()
signal = getattr( self, "_selectedIndexChangedSignal", None )
if signal is not None :
signal( self )
## May return None to indicate that no index is selected.
def getSelectedIndex( self ) :
return self.__selectedIndex
def selectedIndexChangedSignal( self ) :
signal = getattr( self, "_selectedIndexChangedSignal", None )
if signal is None :
signal = GafferUI.WidgetSignal()
self._selectedIndexChangedSignal = signal
return signal
## Determines whether or not values may be added/removed
def setSizeEditable( self, editable ) :
self.__sizeEditable = editable
def getSizeEditable( self ) :
return self.__sizeEditable
## Sets a size after which no more values can
# be removed.
def setMinimumSize( self, minimumSize ) :
self.__minimumSize = minimumSize
def getMinimumSize( self ) :
return self.__minimumSize
## Sets the value increment added/subtracted
# when using the cursor keys. The default value of None
# uses an increment equivalent to the size of one pixel at
# the current slider size. An increment of 0 can be specified
# to disable the behaviour entirely.
def setIncrement( self, increment ) :
self.__increment = increment
def getIncrement( self ) :
return self.__increment
## Sets the increment used for snapping values generated
# by interactions such as drags and button presses. Snapping
	# can be ignored by holding the `Ctrl` modifier.
def setSnapIncrement( self, increment ) :
self.__snapIncrement = increment
def getSnapIncrement( self ) :
return self.__snapIncrement
def setHoverPositionVisible( self, visible ) :
self.__hoverPositionVisible = visible
def getHoverPositionVisible( self ) :
return self.__hoverPositionVisible
## May be overridden by derived classes to customise
# the drawing of the background.
def _drawBackground( self, painter ) :
size = self.size()
valueRange = self.__max - self.__min
if valueRange == 0 :
return
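		# Round the ideal tick step down to a power of ten; `blend` (the
		# fractional part of log10 of the ideal step) is used below to fade
		# minor tick heights in and out as the spacing changes.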
idealSpacing = 10
idealNumTicks = float( size.x ) / idealSpacing
tickStep = valueRange / idealNumTicks
logTickStep = math.log10( tickStep )
flooredLogTickStep = math.floor( logTickStep )
tickStep = math.pow( 10, flooredLogTickStep )
blend = (logTickStep - flooredLogTickStep)
tickValue = math.floor( self.__min / tickStep ) * tickStep
i = 0
while tickValue <= self.__max :
x = size.x * ( tickValue - self.__min ) / valueRange
if i % 100 == 0 :
height0 = height1 = 0.75
alpha0 = alpha1 = 1
elif i % 50 == 0 :
height0 = 0.75
height1 = 0.5
alpha0 = alpha1 = 1
elif i % 10 == 0 :
height0 = 0.75
height1 = 0.25
alpha0 = alpha1 = 1
elif i % 5 == 0 :
height0 = 0.5
height1 = 0
alpha0 = 1
alpha1 = 0
else :
height0 = 0.25
height1 = 0
alpha0 = 1
alpha1 = 0
alpha = alpha0 + (alpha1 - alpha0) * blend
height = height0 + (height1 - height0) * blend
pen = QtGui.QPen()
pen.setWidth( 0 )
pen.setColor( QtGui.QColor( 0, 0, 0, alpha * 255 ) )
painter.setPen( pen )
painter.drawLine( x, size.y, x, size.y * ( 1 - height ) )
tickValue += tickStep
i += 1
## May be overridden by derived classes to customise the
# drawing of the value indicator.
#
# `value` : The value itself.
# `position` : The widget-relative position where the
# indicator should be drawn.
# `state` : A GafferUI.Style.State. DisabledState is used
# to draw hover indicators, since there is
# currently no dedicated state for this purpose.
def _drawValue( self, painter, value, position, state ) :
size = self.size()
pen = QtGui.QPen( QtGui.QColor( 0, 0, 0, 255 ) )
pen.setWidth( 1 )
painter.setPen( pen )
if state == state.NormalState :
color = QtGui.QColor( 128, 128, 128, 255 )
else :
color = QtGui.QColor( 119, 156, 255, 255 )
painter.setBrush( QtGui.QBrush( color ) )
if state == state.DisabledState :
painter.setOpacity( 0.5 )
if position < 0 :
painter.drawPolygon(
QtGui.QPolygonF(
[
QtCore.QPointF( 8, 4 ),
QtCore.QPointF( 8, size.y - 4 ),
QtCore.QPointF( 2, size.y / 2 ),
]
)
)
elif position > size.x :
painter.drawPolygon(
QtGui.QPolygonF(
[
QtCore.QPointF( size.x - 8, 4 ),
QtCore.QPointF( size.x - 8, size.y - 4 ),
QtCore.QPointF( size.x - 2, size.y / 2 ),
]
)
)
else :
painter.drawEllipse( QtCore.QPoint( position, size.y / 2 ), size.y / 4, size.y / 4 )
def __indexUnderMouse( self ) :
size = self.size()
mousePosition = GafferUI.Widget.mousePosition( relativeTo = self ).x
result = None
for i, v in enumerate( self.__values ) :
# clamp value inside range so we can select
# handles representing points outside the widget.
v = max( min( v, self.__max ), self.__min )
dist = math.fabs( mousePosition - self.__valueToPosition( v ) )
if result is None or dist < minDist :
result = i
minDist = dist
		if result is None :
			# No values at all, so nothing can be under the mouse.
			return None
		if not self.getSizeEditable() :
# when the size isn't editable, we consider the closest
# position to be under the mouse, this makes it easy
# to just click anywhere to move the closest point.
return result
else :
# but when the size is editable, we consider points to
# be under the mouse when they genuinely are beneath it,
# so that clicks elsewhere can add points.
if minDist < size.y / 2.0 :
return result
else :
return None
def __leave( self, widget ) :
self.__hoverEvent = None
self._qtWidget().update()
def __mouseMove( self, widget, event ) :
if not event.buttons :
self.__hoverEvent = event
self._qtWidget().update()
def __buttonPress( self, widget, event ) :
if event.buttons != GafferUI.ButtonEvent.Buttons.Left :
return
index = self.__indexUnderMouse()
if index is not None :
self.setSelectedIndex( index )
if len( self.getValues() ) == 1 :
self.__setValueInternal( index, self.__eventValue( event ), self.ValueChangedReason.Click )
elif self.getSizeEditable() :
values = self.getValues()[:]
values.append( self.__eventValue( event ) )
self.__setValuesInternal( values, self.ValueChangedReason.IndexAdded )
self.setSelectedIndex( len( self.getValues() ) - 1 )
# Clear hover so we don't draw hover state on top
# of a just-clicked value or during drags.
self.__hoverEvent = None
self._qtWidget().update()
return True
def __dragBegin( self, widget, event ) :
if event.buttons == GafferUI.ButtonEvent.Buttons.Left and self.getSelectedIndex() is not None :
return IECore.NullObject.defaultNullObject()
return None
def __dragEnter( self, widget, event ) :
if event.sourceWidget is self :
return True
return False
def __dragMove( self, widget, event ) :
self.__setValueInternal(
self.getSelectedIndex(),
self.__eventValue( event ),
self.ValueChangedReason.DragMove
)
def __dragEnd( self, widget, event ) :
self.__dragMove( widget, event )
def __keyPress( self, widget, event ) :
if self.getSelectedIndex() is None :
return False
if event.key in ( "Left", "Right", "Up", "Down" ) :
if self.__increment == 0 :
return False
if self.__increment is None :
increment = ( self.__max - self.__min ) / float( self.size().x )
else :
increment = self.__increment
x = self.getValues()[self.getSelectedIndex()]
x += increment if event.key in ( "Right", "Up" ) else -increment
if not (event.modifiers & event.modifiers.Shift ) :
x = max( self.__min, min( self.__max, x ) )
self.__setValueInternal(
self.getSelectedIndex(), x,
self.ValueChangedReason.Increment,
)
return True
elif event.key in ( "Backspace", "Delete" ) :
index = self.getSelectedIndex()
if index is not None and self.getSizeEditable() and len( self.getValues() ) > self.getMinimumSize() :
del self.__values[index]
signal = getattr( self, "_indexRemovedSignal", None )
if signal is not None :
signal( self, index )
self.__emitValueChanged( self.ValueChangedReason.IndexRemoved )
self._qtWidget().update()
return True
return False
def __setValueInternal( self, index, value, reason ) :
values = self.getValues()[:]
values[index] = value
self.__setValuesInternal( values, reason )
def __setValuesInternal( self, values, reason ) :
# We _always_ clamp to the hard min and max, as those are not optional.
# Optional clamping to soft min and max is performed before calling this
# function, typically in `__eventValue()`.
values = [ max( self.__hardMin, min( self.__hardMax, x ) ) for x in values ]
dragBeginOrEnd = reason in ( self.ValueChangedReason.DragBegin, self.ValueChangedReason.DragEnd )
if values == self.__values and not dragBeginOrEnd :
# early out if the values haven't changed, but not if the
# reason is either end of a drag - we always signal those so
# that they will always come in matching pairs.
return
self.__values = values
self._qtWidget().update()
self.__emitValueChanged( reason )
def __emitValueChanged( self, reason ) :
try :
signal = self.__valueChangedSignal
		except AttributeError :
return
signal( self, reason )
def __eventValue( self, event ) :
f = event.line.p0.x / float( self.size().x )
value = self.__min + ( self.__max - self.__min ) * f
if not (event.modifiers & event.modifiers.Shift) :
# Clamp
value = max( self.__min, min( self.__max, value ) )
if self.__snapIncrement and not (event.modifiers & GafferUI.ModifiableEvent.Modifiers.Control) :
# Snap
value = self.__snapIncrement * round( value / self.__snapIncrement )
return value
def __valueToPosition( self, value ) :
r = self.__max - self.__min
f = ( ( value - self.__min ) / r ) if r != 0 else 0
return f * self.size().x
def __draw( self, painter ) :
self._drawBackground( painter )
indexUnderMouse = self.__indexUnderMouse()
for index, value in enumerate( self.getValues() ) :
self._drawValue(
painter,
value,
self.__valueToPosition( value ),
GafferUI.Style.State.HighlightedState if index == indexUnderMouse or index == self.getSelectedIndex()
else GafferUI.Style.State.NormalState
)
if self.__hoverEvent is not None :
if (
self.getHoverPositionVisible() or
( self.getSizeEditable() and indexUnderMouse is None )
) :
self._drawValue(
painter,
self.__eventValue( self.__hoverEvent ),
self.__valueToPosition( self.__eventValue( self.__hoverEvent ) ),
state = GafferUI.Style.State.DisabledState
)
class _Widget( QtWidgets.QWidget ) :
def __init__( self, parent=None ) :
QtWidgets.QWidget.__init__( self, parent )
self.setSizePolicy( QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed ) )
self.setFocusPolicy( QtCore.Qt.ClickFocus )
def sizeHint( self ) :
return QtCore.QSize( 200, 18 )
def paintEvent( self, event ) :
owner = GafferUI.Widget._owner( self )
painter = QtGui.QPainter( self )
painter.setRenderHint( QtGui.QPainter.Antialiasing )
owner._Slider__draw( painter )
def event( self, event ) :
if event.type() == event.ShortcutOverride :
if event.key() in ( QtCore.Qt.Key_Delete, QtCore.Qt.Key_Backspace ) :
event.accept()
return True
if event.key() in ( QtCore.Qt.Key_Up, QtCore.Qt.Key_Down, QtCore.Qt.Key_Left, QtCore.Qt.Key_Right ) :
if GafferUI.Widget._owner( self ).getIncrement() != 0 :
event.accept()
return True
return QtWidgets.QWidget.event( self, event )
|
hradec/gaffer
|
python/GafferUI/Slider.py
|
Python
|
bsd-3-clause
| 18,399 | 0.051796 |
from unittest import TestCase
from paramiko import SSHException
from pyinfra.api import Config, State
from pyinfra.api.connect import connect_all
from pyinfra.api.exceptions import NoGroupError, NoHostError, PyinfraError
from ..paramiko_util import PatchSSHTestCase
from ..util import make_inventory
class TestInventoryApi(TestCase):
def test_inventory_creation(self):
inventory = make_inventory()
# Check length
assert len(inventory.hosts) == 2
# Get a host
host = inventory.get_host('somehost')
assert host.data.ssh_user == 'vagrant'
# Check our group data
assert inventory.get_group_data('test_group') == {
'group_data': 'hello world',
}
def test_tuple_host_group_inventory_creation(self):
inventory = make_inventory(
hosts=[
('somehost', {'some_data': 'hello'}),
],
tuple_group=([
('somehost', {'another_data': 'world'}),
], {
'tuple_group_data': 'word',
}),
)
# Check host data
host = inventory.get_host('somehost')
assert host.data.some_data == 'hello'
assert host.data.another_data == 'world'
# Check group data
assert host.data.tuple_group_data == 'word'
def test_host_and_group_errors(self):
inventory = make_inventory()
with self.assertRaises(NoHostError):
inventory.get_host('i-dont-exist')
with self.assertRaises(NoGroupError):
inventory.get_group('i-dont-exist')
class TestStateApi(PatchSSHTestCase):
def test_fail_percent(self):
inventory = make_inventory((
'somehost',
('thinghost', {'ssh_hostname': SSHException}),
'anotherhost',
))
state = State(inventory, Config(FAIL_PERCENT=1))
# Ensure we would fail at this point
with self.assertRaises(PyinfraError) as context:
connect_all(state)
assert context.exception.args[0] == 'Over 1% of hosts failed (33%)'
# Ensure the other two did connect
assert len(state.active_hosts) == 2
|
Fizzadar/pyinfra
|
tests/test_api/test_api.py
|
Python
|
mit
| 2,193 | 0 |
"""
Copyright 2016 Rasmus Larsen
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE.txt file for details.
"""
import sys
import time
from sacred import Experiment
from core.ALEEmulator import ALEEmulator
from dqn.Agent import Agent
from dqn.DoubleDQN import DoubleDQN
ex = Experiment('double-dqn')
@ex.config
def net_config():
conv_layers = 3
conv_units = [32, 64, 64]
filter_sizes = [8, 4, 3]
strides = [4, 2, 1]
state_frames = 4
fc_layers = 1
fc_units = [512]
in_width = 84
in_height = 84
discount = 0.99
device = '/gpu:0'
lr = 0.00025
opt_decay = 0.95
momentum = 0.0
opt_eps = 0.01
target_sync = 1e4
clip_delta = 1.0
tensorboard = False
tensorboard_freq = 50
@ex.config
def emu_config():
rom_path = '../ale-git/roms/'
rom_name = 'breakout'
display_screen = True
frame_skip = 4
repeat_prob = 0.0
color_avg = True
random_seed = 42
random_start = 30
@ex.config
def agent_config():
hist_size = 1e5
eps = 1.0
eps_min = 0.1
eps_decay = (eps - eps_min) / 1e6
batch_size = 32
train_start = 5e3
train_frames = 5e6
test_freq = 5e4
test_frames = 5e3
update_freq = 4
@ex.command
def test(_config):
emu = ALEEmulator(_config)
_config['num_actions'] = emu.num_actions
net = DoubleDQN(_config)
net.load(_config['rom_name'])
agent = Agent(emu, net, _config)
agent.next(0) # put a frame into the replay memory, TODO: should not be necessary
agent.test()
@ex.automain
def main(_config, _log):
sys.stdout = open('log_' + _config['rom_name'] + time.strftime('%H%M%d%m', time.gmtime()), 'w', buffering=True)
print "#{}".format(_config)
emu = ALEEmulator(_config)
_config['num_actions'] = emu.num_actions
net = DoubleDQN(_config)
agent = Agent(emu, net, _config)
agent.train()
|
rlrs/deep-rl
|
run_double.py
|
Python
|
mit
| 1,928 | 0.001556 |
import gtk
from plugin_base.find_extension import FindExtension
class SizeFindFiles(FindExtension):
"""Size extension for find files tool"""
def __init__(self, parent):
FindExtension.__init__(self, parent)
# create container
table = gtk.Table(2, 4, False)
table.set_border_width(5)
table.set_col_spacings(5)
# create interface
self._adjustment_max = gtk.Adjustment(value=50.0, lower=0.0, upper=100000.0, step_incr=0.1, page_incr=10.0)
self._adjustment_min = gtk.Adjustment(value=0.0, lower=0.0, upper=10.0, step_incr=0.1, page_incr=10.0)
label = gtk.Label('<b>{0}</b>'.format(_('Match file size')))
label.set_alignment(0.0, 0.5)
label.set_use_markup(True)
label_min = gtk.Label(_('Minimum:'))
label_min.set_alignment(0, 0.5)
label_min_unit = gtk.Label(_('MB'))
label_max = gtk.Label(_('Maximum:'))
label_max.set_alignment(0, 0.5)
label_max_unit = gtk.Label(_('MB'))
self._entry_max = gtk.SpinButton(adjustment=self._adjustment_max, digits=2)
self._entry_min = gtk.SpinButton(adjustment=self._adjustment_min, digits=2)
self._entry_max.connect('value-changed', self._max_value_changed)
self._entry_min.connect('value-changed', self._min_value_changed)
self._entry_max.connect('activate', self._parent.find_files)
self._entry_min.connect('activate', lambda entry: self._entry_max.grab_focus())
# pack interface
table.attach(label, 0, 3, 0, 1, xoptions=gtk.FILL)
table.attach(label_min, 0, 1, 1, 2, xoptions=gtk.FILL)
table.attach(self._entry_min, 1, 2, 1, 2, xoptions=gtk.FILL)
table.attach(label_min_unit, 2, 3, 1, 2, xoptions=gtk.FILL)
table.attach(label_max, 0, 1, 2, 3, xoptions=gtk.FILL)
table.attach(self._entry_max, 1, 2, 2, 3, xoptions=gtk.FILL)
table.attach(label_max_unit, 2, 3, 2, 3, xoptions=gtk.FILL)
self.vbox.pack_start(table, False, False, 0)
def _max_value_changed(self, entry):
"""Assign value to adjustment handler"""
self._adjustment_min.set_upper(entry.get_value())
def _min_value_changed(self, entry):
"""Assign value to adjustment handler"""
self._adjustment_max.set_lower(entry.get_value())
def get_title(self):
"""Return i18n title for extension"""
return _('Size')
def is_path_ok(self, path):
"""Check is specified path fits the cirteria"""
size = self._parent._provider.get_stat(path).size
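		# The spin buttons hold megabytes; convert to bytes (1 MB = 1048576 bytes)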
size_max = self._entry_max.get_value() * 1048576
size_min = self._entry_min.get_value() * 1048576
return size_min < size < size_max
|
Hammer2900/SunflowerX
|
application/plugins/find_file_extensions/size.py
|
Python
|
gpl-3.0
| 2,732 | 0.00183 |
import yaml
from os import makedirs
from os.path import join,dirname,realpath,isdir
script_dir = dirname(realpath(__file__))
default_yml_filepath = join(script_dir,'defaults.yml')
defaults = {
"output_dir": 'output',
"header_img_dir": 'imgs/headers/',
"scaled_img_dir": 'imgs/scaled/',
"original_img_dir": 'imgs/original/',
"header_img_url": 'imgs/headers/',
"scaled_img_url": 'imgs/scaled/',
"original_img_url": 'imgs/original/',
"template_dir": join(script_dir,'templates'),
"max_article_img_width": 710,
"max_avatar_width": 710,
"database_file": "database.yml",
"static_dir": join(script_dir,'static'),
"copyright_msg": None,
"extra_links": [],
"import_to_discourse": False,
"strapline": None,
}
config = dict()
def getConfig():
if not config:
raise RuntimeError('config not loaded yet')
return config
def loadConfig(yml_filepath):
config.update(defaults)
with open(yml_filepath) as f:
        patch = yaml.safe_load(f.read())
config.update(patch)
# make paths absolute
config['header_img_dir'] = join(config['output_dir'],config['header_img_dir'])
config['scaled_img_dir'] = join(config['output_dir'],config['scaled_img_dir'])
config['original_img_dir'] = join(config['output_dir'],config['original_img_dir'])
config['database_file'] = join(config['output_dir'],config['database_file'])
def makeDirs():
if not config:
raise RuntimeError('config not loaded yet')
for key in ['header_img_dir','scaled_img_dir','original_img_dir']:
path = config[key]
if not isdir(path):
makedirs(path)
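# Illustrative usage (a sketch; the file name is assumed):
#   loadConfig('dsblog.yml')
#   makeDirs()
#   cfg = getConfig()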
|
naggie/dsblog
|
dsblog/environment.py
|
Python
|
mit
| 1,659 | 0.010247 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields, api
from datetime import *
class clv_medicament_template_history(models.Model):
_name = 'clv_medicament.template.history'
medicament_template_id = fields.Many2one('clv_medicament.template', 'Medicament Template', required=True)
user_id = fields.Many2one ('res.users', 'User', required=True)
date = fields.Datetime("Date", required=True)
state = fields.Selection([('draft','Draft'),
('revised','Revised'),
('waiting','Waiting'),
('done','Done'),
('canceled','Canceled'),
], string='Status', default='draft', readonly=True, required=True, help="")
notes = fields.Text(string='Notes')
_order = "date desc"
_defaults = {
'user_id': lambda obj,cr,uid,context: uid,
'date': lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
}
class clv_medicament_template(models.Model):
_inherit = 'clv_medicament.template'
history_ids = fields.One2many('clv_medicament.template.history', 'medicament_template_id', 'Medicament Template History', readonly=True)
active_history = fields.Boolean('Active History',
help="If unchecked, it will allow you to disable the history without removing it.",
default=True)
@api.one
def insert_clv_medicament_template_history(self, medicament_template_id, state, notes):
if self.active_history:
values = {
'medicament_template_id': medicament_template_id,
'state': state,
'notes': notes,
}
self.pool.get('clv_medicament.template.history').create(self._cr, self._uid, values)
@api.multi
def write(self, values):
if (not 'state' in values) and (not 'date' in values):
notes = values.keys()
self.insert_clv_medicament_template_history(self.id, self.state, notes)
return super(clv_medicament_template, self).write(values)
@api.one
def button_draft(self):
self.date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.state = 'draft'
self.insert_clv_medicament_template_history(self.id, 'draft', '')
@api.one
def button_revised(self):
self.date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.state = 'revised'
self.insert_clv_medicament_template_history(self.id, 'revised', '')
@api.one
def button_waiting(self):
self.date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.state = 'waiting'
self.insert_clv_medicament_template_history(self.id, 'waiting', '')
@api.one
def button_done(self):
self.date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.state = 'done'
self.insert_clv_medicament_template_history(self.id, 'done', '')
@api.one
def button_cancel(self):
self.date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.state = 'canceled'
self.insert_clv_medicament_template_history(self.id, 'canceled', '')
@api.one
def set_to_draft(self, *args):
self.state = 'draft'
self.create_workflow()
return True
|
CLVsol/odoo_addons
|
clv_medicament_template/history/clv_medicament_template_history.py
|
Python
|
agpl-3.0
| 4,727 | 0.008462 |
import mytest
print '----This is func1----'
mytest.world.func1()
print '----This is func2----'
mytest.simple.func2()
print '----This is func3----'
mytest.whatever.func3()
print '----This is myobj using MyClass----'
myobj = mytest.MyClass('nick', 'florida')
myobj.hello()
|
bonno800/pynet
|
week9exercise9-a-b.py
|
Python
|
apache-2.0
| 275 | 0 |
# Copyright 2014 Cirruspath, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: James Horey
# Email: jhorey@cirruspath.com
#
from flask import Flask, request, redirect, url_for
import json
import os
from pom.triggers.poster import Poster
from pom.triggers.github import GitHub
from pom.triggers.salesforce import Salesforce
from pom.clients.oauth2 import OAuth2
import requests
from requests.exceptions import ConnectionError
import sys
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
import urllib
from uuid import uuid4
import yaml
app = Flask(__name__)
#
# Read in all the known oauth providers.
#
CONFIG_DIR = os.path.dirname(os.path.dirname(__file__)) + "/config"
providers = {}
for f in os.listdir(CONFIG_DIR + "/providers"):
n, e = os.path.splitext(f)
print "Source: " + n
providers[n] = OAuth2(n, CONFIG_DIR + "/providers/" + f, os.environ['POM_APPS'])
#
# Instantiate all the triggers.
#
triggers = []
yaml_file = open(CONFIG_DIR + "/pom.yaml", 'r')
config = yaml.safe_load(yaml_file)
redirect_uri = config["callback"]
if 'triggers' in config:
trigger_list = config["triggers"].split(",")
for t in trigger_list:
if t == "github":
triggers.append(GitHub())
elif t == "salesforce":
triggers.append(Salesforce())
elif t == "poster":
triggers.append(Poster())
else:
trigger_list = []
#
# Responses store the oauth state machine.
#
responses = {}
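# Illustrative lifecycle of one entry (stages taken from the handlers below):
#   responses[state] = {'stage': 'authorize', 'session': ...}
#   -> {'stage': 'callback', ...} -> {'stage': 'authorized', 'resp': <token JSON>}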
#
# The default page redirects the user to the source OAuth page.
#
@app.route('/', methods=['GET'])
def authorize():
state = str(uuid4())
if 'session' in request.args:
session = request.args['session']
else:
session = state
if 'source' in request.args:
source = providers[request.args['source']]
print "Using the %s OAuth server" % request.args['source']
else:
print "Using the Salesforce OAuth server"
source = providers["salesforce"]
payload = { 'scope' : source.scopes,
'state' : state,
'redirect_uri' : redirect_uri + '/' + source.name,
'response_type' : 'code',
'client_id' : source.consumer_key,
'access_type' : 'offline'}
url = source.authorize_url + "?" + urllib.urlencode(payload)
responses[state] = { 'stage' : 'authorize',
'session' : session }
if 'redirect' in request.args:
responses[state]['redirect'] = request.args['redirect']
print "Using the %s user redirect" % responses[state]['redirect']
return redirect(url)
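# Illustrative request (host and port are assumed, not defined here):
#   GET https://localhost:8443/?source=github&redirect=https://app.example.com/done
# redirects the browser to GitHub's OAuth authorize URL with a fresh state token.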
#
# Fetch a new access token using a refresh token.
#
@app.route('/refresh', methods=['DELETE'])
def revoke_access_token():
refresh_token = request.args['refresh']
source = providers[request.args['source']]
payload = { 'token' : refresh_token }
resp = requests.post(source.revoke_url, params = payload)
return resp.text
#
# Fetch a new access token using a refresh token.
#
@app.route('/refresh', methods=['GET'])
def refresh_access_token():
refresh_token = request.args['refresh']
source = providers[request.args['source']]
print "refreshing with " + refresh_token
payload = { 'client_id' : source.consumer_key,
'client_secret' : source.consumer_secret,
'grant_type' : 'refresh_token',
'refresh_token' : refresh_token }
resp = requests.post(source.token_url, params = payload)
return resp.text
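# Illustrative request (host and port are assumed, not defined here):
#   GET https://localhost:8443/refresh?source=salesforce&refresh=<refresh_token>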
def _get_access_token(source, auth_code, state, session, redirect=None):
try:
payload = { 'client_id' : source.consumer_key,
'client_secret' : source.consumer_secret,
'grant_type' : 'authorization_code',
'code' : auth_code,
'redirect_uri' : redirect_uri + '/' + source.name}
headers = {'Accept' : 'application/json'}
# headers = {'content-type': 'application/x-www-form-urlencoded',
# 'content-length' : 256}
res = requests.post(source.access_token_url,
data = payload,
headers = headers)
if res.status_code == requests.codes.ok:
resp_json = res.json()
print "JSON response: " + str(resp_json)
if 'access_token' in resp_json:
resp = None
resp_json['source'] = source.name
if redirect:
resp_json['_user_redirect'] = redirect
for t in triggers:
resp = t.consume_access_key(resp_json)
responses[state] = { 'stage' : 'authorized',
'resp' : resp_json }
if resp:
return resp.text
else:
return json.dumps( {'status' : 'authorized',
'session' : session } )
else:
error_msg = "unauthorized"
else:
error_msg = "unreachable"
return json.dumps( {"status" : "failed",
"error" : error_msg,
"session" : session } )
except ConnectionError as e:
print str(e)
#
# The generic callback method. Should be supplemented with the provider source
# name so that we know what to do.
#
@app.route('/callback/<source_name>', methods=['GET'])
def callback(source_name):
source = providers[source_name]
if 'code' in request.args:
auth_code = request.args["code"]
state = request.args["state"]
session = responses[state]['session']
if 'redirect' in responses[state]:
redirect = responses[state]['redirect']
else:
redirect = None
responses[state]['stage'] = 'callback'
return _get_access_token(source, auth_code, state, session, redirect)
else:
return json.dumps( {'status' : 'failed',
'error' : 'authentication' } )
#
# Retrieve the access & refresh keys.
#
@app.route('/key', methods=['GET'])
def key():
if 'session' in request.args and request.args['session'] in responses:
resp = responses[request.args['session']]
if resp['stage'] == 'authorized':
            return resp['resp']['access_token']
    return json.dumps( {'status' : 'failed',
                        'error' : 'could not find access token' } )
def main():
if 'POM_SSL' in os.environ:
key_dir = os.environ['POM_SSL']
else:
key_dir = os.path.dirname(os.path.dirname(__file__)) + "/keys"
if not 'POM_APPS' in os.environ:
print "POM_APPS should be set to a directory with our application OAuth credentials"
exit(1)
print "Using SSL certificate in " + key_dir
try:
http_server = HTTPServer(WSGIContainer(app),
ssl_options={
"certfile": key_dir + "/server.crt",
"keyfile": key_dir + "/server.key",
})
http_server.listen(port=int(sys.argv[2]),
address=sys.argv[1])
IOLoop.instance().start()
    except Exception as e:
        print "Failed to start HTTPS server: " + str(e)
|
cirruspath/python-oauth2-middleware
|
pom/server/pomserver.py
|
Python
|
apache-2.0
| 7,922 | 0.012497 |
import numpy as np
import pandas as pd
from plotnine import (ggplot, aes, geom_area, geom_ribbon,
facet_wrap, scale_x_continuous, theme)
n = 4 # No. of ribbons in a vertical stack
m = 100 # Points
width = 2*np.pi # width of each ribbon
x = np.linspace(0, width, m)
df = pd.DataFrame({
'x': np.tile(x, n),
'ymin': np.hstack([np.sin(x)+2*i for i in range(n)]),
'ymax': np.hstack([np.sin(x)+2*i+1 for i in range(n)]),
'z': np.repeat(range(n), m)
})
_theme = theme(subplots_adjust={'right': 0.85})
def test_ribbon_aesthetics():
p = (ggplot(df, aes('x', ymin='ymin', ymax='ymax',
group='factor(z)')) +
geom_ribbon() +
geom_ribbon(aes('x+width', alpha='z')) +
geom_ribbon(aes('x+2*width', linetype='factor(z)'),
color='black', fill=None, size=2) +
geom_ribbon(aes('x+3*width', color='z'),
fill=None, size=2) +
geom_ribbon(aes('x+4*width', fill='factor(z)')) +
geom_ribbon(aes('x+5*width', size='z'),
color='black', fill=None) +
scale_x_continuous(
breaks=[i*2*np.pi for i in range(7)],
labels=['0'] + [r'${}\pi$'.format(2*i) for i in range(1, 7)])
)
assert p + _theme == 'ribbon_aesthetics'
def test_area_aesthetics():
p = (ggplot(df, aes('x', 'ymax+2', group='factor(z)')) +
geom_area() +
geom_area(aes('x+width', alpha='z')) +
geom_area(aes('x+2*width', linetype='factor(z)'),
color='black', fill=None, size=2) +
geom_area(aes('x+3*width', color='z'),
fill=None, size=2) +
geom_area(aes('x+4*width', fill='factor(z)')) +
geom_area(aes('x+5*width', size='z'),
color='black', fill=None) +
scale_x_continuous(
breaks=[i*2*np.pi for i in range(7)],
labels=['0'] + [r'${}\pi$'.format(2*i) for i in range(1, 7)])
)
assert p + _theme == 'area_aesthetics'
def test_ribbon_facetting():
p = (ggplot(df, aes('x', ymin='ymin', ymax='ymax',
fill='factor(z)')) +
geom_ribbon() +
facet_wrap('~ z')
)
assert p + _theme == 'ribbon_facetting'
|
has2k1/plotnine
|
plotnine/tests/test_geom_ribbon_area.py
|
Python
|
gpl-2.0
| 2,328 | 0 |
import os
import amo.search
from .models import Reindexing
from django.core.management.base import CommandError
# shortcut functions
is_reindexing_amo = Reindexing.objects.is_reindexing_amo
flag_reindexing_amo = Reindexing.objects.flag_reindexing_amo
unflag_reindexing_amo = Reindexing.objects.unflag_reindexing_amo
get_indices = Reindexing.objects.get_indices
def index_objects(ids, model, search, index=None, transforms=None):
if index is None:
index = model._get_index()
indices = Reindexing.objects.get_indices(index)
if transforms is None:
transforms = []
qs = model.objects.no_cache().filter(id__in=ids)
for t in transforms:
qs = qs.transform(t)
for ob in qs:
data = search.extract(ob)
for index in indices:
model.index(data, bulk=True, id=ob.id, index=index)
amo.search.get_es().flush_bulk(forced=True)
def raise_if_reindex_in_progress(site):
"""Checks if the database indexation flag is on for the given site.
If it's on, and if no "FORCE_INDEXING" variable is present in the env,
raises a CommandError.
"""
already_reindexing = Reindexing.objects._is_reindexing(site)
if already_reindexing and 'FORCE_INDEXING' not in os.environ:
raise CommandError("Indexation already occuring. Add a FORCE_INDEXING "
"variable in the environ to force it")
|
anaran/olympia
|
lib/es/utils.py
|
Python
|
bsd-3-clause
| 1,403 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('shows', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='show',
options={'verbose_name_plural': 'shows', 'ordering': ['datetime', 'cinema', 'id'], 'verbose_name': 'show'},
),
]
|
Cinemair/cinemair-server
|
cinemair/shows/migrations/0002_auto_20150712_2126.py
|
Python
|
mit
| 430 | 0.002326 |
# This file is part of James CI.
#
# James CI is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# James CI is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with James CI. If not, see <http://www.gnu.org/licenses/>.
#
#
# Copyright (C)
# 2017 Alexander Haase <ahaase@alexhaase.de>
#
import enum
@enum.unique
class Status(enum.IntEnum):
"""
This enum class will be used for defining the status of a
:py:class:`~.Pipeline` or :py:class:`~.Job`. Multiple statuses may be
compared by their value.
.. note::
The minimum of a list of statuses will be the *worst* status of the list.
However, if the list has a status of :py:attr:`created`,
:py:attr:`pending` or :py:attr:`running`, these will have priority,
indicating not all jobs have finished yet.
"""
created = enum.auto()
pending = enum.auto()
running = enum.auto()
canceled = enum.auto()
errored = enum.auto()
failed = enum.auto()
success = enum.auto()
def __str__(self):
"""
Return the status name as string. This function is required to remove
the enum's class name prefix when string representation is required.
"""
return self.name
def final(self):
"""
:return: If the status is a final state or not.
:rtype: bool
"""
return (self in [self.canceled, self.errored, self.failed,
self.success])
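# Illustrative behaviour (follows from the IntEnum values above):
#   min([Status.failed, Status.success]) is Status.failed
#   Status.success.final() is True, Status.running.final() is False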
|
alehaa/james
|
jamesci/status.py
|
Python
|
gpl-3.0
| 1,897 | 0.000527 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import re
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training as train
# The default learning rate of 0.2 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.2
def _get_optimizer(spec):
if isinstance(spec, six.string_types):
return layers.OPTIMIZER_CLS_NAMES[spec](
learning_rate=_LEARNING_RATE)
elif callable(spec):
return spec()
return spec
# TODO(ispir): Remove this function by fixing '_infer_model' with single outputs
# and as_iterable case.
def _as_iterable(preds, output):
for pred in preds:
yield pred[output]
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
labels, columns_to_variables):
# TODO(b/31008490): Move definition to a common constants place.
bias_column_name = "tf_virtual_bias_column"
  if any(col.name == bias_column_name for col in feature_columns):
raise ValueError("%s is a reserved column name." % bias_column_name)
bias_column = layers.real_valued_column(bias_column_name)
columns_to_tensors[bias_column] = array_ops.ones_like(labels,
dtype=dtypes.float32)
columns_to_variables[bias_column] = [bias_variable]
def _linear_model_fn(features, labels, mode, params):
"""A model_fn for linear models that use a gradient-based optimizer.
Args:
features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training.
* gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio.
* num_ps_replicas: The number of parameter server replicas.
* joint_weights: If True, the weights for all columns will be stored in a
single (possibly partitioned) variable. It's more efficient, but it's
incompatible with SDCAOptimizer, and requires all feature columns are
sparse and use the 'sum' combiner.
Returns:
An `estimator.ModelFnOps` instance.
Raises:
ValueError: If mode is not any of the `ModeKeys`.
"""
head = params["head"]
feature_columns = params["feature_columns"]
optimizer = params["optimizer"]
gradient_clip_norm = params.get("gradient_clip_norm", None)
num_ps_replicas = params.get("num_ps_replicas", 0)
joint_weights = params.get("joint_weights", False)
if not isinstance(features, dict):
features = {"": features}
parent_scope = "linear"
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20)
with variable_scope.variable_scope(
parent_scope, values=features.values(), partitioner=partitioner) as scope:
if joint_weights:
logits, _, _ = (
layers.joint_weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=head.logits_dimension,
weight_collections=[parent_scope],
scope=scope))
else:
logits, _, _ = (
layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=head.logits_dimension,
weight_collections=[parent_scope],
scope=scope))
def _train_op_fn(loss):
global_step = contrib_variables.get_global_step()
my_vars = ops.get_collection("linear")
grads = gradients.gradients(loss, my_vars)
if gradient_clip_norm:
grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
return (optimizer.apply_gradients(
zip(grads, my_vars), global_step=global_step))
return head.head_ops(features, labels, mode, _train_op_fn, logits)
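# Illustrative params dict for _linear_model_fn (a sketch with hypothetical
# values, mirroring the hyperparameters documented in its docstring):
#
#   params = {
#       "head": head_lib._multi_class_head(2),
#       "feature_columns": [layers.real_valued_column("x")],
#       "optimizer": train.FtrlOptimizer(learning_rate=0.1),
#       "gradient_clip_norm": 5.0,
#       "num_ps_replicas": 0,
#       "joint_weights": False,
#   }
#   model_fn_ops = _linear_model_fn(features, labels, mode, params)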
def sdca_model_fn(features, labels, mode, params):
"""A model_fn for linear models that use the SDCA optimizer.
Args:
features: A dict of `Tensor` keyed by column name.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance. Type must be one of `_BinarySvmHead`,
`_RegressionHead` or `_MultiClassHead`.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: An `SDCAOptimizer` instance.
* weight_column_name: A string defining the weight feature column, or
None if there are no weights.
* update_weights_hook: A `SessionRunHook` object or None. Used to update
model weights.
Returns:
An `estimator.ModelFnOps` instance.
Raises:
ValueError: If `optimizer` is not an `SDCAOptimizer` instance.
ValueError: If the type of head is neither `_BinarySvmHead`, nor
`_RegressionHead` nor `_MultiClassHead`.
ValueError: If mode is not any of the `ModeKeys`.
"""
head = params["head"]
feature_columns = params["feature_columns"]
optimizer = params["optimizer"]
weight_column_name = params["weight_column_name"]
update_weights_hook = params.get("update_weights_hook", None)
if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
raise ValueError("Optimizer must be of type SDCAOptimizer")
if isinstance(head, head_lib._BinarySvmHead): # pylint: disable=protected-access
loss_type = "hinge_loss"
elif isinstance(head, head_lib._MultiClassHead): # pylint: disable=protected-access
loss_type = "logistic_loss"
elif isinstance(head, head_lib._RegressionHead): # pylint: disable=protected-access
loss_type = "squared_loss"
else:
    raise ValueError("Unsupported head type: {}".format(head))
parent_scope = "linear"
with variable_scope.variable_op_scope(
features.values(), parent_scope) as scope:
logits, columns_to_variables, bias = (
layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=1,
scope=scope))
_add_bias_column(feature_columns, features, bias, labels,
columns_to_variables)
def _train_op_fn(unused_loss):
global_step = contrib_variables.get_global_step()
sdca_model, train_op = optimizer.get_train_step(columns_to_variables,
weight_column_name,
loss_type, features,
labels, global_step)
if update_weights_hook is not None:
update_weights_hook.set_parameters(sdca_model, train_op)
return train_op
return head.head_ops(features, labels, mode, _train_op_fn, logits)
# Ensures consistency with LinearComposableModel.
def _get_default_optimizer(feature_columns):
learning_rate = min(_LEARNING_RATE, 1.0 / math.sqrt(len(feature_columns)))
return train.FtrlOptimizer(learning_rate=learning_rate)
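# Worked example of the default rule above (a sketch): with 25 feature columns,
# min(0.2, 1/sqrt(25)) = min(0.2, 0.2) = 0.2, while with 100 columns the rate
# drops to min(0.2, 1/10) = 0.1, so wider models get a smaller default FTRL
# learning rate.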
class _SdcaUpdateWeightsHook(session_run_hook.SessionRunHook):
"""SessionRunHook to update and shrink SDCA model weights."""
def __init__(self):
pass
def set_parameters(self, sdca_model, train_op):
self._sdca_model = sdca_model
self._train_op = train_op
def begin(self):
"""Construct the update_weights op.
The op is implicitly added to the default graph.
"""
self._update_op = self._sdca_model.update_weights(self._train_op)
def before_run(self, run_context):
"""Return the update_weights op so that it is executed during this run."""
return session_run_hook.SessionRunArgs(self._update_op)
class LinearClassifier(evaluable.Evaluable, trainable.Trainable):
"""Linear classifier model.
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
# Estimator using the default optimizer.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Or estimator using the FTRL optimizer with regularization.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Or estimator using the SDCAOptimizer.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id',
num_loss_partitions=...,
symmetric_l2_regularization=2.0
))
# Input builders
  def input_fn_train(): # returns x, y
    ...
  def input_fn_eval(): # returns x, y
    ...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_weight pylint: disable=invalid-name
feature_columns,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=False,
_joint_weight=False,
config=None,
feature_engineering_fn=None):
"""Construct a `LinearClassifier` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: The optimizer used to train the model. If specified, it should
be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,
the Ftrl optimizer will be used.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
_joint_weight: If True, the weights for all columns will be stored in a
single (possibly partitioned) variable. It's more efficient, but it's
incompatible with SDCAOptimizer, and requires all feature columns are
sparse and use the 'sum' combiner.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Returns:
A `LinearClassifier` estimator.
Raises:
ValueError: if n_classes < 2.
"""
# TODO(zoy): Give an unsupported error if enable_centered_bias is
# requested for SDCA once its default changes to False.
self._feature_columns = feature_columns
assert self._feature_columns
self._optimizer = _get_default_optimizer(feature_columns)
if optimizer:
self._optimizer = _get_optimizer(optimizer)
chief_hook = None
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
enable_centered_bias):
enable_centered_bias = False
logging.warning("centered_bias is not supported with SDCA, "
"please disable it explicitly.")
head = head_lib._multi_class_head( # pylint: disable=protected-access
n_classes,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias)
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": self._optimizer,
}
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
assert not _joint_weight, ("_joint_weight is incompatible with the"
" SDCAOptimizer")
assert n_classes == 2, "SDCA only applies to binary classification."
model_fn = sdca_model_fn
# The model_fn passes the model parameters to the chief_hook. We then use
# the hook to update weights and shrink step only on the chief.
chief_hook = _SdcaUpdateWeightsHook()
params.update({
"weight_column_name": weight_column_name,
"update_weights_hook": chief_hook,
})
else:
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
"num_ps_replicas": config.num_ps_replicas if config else 0,
"joint_weights": _joint_weight,
})
self._estimator = estimator.Estimator(
model_fn=model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
self._additional_run_hook = (chief_hook if self._estimator.config.is_chief
else None)
def get_estimator(self):
return self._estimator
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
"""See trainable.Trainable."""
# TODO(roumposg): Remove when deprecated monitors are removed.
if monitors is None:
monitors = []
deprecated_monitors = [
m for m in monitors
if not isinstance(m, session_run_hook.SessionRunHook)
]
for monitor in deprecated_monitors:
monitor.set_estimator(self)
monitor._lock_estimator() # pylint: disable=protected-access
if self._additional_run_hook:
monitors.append(self._additional_run_hook)
result = self._estimator.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors,
max_steps=max_steps)
for monitor in deprecated_monitors:
monitor._unlock_estimator() # pylint: disable=protected-access
return result
def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
batch_size=None, steps=None, metrics=None, name=None):
"""See evaluable.Evaluable."""
return self._estimator.evaluate(x=x, y=y, input_fn=input_fn,
feed_fn=feed_fn, batch_size=batch_size,
steps=steps, metrics=metrics, name=name)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
"""Runs inference to determine the predicted class."""
key = prediction_key.PredictionKey.CLASSES
preds = self._estimator.predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Runs inference to determine the class probability predictions."""
key = prediction_key.PredictionKey.PROBABILITIES
preds = self._estimator.predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
def get_variable_names(self):
return self._estimator.get_variable_names()
def get_variable_value(self, name):
return self._estimator.get_variable_value(name)
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(
examples, self._feature_columns)
return self._estimator.export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or
export.classification_signature_fn_with_prob),
prediction_key=prediction_key.PredictionKey.PROBABILITIES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def weights_(self):
values = {}
    optimizer_regex = r".*/" + self._optimizer.get_name() + r"(_\d)?$"
for name in self.get_variable_names():
if (name.startswith("linear/") and
name != "linear/bias_weight" and
not re.match(optimizer_regex, name)):
values[name] = self.get_variable_value(name)
if len(values) == 1:
return values[list(values.keys())[0]]
return values
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def bias_(self):
return self.get_variable_value("linear/bias_weight")
@property
def config(self):
return self._estimator.config
@property
def model_dir(self):
return self._estimator.model_dir
class LinearRegressor(evaluable.Evaluable, trainable.Trainable):
"""Linear regressor model.
Train a linear regression model to predict label value given observation of
feature values.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
estimator = LinearRegressor(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Input builders
  def input_fn_train(): # returns x, y
    ...
  def input_fn_eval(): # returns x, y
    ...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a KeyError:
* if `weight_column_name` is not `None`:
key=weight_column_name, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
- if isinstance(column, `WeightedSparseColumn`):
{key=id column name, value=a `SparseTensor`,
key=weight column name, value=a `SparseTensor`}
- if isinstance(column, `RealValuedColumn`):
key=column.name, value=a `Tensor`
"""
def __init__(self, # _joint_weights: pylint: disable=invalid-name
feature_columns,
model_dir=None,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=False,
label_dimension=1,
_joint_weights=False,
config=None,
feature_engineering_fn=None):
"""Construct a `LinearRegressor` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Ftrl optimizer.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
label_dimension: dimension of the label for multilabels.
_joint_weights: If True use a single (possibly partitioned) variable to
store the weights. It's faster, but requires all feature columns are
sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Returns:
A `LinearRegressor` estimator.
"""
self._feature_columns = feature_columns
assert self._feature_columns
self._optimizer = _get_default_optimizer(feature_columns)
if optimizer:
self._optimizer = _get_optimizer(optimizer)
chief_hook = None
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
enable_centered_bias):
enable_centered_bias = False
logging.warning("centered_bias is not supported with SDCA, "
"please disable it explicitly.")
head = head_lib._regression_head( # pylint: disable=protected-access
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias)
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": self._optimizer,
}
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
assert label_dimension == 1, "SDCA only applies for label_dimension=1."
assert not _joint_weights, ("_joint_weights is incompatible with"
" SDCAOptimizer.")
model_fn = sdca_model_fn
# The model_fn passes the model parameters to the chief_hook. We then use
# the hook to update weights and shrink step only on the chief.
chief_hook = _SdcaUpdateWeightsHook()
params.update({
"weight_column_name": weight_column_name,
"update_weights_hook": chief_hook,
})
else:
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
"num_ps_replicas": config.num_ps_replicas if config else 0,
"joint_weights": _joint_weights,
})
self._estimator = estimator.Estimator(
model_fn=model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
self._additional_run_hook = (chief_hook if self._estimator.config.is_chief
else None)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
"""See trainable.Trainable."""
# TODO(roumposg): Remove when deprecated monitors are removed.
if monitors is None:
monitors = []
deprecated_monitors = [
m for m in monitors
if not isinstance(m, session_run_hook.SessionRunHook)
]
for monitor in deprecated_monitors:
monitor.set_estimator(self)
monitor._lock_estimator() # pylint: disable=protected-access
if self._additional_run_hook:
monitors.append(self._additional_run_hook)
result = self._estimator.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors,
max_steps=max_steps)
for monitor in deprecated_monitors:
monitor._unlock_estimator() # pylint: disable=protected-access
return result
def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
batch_size=None, steps=None, metrics=None, name=None):
"""See evaluable.Evaluable."""
return self._estimator.evaluate(x=x, y=y, input_fn=input_fn,
feed_fn=feed_fn, batch_size=batch_size,
steps=steps, metrics=metrics, name=name)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
"""Runs inference to determine the predicted class."""
key = prediction_key.PredictionKey.SCORES
preds = self._estimator.predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
def get_variable_names(self):
return self._estimator.get_variable_names()
def get_variable_value(self, name):
return self._estimator.get_variable_value(name)
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(
examples, self._feature_columns)
return self._estimator.export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or export.regression_signature_fn),
prediction_key=prediction_key.PredictionKey.SCORES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def weights_(self):
values = {}
    optimizer_regex = r".*/" + self._optimizer.get_name() + r"(_\d)?$"
for name in self.get_variable_names():
if (name.startswith("linear/") and
name != "linear/bias_weight" and
not re.match(optimizer_regex, name)):
values[name] = self.get_variable_value(name)
if len(values) == 1:
return values[list(values.keys())[0]]
return values
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def bias_(self):
return self.get_variable_value("linear/bias_weight")
@property
def config(self):
return self._estimator.config
@property
def model_dir(self):
return self._estimator.model_dir
|
nanditav/15712-TensorFlow
|
tensorflow/contrib/learn/python/learn/estimators/linear.py
|
Python
|
apache-2.0
| 31,382 | 0.004716 |
################################################################################
#
# Copyright 2015-2020 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
__author__ = "Felix Brezo, Yaiza Rubio <contacto@i3visio.com>"
__version__ = "2.0"
from osrframework.utils.platforms import Platform
class Papaly(Platform):
"""A <Platform> object for Papaly."""
def __init__(self):
self.platformName = "Papaly"
self.tags = ["social"]
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "https://papaly.com/<usufy>"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
        # Regular expressions that a query must match to be considered valid
self.validQuery = {}
        # The regular expression '.+' will match any non-empty query
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
        # Strings whose presence in the response implies that the profile does not exist
self.notFoundText = {}
#self.notFoundText["phonefy"] = []
self.notFoundText["usufy"] = ["<title>Page not found</title>"]
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:
#self.fieldsRegExp["usufy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
        # This attribute will be fed when running the program.
self.foundFields = {}
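# Illustrative note (a sketch of framework behavior, not defined in this file):
# OSRFramework builds the usufy query by substituting the nickname into the
# <usufy> token of self.url, e.g. a hypothetical nickname "johndoe" yields
# "https://papaly.com/johndoe"; the fetched page is then matched against
# self.notFoundText["usufy"] to decide whether the profile exists.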
|
i3visio/osrframework
|
osrframework/wrappers/papaly.py
|
Python
|
agpl-3.0
| 3,867 | 0.004397 |
from __future__ import unicode_literals
from collections import defaultdict
import datetime
import json
from moto.compat import OrderedDict
from moto.core import BaseBackend
from moto.core.utils import unix_time
from .comparisons import get_comparison_func
class DynamoJsonEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'to_json'):
return obj.to_json()
def dynamo_json_dump(dynamo_object):
return json.dumps(dynamo_object, cls=DynamoJsonEncoder)
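# Sketch of how the encoder is used (hypothetical value; DynamoType is defined
# below): any object exposing to_json() serializes through it, e.g.
#   dynamo_json_dump(DynamoType({"S": "abc"}))  # -> '{"S": "abc"}'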
class DynamoType(object):
"""
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes
"""
def __init__(self, type_as_dict):
self.type = list(type_as_dict.keys())[0]
self.value = list(type_as_dict.values())[0]
def __hash__(self):
return hash((self.type, self.value))
def __eq__(self, other):
return (
self.type == other.type and
self.value == other.value
)
def __repr__(self):
return "DynamoType: {0}".format(self.to_json())
def to_json(self):
return {self.type: self.value}
def compare(self, range_comparison, range_objs):
"""
Compares this type against comparison filters
"""
range_values = [obj.value for obj in range_objs]
comparison_func = get_comparison_func(range_comparison)
return comparison_func(self.value, *range_values)
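# Illustrative sketch (hypothetical values): resolving a "GT" filter through
# compare(); get_comparison_func("GT") supplies the actual operator.
#   attr = DynamoType({"N": "7"})
#   attr.compare("GT", [DynamoType({"N": "5"})])  # -> True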
class Item(object):
def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs):
self.hash_key = hash_key
self.hash_key_type = hash_key_type
self.range_key = range_key
self.range_key_type = range_key_type
self.attrs = {}
for key, value in attrs.items():
self.attrs[key] = DynamoType(value)
def __repr__(self):
return "Item: {0}".format(self.to_json())
def to_json(self):
attributes = {}
for attribute_key, attribute in self.attrs.items():
attributes[attribute_key] = attribute.value
return {
"Attributes": attributes
}
def describe_attrs(self, attributes):
if attributes:
included = {}
for key, value in self.attrs.items():
if key in attributes:
included[key] = value
else:
included = self.attrs
return {
"Item": included
}
class Table(object):
def __init__(self, name, hash_key_attr, hash_key_type,
range_key_attr=None, range_key_type=None, read_capacity=None,
write_capacity=None):
self.name = name
self.hash_key_attr = hash_key_attr
self.hash_key_type = hash_key_type
self.range_key_attr = range_key_attr
self.range_key_type = range_key_type
self.read_capacity = read_capacity
self.write_capacity = write_capacity
self.created_at = datetime.datetime.utcnow()
self.items = defaultdict(dict)
@property
def has_range_key(self):
return self.range_key_attr is not None
@property
def describe(self):
results = {
"Table": {
"CreationDateTime": unix_time(self.created_at),
"KeySchema": {
"HashKeyElement": {
"AttributeName": self.hash_key_attr,
"AttributeType": self.hash_key_type
},
},
"ProvisionedThroughput": {
"ReadCapacityUnits": self.read_capacity,
"WriteCapacityUnits": self.write_capacity
},
"TableName": self.name,
"TableStatus": "ACTIVE",
"ItemCount": len(self),
"TableSizeBytes": 0,
}
}
if self.has_range_key:
results["Table"]["KeySchema"]["RangeKeyElement"] = {
"AttributeName": self.range_key_attr,
"AttributeType": self.range_key_type
}
return results
def __len__(self):
count = 0
for key, value in self.items.items():
if self.has_range_key:
count += len(value)
else:
count += 1
return count
def __nonzero__(self):
return True
def __bool__(self):
return self.__nonzero__()
def put_item(self, item_attrs):
hash_value = DynamoType(item_attrs.get(self.hash_key_attr))
if self.has_range_key:
range_value = DynamoType(item_attrs.get(self.range_key_attr))
else:
range_value = None
item = Item(hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs)
if range_value:
self.items[hash_value][range_value] = item
else:
self.items[hash_value] = item
return item
def get_item(self, hash_key, range_key):
if self.has_range_key and not range_key:
raise ValueError("Table has a range key, but no range key was passed into get_item")
try:
if range_key:
return self.items[hash_key][range_key]
else:
return self.items[hash_key]
except KeyError:
return None
def query(self, hash_key, range_comparison, range_objs):
results = []
last_page = True # Once pagination is implemented, change this
if self.range_key_attr:
possible_results = self.items[hash_key].values()
else:
possible_results = list(self.all_items())
if range_comparison:
for result in possible_results:
if result.range_key.compare(range_comparison, range_objs):
results.append(result)
else:
# If we're not filtering on range key, return all values
results = possible_results
return results, last_page
def all_items(self):
for hash_set in self.items.values():
if self.range_key_attr:
for item in hash_set.values():
yield item
else:
yield hash_set
def scan(self, filters):
results = []
scanned_count = 0
last_page = True # Once pagination is implemented, change this
for result in self.all_items():
scanned_count += 1
passes_all_conditions = True
for attribute_name, (comparison_operator, comparison_objs) in filters.items():
attribute = result.attrs.get(attribute_name)
if attribute:
# Attribute found
if not attribute.compare(comparison_operator, comparison_objs):
passes_all_conditions = False
break
elif comparison_operator == 'NULL':
# Comparison is NULL and we don't have the attribute
continue
else:
                    # No attribute found and comparison is not NULL. This item fails
passes_all_conditions = False
break
if passes_all_conditions:
results.append(result)
return results, scanned_count, last_page
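    # Sketch of the filters shape consumed by scan() (hypothetical attribute
    # names): {"age": ("GT", [DynamoType({"N": "21"})]), "nickname": ("NULL", [])}.
    # Items without "age" fail the GT condition, while items without "nickname"
    # pass the NULL condition, matching the branches above.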
def delete_item(self, hash_key, range_key):
try:
if range_key:
return self.items[hash_key].pop(range_key)
else:
return self.items.pop(hash_key)
except KeyError:
return None
class DynamoDBBackend(BaseBackend):
def __init__(self):
self.tables = OrderedDict()
def create_table(self, name, **params):
table = Table(name, **params)
self.tables[name] = table
return table
def delete_table(self, name):
return self.tables.pop(name, None)
def update_table_throughput(self, name, new_read_units, new_write_units):
table = self.tables[name]
table.read_capacity = new_read_units
table.write_capacity = new_write_units
return table
def put_item(self, table_name, item_attrs):
table = self.tables.get(table_name)
if not table:
return None
return table.put_item(item_attrs)
def get_item(self, table_name, hash_key_dict, range_key_dict):
table = self.tables.get(table_name)
if not table:
return None
hash_key = DynamoType(hash_key_dict)
range_key = DynamoType(range_key_dict) if range_key_dict else None
return table.get_item(hash_key, range_key)
def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts):
table = self.tables.get(table_name)
if not table:
return None, None
hash_key = DynamoType(hash_key_dict)
range_values = [DynamoType(range_value) for range_value in range_value_dicts]
return table.query(hash_key, range_comparison, range_values)
def scan(self, table_name, filters):
table = self.tables.get(table_name)
if not table:
return None, None, None
scan_filters = {}
for key, (comparison_operator, comparison_values) in filters.items():
dynamo_types = [DynamoType(value) for value in comparison_values]
scan_filters[key] = (comparison_operator, dynamo_types)
return table.scan(scan_filters)
def delete_item(self, table_name, hash_key_dict, range_key_dict):
table = self.tables.get(table_name)
if not table:
return None
hash_key = DynamoType(hash_key_dict)
range_key = DynamoType(range_key_dict) if range_key_dict else None
return table.delete_item(hash_key, range_key)
dynamodb_backend = DynamoDBBackend()
|
silveregg/moto
|
moto/dynamodb/models.py
|
Python
|
apache-2.0
| 9,875 | 0.00081 |
# -*- coding: utf-8 -*-
###############################################################################
#
# GetTariff
# Returns an individual Tariff object with a given id.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetTariff(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetTariff Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetTariff, self).__init__(temboo_session, '/Library/Genability/TariffData/GetTariff')
def new_input_set(self):
return GetTariffInputSet()
def _make_result_set(self, result, path):
return GetTariffResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetTariffChoreographyExecution(session, exec_id, path)
class GetTariffInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetTariff
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AppID(self, value):
"""
Set the value of the AppID input for this Choreo. ((conditional, string) The App ID provided by Genability.)
"""
super(GetTariffInputSet, self)._set_input('AppID', value)
def set_AppKey(self, value):
"""
Set the value of the AppKey input for this Choreo. ((required, string) The App Key provided by Genability.)
"""
super(GetTariffInputSet, self)._set_input('AppKey', value)
def set_MasterTariffID(self, value):
"""
Set the value of the MasterTariffID input for this Choreo. ((required, integer) The master tariff id. This can be retrieved in the output of the GetTariffs Choreo.)
"""
super(GetTariffInputSet, self)._set_input('MasterTariffID', value)
def set_PopulateProperties(self, value):
"""
Set the value of the PopulateProperties input for this Choreo. ((optional, boolean) Set to "true" to populate the properties for the returned Tariffs.)
"""
super(GetTariffInputSet, self)._set_input('PopulateProperties', value)
def set_PopulateRates(self, value):
"""
Set the value of the PopulateRates input for this Choreo. ((optional, boolean) Set to "true" to populate the rate details for the returned Tariffs.)
"""
super(GetTariffInputSet, self)._set_input('PopulateRates', value)
class GetTariffResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetTariff Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
    def getJSONFromString(self, json_str):
        return json.loads(json_str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Genability.)
"""
return self._output.get('Response', None)
class GetTariffChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetTariffResultSet(response, path)
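# Illustrative usage sketch (assumes a valid TembooSession and Genability
# credentials; execute_with_results is the standard Temboo execution call and
# the input values below are hypothetical):
#
#   choreo = GetTariff(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AppID("MY_APP_ID")
#   inputs.set_AppKey("MY_APP_KEY")
#   inputs.set_MasterTariffID("512")
#   results = choreo.execute_with_results(inputs)
#   tariff = results.getJSONFromString(results.get_Response())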
|
jordanemedlock/psychtruths
|
temboo/core/Library/Genability/TariffData/GetTariff.py
|
Python
|
apache-2.0
| 4,018 | 0.004978 |
# -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
class SaleOrder(models.Model):
_inherit = 'sale.order'
@api.multi
def action_button_confirm(self):
procurement_obj = self.env['procurement.order']
procurement_group_obj = self.env['procurement.group']
res = super(SaleOrder, self).action_button_confirm()
for line in self.order_line:
valid = self._validate_service_project_for_procurement(
line.product_id)
if valid:
if not self.procurement_group_id:
vals = self._prepare_procurement_group(self)
group = procurement_group_obj.create(vals)
self.write({'procurement_group_id': group.id})
vals = self._prepare_order_line_procurement(
self, line, group_id=self.procurement_group_id.id)
vals['name'] = self.name + ' - ' + line.product_id.name
procurement = procurement_obj.create(vals)
procurement.run()
return res
def _validate_service_project_for_procurement(self, product):
routes = product.route_ids.filtered(lambda r: r.id in [
self.env.ref('procurement_service_project.route_serv_project').id])
return product.type == 'service' and routes
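# Illustrative note (a sketch, hypothetical product): the check above is truthy
# only for a product with type='service' whose route_ids include the module's
# 'route_serv_project' route; confirming an order line with such a product
# creates and runs a procurement in action_button_confirm above.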
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
service_project_task = fields.Many2one(
comodel_name='project.task', string='Generated task from procurement',
copy=False)
|
esthermm/odoo-addons
|
procurement_service_project/models/sale_order.py
|
Python
|
agpl-3.0
| 1,663 | 0 |
# -*- encoding: utf-8 -*-
# from django.shortcuts import render, render_to_response, redirect, get_object_or_404, get_list_or_404, Http404
from django.core.cache import cache
from django.shortcuts import *
from django.views.generic import TemplateView, FormView
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.template import RequestContext
from django import template
from models import proyecto
from .forms import *
#from Logica.ConexionBD import adminBD
import funciones
import sys
#~ from administradorConsultas import AdministradorConsultas # Esta la comente JAPeTo
#~ from manejadorArchivos import obtener_autores # Esta la comente JAPeTo
#~ from red import Red # Esta la comente JAPeTo
from Logica import ConsumirServicios, procesamientoScopusXml, procesamientoArxiv
# import igraph
import traceback
import json
import django.utils
from Logica.ConexionBD.adminBD import AdminBD
from principal.parameters import *
from principal.permisos import *
# sys.setdefaultencoding is cancelled by site.py
reload(sys) # to re-enable sys.setdefaultencoding()
sys.setdefaultencoding('utf-8')
# Create your views here.
# @login_required
#ruta = "/home/administrador/ManejoVigtech/ArchivosProyectos/"
sesion_proyecto=None
proyectos_list =None
model_proyecto =None
id_proyecto = None
##nombre_proyecto = None
class home(TemplateView):
template_name = "home.html"
def get_context_data(self, **kwargs):
global proyectos_list
global model_proyecto
try:
existe_proyecto = False
proyectos_list = get_list_or_404(proyecto, idUsuario=self.request.user)
for project in proyectos_list:
if project == model_proyecto:
existe_proyecto = True
if not (existe_proyecto):
model_proyecto = None
except:
# print traceback.format_exc()
proyectos_list = None
model_proyecto = None
return {'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos}
class RegistrarUsuario(FormView):
template_name = "registrarUsuario.html"
form_class = FormularioRegistrarUsuario
success_url = reverse_lazy('RegistrarUsuarios')
def form_valid(self, form):
user = form.save()
messages.success(self.request, "Se ha creado exitosamente el usuario")
return redirect('login')
def cambia_mensaje(crfsession, proyecto, usuario, borrar, mensaje, valor):
# print ">>>> AQUI ESTOY"+str(borrar)+" & "+str(mensaje)
try:
cache_key = "%s_%s_%s" % (crfsession,proyecto.replace(" ",""),usuario)
data = cache.get(cache_key)
if data:
data['estado'] = valor
data['mensaje'] += mensaje
if borrar :
data['mensaje'] = mensaje
cache.set(cache_key, data)
else:
cache.set(cache_key, {
'estado': 0,
'mensaje' : mensaje
})
except:
pass
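# Sketch of the progress-cache entry written above (hypothetical values): the
# key "<csrf>_<project-without-spaces>_<user>" maps to a dict such as
#   {'estado': 42, 'mensaje': "<p class='text-success'>...</p>"}
# where 'estado' is the percent complete and 'mensaje' accumulates HTML log
# lines unless borrar (clear) is set.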
@login_required
def nuevo_proyecto(request):
global id_proyecto
global model_proyecto
global proyectos_list
if request.method == 'POST':
form = FormularioCrearProyecto(request.POST)
fraseB = request.POST.get('fraseB')
fraseA = request.POST.get('fraseA')
autor = request.POST.get('autor')
words = request.POST.get('words')
before = request.POST.get('before')
after = request.POST.get('after')
limArxiv = request.POST.get('limArxiv')
limSco = request.POST.get('limSco')
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"",0)
busqueda = fraseB + "," + words + "," + fraseA + "," + autor + "," + before + "," + after
# print "busca "+busqueda+", by japeto"
if form.is_valid():
nombreDirectorio = form.cleaned_data['nombre']
articulos = {}
modelo_proyecto = form.save(commit=False)
modelo_proyecto.idUsuario = request.user
# print "formulario valido, by japeto"
# print "2"
# proyectos_list = get_list_or_404(proyecto, idUsuario=request.user)
# proyectos_list = get_list_or_404(proyecto, idUsuario=request.user)
#modelo_proyecto.calificacion=5
modelo_proyecto.fraseBusqueda = busqueda
modelo_proyecto.save()
proyectos_list = get_list_or_404(proyecto, idUsuario=request.user)
model_proyecto = get_object_or_404(proyecto, id_proyecto=modelo_proyecto.id_proyecto)
id_proyecto = model_proyecto.id_proyecto
            # Create the directory where the documents of the newly created project will be stored.
mensajes_pantalla="<p class='text-primary'><span class='fa fa-send fa-fw'></span>Se ha creado el Directorio para el proyecto</p>"
funciones.CrearDirectorioProyecto(modelo_proyecto.id_proyecto, request.user)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,mensajes_pantalla,6)
# print "se crea directorio, by japeto"
if fraseB != "":
try:
"""
                Download documents from arXiv
"""
# print "descarga de documentos, by japeto"
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Descarga de documentos de Arxiv</p>",12)
articulos_arxiv= ConsumirServicios.consumir_arxiv(fraseB, request.user.username, str(modelo_proyecto.id_proyecto), limArxiv)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Descarga de documentos terminada</p>",18)
except:
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA: </b>Descarga de documentos de Arxiv</p>",12)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"STOP",0)
print traceback.format_exc()
try:
"""
                Download documents from Scopus
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Descarga de documentos de Scopus</p>",24)
articulos_scopus = ConsumirServicios.consumir_scopus(fraseB, request.user.username, str(modelo_proyecto.id_proyecto), limSco)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Descarga de documentos terminada</p>",30)
except:
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA: </b>Descarga de documentos de Scopus</p>",24)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"STOP",0)
print traceback.format_exc()
try:
"""
                Insert arXiv metadata into the database
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inica la inserción de metadatos Arxiv</p>",36)
xml = open(REPOSITORY_DIR+ str(request.user.username)+ "." + str(modelo_proyecto.id_proyecto) + "/salida.xml")
procesamientoArxiv.insertar_metadatos_bd(xml, str(modelo_proyecto.id_proyecto))
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>La inserción de metadatos Arxiv ha terminado</p>",42)
except:
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",36)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b>La inserción de metadatos Arxiv no se puede completar</p>",36)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",36)
# print traceback.format_exc()
try:
"""
                Connect to the database to insert Scopus paper metadata
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inica la inserción de metadatos Scopus</p>",48)
busqueda = open(REPOSITORY_DIR+ str(request.user.username)+ "." + str(modelo_proyecto.id_proyecto) + "/busqueda0.xml")
procesamientoScopusXml.xml_to_bd(busqueda, modelo_proyecto.id_proyecto, articulos_scopus['titulos'])
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>La inserción de metadatos Scopus ha terminado</p>",54)
except:
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",48)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b>La inserción de metadatos Scopus no se puede completar</p>",48)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",48)
# print traceback.format_exc()
# try:
# """
# NAIVE BAYES
# """
# #ConsumirServicios.consumir_recuperacion_unidades_academicas(str(request.user.username),str(modelo_proyecto.id_proyecto))
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia el procesado Scopus XML</ṕ>",60)
# procesamientoScopusXml.xml_to_bd(busqueda, modelo_proyecto.id_proyecto, articulos_scopus['titulos'])
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>El procesmiento Scopus XML ha terminado</p>",62)
# except:
# # cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",60)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b> El procesando Scopus XML no se puede completar</p>",60)
# # cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",60)
# # print traceback.format_exc()
try:
"""
                Generate the XML output file
"""
admin =AdminBD()
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia convertidor archivo de XML</ṕ>",60)
#papers = admin.getPapers(modelo_proyecto.id_proyecto)
adminBD = AdminBD()
papers =adminBD.getPapers(modelo_proyecto.id_proyecto)
target = open(REPOSITORY_DIR+ str(request.user.username)+ "." + str(modelo_proyecto.id_proyecto) + "/busqueda1.xml", 'w')
target.write(funciones.papersToXML(papers))
target.close()
# print str(funciones.papersToXML(papers))
# funciones.papersToXML(papers).write(REPOSITORY_DIR+ str(request.user.username)+ "." + str(modelo_proyecto.id_proyecto) + "/busqueda1.xml")
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>termina el convertidor archivo de XML</ṕ>",60)
except:
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",64)
print traceback.format_exc()
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b>Error al convertir archivo de XML</p>",64)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",64)
# print traceback.format_exc()
try:
"""
                Indexing
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia la indexación</label></p>",64)
ir = ConsumirServicios.IR()
ir.indexar(str(request.user.username),str(modelo_proyecto.id_proyecto))
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Indexacion terminada</p>",68)
except:
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",64)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b>La indexación no se puede completar</p>",64)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",64)
# print traceback.format_exc()
try:
""""
Analisis
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia el Analisis</p>",66)
data = ConsumirServicios.consumir_analisis(str(request.user.username),str(modelo_proyecto.id_proyecto))
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Analisis terminado</p>",68)
except:
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",66)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b> El Analisis no se puede completar</p>",66)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",66)
# print traceback.format_exc()
try:
"""
                Social network analysis
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia el Analisis de Redes Sociales</p>",70)
network = ConsumirServicios.consumir_red(str(request.user.username),str(modelo_proyecto.id_proyecto))
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Analisis de Redes Sociales terminado</p>",72)
except:
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",70)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b>El Analisis de Redes Sociales no se puede completar</p>",70)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",70)
#print traceback.format_exc()
try:
"""
                Retrieval of academic units
"""
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia la recuperacion de unidades academicas</p>",10)
# ConsumirServicios.consumir_recuperacion_unidades_academicas(str(request.user.username),str(modelo_proyecto.id_proyecto))
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Finaliza la recuperacion de unidades academicas</p>",10)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",80)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Se ha creado el proyecto</p>",90)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Su navegador se reiniciara</p>",97)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"EOF",100)
except:
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b> la recuperacion de unidades academicas no se puede completar: {}</p>".format(traceback.format_exc()),80)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"EOF",100)
# print traceback.format_exc()
# messages.success(request, "Se ha creado exitosamente el proyecto")
#articulos = funciones.buscadorSimple(fraseB)
#ac = AdministradorConsultas()
#ac.descargar_papers(fraseB)
#lista_scopus = ac.titulos_descargas
#if fraseA != "" or autor != "" or words != "":
# articulos = funciones.buscadorAvanzado(fraseA, words, autor, after, before)
#print articulos
#funciones.moveFiles(modelo_proyecto.id_proyecto, request.user, articulos, lista_scopus)
#funciones.escribir_archivo_documentos(modelo_proyecto.id_proyecto, request.user, articulos, lista_scopus)
# messages.success(request, "Se ha creado exitosamente el proyecto")
#~ return redirect('crear_proyecto')
else:
messages.error(request, "Imposible crear el proyecto")
else:
form = FormularioCrearProyecto()
return render(request, 'GestionProyecto/NuevoProyecto.html', {'form': form,
'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos}, context_instance=RequestContext(request))
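# Assumed signature of the cambia_mensaje helper defined earlier in this
# module, inferred from its call sites above:
#   cambia_mensaje(csrf_token, search_phrase, username, reset_flag,
#                  html_message, progress_percent)
# It stores/updates the progress entry in the cache that logmensajes
# (further below) polls; the exact meaning of reset_flag is an assumption.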
#Lists the projects owned by the current user.
@login_required
def ver_mis_proyectos(request):
global model_proyecto
global proyectos_list
try:
proyectos_list = get_list_or_404(proyecto, idUsuario=request.user)
except:
proyectos_list =None
messages.success(request, "Usted no tiene proyectos")
return render(request, 'GestionProyecto/verMisProyectos.html', {'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos}, context_instance=RequestContext(request))
#Lists public projects that do not belong to the current user.
@login_required
def ver_otros_proyectos(request):
global model_proyecto
    global proyectos_list
if (model_proyecto != None and model_proyecto.idUsuario != request.user):
model_proyecto = None
try:
proyectos_list_all = get_list_or_404(proyecto)
idUser = request.user
otros_proyectos = []
for project in proyectos_list_all:
if project.idUsuario != idUser:
otros_proyectos.append(project)
except:
proyectos_list_all =None
otros_proyectos = None
return render(request, 'GestionProyecto/OtrosProyectos.html', {
'proyectos': otros_proyectos, 'proyectos_user':proyectos_list, 'mproyecto': model_proyecto}, context_instance=RequestContext(request))
@login_required
def busqueda_navegacion(request):
global proyectos_list
global model_proyecto
return render(request, 'GestionBusqueda/Busqueda_Navegacion.html', {'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def editar_proyecto(request, id_proyecto):
global proyectos_list
global model_proyecto
model_proyecto = get_object_or_404(proyecto, id_proyecto=id_proyecto)
request.session['proyecto']= str(model_proyecto.id_proyecto)
request.proyecto = model_proyecto
# print "This is my project:",request.session['proyecto']
lista = funciones.crearListaDocumentos(id_proyecto, request.user)
if request.method == 'POST':
proyecto_form = FormularioCrearProyecto(request.POST, instance=model_proyecto)
#proyecto_form.fields['disponibilidad'].widget.attrs['disabled']=True
        if proyecto_form.is_valid():
#print proyecto_form.cleaned_data
#nuevoNombre=proyecto_form.cleaned_data['nombre']
model_project = proyecto_form.save()
# funciones.cambiarNombreDirectorio(nombreDirectorioAnterior,nuevoNombre,request.user)
messages.success(request, "Se ha modificado exitosamente el proyecto")
else:
messages.error(request, "Imposible editar el proyecto")
else:
proyecto_form = FormularioCrearProyecto(instance=model_proyecto)
return render(request, 'GestionProyecto/editar_proyecto.html',
{'form': proyecto_form, 'lista': lista, 'user': request.user, 'mproyecto':model_proyecto, 'proyectos_user': proyectos_list, 'proyecto': id_proyecto, 'lista_permisos': permisos},
context_instance=RequestContext(request))
@login_required
def ver_proyecto(request, id_proyecto):
global model_proyecto
global proyectos_list
proyecto_actual = None
proyecto_actual = get_object_or_404(proyecto, id_proyecto=id_proyecto)
proyecto_form = FormularioCrearProyecto(instance=proyecto_actual)
if (model_proyecto != None and model_proyecto.idUsuario != request.user):
model_proyecto = None
#model_proyecto = get_object_or_404(proyecto, id_proyecto=id_proyecto)
#proyecto_form = FormularioCrearProyecto(instance=model_proyecto)
#proyecto_form.fields['disponibilidad'].widget.attrs['disabled']=True
#proyecto_form.fields['nombre'].label="Titulo del proyecto"
proyecto_form.fields['nombre'].widget.attrs['disabled'] = True
proyecto_form.fields['resumen'].widget.attrs['disabled'] = True
return render(request, 'GestionProyecto/ver_proyecto.html', {'form': proyecto_form, 'mproyecto':model_proyecto, 'proyectos_user':proyectos_list, 'lista_permisos': permisos},
context_instance=RequestContext(request))
@login_required
def buscador(request):
global proyectos_list
global model_proyecto
if request.method == 'GET':
ir = ConsumirServicios.IR()
fraseBusqueda = request.GET.get("busquedaIR")
data = ir.consultar(fraseBusqueda,str(request.user.username), str(model_proyecto.id_proyecto))
# print model_proyecto
# IR.consultar(fraseBusqueda,"","")
# data = ir.consultar(fraseBusqueda,str(request.user.username),request.session['proyecto'])
#data = funciones.busqueda(fraseBusqueda)
#for d in data:
# d['path'] = d['path'].replace("/home/vigtech/shared/repository/", "/media/").encode("utf8")
# print data
# print fraseBusqueda
return render(request, "GestionBusqueda/Busqueda_Navegacion.html", {'resultados': data, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
    else:
        # Non-GET request: `data` is only defined in the GET branch above,
        # so render the page without results instead of raising a NameError.
        return render(request, "GestionBusqueda/Busqueda_Navegacion.html", {'resultados': [], 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisisView(request):
global proyectos_list
global model_proyecto
#data = ConsumirServicios.consumir_red(request.user.username, request.session['proyecto'])
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
with open(REPOSITORY_DIR + proyecto + "/coautoria.json") as json_file:
data = json.load(json_file)
#nodos, aristas = r.generar_json()
nodos1 = json.dumps(data['nodes'])
aristas1 = json.dumps(data['links'])
# return render(request, "GestionAnalisis/coautoria.html", {"nodos": nodos1, "aristas": aristas1})
return render(request, "GestionAnalisis/coautoria.html", {"nodos": nodos1, "aristas": aristas1, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto,'lista_permisos': permisos})
#return render(request, "GestionAnalisis/coautoria2.html", {"proyecto":proyecto})
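# Sketch of the per-project coautoria.json layout assumed by the views above:
# only its 'nodes' and 'links' keys are read, matching the d3 force-layout
# convention. The inner node/link fields shown here are an assumption:
#   {"nodes": [{"name": "Author A"}, ...],
#    "links": [{"source": 0, "target": 1, "value": 2}, ...]}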
@login_required
def coautoria_old(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
with open(REPOSITORY_DIR + proyecto + "/coautoria.json") as json_file:
data = json.load(json_file)
#nodos, aristas = r.generar_json()
nodos1 = json.dumps(data['nodes'])
aristas1 = json.dumps(data['links'])
# return render(request, "GestionAnalisis/coautoria.html", {"nodos": nodos1, "aristas": aristas1})
return render(request, "GestionAnalisis/Analisis.html", {"nodos": nodos1, "aristas": aristas1, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def eliminar_proyecto(request, id_proyecto):
global model_proyecto
global proyectos_list
    try:
        # Function-based view: there is no `self` here, so the request
        # object must be used directly (the original `self.request`
        # raised a NameError).
        proyectos_list = get_list_or_404(proyecto, idUsuario=request.user)
        model_proyecto = get_object_or_404(proyecto, id_proyecto=str(request.session['proyecto']))
    except:
        proyectos_list = None
        model_proyecto = None
user = request.user
project = get_object_or_404(proyecto, id_proyecto=id_proyecto)
funciones.eliminar_proyecto(id_proyecto, user)
project.delete()
messages.success(request, "El proyecto \""+project.nombre+"\" se elimino.")
return HttpResponseRedirect(reverse('ver_mis_proyectos'))
@login_required
def analisis_paises(request):
global proyectos_list
global model_proyecto
# print model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
with open(REPOSITORY_DIR+ proyecto + "/data.json") as json_file:
data = json.load(json_file)
# print data
labels=json.dumps(data['paises']['labels'])
values=json.dumps(data['paises']['valores'])
# print proyecto
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/paisesbar.html",{"proyecto":proyecto, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_autores(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/autoresbar.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_afiliaciones(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/afiliacionesbar.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_revistas(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/revistasbar.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_docsfechas(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/fechasbar.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_tipodocs(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/tiposbar.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_paisespie(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/paisespie.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_autorespie(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/autorespie.html",{"proyecto":proyecto, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_afiliacionespie(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/afiliacionespie.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_revistaspie(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/revistaspie.html",{"proyecto":proyecto, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_docsfechaspie(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/fechaspie.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_tipodocspie(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/tipospie.html",{"proyecto":proyecto, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_clustering(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
return render(request, "GestionAnalisis/grupos.html",{"proyecto":proyecto, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
@login_required
def analisis_indicadores(request):
global proyectos_list
global model_proyecto
try:
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
with open(REPOSITORY_DIR + proyecto + "/data.json") as json_file:
data = json.load(json_file)
return render(request, "GestionAnalisis/indicadores.html",{"data":data, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
print traceback.format_exc()
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
# print data
#labels=json.dumps(data['paises']['labels'])
#values=json.dumps(data['paises']['valores'])
#print proyecto
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
#return render(request, "GestionAnalisis/indicadores.html",{"data":data, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto})
@login_required
def clasificacion_eisc(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
with open(REPOSITORY_DIR + proyecto + "/eisc.json") as json_file:
data = json.load(json_file)
eids = data['clasificacion']
if eids :
adminBD = AdminBD()
papers =adminBD.get_papers_eid(eids)
return render (request, "GestionEISC/clasificacion_eisc.html", {"papers": papers, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
else:
return render (request, "GestionEISC/clasificacion_eisc.html", {"papers": [], 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
def logmensajes(request):
    """
    Lets the client poll the status of the background
    project-creation process.
    """
    try:
        cache_key = "%s_%s_%s" % (request.GET.get('csrfmiddlewaretoken'), request.GET.get('fraseB').replace(" ", ""), request.user.username)
        data = json.dumps(cache.get(cache_key))
        print cache.get(cache_key)['estado']
        # Keep the progress percentage but clear the message so it is only delivered once.
        cache.set(cache_key, {'estado': cache.get(cache_key)['estado'], 'mensaje': ""})
    except:
        print "problem reading the progress cache"
        cambia_mensaje(request.POST.get('csrfmiddlewaretoken'), request.POST.get('fraseB'), request.user.username, False, "", 0)
        data = json.dumps(None)  # make sure `data` is defined before returning
    return HttpResponse(data, content_type="application/json")
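# Example of the JSON payload logmensajes returns (a sketch: the keys come
# from the cache entries written by cambia_mensaje, the values are illustrative):
#   {"estado": 70, "mensaje": "<p class='text-primary'>...</p>"}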
# Permission configuration --links to display (alozada)
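# The module-level `permisos` dict (defined earlier in this file) maps feature
# keys to 0/1 flags that the templates use to show or hide links. Its assumed
# shape, with every key toggled by the view below:
#   permisos = {"indicadores": 1, "graficos_barra": 1, "graficos_pie": 1,
#               "estadisticas": 1, "coautoria": 1, "coautoria_medidas": 1,
#               "clustering": 1, "clasificacion_eisc": 1}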
@login_required
def configurar_permisos(request):
global model_proyecto
global proyectos_list
# print permisos["estadisticas"]
try:
proyectos_list = get_list_or_404(proyecto, idUsuario=request.user)
except:
proyectos_list =None
messages.success(request, "Usted no tiene proyectos")
if request.method == 'POST':
if 'cbIndicadores' in request.POST:
permisos["indicadores"] = 1
else:
permisos["indicadores"] = 0
if 'graficos_barra' in request.POST:
permisos["graficos_barra"] = 1
else:
permisos["graficos_barra"] = 0
if 'graficos_pie' in request.POST:
permisos["graficos_pie"] = 1
else:
permisos["graficos_pie"] = 0
        if not ('cbIndicadores' in request.POST and 'graficos_barra' in request.POST and 'graficos_pie' in request.POST):
            print "not all statistics boxes checked"
            permisos["estadisticas"] = 0
        else:
            print "all statistics boxes checked"
            permisos["estadisticas"] = 1
if 'coautoria' in request.POST:
permisos["coautoria"] = 1
else:
permisos["coautoria"] = 0
if 'coautoria_medidas' in request.POST:
permisos["coautoria_medidas"] = 1
else:
permisos["coautoria_medidas"] = 0
if 'clustering' in request.POST:
permisos["clustering"] = 1
else:
permisos["clustering"] = 0
if 'clasificacion_eisc' in request.POST:
permisos["clasificacion_eisc"] = 1
else:
permisos["clasificacion_eisc"] = 0
return render(request, 'configurar_permisos.html', {'proyectos_user': proyectos_list, 'lista_permisos': permisos, 'mproyecto': model_proyecto}, context_instance=RequestContext(request))
# def registrarusuario(request):
# if request.method == 'GET':
# return render(request, "registrarUsuario.html")
# elif request.method == 'POST':
# data = request.POST.get('nombre')
# print data
# # messages.success(request, "Se ha creado exitosamente el usuario")
# # return redirect('login')
# return render (request, "registrarUsuario.html", {"response": data})
# else:
# return render(request, "registrarUsuario.html")
|
VigTech/Vigtech-Services
|
principal/views.py
|
Python
|
lgpl-3.0
| 45,303 | 0.014399 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from keystone import catalog
from keystone.common import manager
from keystone.tests import unit
class TestCreateLegacyDriver(unit.BaseTestCase):
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_class_is_properly_deprecated(self, mock_reporter):
Driver = manager.create_legacy_driver(catalog.CatalogDriverV8)
# NOTE(dstanek): I want to subvert the requirement for this
# class to implement all of the abstract methods.
Driver.__abstractmethods__ = set()
impl = Driver()
details = {
'as_of': 'Liberty',
'what': 'keystone.catalog.core.Driver',
'in_favor_of': 'keystone.catalog.core.CatalogDriverV8',
'remove_in': mock.ANY,
}
mock_reporter.assert_called_with(mock.ANY, mock.ANY, details)
self.assertEqual('N', mock_reporter.call_args[0][2]['remove_in'][0])
self.assertIsInstance(impl, catalog.CatalogDriverV8)
|
cernops/keystone
|
keystone/tests/unit/common/test_manager.py
|
Python
|
apache-2.0
| 1,531 | 0 |
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.storage import default_storage as default_messages_storage
from django.db import connection
from django.test import RequestFactory
from django.test.utils import CaptureQueriesContext
from django.urls import reverse
from django.utils.dateformat import DateFormat
from unittest import mock
from pyquery import PyQuery as pq
from olympia import amo, core
from olympia.abuse.models import AbuseReport
from olympia.activity.models import ActivityLog
from olympia.addons.models import AddonUser
from olympia.amo.tests import (
addon_factory,
collection_factory,
TestCase,
user_factory,
version_factory,
)
from olympia.api.models import APIKey, APIKeyConfirmation
from olympia.bandwagon.models import Collection
from olympia.ratings.models import Rating
from olympia.reviewers.models import ReviewerScore
from olympia.users.admin import UserAdmin
from olympia.users.models import (
EmailUserRestriction,
IPNetworkUserRestriction,
UserProfile,
UserRestrictionHistory,
)
class TestUserAdmin(TestCase):
def setUp(self):
self.user = user_factory()
self.list_url = reverse('admin:users_userprofile_changelist')
self.detail_url = reverse(
'admin:users_userprofile_change', args=(self.user.pk,)
)
self.delete_url = reverse(
'admin:users_userprofile_delete', args=(self.user.pk,)
)
def test_search_for_multiple_users(self):
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
another_user = user_factory()
response = self.client.get(
self.list_url,
{'q': f'{self.user.pk},{another_user.pk},foobaa'},
follow=True,
)
assert response.status_code == 200
doc = pq(response.content)
assert str(self.user.pk) in doc('#result_list').text()
assert str(another_user.pk) in doc('#result_list').text()
def test_search_for_multiple_user_ids(self):
"""Test the optimization when just searching for matching ids."""
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
another_user = user_factory()
with CaptureQueriesContext(connection) as queries:
response = self.client.get(
self.list_url,
{'q': f'{self.user.pk},{another_user.pk}'},
follow=True,
)
queries_str = '; '.join(q['sql'] for q in queries.captured_queries)
in_sql = f'`users`.`id` IN ({self.user.pk}, {another_user.pk})'
assert in_sql in queries_str
assert len(queries.captured_queries) == 6
assert response.status_code == 200
doc = pq(response.content)
assert str(self.user.pk) in doc('#result_list').text()
assert str(another_user.pk) in doc('#result_list').text()
def test_search_ip_as_int_isnt_considered_an_ip(self):
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
self.user.update(last_login_ip='127.0.0.1')
response = self.client.get(self.list_url, {'q': '2130706433'}, follow=True)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('#result_list tbody tr')
assert not doc('.column-_ratings_all__ip_address')
def test_search_for_single_ip(self):
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
user_factory(last_login_ip='127.0.0.1') # Extra user that shouldn't match
self.user.update(email='foo@bar.com', last_login_ip='127.0.0.2') # Will match
response = self.client.get(self.list_url, {'q': '127.0.0.2'}, follow=True)
assert response.status_code == 200
doc = pq(response.content)
# Make sure it's the right user.
assert doc('.field-email').text() == self.user.email
# Make sure last login is now displayed, and has the right value.
assert doc('.field-last_login_ip').text() == '127.0.0.2'
def test_search_for_single_ip_multiple_results_for_different_reasons(self):
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
extra_user = user_factory(
email='extra@bar.com', last_login_ip='127.0.0.1'
) # Extra user that matches but not thanks to their last_login_ip...
UserRestrictionHistory.objects.create(user=extra_user, ip_address='127.0.0.2')
extra_extra_user = user_factory(email='extra_extra@bar.com')
UserRestrictionHistory.objects.create(
user=extra_extra_user, last_login_ip='127.0.0.2'
)
self.user.update(email='foo@bar.com', last_login_ip='127.0.0.2')
response = self.client.get(self.list_url, {'q': '127.0.0.2'}, follow=True)
assert response.status_code == 200
doc = pq(response.content)
# Make sure it's the right users.
assert doc('.field-email').text() == ' '.join(
[
extra_extra_user.email,
extra_user.email,
self.user.email,
]
)
# Make sure last login is now displayed, and has the right values.
assert doc('.field-last_login_ip').text() == '127.0.0.1 127.0.0.1 127.0.0.2'
# Same for the others that match
assert doc('.field-restriction_history__ip_address').text() == '- 127.0.0.2 -'
assert (
doc('.field-restriction_history__last_login_ip').text() == '127.0.0.2 - -'
)
def test_search_for_multiple_ips(self):
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
self.user.update(email='foo@bar.com', last_login_ip='127.0.0.2')
response = self.client.get(
self.list_url, {'q': '127.0.0.2,127.0.0.3'}, follow=True
)
assert response.status_code == 200
doc = pq(response.content)
# Make sure it's the right user.
assert doc('.field-email').text() == self.user.email
# Make sure last login is now displayed, and has the right value.
assert doc('.field-last_login_ip').text() == '127.0.0.2'
def test_search_for_multiple_ips_with_deduplication(self):
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
# Will match once with the last_login
self.user.update(email='foo@bar.com', last_login_ip='127.0.0.2')
# Will match twice: once with the last login, once with the restriction history
# ip_address. Only one result will be shown since the 2 rows would be the same.
extra_user = user_factory(email='extra@bar.com', last_login_ip='127.0.0.2')
UserRestrictionHistory.objects.create(user=extra_user, ip_address='127.0.0.2')
# Will match 4 times: last_login, restriction history (last_login_ip and
# ip_address), ratings ip_address. There will be 2 results shown because of the
# 2 different user restriction history matching.
extra_extra_user = user_factory(
email='extra_extra@bar.com', last_login_ip='127.0.0.3'
)
UserRestrictionHistory.objects.create(
user=extra_extra_user, last_login_ip='127.0.0.2', ip_address='10.0.0.42'
)
UserRestrictionHistory.objects.create(
user=extra_extra_user, ip_address='127.0.0.2', last_login_ip='10.0.0.36'
)
addon = addon_factory()
Rating.objects.create(
user=extra_extra_user,
rating=4,
ip_address='127.0.0.3',
addon=addon,
version=addon.current_version,
)
response = self.client.get(
self.list_url, {'q': '127.0.0.2,127.0.0.3'}, follow=True
)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('#result_list tbody tr')) == 4
# Make sure it's the right users.
assert doc('.field-email').text() == ' '.join(
[
extra_extra_user.email,
extra_extra_user.email,
extra_user.email,
self.user.email,
]
)
def test_search_for_mixed_strings(self):
        # IP search is deactivated if the search terms don't all look like IPs
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
user_factory(last_login_ip='127.0.0.2')
self.user.update(email='foo@bar.com', last_login_ip='127.0.0.2')
response = self.client.get(self.list_url, {'q': 'blah,127.0.0.2'}, follow=True)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('#result_list tbody tr')) == 0
def test_can_not_edit_without_users_edit_permission(self):
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Addons:Edit')
self.client.login(email=user.email)
response = self.client.get(self.detail_url, follow=True)
assert response.status_code == 403
response = self.client.post(
self.detail_url, {'username': 'foo', 'email': self.user.email}, follow=True
)
assert response.status_code == 403
assert self.user.reload().username != 'foo'
def test_can_edit_with_users_edit_permission(self):
old_username = self.user.username
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
core.set_user(user)
response = self.client.get(self.detail_url, follow=True)
assert response.status_code == 200
response = self.client.post(
self.detail_url, {'username': 'foo', 'email': self.user.email}, follow=True
)
assert response.status_code == 200
assert self.user.reload().username == 'foo'
alog = ActivityLog.objects.latest('pk')
assert alog.action == amo.LOG.ADMIN_USER_EDITED.id
assert alog.arguments == [self.user]
assert alog.details == {'username': [old_username, 'foo']}
@mock.patch.object(UserProfile, '_delete_related_content')
def test_can_not_delete_with_users_edit_permission(
self, _delete_related_content_mock
):
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
assert not user.deleted
self.client.login(email=user.email)
response = self.client.get(self.delete_url, follow=True)
assert response.status_code == 403
response = self.client.post(self.delete_url, {'post': 'yes'}, follow=True)
assert response.status_code == 403
user.reload()
assert not user.deleted
assert user.email
assert _delete_related_content_mock.call_count == 0
@mock.patch.object(UserProfile, '_delete_related_content')
def test_can_delete_with_admin_advanced_permission(
self, _delete_related_content_mock
):
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Admin:Advanced')
assert not self.user.deleted
self.client.login(email=user.email)
core.set_user(user)
response = self.client.get(self.delete_url, follow=True)
assert response.status_code == 200
assert b'Cannot delete user' not in response.content
response = self.client.post(self.delete_url, {'post': 'yes'}, follow=True)
assert response.status_code == 200
self.user.reload()
assert self.user.deleted
assert self.user.email
assert _delete_related_content_mock.call_count == 1
alog = ActivityLog.objects.latest('pk')
assert alog.action == amo.LOG.ADMIN_USER_ANONYMIZED.id
assert alog.arguments == [self.user]
def test_can_delete_with_related_objects_with_admin_advanced_permission(self):
# Add related instances...
addon = addon_factory()
addon_with_other_authors = addon_factory()
AddonUser.objects.create(addon=addon_with_other_authors, user=user_factory())
relations_that_should_be_deleted = [
AddonUser.objects.create(addon=addon_with_other_authors, user=self.user),
Rating.objects.create(addon=addon_factory(), rating=5, user=self.user),
addon, # Has no other author, should be deleted.
collection_factory(author=self.user),
]
relations_that_should_survive = [
AbuseReport.objects.create(reporter=self.user),
AbuseReport.objects.create(user=self.user),
ActivityLog.create(user=self.user, action=amo.LOG.USER_EDITED),
ReviewerScore.objects.create(user=self.user, score=42),
addon_with_other_authors, # Has other authors, should be kept.
# Bit of a weird case, but because the user was the only author of
# this add-on, the addonuser relation is kept, and both the add-on
# and the user are soft-deleted. This is in contrast with the case
# where the user is *not* the only author, in which case the
# addonuser relation is deleted, but the add-on is left intact.
AddonUser.objects.create(addon=addon, user=self.user),
]
# Now test as normal.
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Admin:Advanced')
assert not self.user.deleted
self.client.login(email=user.email)
core.set_user(user)
response = self.client.get(self.delete_url, follow=True)
assert response.status_code == 200
assert b'Cannot delete user' not in response.content
response = self.client.post(self.delete_url, {'post': 'yes'}, follow=True)
assert response.status_code == 200
self.user.reload()
assert self.user.deleted
assert self.user.email
alog = ActivityLog.objects.filter(action=amo.LOG.ADMIN_USER_ANONYMIZED.id).get()
assert alog.arguments == [self.user]
# Test the related instances we created earlier.
for obj in relations_that_should_be_deleted:
assert not obj.__class__.objects.filter(pk=obj.pk).exists()
for obj in relations_that_should_survive:
assert obj.__class__.objects.filter(pk=obj.pk).exists()
def test_get_actions(self):
user_admin = UserAdmin(UserProfile, admin.site)
request = RequestFactory().get('/')
request.user = AnonymousUser()
assert list(user_admin.get_actions(request).keys()) == []
request.user = user_factory()
self.grant_permission(request.user, 'Users:Edit')
assert list(user_admin.get_actions(request).keys()) == [
'ban_action',
'reset_api_key_action',
'reset_session_action',
]
def test_ban_action(self):
another_user = user_factory()
a_third_user = user_factory()
users = UserProfile.objects.filter(pk__in=(another_user.pk, self.user.pk))
user_admin = UserAdmin(UserProfile, admin.site)
request = RequestFactory().get('/')
request.user = user_factory()
core.set_user(request.user)
request._messages = default_messages_storage(request)
user_admin.ban_action(request, users)
# Both users should be banned.
another_user.reload()
self.user.reload()
assert another_user.deleted
assert another_user.email
assert self.user.deleted
assert self.user.email
# The 3rd user should be unaffected.
assert not a_third_user.reload().deleted
# We should see 2 activity logs for banning.
assert (
ActivityLog.objects.filter(action=amo.LOG.ADMIN_USER_BANNED.id).count() == 2
)
def test_ban_button_in_change_view(self):
ban_url = reverse('admin:users_userprofile_ban', args=(self.user.pk,))
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
response = self.client.get(self.detail_url, follow=True)
assert response.status_code == 200
assert ban_url in response.content.decode('utf-8')
def test_reset_api_key_action(self):
another_user = user_factory()
a_third_user = user_factory()
APIKey.objects.create(user=self.user, is_active=True, key='foo')
APIKeyConfirmation.objects.create(user=self.user)
APIKeyConfirmation.objects.create(user=another_user)
APIKey.objects.create(user=a_third_user, is_active=True, key='bar')
APIKeyConfirmation.objects.create(user=a_third_user)
users = UserProfile.objects.filter(pk__in=(another_user.pk, self.user.pk))
user_admin = UserAdmin(UserProfile, admin.site)
request = RequestFactory().get('/')
request.user = user_factory()
core.set_user(request.user)
request._messages = default_messages_storage(request)
user_admin.reset_api_key_action(request, users)
# APIKeys should have been deactivated, APIKeyConfirmation deleted.
assert self.user.api_keys.exists()
assert not self.user.api_keys.filter(is_active=True).exists()
assert not APIKeyConfirmation.objects.filter(user=self.user).exists()
# This user didn't have api keys before, it shouldn't matter.
assert not another_user.api_keys.exists()
assert not another_user.api_keys.filter(is_active=True).exists()
assert not APIKeyConfirmation.objects.filter(user=another_user).exists()
# The 3rd user should be unaffected.
assert a_third_user.api_keys.exists()
assert a_third_user.api_keys.filter(is_active=True).exists()
assert APIKeyConfirmation.objects.filter(user=a_third_user).exists()
# We should see 2 activity logs.
assert (
ActivityLog.objects.filter(action=amo.LOG.ADMIN_API_KEY_RESET.id).count()
== 2
)
def test_reset_session_action(self):
assert self.user.auth_id
another_user = user_factory()
assert another_user.auth_id
a_third_user = user_factory()
assert a_third_user.auth_id
old_auth_id = a_third_user.auth_id
users = UserProfile.objects.filter(pk__in=(another_user.pk, self.user.pk))
user_admin = UserAdmin(UserProfile, admin.site)
request = RequestFactory().get('/')
request.user = user_factory()
core.set_user(request.user)
request._messages = default_messages_storage(request)
user_admin.reset_session_action(request, users)
self.user.reload()
assert self.user.auth_id is None
another_user.reload()
assert another_user.auth_id is None
a_third_user.reload()
assert a_third_user.auth_id == old_auth_id
def test_reset_api_key_button_in_change_view(self):
reset_api_key_url = reverse(
'admin:users_userprofile_reset_api_key', args=(self.user.pk,)
)
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
response = self.client.get(self.detail_url, follow=True)
assert response.status_code == 200
assert reset_api_key_url in response.content.decode('utf-8')
def test_session_button_in_change_view(self):
reset_session_url = reverse(
'admin:users_userprofile_reset_session', args=(self.user.pk,)
)
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
response = self.client.get(self.detail_url, follow=True)
assert response.status_code == 200
assert reset_session_url in response.content.decode('utf-8')
def test_delete_picture_button_in_change_view(self):
delete_picture_url = reverse(
'admin:users_userprofile_delete_picture', args=(self.user.pk,)
)
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Users:Edit')
self.client.login(email=user.email)
response = self.client.get(self.detail_url, follow=True)
assert response.status_code == 200
assert delete_picture_url in response.content.decode('utf-8')
def test_ban(self):
ban_url = reverse('admin:users_userprofile_ban', args=(self.user.pk,))
wrong_ban_url = reverse(
'admin:users_userprofile_ban', args=(self.user.pk + 42,)
)
user = user_factory(email='someone@mozilla.com')
self.client.login(email=user.email)
core.set_user(user)
response = self.client.post(ban_url, follow=True)
assert response.status_code == 403
self.grant_permission(user, 'Users:Edit')
response = self.client.get(ban_url, follow=True)
assert response.status_code == 405 # Wrong http method.
response = self.client.post(wrong_ban_url, follow=True)
assert response.status_code == 404 # Wrong pk.
self.user.reload()
assert not self.user.deleted
response = self.client.post(ban_url, follow=True)
assert response.status_code == 200
assert response.redirect_chain[-1][0].endswith(self.detail_url)
assert response.redirect_chain[-1][1] == 302
self.user.reload()
assert self.user.deleted
assert self.user.email
alog = ActivityLog.objects.latest('pk')
assert alog.action == amo.LOG.ADMIN_USER_BANNED.id
assert alog.arguments == [self.user]
def test_reset_api_key(self):
APIKey.objects.create(user=self.user, is_active=True, key='foo')
APIKeyConfirmation.objects.create(user=self.user)
reset_api_key_url = reverse(
'admin:users_userprofile_reset_api_key', args=(self.user.pk,)
)
wrong_reset_api_key_url = reverse(
'admin:users_userprofile_reset_api_key', args=(self.user.pk + 9,)
)
user = user_factory(email='someone@mozilla.com')
self.client.login(email=user.email)
core.set_user(user)
response = self.client.post(reset_api_key_url, follow=True)
assert response.status_code == 403
self.grant_permission(user, 'Users:Edit')
response = self.client.get(reset_api_key_url, follow=True)
assert response.status_code == 405 # Wrong http method.
response = self.client.post(wrong_reset_api_key_url, follow=True)
assert response.status_code == 404 # Wrong pk.
assert self.user.api_keys.filter(is_active=True).exists()
assert APIKeyConfirmation.objects.filter(user=self.user).exists()
response = self.client.post(reset_api_key_url, follow=True)
assert response.status_code == 200
assert response.redirect_chain[-1][0].endswith(self.detail_url)
assert response.redirect_chain[-1][1] == 302
alog = ActivityLog.objects.latest('pk')
assert alog.action == amo.LOG.ADMIN_API_KEY_RESET.id
assert alog.arguments == [self.user]
# APIKeys should have been deactivated, APIKeyConfirmation deleted.
assert self.user.api_keys.exists()
assert not self.user.api_keys.filter(is_active=True).exists()
assert not APIKeyConfirmation.objects.filter(user=self.user).exists()
def test_reset_session(self):
assert self.user.auth_id
reset_session_url = reverse(
'admin:users_userprofile_reset_session', args=(self.user.pk,)
)
wrong_reset_session_url = reverse(
'admin:users_userprofile_reset_session', args=(self.user.pk + 9,)
)
user = user_factory(email='someone@mozilla.com')
self.client.login(email=user.email)
response = self.client.post(reset_session_url, follow=True)
assert response.status_code == 403
self.grant_permission(user, 'Users:Edit')
response = self.client.get(reset_session_url, follow=True)
assert response.status_code == 405 # Wrong http method.
response = self.client.post(wrong_reset_session_url, follow=True)
assert response.status_code == 404 # Wrong pk.
response = self.client.post(reset_session_url, follow=True)
assert response.status_code == 200
assert response.redirect_chain[-1][0].endswith(self.detail_url)
assert response.redirect_chain[-1][1] == 302
alog = ActivityLog.objects.latest('pk')
assert alog.action == amo.LOG.ADMIN_USER_SESSION_RESET.id
assert alog.arguments == [self.user]
self.user.reload()
assert not self.user.auth_id
@mock.patch.object(UserProfile, 'delete_picture')
def test_delete_picture(self, delete_picture_mock):
delete_picture_url = reverse(
'admin:users_userprofile_delete_picture', args=(self.user.pk,)
)
wrong_delete_picture_url = reverse(
'admin:users_userprofile_delete_picture', args=(self.user.pk + 42,)
)
user = user_factory(email='someone@mozilla.com')
self.client.login(email=user.email)
core.set_user(user)
response = self.client.post(delete_picture_url, follow=True)
assert response.status_code == 403
self.grant_permission(user, 'Users:Edit')
response = self.client.get(delete_picture_url, follow=True)
assert response.status_code == 405 # Wrong http method.
response = self.client.post(wrong_delete_picture_url, follow=True)
assert response.status_code == 404 # Wrong pk.
assert delete_picture_mock.call_count == 0
response = self.client.post(delete_picture_url, follow=True)
assert response.status_code == 200
assert response.redirect_chain[-1][0].endswith(self.detail_url)
assert response.redirect_chain[-1][1] == 302
assert delete_picture_mock.call_count == 1
alog = ActivityLog.objects.latest('pk')
assert alog.action == amo.LOG.ADMIN_USER_PICTURE_DELETED.id
assert alog.arguments == [self.user]
def test_picture_img(self):
model_admin = UserAdmin(UserProfile, admin.site)
assert self.user.picture_url.endswith('anon_user.png')
assert (
model_admin.picture_img(self.user)
== '<img src="%s" />' % self.user.picture_url
)
self.user.update(picture_type='image/png')
assert (
model_admin.picture_img(self.user)
== '<img src="%s" />' % self.user.picture_url
)
def test_known_ip_adresses(self):
self.user.update(last_login_ip='127.1.2.3')
Rating.objects.create(
addon=addon_factory(), user=self.user, ip_address='127.1.2.3'
)
dummy_addon = addon_factory()
Rating.objects.create(
addon=dummy_addon,
version=dummy_addon.current_version,
user=self.user,
ip_address='128.1.2.3',
)
Rating.objects.create(
addon=dummy_addon,
version=version_factory(addon=dummy_addon),
user=self.user,
ip_address='129.1.2.4',
)
Rating.objects.create(
addon=addon_factory(), user=self.user, ip_address='130.1.2.4'
)
Rating.objects.create(
addon=addon_factory(), user=self.user, ip_address='130.1.2.4'
)
Rating.objects.create(
addon=dummy_addon, user=user_factory(), ip_address='255.255.0.0'
)
with core.override_remote_addr('15.16.23.42'):
ActivityLog.create(amo.LOG.ADD_VERSION, dummy_addon, user=self.user)
UserRestrictionHistory.objects.create(user=self.user, last_login_ip='4.8.15.16')
UserRestrictionHistory.objects.create(user=self.user, ip_address='172.0.0.2')
model_admin = UserAdmin(UserProfile, admin.site)
doc = pq(model_admin.known_ip_adresses(self.user))
result = doc('ul li').text().split()
assert len(result) == 7
assert set(result) == {
'130.1.2.4',
'128.1.2.3',
'129.1.2.4',
'127.1.2.3',
'15.16.23.42',
'172.0.0.2',
'4.8.15.16',
}
# Duplicates are ignored
Rating.objects.create(
addon=dummy_addon,
version=version_factory(addon=dummy_addon),
user=self.user,
ip_address='127.1.2.3',
)
with core.override_remote_addr('172.0.0.2'):
ActivityLog.create(amo.LOG.ADD_VERSION, dummy_addon, user=self.user)
UserRestrictionHistory.objects.create(
user=self.user, last_login_ip='15.16.23.42'
)
UserRestrictionHistory.objects.create(user=self.user, ip_address='4.8.15.16')
doc = pq(model_admin.known_ip_adresses(self.user))
result = doc('ul li').text().split()
assert len(result) == 7
assert set(result) == {
'130.1.2.4',
'128.1.2.3',
'129.1.2.4',
'127.1.2.3',
'15.16.23.42',
'172.0.0.2',
'4.8.15.16',
}
def test_last_known_activity_time(self):
someone_else = user_factory(username='someone_else')
addon = addon_factory()
model_admin = UserAdmin(UserProfile, admin.site)
assert str(model_admin.last_known_activity_time(self.user)) == ''
# Add various activities. They will be attached to whatever user is
# set in the thread global at the time, so set that in advance.
core.set_user(self.user)
expected_date = self.days_ago(1)
activity = ActivityLog.create(amo.LOG.CREATE_ADDON, addon)
activity.update(created=self.days_ago(2))
activity = ActivityLog.create(amo.LOG.EDIT_PROPERTIES, addon)
activity.update(created=expected_date)
assert activity.reload().created == expected_date
# Create another activity, more recent, attached to a different user.
core.set_user(someone_else)
activity = ActivityLog.create(amo.LOG.EDIT_PROPERTIES, addon)
expected_result = DateFormat(expected_date).format(settings.DATETIME_FORMAT)
assert str(model_admin.last_known_activity_time(self.user)) == expected_result
def _call_related_content_method(self, method):
model_admin = UserAdmin(UserProfile, admin.site)
result = getattr(model_admin, method)(self.user)
link = pq(result)('a')[0]
return link.attrib['href'], link.text
def test_collections_authorship(self):
Collection.objects.create()
Collection.objects.create(author=self.user)
Collection.objects.create(author=self.user, listed=False)
url, text = self._call_related_content_method('collections_authorship')
expected_url = (
reverse('admin:bandwagon_collection_changelist')
+ '?author=%d' % self.user.pk
)
assert url == expected_url
assert text == '2'
def test_addons_authorship(self):
addon_factory()
another_user = user_factory()
addon_factory(users=[self.user, another_user])
addon_factory(users=[self.user], status=amo.STATUS_NOMINATED)
addon_factory(users=[self.user], status=amo.STATUS_DELETED)
addon_factory(
users=[self.user], version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED}
)
# This next add-on shouldn't be counted.
addon_where_user_has_deleted_role = addon_factory(
users=[self.user, another_user]
)
addon_where_user_has_deleted_role.addonuser_set.filter(user=self.user).update(
role=amo.AUTHOR_ROLE_DELETED
)
url, text = self._call_related_content_method('addons_authorship')
expected_url = (
reverse('admin:addons_addon_changelist') + '?authors=%d' % self.user.pk
)
assert url == expected_url
assert text == '4 (active role), 1 (deleted role)'
def test_ratings_authorship(self):
Rating.objects.create(addon=addon_factory(), user=self.user)
dummy_addon = addon_factory()
Rating.objects.create(
addon=dummy_addon, version=dummy_addon.current_version, user=self.user
)
Rating.objects.create(
addon=dummy_addon,
version=version_factory(addon=dummy_addon),
user=self.user,
)
Rating.objects.create(
addon=dummy_addon, user=user_factory(), ip_address='255.255.0.0'
)
url, text = self._call_related_content_method('ratings_authorship')
expected_url = (
reverse('admin:ratings_rating_changelist') + '?user=%d' % self.user.pk
)
assert url == expected_url
assert text == '3'
def test_activity(self):
addon = addon_factory()
core.set_user(self.user)
ActivityLog.create(amo.LOG.CREATE_ADDON, addon)
ActivityLog.create(amo.LOG.EDIT_PROPERTIES, addon)
# Create another activity attached to a different user.
someone_else = user_factory()
core.set_user(someone_else)
ActivityLog.create(amo.LOG.EDIT_PROPERTIES, addon)
url, text = self._call_related_content_method('activity')
expected_url = (
reverse('admin:activity_activitylog_changelist') + '?user=%d' % self.user.pk
)
assert url == expected_url
assert text == '2'
def test_abuse_reports_by_this_user(self):
addon = addon_factory()
AbuseReport.objects.create(user=self.user)
AbuseReport.objects.create(user=self.user)
AbuseReport.objects.create(addon=addon)
AbuseReport.objects.create(addon=addon, reporter=self.user)
AbuseReport.objects.create(user=user_factory(), reporter=self.user)
url, text = self._call_related_content_method('abuse_reports_by_this_user')
expected_url = (
reverse('admin:abuse_abusereport_changelist')
+ '?reporter=%d' % self.user.pk
)
assert url == expected_url
assert text == '2'
def test_abuse_reports_for_this_user(self):
other_user = user_factory()
addon = addon_factory()
AbuseReport.objects.create(user=self.user)
AbuseReport.objects.create(user=other_user)
AbuseReport.objects.create(user=other_user, reporter=self.user)
AbuseReport.objects.create(addon=addon, reporter=self.user)
AbuseReport.objects.create(user=self.user, reporter=user_factory())
url, text = self._call_related_content_method('abuse_reports_for_this_user')
expected_url = (
reverse('admin:abuse_abusereport_changelist') + '?user=%d' % self.user.pk
)
assert url == expected_url
assert text == '2'
def test_user_restriction_history(self):
other_user = user_factory()
UserRestrictionHistory.objects.create(user=self.user)
UserRestrictionHistory.objects.create(user=self.user)
UserRestrictionHistory.objects.create(user=other_user)
url, text = self._call_related_content_method(
'restriction_history_for_this_user'
)
expected_url = (
reverse('admin:users_userrestrictionhistory_changelist')
+ '?user=%d' % self.user.pk
)
assert url == expected_url
assert text == '2'
def test_access_using_email(self):
lookup_user = user_factory(email='foo@bar.xyz')
detail_url_by_email = reverse(
'admin:users_userprofile_change', args=(lookup_user.email,)
)
detail_url_final = reverse(
'admin:users_userprofile_change', args=(lookup_user.pk,)
)
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Addons:Edit')
self.client.login(email=user.email)
response = self.client.get(detail_url_by_email, follow=False)
self.assert3xx(response, detail_url_final, 301)
class TestEmailUserRestrictionAdmin(TestCase):
def setUp(self):
self.user = user_factory(email='someone@mozilla.com')
self.grant_permission(self.user, 'Admin:Advanced')
self.client.login(email=self.user.email)
self.list_url = reverse('admin:users_emailuserrestriction_changelist')
def test_list(self):
EmailUserRestriction.objects.create(email_pattern='*@*foo.com')
response = self.client.get(self.list_url)
assert response.status_code == 200
class TestIPNetworkUserRestrictionAdmin(TestCase):
def setUp(self):
self.user = user_factory(email='someone@mozilla.com')
self.grant_permission(self.user, 'Admin:Advanced')
self.client.login(email=self.user.email)
self.list_url = reverse('admin:users_ipnetworkuserrestriction_changelist')
def test_list(self):
IPNetworkUserRestriction.objects.create(network='192.168.0.0/24')
response = self.client.get(self.list_url)
assert response.status_code == 200
class TestUserRestrictionHistoryAdmin(TestCase):
def setUp(self):
self.user = user_factory(email='someone@mozilla.com')
self.grant_permission(self.user, 'Admin:Advanced')
self.client.login(email=self.user.email)
self.list_url = reverse('admin:users_userrestrictionhistory_changelist')
def test_list(self):
other_user = user_factory()
UserRestrictionHistory.objects.create(user=self.user)
UserRestrictionHistory.objects.create(user=other_user)
response = self.client.get(self.list_url)
assert response.status_code == 200
content = response.content.decode('utf-8')
assert str(self.user) in content
assert str(other_user) in content
response = self.client.get(self.list_url + '?user=%s' % self.user.pk)
assert response.status_code == 200
content = response.content.decode('utf-8')
assert str(self.user) in content
assert str(other_user) not in content
|
mozilla/addons-server
|
src/olympia/users/tests/test_admin.py
|
Python
|
bsd-3-clause
| 38,889 | 0.001363 |
from intprim.bayesian_interaction_primitives import *
import intprim.basis
import intprim.constants
import intprim.examples
import intprim.filter
import intprim.filter.align
import intprim.filter.spatiotemporal
import intprim.util
|
ir-lab/intprim
|
intprim/__init__.py
|
Python
|
mit
| 231 | 0 |
# coding: utf-8
import random
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import sys
import os
# how many pictures to generate
num = 10
if len(sys.argv) > 1:
num = int(sys.argv[1])
def genline(text, font, filename):
    '''
    Render one line of text to an image and write the
    ground-truth text next to it.
    '''
w, h = font.getsize(text)
image = Image.new('RGB', (w + 15, h + 15), 'white')
brush = ImageDraw.Draw(image)
brush.text((8, 5), text, font=font, fill=(0, 0, 0))
image.save(filename + '.jpg')
    with open(filename + '.txt', 'w') as f:
        f.write(text)
if __name__ == '__main__':
if not os.path.isdir('./lines/'):
os.mkdir('./lines/')
for i in range(num):
fontname = './fonts/simkai.ttf'
fontsize = 24
font = ImageFont.truetype(fontname, fontsize)
text = str(random.randint(1000000000, 9999999999))
text = text + str(random.randint(1000000000, 9999999999))
#text = str(random.randint(1000, 9999))
filename = './lines/' + str(i + 1)
genline(text, font, filename)
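# Usage sketch (assumed invocation): `python 1_generateImage.py 100` renders
# 100 random digit strings, saving ./lines/1.jpg with its ground truth in
# ./lines/1.txt, up to ./lines/100.jpg / ./lines/100.txt.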
|
Halfish/lstm-ctc-ocr
|
1_generateImage.py
|
Python
|
apache-2.0
| 1,086 | 0.003683 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 20 23:53:19 2017
@author: chosenone
Train CNN for Text Classification
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_util
from CNN4Text import CNN4Text
from tensorflow.contrib import learn
#==============================================================================
# parameters
#==============================================================================
#==============================================================================
# data parameters
#==============================================================================
tf.flags.DEFINE_float("validation_set_percentage",0.1,
"the percentage of training examples that will be used for validation set")
tf.flags.DEFINE_string("data_postive_path","./data/rt-polaritydata/rt-polarity.pos",
"file path for postive data")
tf.flags.DEFINE_string("data_negative_path","./data/rt-polaritydata/rt-polarity.neg",
"file path for negative data")
#==============================================================================
# model hyperparameters
#==============================================================================
tf.flags.DEFINE_float("learning_rate",0.001,"learning rate(default 0.001)")
tf.flags.DEFINE_integer("embedding_size",128,"the size of word embeeding (default 128)")
tf.flags.DEFINE_integer("num_filters",128,"the number of filters for each filter size(default 128)")
tf.flags.DEFINE_string("filter_sizes","3,4,5","comma-separated filter sizes(default 3,4,5)")
tf.flags.DEFINE_float("keep_prob",0.5,"the probability used for dropout(default 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda",0.0,"the l2 regularization lambda(default 0)")
#==============================================================================
# train parameters
#==============================================================================
tf.flags.DEFINE_integer("batch_size",64,"Batch size (default size 64)")
tf.flags.DEFINE_integer("num_epochs",200,"Epoch sizes(default size 200)")
tf.flags.DEFINE_integer("evaluate_interval",100,"Evaluate model interval(default 100)")
tf.flags.DEFINE_integer("checkpoint_interval",100,"Save Checkpoint Interval(default 100)")
tf.flags.DEFINE_integer("num_checkpoints",5,"number of checkpoints to save(default 5)")
#==============================================================================
# misc parameters
#==============================================================================
tf.flags.DEFINE_bool("allow_soft_parameters",True,"allow soft device placement(default true)")
tf.flags.DEFINE_bool("log_device_placement",False,"Log placement of ops on devices(default false)")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr,value in sorted(FLAGS.__flags.items()):
print("%s:%s" % (attr.upper(),value))
print("\n")
#==============================================================================
# Data Preparation
#==============================================================================
# Load data
print("Loading Data...\n")
x_data,y = data_util.load_data_and_labels(FLAGS.data_positive_path,FLAGS.data_negative_path)
# construct vocabulary
max_sentence_length = max([len(sent.split(" ")) for sent in x_data])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_sentence_length)
x = np.array(list(vocab_processor.fit_transform(x_data)))
print(max_sentence_length)
# shuffle data
np.random.seed(10)
shuffled_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffled_indices]
y_shuffled = y[shuffled_indices]
# split train-test set
# have a try with k-fold cross-validation later.
validation_set_index = -1 * int(FLAGS.validation_set_percentage * float(len(y)))
x_train,x_val = x_shuffled[:validation_set_index],x_shuffled[validation_set_index:]
y_train,y_val = y_shuffled[:validation_set_index],y_shuffled[validation_set_index:]
print("Vocabulary Size: %s" % len(vocab_processor.vocabulary_._mapping))
print("Length of train/validation set: %d , %d ." % (len(y_train),len(y_val)))
#==============================================================================
# Training
#==============================================================================
with tf.Graph().as_default():
session_config = tf.ConfigProto(allow_soft_placement=FLAGS.allow_soft_parameters,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_config)
with sess.as_default():
cnn = CNN4Text(sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_size,
filter_sizes=list(map(int,FLAGS.filter_sizes.split(","))),
num_filters=FLAGS.num_filters,
l2_reg_lambda=FLAGS.l2_reg_lambda)
# the detail of train procedure
global_step = tf.Variable(0,name="global_step",trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
grads_and_vars = optimizer.compute_gradients(cnn._loss)
train_op = optimizer.apply_gradients(grads_and_vars,global_step=global_step)
grad_summaries = []
for g,v in grads_and_vars:
if g is not None:
grad_hist_summary = tf.summary.histogram("%s/grad/hist" % v.name,g)
                sparsity_summary = tf.summary.scalar("%s/grad/sparsity" % v.name,tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
        grad_summaries_merged = tf.summary.merge(grad_summaries)
# output path of summary
timestamp = str(int(time.time()))
output_path = os.path.abspath(os.path.join(os.path.curdir,"runs",timestamp))
print("Writing into Output Path: %s ..." % output_path)
# summary for loss and accuracy
loss_summary = tf.summary.scalar("loss",cnn._loss)
acc_summary = tf.summary.scalar("accuracy",cnn._accuracy)
# train summaries
        train_summary_op = tf.summary.merge([loss_summary,acc_summary,grad_summaries_merged])
train_summary_path = os.path.join(output_path,"summary","train")
train_summary_writer = tf.summary.FileWriter(train_summary_path,sess.graph)
#validation summaries
validation_summary_op = tf.summary.merge([loss_summary,acc_summary])
validation_summary_path = os.path.join(output_path,"summary","validation")
validation_summary_writer = tf.summary.FileWriter(validation_summary_path,sess.graph)
checkpoint_path = os.path.abspath(os.path.join(output_path,"checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_path,"model")
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
saver = tf.train.Saver(tf.global_variables(),max_to_keep=FLAGS.num_checkpoints)
# save vocabulary
vocab_processor.save(os.path.join(output_path,"vocab"))
sess.run(tf.global_variables_initializer())
# a single training step
def train_step(x_batch,y_batch,writer=None):
'''
a single training step
'''
feed_dict = {cnn._input_x:x_batch,
cnn._input_y:y_batch,
cnn._keep_prob:FLAGS.keep_prob}
_,step,summaries,loss,accuracy = sess.run([train_op,global_step,train_summary_op
,cnn._loss,cnn._accuracy],feed_dict)
time_str = datetime.datetime.now().isoformat()
print("%s: Step: %d,Loss: %.4f,Accuracy: %.4f" % (time_str,step,loss,accuracy))
if writer:
writer.add_summary(summaries,step)
# a single validation step
def validation_step(x_batch,y_batch,writer=None):
            '''
            a single validation step
            '''
feed_dict = {cnn._input_x:x_batch,
cnn._input_y:y_batch,
cnn._keep_prob:1.0} # for evaluation
step,summaries,loss,accuracy = sess.run([global_step,validation_summary_op
,cnn._loss,cnn._accuracy],feed_dict)
time_str = datetime.datetime.now().isoformat()
print("%s: Step: %d,Loss: %.4f,Accuracy: %.4f" % (time_str,step,loss,accuracy))
if writer:
writer.add_summary(summaries,step)
# generates batches
batches = data_util.batch_iter(list(zip(x_train,y_train)),FLAGS.batch_size,FLAGS.num_epochs)
#Training Loop
for batch in batches:
x_batch,y_batch = zip(*batch)
train_step(x_batch,y_batch,writer=train_summary_writer)
current_step = tf.train.global_step(sess,global_step)
if current_step % FLAGS.evaluate_interval == 0:
print("Evaluation:\n")
validation_step(x_batch,y_batch,writer=validation_summary_writer)
print("")
if current_step % FLAGS.checkpoint_interval == 0:
path = saver.save(sess,checkpoint_prefix,global_step=current_step)
print("Saved the model checkpoint to %s " % path)
|
chosenone75/Neural-Networks
|
tf/CNN-Sentence-Classification/train_CNN4Text.py
|
Python
|
gpl-3.0
| 10,181 | 0.021904 |
"""
CMSIS-DAP Interface Firmware
Copyright (c) 2009-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Extract and patch the interface without bootloader
"""
from options import get_options
from paths import get_interface_path, TMP_DIR
from utils import gen_binary, is_lpc, split_path
from os.path import join
if __name__ == '__main__':
options = get_options()
in_path = get_interface_path(options.interface, options.target, bootloader=False)
_, name, _ = split_path(in_path)
out_path = join(TMP_DIR, name + '.bin')
print '\nELF: %s' % in_path
gen_binary(in_path, out_path, is_lpc(options.interface))
print "\nBINARY: %s" % out_path
|
flyhung/CMSIS-DAP
|
tools/get_binary.py
|
Python
|
apache-2.0
| 1,166 | 0.002573 |
#Ensure there is an exceptional edge from the following case
def f2():
b, d = Base, Derived
try:
class MyNewClass(b, d):
pass
except:
e2
def f3():
sequence_of_four = a_global
try:
a, b, c = sequence_of_four
except:
e3
#Always treat locals as non-raising to keep DB size down.
def f4():
if cond:
local = 1
try:
local
except:
e4
def f5():
try:
a_global
except:
e5
def f6():
local = a_global
try:
local()
except:
e6
#Literals can't raise
def f7():
try:
4
except:
e7
def f8():
try:
a + b
except:
e8
#OK assignments
def f9():
try:
a, b = 1, 2
except:
e9
def fa():
seq = a_global
try:
a = seq
except:
ea
def fb():
a, b, c = a_global
try:
seq = a, b, c
except:
eb
#Ensure that a.b and c[d] can raise
def fc():
a, b = a_global
try:
return a[b]
except:
ec
def fd():
a = a_global
try:
return a.b
except:
ed
def fe():
try:
call()
except:
ee
else:
ef
|
github/codeql
|
python/ql/test/library-tests/ControlFlow/except/test.py
|
Python
|
mit
| 1,251 | 0.02558 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class nspbr6_args :
""" Provides additional arguments required for fetching the nspbr6 resource.
"""
def __init__(self) :
self._detail = False
@property
def detail(self) :
"""To get a detailed view.
"""
try :
return self._detail
except Exception as e:
raise e
@detail.setter
def detail(self, detail) :
"""To get a detailed view.
"""
try :
self._detail = detail
except Exception as e:
raise e
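# Minimal usage sketch (illustrative only): request a detailed view when
# fetching nspbr6 resources through the NITRO API.
#   args = nspbr6_args()
#   args.detail = True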
|
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/config/ns/nspbr6_args.py
|
Python
|
apache-2.0
| 1,047 | 0.025788 |
import pytest
from plumbum.colorlib.styles import ANSIStyle, Color, AttributeNotFound, ColorNotFound
from plumbum.colorlib.names import color_html, FindNearest
class TestNearestColor:
def test_exact(self):
assert FindNearest(0,0,0).all_fast() == 0
for n,color in enumerate(color_html):
# Ignoring duplicates
if n not in (16, 21, 46, 51, 196, 201, 226, 231, 244):
rgb = (int(color[1:3],16), int(color[3:5],16), int(color[5:7],16))
assert FindNearest(*rgb).all_fast() == n
def test_nearby(self):
assert FindNearest(1,2,2).all_fast() == 0
assert FindNearest(7,7,9).all_fast() == 232
def test_simplecolor(self):
assert FindNearest(1,2,4).only_basic() == 0
assert FindNearest(0,255,0).only_basic() == 2
assert FindNearest(100,100,0).only_basic() == 3
assert FindNearest(140,140,140).only_basic() == 7
class TestColorLoad:
def test_rgb(self):
blue = Color(0,0,255) # Red, Green, Blue
assert blue.rgb == (0,0,255)
def test_simple_name(self):
green = Color.from_simple('green')
assert green.number == 2
def test_different_names(self):
assert Color('Dark Blue') == Color('Dark_Blue')
assert Color('Dark_blue') == Color('Dark_Blue')
assert Color('DARKBLUE') == Color('Dark_Blue')
assert Color('DarkBlue') == Color('Dark_Blue')
assert Color('Dark Green') == Color('Dark_Green')
def test_loading_methods(self):
assert Color("Yellow") == Color.from_full("Yellow")
assert (Color.from_full("yellow").representation !=
Color.from_simple("yellow").representation)
class TestANSIColor:
@classmethod
def setup_class(cls):
ANSIStyle.use_color = True
def test_ansi(self):
assert str(ANSIStyle(fgcolor=Color('reset'))) == '\033[39m'
assert str(ANSIStyle(fgcolor=Color.from_full('green'))) == '\033[38;5;2m'
assert str(ANSIStyle(fgcolor=Color.from_simple('red'))) == '\033[31m'
class TestNearestColorExhaustive:
def test_allcolors(self):
myrange = (0,1,2,5,17,39,48,73,82,140,193,210,240,244,250,254,255)
for r in myrange:
for g in myrange:
for b in myrange:
near = FindNearest(r,g,b)
assert near.all_slow() == near.all_fast(), 'Tested: {0}, {1}, {2}'.format(r,g,b)
|
vodik/plumbum
|
tests/test_color.py
|
Python
|
mit
| 2,444 | 0.020458 |
#!/usr/bin/env python
# vim: tw=80 ts=4 sw=4 noet
from os.path import join, basename, dirname, abspath
import _import
from wwwclient import browse, scrape
HTML = scrape.HTML
s = browse.Session("http://www.google.com")
f = s.form().fill(q="python web scraping")
s.submit(f, action="btnG", method="GET")
tree = scrape.HTML.tree(s.page())
nodes = tree.cut(below=3)
nodes = nodes.filter(accept=lambda n:n.name.lower() in ("table","p"))
for node in nodes.children:
print HTML.text(node)
if node.name == "p":
link = node.find(withName="a")[0]
print "-->", link.attribute("href")
print HTML.links(link)
else:
print "---------"
# Google results are not properly closed, so we had to identify patterns
# where a closing tag should be inserted
# close_on = ("td", "a", "img", "br", "a")
# scrape.do(scrape.HTML.iterate, session.last().data(), closeOn=close_on, write=sys.stdout)
# EOF
|
sebastien/wwwclient
|
tests/site-google.py
|
Python
|
lgpl-3.0
| 902 | 0.012195 |
#! Tests out the CG solver with CPHF Polarizabilities
import time
import numpy as np
import psi4
psi4.set_output_file("output.dat")
# Benzene
mol = psi4.geometry("""
0 1
O 0.000000000000 0.000000000000 -0.075791843589
H 0.000000000000 -0.866811828967 0.601435779270
H 0.000000000000 0.866811828967 0.601435779270
symmetry c1
""")
psi4.set_options({"basis": "aug-cc-pVDZ",
"scf_type": "df",
"e_convergence": 1e-8,
"save_jk": True,
})
scf_e, scf_wfn = psi4.energy("SCF", return_wfn=True)
# Orbitals
Co = scf_wfn.Ca_subset("AO", "OCC")
Cv = scf_wfn.Ca_subset("AO", "VIR")
# Mints object
mints = psi4.core.MintsHelper(scf_wfn.basisset())
# RHS Dipoles
dipoles_xyz = []
for dip in mints.ao_dipole():
Fia = psi4.core.triplet(Co, dip, Cv, True, False, False)
Fia.scale(-2.0)
dipoles_xyz.append(Fia)
# Build up the preconditioner
precon = psi4.core.Matrix(Co.shape[1], Cv.shape[1])
occ = np.array(scf_wfn.epsilon_a_subset("AO", "OCC"))
vir = np.array(scf_wfn.epsilon_a_subset("AO", "VIR"))
precon.np[:] = (-occ.reshape(-1, 1) + vir)
# Build a preconditioner function
def precon_func(matrices, active_mask):
ret = []
for act, mat in zip(active_mask, matrices):
if act:
p = mat.clone()
p.apply_denominator(precon)
ret.append(p)
else:
ret.append(False)
return ret
def wrap_Hx(matrices, active_mask):
x_vec = [mat for act, mat in zip(active_mask, matrices) if act]
Hx_vec = scf_wfn.cphf_Hx(x_vec)
ret = []
cnt = 0
for act, mat in zip(active_mask, matrices):
if act:
ret.append(Hx_vec[cnt])
cnt += 1
else:
ret.append(False)
return ret
# Solve
ret, resid = psi4.p4util.solvers.cg_solver(dipoles_xyz, wrap_Hx, precon_func, rcond=1.e-6)
polar = np.empty((3, 3))
for numx in range(3):
for numf in range(3):
polar[numx, numf] = -1 * ret[numx].vector_dot(dipoles_xyz[numf])
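# Each element above is alpha_ij = -x_i . F_j: the negative dot product of the
# CPHF response vector for perturbation i with the dipole RHS for direction j
# (a sketch of the usual linear-response expression; the sign follows the -2
# scaling applied to the RHS dipoles earlier).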
psi4.core.print_out("\n " + "CPHF Dipole Polarizability:".center(44) + "\n")
tops = ("X", "Y", "Z")
psi4.core.print_out(" %12s %12s %12s\n" % tops)
for n, p in enumerate(tops):
psi4.core.print_out(" %3s %12.4f %12.4f %12.4f\n" % (p, polar[n][0], polar[n][1], polar[n][2]))
psi4.core.print_out("\n")
psi4.compare_values(8.01554, polar[0][0], 3, 'Dipole XX Polarizability') # TEST
psi4.compare_values(12.50363, polar[1][1], 3, 'Dipole YY Polarizability') # TEST
psi4.compare_values(10.04161, polar[2][2], 3, 'Dipole ZZ Polarizability') # TEST
|
psi4/psi4
|
tests/psi4numpy/cphf/input.py
|
Python
|
lgpl-3.0
| 2,679 | 0.005972 |
from bs4 import BeautifulSoup
class RunParameter_xml:
'''
A class for reading runparameters xml file from Illumina sequencing runs
:param xml_file: A runparameters xml file
'''
def __init__(self, xml_file):
self.xml_file = xml_file
self._read_xml()
def _read_xml(self):
'''
Internal function for reading the xml file using BS4
'''
try:
xml_file = self.xml_file
with open(xml_file, 'r') as fp:
soup = BeautifulSoup(fp, "html5lib")
self._soup = soup
except Exception as e:
raise ValueError(
'Failed to parse xml file {0}, error {1}'.\
format(self.xml_file, e))
    def get_nova_workflow_type(self):
        '''
        A method for fetching the workflow type of a NovaSeq run
        :returns: Workflow type string or None
        '''
        try:
            soup = self._soup
            workflowtype = None
            if soup.workflowtype:
                workflowtype = \
                    soup.workflowtype.contents[0]
            return workflowtype
        except Exception as e:
            raise ValueError(
                    'Failed to get NovaSeq workflow type, error: {0}'.\
                    format(e))
    def get_novaseq_flowcell(self):
        '''
        A method for fetching the flowcell id of a NovaSeq run
        :returns: Flowcell id
        '''
        try:
            soup = self._soup
            flowcell_id = None
            workflowtype = self.get_nova_workflow_type()
            if workflowtype is None or \
               workflowtype != 'NovaSeqXp':
                raise ValueError(
                        'Missing NovaSeq workflow type: {0}'.\
                        format(workflowtype))
            if soup.rfidsinfo and \
               soup.rfidsinfo.flowcellserialbarcode:
                flowcell_id = \
                    soup.rfidsinfo.flowcellserialbarcode.contents[0]
            if flowcell_id is None:
                raise ValueError(
                        'Missing NovaSeq flowcell id, file: {0}'.\
                        format(self.xml_file))
            return flowcell_id
        except Exception as e:
            raise ValueError(
                    'Failed to get NovaSeq flowcell id, error: {0}'.format(e))
    def get_novaseq_flowcell_mode(self):
        '''
        A method for fetching the flowcell mode of a NovaSeq run
        :returns: Flowcell mode
        '''
        try:
            soup = self._soup
            flowcell_mode = None
            workflowtype = self.get_nova_workflow_type()
            if workflowtype is None or \
               workflowtype != 'NovaSeqXp':
                raise ValueError(
                        'Missing NovaSeq workflow type: {0}'.\
                        format(workflowtype))
            if soup.rfidsinfo and \
               soup.rfidsinfo.flowcellmode:
                flowcell_mode = \
                    soup.rfidsinfo.flowcellmode.contents[0]
            if flowcell_mode is None:
                raise ValueError(
                        'Missing NovaSeq flowcell mode, file: {0}'.\
                        format(self.xml_file))
            return flowcell_mode
        except Exception as e:
            raise ValueError(
                    'Failed to get NovaSeq flowcell mode, error: {0}'.format(e))
def get_hiseq_flowcell(self):
'''
A method for fetching flowcell details for hiseq run
:returns: Flowcell info or None (for MiSeq, NextSeq or NovaSeq runs)
'''
try:
soup = self._soup
if soup.flowcell:
flowcell = soup.flowcell.contents[0]
else:
flowcell = None
return flowcell
except Exception as e:
raise ValueError(
'Failed to get flowcell for hiseq, error: {0}'.\
format(e))
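# Minimal usage sketch (file name is illustrative): read a run's parameters
# and print the flowcell mode for NovaSeq Xp runs.
# if __name__ == '__main__':
#     params = RunParameter_xml(xml_file='runParameters.xml')
#     if params.get_nova_workflow_type() == 'NovaSeqXp':
#         print(params.get_novaseq_flowcell_mode())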
|
imperial-genomics-facility/data-management-python
|
igf_data/illumina/runparameters_xml.py
|
Python
|
apache-2.0
| 3,007 | 0.018291 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
from openerp.exceptions import ValidationError
class AccountOwnCheck(models.Model):
_inherit = 'account.own.check'
@api.multi
def cancel_check(self):
""" Lo que deberia pasar con el cheque cuando se cancela """
if any(check.state != 'draft' for check in self):
raise ValidationError("Solo se puede cancelar un cheque en estado borrador")
self.next_state('draft_canceled')
@api.multi
def revert_canceled_check(self):
""" Lo que deberia pasar con el cheque cuando se revierte una cancelacion """
if any(check.state != 'canceled' for check in self):
raise ValidationError("Funcionalidad unica para cheques cancelados")
self.cancel_state('canceled')
@api.multi
def reject_check(self):
""" Lo que deberia pasar con el cheque cuando se rechaza """
if any(check.state != 'handed' for check in self):
raise ValidationError("No se puede rechazar un cheque que no esta entregado")
self.next_state('handed')
@api.multi
def revert_reject(self):
""" Lo que deberia pasar con el cheque cuando se revierte un rechazo """
if any(check.state != 'rejected' for check in self):
raise ValidationError("Funcionalidad unica para cheques rechazados")
self.cancel_state('rejected')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
odoo-arg/odoo_l10n_ar
|
l10n_ar_reject_checks/models/account_own_check.py
|
Python
|
agpl-3.0
| 2,297 | 0.002612 |
import datetime
import tweepy
from geopy.geocoders import Nominatim
import json
from secret import *
import boto3
import re
import preprocessor as p
import time
p.set_options(p.OPT.URL, p.OPT.EMOJI)
# Get the service resource.
dynamodb = boto3.resource('dynamodb', region_name='us-west-2')
table = dynamodb.Table('fuck')
geolocator = Nominatim()
epoch = datetime.datetime.utcfromtimestamp(0)
with open('zip2fips.json') as data_file:
zip2fips = json.load(data_file)
def get_fips(coords):
    location = geolocator.reverse('{:f}, {:f}'.format(coords[0], coords[1]))
    zipcode = None
    fips = None
    if 'address' in location.raw:
        if 'country_code' in location.raw['address']:
            if location.raw['address']['country_code'] == 'us':
                if 'postcode' in location.raw['address']:
                    zipcode = location.raw['address']['postcode']
                    try:
                        # zip2fips is a plain dict, so a missing zip raises KeyError
                        fips = zip2fips[zipcode]
                    except KeyError:
                        print("FIPS could not be found")
                    return fips, zipcode
                else:
                    print("postcode not in location address")
            else:
                print("Not in the US")
        else:
            print("No Country code is in the address")
    else:
        print("No address")
    return None, None
class TwitterStreamListener(tweepy.StreamListener):
def on_status(self, status):
try:
            if status.geo is not None:
fips, zipcode = get_fips(status.geo['coordinates'])
if fips is None:
print("Fips is None")
raise Exception
if zipcode is None:
print("Zipcode is None")
raise Exception
txt = re.sub('[!@#$]', '', status.text)
txt = p.clean(txt)
try:
table.update_item(
Key={
'fips': int(fips)
},
UpdateExpression='ADD tweet :val1',
ExpressionAttributeValues={
':val1': set([txt])
}
)
except:
print("it crashed")
print("FIPS:" + fips)
print("TXT:" + txt)
except Exception as e:
print(e)
def on_error(self, status):
print(status)
if __name__ == "__main__":
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# for i in range(4):
# t = threading.Thread(target=worker)
# t.daemon = True # thread dies when main thread (only non-daemon thread) exits.
# t.start()
stream = tweepy.Stream(auth, TwitterStreamListener())
while True:
try:
stream.filter(locations=[-125.0011, 24.9493, -66.9326, 49.5904])
        except Exception:
            continue
        time.sleep(10)
#stream.sample(1)
|
Gknoblau/gladitude
|
twitter/twitter_tweepy.py
|
Python
|
mit
| 3,076 | 0.002276 |
from __future__ import division
__author__ = 'jerry'
from utils.settings import pbp, seasons
from Game import Game
class NoCollectionError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class SeasonDataError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class Season:
# This is a helper class to manage a full season's worth of data
_coll = pbp
def __init__(self, season=None, collection=None):
self._games = []
self._data = []
self._start_date = None
self._end_date = None
self._season = season
self._asg = None
self._index = 0
if collection is not None:
self.__class__._coll = collection
self._coll = collection
elif self._coll is None:
if self.__class__._coll is None:
raise NoCollectionError('Must have a collection in MongoDB!')
else:
self._coll = self.__class__._coll
if season is not None:
self.get_by_season(season)
def get_by_season(self, season):
season_data = seasons.find_one({'season': season})
self._start_date = season_data['start']
self._end_date = season_data['end']
self._asg = season_data['allStarGame']
data = self._coll.find({'game_date': {'$gte': self._start_date, '$lte': self._end_date}})
self._data = data
self.set_data(self._data)
def set_data(self, data):
self._games = []
self._games = sorted([Game(event_id=game['id']) for game in data])
# for game_json in data:
# event_id = game_json['id']
# game = Game(event_id=event_id)
# self._games.append(game)
#self._games = sorted(self._games)
#self._start_date = self._games[0].date
#self._end_date = self._games[-1].date
def __str__(self):
return '{}-{} NBA Season'.format(self.season, self.season + 1)
def __iter__(self):
self._index = 0
return self
def next(self):
try:
game = self.games[self._index]
except IndexError:
raise StopIteration
self._index += 1
return game
@property
def games(self):
return self._games
@property
def start_date(self):
return self._start_date
@property
def end_date(self):
return self._end_date
@property
def season(self):
return self._season
    def __len__(self):
        return len(self.games)
@property
def regular_season_games(self):
return [game for game in self._games if game.game_type == 'Regular Season']
@property
def postseason_games(self):
return [game for game in self._games if game.game_type == 'Postseason']
def get_all_games_in_range(self, start_date=None, end_date=None):
if start_date is not None and end_date is None:
games = [game for game in self.games if start_date <= game.date]
elif start_date is None and end_date is not None:
games = [game for game in self.games if game.date <= end_date]
elif start_date is not None and end_date is not None:
games = [game for game in self.games if start_date <= game.date <= end_date]
else:
games = self.games
return games
def get_team_games_in_range(self, team, start_date=None, end_date=None):
games = [game for game in self.get_all_games_in_range(start_date, end_date)
if game.is_away(team) or game.is_home(team)]
return games
def get_player_games_in_range(self, player, start_date=None, end_date=None):
games = [game for game in self.get_all_games_in_range(start_date, end_date)
if game.player_in_game(player)]
return games
def drtg(self, team, start_date=None, end_date=None):
games = self.get_team_games_in_range(team, start_date, end_date)
pts_against = 0
possessions = 0
for game in games:
opponent = game.opponent(team)
pts_against += game.score(opponent)
possessions += game.possessions(opponent)
drtg = 100 * pts_against / possessions
return drtg
def ortg(self, team, start_date=None, end_date=None):
games = self.get_team_games_in_range(team, start_date, end_date)
pts_scored = 0
possessions = 0
for game in games:
pts_scored += game.score(team)
possessions += game.possessions(team)
ortg = 100 * pts_scored / possessions
return ortg
def player_ortg(self, player, start_date=None, end_date=None):
games_played = self.get_player_games_in_range(player, start_date, end_date)
ast = 0
fgm = 0
fga = 0
fta = 0
ftm = 0
tov = 0
threes = 0
orb = 0
pts = 0
mp = 0
team_fgm = 0
team_fga = 0
team_ast = 0
team_mp = 0
team_ftm = 0
team_fta = 0
team_orb = 0
team_pts = 0
team_3pm = 0
team_tov = 0
opp_dreb = 0
for game in games_played:
player_data = game.player_boxscore(player)
team = game.player_team(player)
opponent = game.opponent(team)
team_data = game.team_boxscore(team)['teamStats']
opponent_data = game.team_boxscore(opponent)['teamStats']
ast += player_data['assists']
fgm += player_data['fieldGoals']['made']
fga += player_data['fieldGoals']['attempted']
fta += player_data['freeThrows']['attempted']
ftm += player_data['freeThrows']['made']
tov += player_data['turnovers']
threes += player_data['threePointFieldGoals']['made']
orb += player_data['rebounds']['offensive']
pts += player_data['points']
mp += player_data['totalSecondsPlayed'] / 60.0
team_fgm += team_data['fieldGoals']['made']
team_fga += team_data['fieldGoals']['attempted']
team_ast += team_data['assists']
team_mp += team_data['minutes']
team_ftm += team_data['freeThrows']['made']
team_fta += team_data['freeThrows']['attempted']
team_orb += team_data['rebounds']['offensive']
team_pts += team_data['points']
team_3pm += team_data['threePointFieldGoals']['made']
team_tov += team_data['turnovers']['total']
opp_dreb += opponent_data['rebounds']['defensive']
team_orb_pct = team_orb / (opp_dreb + team_orb)
ft_part = (1 - (1 - (ftm / fta))**2) * 0.4 * fta
ast_part = 0.5 * (((team_pts - team_ftm) - (pts - ftm)) / (2 * (team_fga - fga))) * ast
q_ast = ((mp / (team_mp / 5)) * (1.14 * ((team_ast - ast) / team_fgm))) + ((((team_ast / team_mp) * mp * 5 - ast) / ((team_fgm / team_mp) * mp * 5 - fgm)) * (1 - (mp / (team_mp / 5))))
fg_part = fgm * (1 - 0.5 * ((pts - ftm) / (2 * fga)) * q_ast)
team_scoring_poss = team_fgm + (1 - (1 - (team_ftm / team_fta))**2) * team_fta * 0.4
team_play_pct = team_scoring_poss / (team_fga + team_fta * 0.4 + team_tov)
team_orb_weight = ((1 - team_orb_pct) * team_play_pct) / ((1 - team_orb_pct) * team_play_pct + team_orb_pct * (1 - team_play_pct))
orb_part = orb * team_orb_weight * team_play_pct
scr_poss = (fg_part + ast_part + ft_part) * (1 - (team_orb / team_scoring_poss) * team_orb_weight * team_play_pct) + orb_part
fg_x_poss = (fga - fgm) * (1 - 1.07 * team_orb_pct)
ft_x_poss = ((1 - (ftm / fta))**2) * 0.4 * fta
tot_poss = scr_poss + fg_x_poss + ft_x_poss + tov
pprod_fg_part = 2 * (fgm + 0.5 * threes) * (1 - 0.5 * ((pts - ftm) / (2 * fga)) * q_ast)
pprod_ast_part = 2 * ((team_fgm - fgm + 0.5 * (team_3pm - threes)) / (team_fgm - fgm)) * 0.5 * (((team_pts - team_ftm) - (pts - ftm)) / (2 * (team_fga - fga))) * ast
pprod_orb_part = orb * team_orb_weight * team_play_pct * (team_pts / (team_fgm + (1 - (1 - (team_ftm / team_fta))**2) * 0.4 * team_fta))
pprod = (pprod_fg_part + pprod_ast_part + ftm) * (1 - (team_orb / team_scoring_poss) * team_orb_weight * team_play_pct) + pprod_orb_part
ortg = 100 * pprod / tot_poss
return ortg
def player_drtg(self, player, start_date=None, end_date=None):
games_played = self.get_player_games_in_range(player, start_date, end_date)
drb = 0
pf = 0
mp = 0
stl = 0
blk = 0
team_mp = 0
team_blk = 0
team_stl = 0
team_drb = 0
team_pf = 0
team_pos = 0
opp_fta = 0
opp_ftm = 0
opp_fga = 0
opp_fgm = 0
opp_orb = 0
opp_pts = 0
opp_tov = 0
opp_mp = 0
for game in games_played:
player_data = game.player_boxscore(player)
team = game.player_team(player)
opponent = game.opponent(team)
team_data = game.team_boxscore(team)['teamStats']
opponent_data = game.team_boxscore(opponent)['teamStats']
drb += player_data['rebounds']['defensive']
pf += player_data['personalFouls']
mp += player_data['totalSecondsPlayed'] / 60.0
stl += player_data['steals']
blk += player_data['blockedShots']
team_mp += team_data['minutes']
team_blk += team_data['blockedShots']
team_stl += team_data['steals']
team_drb += team_data['rebounds']['defensive']
team_pf += team_data['personalFouls']
team_pos += game.possessions(team)
opp_fta += opponent_data['freeThrows']['attempted']
opp_ftm += opponent_data['freeThrows']['made']
opp_fga += opponent_data['fieldGoals']['attempted']
opp_fgm += opponent_data['fieldGoals']['made']
opp_orb += opponent_data['rebounds']['offensive']
opp_pts += opponent_data['points']
opp_tov += opponent_data['turnovers']['total']
opp_mp += opponent_data['minutes']
team_drtg = 100 * opp_pts / team_pos
dor_pct = opp_orb / (opp_orb + team_drb)
dfg_pct = opp_fgm / opp_fga
fmwt = (dfg_pct * (1 - dor_pct)) / (dfg_pct * (1 - dor_pct) + (1 - dfg_pct) * dor_pct)
stops1 = stl + blk * fmwt * (1 - 1.07 * dor_pct) + drb * (1 - fmwt)
stops2 = (((opp_fga - opp_fgm - team_blk) / team_mp) * fmwt * (1 - 1.07 * dor_pct) + ((opp_tov - team_stl) / team_mp)) * mp + (pf / team_pf) * 0.4 * opp_fta * (1 - (opp_ftm / opp_fta))**2
stops_tot = stops1 + stops2
stop_pct = (stops_tot * opp_mp) / (team_pos * mp)
d_pts_per_scrposs = opp_pts / (opp_fgm + (1 - (1 - (opp_ftm / opp_fta))**2) * opp_fta * 0.4)
drtg = team_drtg + 0.2 * (100 * d_pts_per_scrposs * (1 - stop_pct) - team_drtg)
return drtg
def player_usage(self, player, start_date=None, end_date=None):
games_played = self.get_player_games_in_range(player, start_date, end_date)
fga = 0
fta = 0
tov = 0
mp = 0
team_fga = 0
team_fta = 0
team_tov = 0
team_mp = 0
for game in games_played:
team = game.player_team(player)
player_data = game.player_boxscore(player)
team_data = game.team_boxscore(team)['teamStats']
fga += player_data['fieldGoals']['attempted']
fta += player_data['freeThrows']['attempted']
tov += player_data['turnovers']
mp += player_data['totalSecondsPlayed'] / 60.0
team_fga += team_data['fieldGoals']['attempted']
team_fta += team_data['freeThrows']['attempted']
team_tov += team_data['turnovers']['total']
team_mp += team_data['minutes']
usg = 100 * ((fga + 0.44 * fta + tov) * (team_mp / 5)) / (mp * (team_fga + 0.44 * team_fta + team_tov))
return usg
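# Hedged usage sketch (the season year and team abbreviation are illustrative,
# and the 'pbp'/'seasons' Mongo collections from utils.settings must be
# populated for this to run):
# season = Season(2013)
# print 'ORtg: %.1f DRtg: %.1f' % (season.ortg('LAL'), season.drtg('LAL'))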
|
grapesmoker/nba
|
game/Season.py
|
Python
|
gpl-2.0
| 12,250 | 0.002286 |
#!/usr/bin/env python3
print('hello hello hello')
|
bobisme/hello
|
python/hello3.py
|
Python
|
mit
| 50 | 0 |
#def binh_phuong()
try:
    a=int(raw_input("Enter a number n>0 \n"))
    while a<=0:
        a=int(raw_input("Please re-enter a number n>0\n "))
    print "%d" %(a)
    b=pow(a,2)
    c=int(raw_input("Guess the square of your number\n"))
    while c!=b:
        if c<b:
            print"not quite, go a little higher\n"
            c=int(raw_input())
        else:
            print"too high, go down a little\n"
            c=int(raw_input())
    print "Correct, the answer is %d" %(c)
except ValueError:
    print "Your input was not an Integer"
|
pythonvietnam/pbc082015
|
VuQuangThang/21082015_B2/bai4.py
|
Python
|
gpl-2.0
| 433 | 0.080831 |
import unittest
import logging
from domaincrawl.link_aggregator import LinkAggregator
from domaincrawl.link_filters import DomainFilter, is_acceptable_url_scheme
from domaincrawl.site_graph import SiteGraph
from domaincrawl.util import URLNormalizer, extract_domain_port
class LinkAggregatorTest(unittest.TestCase):
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %I:%M:%S %p')
def test_link_dedup(self):
base_url = "acme.com:8999"
base_domain, port = extract_domain_port(base_url)
logger = logging.getLogger()
url_norm = URLNormalizer(base_domain, port)
normalized_url = url_norm.normalize_with_domain(base_url)
logger.debug("Constructed normalized base url : %s"%normalized_url)
domain_filter = DomainFilter(base_domain, logger)
site_graph = SiteGraph(logger)
link_aggregator = LinkAggregator(logger, site_graph, link_mappers=[url_norm.normalize_with_domain], link_filters=[domain_filter.passes, is_acceptable_url_scheme])
valid_links = ["/a/b","/a/b/./","http://acme.com:8002/a","https://acme.com:8002/b?q=asd#frag"]
expected_links = ["http://acme.com:8999/a/b","http://acme.com:8002/a","https://acme.com:8002/b"]
# This time, we also specify a referrer page
filtered_links = link_aggregator.filter_update_links(valid_links, normalized_url)
self.assertListEqual(expected_links,filtered_links)
self.assertSetEqual(set(expected_links),link_aggregator._links)
# Second invocation should result in deduplication
filtered_links = link_aggregator.filter_update_links(valid_links, None)
self.assertTrue(len(filtered_links) == 0)
self.assertSetEqual(set(expected_links),link_aggregator._links)
# None of the invalid links should pass
invalid_links = ["mailto://user@mail.com","code.acme.com","code.acme.com/b","https://127.122.9.1"]
filtered_links = link_aggregator.filter_update_links(invalid_links, None)
self.assertTrue(len(filtered_links) == 0)
self.assertSetEqual(set(expected_links),link_aggregator._links)
# A new valid link should pass
new_valid_links = ["http://acme.com:8999/"]
filtered_links = link_aggregator.filter_update_links(new_valid_links, None)
expected_result = ["http://acme.com:8999"]
self.assertListEqual(expected_result,filtered_links)
expected_result_set = set(expected_links)
expected_result_set.update(set(expected_result))
self.assertSetEqual(expected_result_set,link_aggregator._links)
self.assertEqual(len(expected_result_set), site_graph.num_nodes())
for link in expected_result_set:
self.assertTrue(site_graph.has_vertex(link))
self.assertEqual(len(expected_links), site_graph.num_edges())
for link in expected_links:
self.assertTrue(site_graph.has_edge(normalized_url, link))
|
planBrk/domaincrawler
|
test/test_link_aggregator.py
|
Python
|
apache-2.0
| 3,054 | 0.007531 |
from flask import Flask, request, session, g, redirect, url_for, \
abort, flash
import db
import routes
DATABASE = 'test.db'
DEBUG = True
SECRET_KEY = 'key'
USERNAME = 'admin'
PASSWORD = 'password'
app = Flask(__name__)
app.config.from_object(__name__)
if __name__ == '__main__':
app.run()
|
JackMc/CourseScraper
|
web/main.py
|
Python
|
mit
| 311 | 0 |
#
# Copyright (C) University College London, 2007-2012, all rights reserved.
#
# This file is part of HemeLB and is provided to you under the terms of
# the GNU LGPL. Please see LICENSE in the top level directory for full
# details.
#
import numpy as np
import xdrlib
import warnings
from .. import HemeLbMagicNumber
SnapshotMagicNumber = 0x736e7004
def HemeLbSnapshot(filename):
"""Guess which file format we were given and use the correct class
to open it.
We have to handle a number of cases:
- the original text format;
- the XDR copy thereof, and
- the updated (August 2011) version with format magic and version
numbers and more metadata.
"""
start = file(filename).read(8)
reader = xdrlib.Unpacker(start)
firstInt = reader.unpack_uint()
if firstInt == HemeLbMagicNumber:
assert reader.unpack_uint() == SnapshotMagicNumber
cls = VersionedXdrSnapshot
elif firstInt == 0 or firstInt == 1 or firstInt == 2:
# It is the basic Xdr format that starts with the stablity flag
cls = XdrSnapshotVersionOne
# Maybe text? If so, the first character should be a '0', '1' or '2', followed by a newline
    elif (start[0] == '0' or start[0] == '1' or start[0] == '2') and start[1] == '\n':
cls = TextSnapshot
else:
raise ValueError('Cannot determine version of snapshot file "%s"' % filename)
return cls(filename)
class BaseSnapshot(np.recarray):
"""Base class wrapping a HemeLB snapshot.
Snap is basically a numpy record array with the following fields:
- id (int) -- an id number (basically the index of the point in the
file
- position (3x float) -- the position in input space (m)
- grid (3x int) -- the (x, y, z) coordinates in lattice units
- pressure (float) -- the pressure in physical units (mmHg)
- velocity (3x float) -- (x,y,z) components of the velocity field
in physical units (m/s)
- stress (float) -- the von Mises stress in physical units (Pa)
It has a number of additional properties (see __readHeader for full details)
"""
_raw_row = [('id', int),
('position', float, (3,)),
('grid', int, (3,)),
('pressure', float),
('velocity', float, (3,)),
('stress', float)]
_readable_row = np.dtype(_raw_row[2:])
row = np.dtype(_raw_row)
_attrs = {'stable': None,
'voxel_size': None,
'origin': np.array([np.nan, np.nan, np.nan]),
'bb_min': None,
'bb_max': None,
'bb_len': None,
'voxel_count': None}
# header = len(_attrs)
def __new__(cls, filename):
"""Create a new instance. Numpy array subclasses use this
method instead of __init__ for initialization.
"""
headerDict = cls._readHeader(filename)
noindex = cls._load(filename, headerDict)
index = np.recarray(shape=noindex.shape, dtype=cls.row)
for el in cls._raw_row[2:]:
key = el[0]
index.__setattr__(key, noindex.__getattribute__(key))
continue
index.id = np.arange(len(noindex))
try:
index.position = cls._computePosition(index.grid, headerDict)
except:
index.position = np.nan
pass
obj = index.view(cls)
# Set the attributes on the snapshot
for headerField in headerDict:
setattr(obj, headerField, headerDict[headerField])
continue
return obj
def __array_finalize__(self, parent):
"""Numpy special method."""
if parent is None:
return
for a in self._attrs:
setattr(self, a, getattr(parent, a, self._attrs[a]))
continue
return
pass
class PositionlessSnapshot(BaseSnapshot):
"""Base class for the original text snapshots and the XDR
equivalent. These lack the data required to compute the positions
of grid points. It is supplied through the coords.asc file
generated by the old setuptool.
"""
def computePosition(self, coordsFile):
"""Given the coordinate file from the segtool, calculate all
the lattice positions' coordinates.
"""
from os.path import exists
if exists (coordsFile):
from ...coordinates import Transformer
trans = Transformer(coordsFile)
self.position = 1e-3 * trans.siteToStl(self.grid + self.bb_min)
return
else:
# The coords file is missing!
warnings.warn('Missing coordinates file "%s", assuming origin at [0,0,0]' % coordsFile, stacklevel=2)
self.position = (self.grid + self.bb_min) * self.voxel_size # + origin, but we'll just assume it's zero here.
pass
class TextSnapshot(PositionlessSnapshot):
"""Read a text snapshot.
"""
nHeaderLines = 6
@classmethod
def _readHeader(cls, filename):
"""Read the header lines, according to:
0- Flag for simulation stability, 0 or 1
1- Voxel size in physical units (units of m)
2- vertex coords of the minimum bounding box with minimum values (x, y and z values)
3- vertex coords of the minimum bounding box with maximum values (x, y and z values)
4- #voxels within the minimum bounding box along the x, y, z axes (3 values)
5- total number of fluid voxels
"""
f = file(filename)
stable = int(f.readline())
voxel_size = float(f.readline())
bb_min = np.array([int(x) for x in f.readline().split()])
bb_max = np.array([int(x) for x in f.readline().split()])
bb_len = np.array([int(x) for x in f.readline().split()])
voxel_count = int(f.readline())
return {'stable': stable,
'voxel_size': voxel_size,
'bb_min': bb_min,
'bb_max': bb_max,
'bb_len': bb_len,
'voxel_count': voxel_count}
@classmethod
def _load(cls, filename, header):
return np.loadtxt(filename,
skiprows=cls.nHeaderLines,
dtype=cls._readable_row).view(np.recarray)
pass
class XdrVoxelFormatOneSnapshot(object):
@classmethod
def _load(cls, filename, header):
# Skip past the header, slurp data, create XDR object
f = file(filename)
f.seek(cls._headerLengthBytes)
reader = xdrlib.Unpacker(f.read())
ans = np.recarray((header['voxel_count'],), dtype=cls._readable_row)
# Read all the voxels.
for i in xrange(header['voxel_count']):
ans[i] = ((reader.unpack_int(),
reader.unpack_int(),
reader.unpack_int()),
reader.unpack_float(),
(reader.unpack_float(),
reader.unpack_float(),
reader.unpack_float()),
reader.unpack_float())
continue
reader.done()
return ans
pass
class XdrSnapshotVersionOne(PositionlessSnapshot, XdrVoxelFormatOneSnapshot):
"""Read an old-style XDR snapshot.
"""
    #                     int   double  3x int  3x int  3x int  int
_headerLengthBytes = 4 + 8 + 3*4 + 3*4 + 3*4 + 4
@classmethod
def _readHeader(cls, filename):
"""Read the header lines, according to:
0- Flag for simulation stability, 0 or 1
1- Voxel size in physical units (units of m)
2- vertex coords of the minimum bounding box with minimum values (x, y and z values)
3- vertex coords of the minimum bounding box with maximum values (x, y and z values)
4- #voxels within the minimum bounding box along the x, y, z axes (3 values)
5- total number of fluid voxels
"""
reader = xdrlib.Unpacker(file(filename).read(cls._headerLengthBytes))
header = {}
header['stable'] = reader.unpack_int()
header['voxel_size'] = reader.unpack_double()
header['bb_min'] = np.array((reader.unpack_int(),
reader.unpack_int(),
reader.unpack_int()))
header['bb_max'] = np.array((reader.unpack_int(),
reader.unpack_int(),
reader.unpack_int()))
header['bb_len'] = np.array((reader.unpack_int(),
reader.unpack_int(),
reader.unpack_int()))
header['voxel_count'] = reader.unpack_int();
return header
pass
class XdrSnapshotVersionTwo(BaseSnapshot, XdrVoxelFormatOneSnapshot):
"""Read snapshots for the updated format as for August 2011.
"""
_headerLengthBytes = 80
VersionNumber = 2
@classmethod
def _readHeader(cls, filename):
"""Read the header lines, according to description in Code/io/formats/snapshot.h
"""
reader = xdrlib.Unpacker(file(filename).read(cls._headerLengthBytes))
header = {}
assert reader.unpack_uint() == HemeLbMagicNumber
assert reader.unpack_uint() == SnapshotMagicNumber
assert reader.unpack_uint() == cls.VersionNumber
bodyStart = reader.unpack_uint()
assert bodyStart == cls._headerLengthBytes
header['stable'] = reader.unpack_int()
header['voxel_size'] = reader.unpack_double()
header['origin'] = np.array((reader.unpack_double(),
reader.unpack_double(),
reader.unpack_double()))
header['bb_min'] = np.array((reader.unpack_int(),
reader.unpack_int(),
reader.unpack_int()))
header['bb_max'] = np.array((reader.unpack_int(),
reader.unpack_int(),
reader.unpack_int()))
header['bb_len'] = header['bb_max'] - header['bb_min'] + 1
header['voxel_count'] = reader.unpack_int();
return header
@classmethod
def _computePosition(cls, grid, header):
return (grid + header['bb_min']) * header['voxel_size'] + header['origin']
pass
def VersionedXdrSnapshot(filename):
"""Examine the file and dispatch to the appropriate constructor.
"""
# Need the two magic numbers and the version number, i.e. 12 bytes
reader = xdrlib.Unpacker(file(filename).read(12))
assert reader.unpack_uint() == HemeLbMagicNumber
assert reader.unpack_uint() == SnapshotMagicNumber
version = reader.unpack_uint()
if version == 2:
return XdrSnapshotVersionTwo(filename)
raise ValueError('Unknown version number (%d) in file "%s"' % (version, filename))
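# Usage sketch (file name is illustrative): the factory picks the right parser
# from the magic/version bytes, so all three on-disk formats load the same way.
# snap = HemeLbSnapshot('snapshot_000500.dat')
# print snap.voxel_count, snap.voxel_size
# print snap.pressure.mean()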
|
jenshnielsen/hemelb
|
Tools/hemeTools/parsers/snapshot/__init__.py
|
Python
|
lgpl-3.0
| 11,074 | 0.00587 |
# A Job consists of many "Tasks".
# A task is the run of an external tool, with proper methods for failure handling
from Tools.CList import CList
class Job(object):
NOT_STARTED, IN_PROGRESS, FINISHED, FAILED = range(4)
def __init__(self, name):
self.tasks = [ ]
self.resident_tasks = [ ]
self.workspace = "/tmp"
self.current_task = 0
self.callback = None
self.name = name
self.finished = False
self.end = 100
self.__progress = 0
self.weightScale = 1
self.afterEvent = None
self.state_changed = CList()
self.status = self.NOT_STARTED
self.onSuccess = None
# description is a dict
def fromDescription(self, description):
pass
def createDescription(self):
return None
def getProgress(self):
if self.current_task == len(self.tasks):
return self.end
t = self.tasks[self.current_task]
jobprogress = t.weighting * t.progress / float(t.end) + sum([task.weighting for task in self.tasks[:self.current_task]])
return int(jobprogress*self.weightScale)
progress = property(getProgress)
def getStatustext(self):
return { self.NOT_STARTED: _("Waiting"), self.IN_PROGRESS: _("In progress"), self.FINISHED: _("Finished"), self.FAILED: _("Failed") }[self.status]
def task_progress_changed_CB(self):
self.state_changed()
def addTask(self, task):
task.job = self
task.task_progress_changed = self.task_progress_changed_CB
self.tasks.append(task)
def start(self, callback):
assert self.callback is None
self.callback = callback
self.restart()
def restart(self):
self.status = self.IN_PROGRESS
self.state_changed()
self.runNext()
sumTaskWeightings = sum([t.weighting for t in self.tasks]) or 1
self.weightScale = self.end / float(sumTaskWeightings)
def runNext(self):
if self.current_task == len(self.tasks):
if len(self.resident_tasks) == 0:
self.status = self.FINISHED
self.state_changed()
self.callback(self, None, [])
self.callback = None
else:
print "still waiting for %d resident task(s) %s to finish" % (len(self.resident_tasks), str(self.resident_tasks))
else:
self.tasks[self.current_task].run(self.taskCallback)
self.state_changed()
def taskCallback(self, task, res, stay_resident = False):
cb_idx = self.tasks.index(task)
if stay_resident:
if cb_idx not in self.resident_tasks:
self.resident_tasks.append(self.current_task)
print "task going resident:", task
else:
print "task keeps staying resident:", task
return
if len(res):
print ">>> Error:", res
self.status = self.FAILED
self.state_changed()
self.callback(self, task, res)
if cb_idx != self.current_task:
if cb_idx in self.resident_tasks:
print "resident task finished:", task
self.resident_tasks.remove(cb_idx)
if not res:
self.state_changed()
self.current_task += 1
self.runNext()
def retry(self):
assert self.status == self.FAILED
self.restart()
def abort(self):
if self.current_task < len(self.tasks):
self.tasks[self.current_task].abort()
for i in self.resident_tasks:
self.tasks[i].abort()
def cancel(self):
self.abort()
def __str__(self):
return "Components.Task.Job name=%s #tasks=%s" % (self.name, len(self.tasks))
class Task(object):
def __init__(self, job, name):
self.name = name
self.immediate_preconditions = [ ]
self.global_preconditions = [ ]
self.postconditions = [ ]
self.returncode = None
self.initial_input = None
self.job = None
self.end = 100
self.weighting = 100
self.__progress = 0
self.cmd = None
self.cwd = "/tmp"
self.args = [ ]
self.cmdline = None
self.task_progress_changed = None
self.output_line = ""
job.addTask(self)
self.container = None
def setCommandline(self, cmd, args):
self.cmd = cmd
self.args = args
def setTool(self, tool):
self.cmd = tool
self.args = [tool]
self.global_preconditions.append(ToolExistsPrecondition())
self.postconditions.append(ReturncodePostcondition())
def setCmdline(self, cmdline):
self.cmdline = cmdline
def checkPreconditions(self, immediate = False):
not_met = [ ]
if immediate:
preconditions = self.immediate_preconditions
else:
preconditions = self.global_preconditions
for precondition in preconditions:
if not precondition.check(self):
not_met.append(precondition)
return not_met
def _run(self):
if (self.cmd is None) and (self.cmdline is None):
self.finish()
return
from enigma import eConsoleAppContainer
self.container = eConsoleAppContainer()
self.container.appClosed.append(self.processFinished)
self.container.stdoutAvail.append(self.processStdout)
self.container.stderrAvail.append(self.processStderr)
if self.cwd is not None:
self.container.setCWD(self.cwd)
if not self.cmd and self.cmdline:
print "execute:", self.container.execute(self.cmdline), self.cmdline
else:
assert self.cmd is not None
assert len(self.args) >= 1
print "execute:", self.container.execute(self.cmd, *self.args), ' '.join(self.args)
if self.initial_input:
self.writeInput(self.initial_input)
def run(self, callback):
failed_preconditions = self.checkPreconditions(True) + self.checkPreconditions(False)
if failed_preconditions:
print "[Task] preconditions failed"
callback(self, failed_preconditions)
return
self.callback = callback
try:
self.prepare()
self._run()
except Exception, ex:
print "[Task] exception:", ex
self.postconditions = [FailedPostcondition(ex)]
self.finish()
def prepare(self):
pass
def cleanup(self, failed):
pass
def processStdout(self, data):
self.processOutput(data)
def processStderr(self, data):
self.processOutput(data)
def processOutput(self, data):
self.output_line += data
while True:
i = self.output_line.find('\n')
if i == -1:
break
self.processOutputLine(self.output_line[:i+1])
self.output_line = self.output_line[i+1:]
def processOutputLine(self, line):
print "[Task %s]" % self.name, line[:-1]
pass
def processFinished(self, returncode):
self.returncode = returncode
self.finish()
def abort(self):
if self.container:
self.container.kill()
self.finish(aborted = True)
def finish(self, aborted = False):
self.afterRun()
not_met = [ ]
if aborted:
not_met.append(AbortedPostcondition())
else:
for postcondition in self.postconditions:
if not postcondition.check(self):
not_met.append(postcondition)
self.cleanup(not_met)
self.callback(self, not_met)
def afterRun(self):
pass
def writeInput(self, input):
self.container.write(input)
def getProgress(self):
return self.__progress
def setProgress(self, progress):
if progress > self.end:
progress = self.end
if progress < 0:
progress = 0
self.__progress = progress
if self.task_progress_changed:
self.task_progress_changed()
progress = property(getProgress, setProgress)
def __str__(self):
return "Components.Task.Task name=%s" % self.name
class LoggingTask(Task):
def __init__(self, job, name):
Task.__init__(self, job, name)
self.log = []
def processOutput(self, data):
print "[%s]" % self.name, data,
self.log.append(data)
class PythonTask(Task):
def _run(self):
from twisted.internet import threads
from enigma import eTimer
self.aborted = False
self.pos = 0
threads.deferToThread(self.work).addBoth(self.onComplete)
self.timer = eTimer()
self.timer.callback.append(self.onTimer)
self.timer.start(5)
def work(self):
		raise NotImplementedError, "work"
def abort(self):
self.aborted = True
if self.callback is None:
self.finish(aborted = True)
def onTimer(self):
self.setProgress(self.pos)
def onComplete(self, result):
self.postconditions.append(FailedPostcondition(result))
self.timer.stop()
del self.timer
self.finish()
class ConditionTask(Task):
"""
Reactor-driven pthread_condition.
Wait for something to happen. Call trigger when something occurs that
is likely to make check() return true. Raise exception in check() to
signal error.
Default is to call trigger() once per second, override prepare/cleanup
to do something else (like waiting for hotplug)...
"""
def __init__(self, job, name, timeoutCount=None):
Task.__init__(self, job, name)
self.timeoutCount = timeoutCount
def _run(self):
self.triggerCount = 0
def prepare(self):
from enigma import eTimer
self.timer = eTimer()
self.timer.callback.append(self.trigger)
self.timer.start(1000)
def cleanup(self, failed):
if hasattr(self, 'timer'):
self.timer.stop()
del self.timer
def check(self):
# override to return True only when condition triggers
return True
def trigger(self):
self.triggerCount += 1
try:
if (self.timeoutCount is not None) and (self.triggerCount > self.timeoutCount):
raise Exception, "Timeout elapsed, sorry"
res = self.check()
except Exception, e:
self.postconditions.append(FailedPostcondition(e))
res = True
if res:
self.finish()
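# Sketch of a ConditionTask subclass (illustrative only): poll once per second
# via trigger() until a hypothetical marker file appears, or fail after the
# timeout.
#class FileExistsTask(ConditionTask):
#	def __init__(self, job, filename):
#		ConditionTask.__init__(self, job, "Waiting for file", timeoutCount=30)
#		self.filename = filename
#	def check(self):
#		import os
#		return os.path.exists(self.filename)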
# The jobmanager will execute multiple jobs, each after another.
# later, it will also support suspending jobs (and continuing them after reboot etc)
# It also supports a notification when some error occurred, and possibly a retry.
class JobManager:
def __init__(self):
self.active_jobs = [ ]
self.failed_jobs = [ ]
self.job_classes = [ ]
self.in_background = False
self.visible = False
self.active_job = None
# Set onSuccess to popupTaskView to get a visible notification.
# onFail defaults to notifyFailed which tells the user that it went south.
def AddJob(self, job, onSuccess=None, onFail=None):
job.onSuccess = onSuccess
if onFail is None:
job.onFail = self.notifyFailed
else:
job.onFail = onFail
self.active_jobs.append(job)
self.kick()
def kick(self):
if self.active_job is None:
if self.active_jobs:
self.active_job = self.active_jobs.pop(0)
self.active_job.start(self.jobDone)
def notifyFailed(self, job, task, problems):
from Tools import Notifications
from Screens.MessageBox import MessageBox
if problems[0].RECOVERABLE:
Notifications.AddNotificationWithCallback(self.errorCB, MessageBox, _("Error: %s\nRetry?") % (problems[0].getErrorMessage(task)))
return True
else:
Notifications.AddNotification(MessageBox, job.name + "\n" + _("Error") + ': %s' % (problems[0].getErrorMessage(task)), type = MessageBox.TYPE_ERROR )
return False
def jobDone(self, job, task, problems):
print "job", job, "completed with", problems, "in", task
if problems:
if not job.onFail(job, task, problems):
self.errorCB(False)
else:
self.active_job = None
if job.onSuccess:
job.onSuccess(job)
self.kick()
# Set job.onSuccess to this function if you want to pop up the jobview when the job is done/
def popupTaskView(self, job):
if not self.visible:
from Tools import Notifications
from Screens.TaskView import JobView
self.visible = True
Notifications.AddNotification(JobView, job)
def errorCB(self, answer):
if answer:
print "retrying job"
self.active_job.retry()
else:
print "not retrying job."
self.failed_jobs.append(self.active_job)
self.active_job = None
self.kick()
def getPendingJobs(self):
list = [ ]
if self.active_job:
list.append(self.active_job)
list += self.active_jobs
return list
# some examples:
#class PartitionExistsPostcondition:
# def __init__(self, device):
# self.device = device
#
# def check(self, task):
# import os
# return os.access(self.device + "part1", os.F_OK)
#
#class CreatePartitionTask(Task):
# def __init__(self, device):
# Task.__init__(self, "Creating partition")
# self.device = device
# self.setTool("/sbin/sfdisk")
# self.args += ["-f", self.device + "disc"]
# self.initial_input = "0,\n;\n;\n;\ny\n"
# self.postconditions.append(PartitionExistsPostcondition(self.device))
#
#class CreateFilesystemTask(Task):
# def __init__(self, device, partition = 1, largefile = True):
# Task.__init__(self, "Creating filesystem")
# self.setTool("/sbin/mkfs.ext")
# if largefile:
# self.args += ["-T", "largefile"]
# self.args.append("-m0")
# self.args.append(device + "part%d" % partition)
#
#class FilesystemMountTask(Task):
# def __init__(self, device, partition = 1, filesystem = "ext3"):
# Task.__init__(self, "Mounting filesystem")
# self.setTool("/bin/mount")
# if filesystem is not None:
# self.args += ["-t", filesystem]
# self.args.append(device + "part%d" % partition)
class Condition:
def __init__(self):
pass
RECOVERABLE = False
def getErrorMessage(self, task):
return _("An unknown error occurred!") + " (%s @ task %s)" % (self.__class__.__name__, task.__class__.__name__)
class WorkspaceExistsPrecondition(Condition):
def __init__(self):
pass
def check(self, task):
return os.access(task.job.workspace, os.W_OK)
class DiskspacePrecondition(Condition):
def __init__(self, diskspace_required):
self.diskspace_required = diskspace_required
self.diskspace_available = 0
def check(self, task):
import os
try:
s = os.statvfs(task.job.workspace)
self.diskspace_available = s.f_bsize * s.f_bavail
return self.diskspace_available >= self.diskspace_required
except OSError:
return False
def getErrorMessage(self, task):
return _("Not enough disk space. Please free up some disk space and try again. (%d MB required, %d MB available)") % (self.diskspace_required / 1024 / 1024, self.diskspace_available / 1024 / 1024)
class ToolExistsPrecondition(Condition):
def __init__(self):
pass
def check(self, task):
import os
if task.cmd[0]=='/':
self.realpath = task.cmd
print "[Task.py][ToolExistsPrecondition] WARNING: usage of absolute paths for tasks should be avoided!"
return os.access(self.realpath, os.X_OK)
else:
self.realpath = task.cmd
path = os.environ.get('PATH', '').split(os.pathsep)
path.append(task.cwd + '/')
			absolutes = filter(lambda f: os.access(f, os.X_OK), [os.path.join(directory, task.cmd) for directory in path])
if absolutes:
self.realpath = absolutes[0]
return True
return False
def getErrorMessage(self, task):
return _("A required tool (%s) was not found.") % self.realpath
class AbortedPostcondition(Condition):
def __init__(self):
pass
def getErrorMessage(self, task):
return "Cancelled upon user request"
class ReturncodePostcondition(Condition):
def __init__(self):
pass
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
if hasattr(task, 'log') and task.log:
log = ''.join(task.log).strip()
log = log.split('\n')[-3:]
log = '\n'.join(log)
return log
else:
return _("Error code") + ": %s" % task.returncode
class FailedPostcondition(Condition):
def __init__(self, exception):
self.exception = exception
def getErrorMessage(self, task):
if isinstance(self.exception, int):
if hasattr(task, 'log'):
log = ''.join(task.log).strip()
log = log.split('\n')[-4:]
log = '\n'.join(log)
return log
else:
return _("Error code") + " %s" % self.exception
return str(self.exception)
def check(self, task):
return (self.exception is None) or (self.exception == 0)
#class HDDInitJob(Job):
# def __init__(self, device):
# Job.__init__(self, _("Initialize Harddisk"))
# self.device = device
# self.fromDescription(self.createDescription())
#
# def fromDescription(self, description):
# self.device = description["device"]
# self.addTask(CreatePartitionTask(self.device))
# self.addTask(CreateFilesystemTask(self.device))
# self.addTask(FilesystemMountTask(self.device))
#
# def createDescription(self):
# return {"device": self.device}
job_manager = JobManager()
|
popazerty/EG-2
|
lib/python/Components/Task.py
|
Python
|
gpl-2.0
| 15,627 | 0.03398 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from testtools import skipIf
from heat.common import exception
from heat.common import template_format
from heat.engine import clients
from heat.engine import scheduler
from heat.engine.resources.neutron import loadbalancer
from heat.openstack.common.importutils import try_import
from heat.tests import fakes
from heat.tests import utils
from heat.tests.common import HeatTestCase
from heat.tests.v1_1 import fakes as nova_fakes
neutronclient = try_import('neutronclient.v2_0.client')
health_monitor_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test load balancer resources",
"Parameters" : {},
"Resources" : {
"monitor": {
"Type": "OS::Neutron::HealthMonitor",
"Properties": {
"type": "HTTP",
"delay": 3,
"max_retries": 5,
"timeout": 10
}
}
}
}
'''
pool_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test load balancer resources",
"Parameters" : {},
"Resources" : {
"pool": {
"Type": "OS::Neutron::Pool",
"Properties": {
"protocol": "HTTP",
"subnet_id": "sub123",
"lb_method": "ROUND_ROBIN",
"vip": {
"protocol_port": 80
}
}
}
}
}
'''
member_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test load balancer member",
"Resources" : {
"member": {
"Type": "OS::Neutron::PoolMember",
"Properties": {
"protocol_port": 8080,
"pool_id": "pool123",
"address": "1.2.3.4"
}
}
}
}
'''
lb_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test load balancer resources",
"Parameters" : {},
"Resources" : {
"lb": {
"Type": "OS::Neutron::LoadBalancer",
"Properties": {
"protocol_port": 8080,
"pool_id": "pool123",
"members": ["1234"]
}
}
}
}
'''
pool_with_session_persistence_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test load balancer resources wit",
"Parameters" : {},
"Resources" : {
"pool": {
"Type": "OS::Neutron::Pool",
"Properties": {
"protocol": "HTTP",
"subnet_id": "sub123",
"lb_method": "ROUND_ROBIN",
"vip": {
"protocol_port": 80,
"session_persistence": {
"type": "APP_COOKIE",
"cookie_name": "cookie"
}
}
}
}
}
}
'''
@skipIf(neutronclient is None, 'neutronclient unavailable')
class HealthMonitorTest(HeatTestCase):
def setUp(self):
super(HealthMonitorTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_health_monitor')
self.m.StubOutWithMock(neutronclient.Client, 'delete_health_monitor')
self.m.StubOutWithMock(neutronclient.Client, 'show_health_monitor')
self.m.StubOutWithMock(neutronclient.Client, 'update_health_monitor')
self.m.StubOutWithMock(clients.OpenStackClients, 'keystone')
utils.setup_dummy_db()
def create_health_monitor(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_health_monitor({
'health_monitor': {
'delay': 3, 'max_retries': 5, 'type': u'HTTP',
'timeout': 10, 'admin_state_up': True}}
).AndReturn({'health_monitor': {'id': '5678'}})
snippet = template_format.parse(health_monitor_template)
stack = utils.parse_stack(snippet)
return loadbalancer.HealthMonitor(
'monitor', snippet['Resources']['monitor'], stack)
def test_create(self):
rsrc = self.create_health_monitor()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_failed(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_health_monitor({
'health_monitor': {
'delay': 3, 'max_retries': 5, 'type': u'HTTP',
'timeout': 10, 'admin_state_up': True}}
).AndRaise(loadbalancer.NeutronClientException())
self.m.ReplayAll()
snippet = template_format.parse(health_monitor_template)
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.HealthMonitor(
'monitor', snippet['Resources']['monitor'], stack)
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_delete(self):
neutronclient.Client.delete_health_monitor('5678')
neutronclient.Client.show_health_monitor('5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
rsrc = self.create_health_monitor()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_already_gone(self):
neutronclient.Client.delete_health_monitor('5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
rsrc = self.create_health_monitor()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_failed(self):
neutronclient.Client.delete_health_monitor('5678').AndRaise(
loadbalancer.NeutronClientException(status_code=400))
rsrc = self.create_health_monitor()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_attribute(self):
rsrc = self.create_health_monitor()
neutronclient.Client.show_health_monitor('5678').MultipleTimes(
).AndReturn(
{'health_monitor': {'admin_state_up': True, 'delay': 3}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertIs(True, rsrc.FnGetAtt('admin_state_up'))
self.assertEqual(3, rsrc.FnGetAtt('delay'))
self.m.VerifyAll()
def test_attribute_failed(self):
rsrc = self.create_health_monitor()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'subnet_id')
self.assertEqual(
'The Referenced Attribute (monitor subnet_id) is incorrect.',
str(error))
self.m.VerifyAll()
def test_update(self):
rsrc = self.create_health_monitor()
neutronclient.Client.update_health_monitor(
'5678', {'health_monitor': {'delay': 10}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['delay'] = 10
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
@skipIf(neutronclient is None, 'neutronclient unavailable')
class PoolTest(HeatTestCase):
def setUp(self):
super(PoolTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_pool')
self.m.StubOutWithMock(neutronclient.Client, 'delete_pool')
self.m.StubOutWithMock(neutronclient.Client, 'show_pool')
self.m.StubOutWithMock(neutronclient.Client, 'update_pool')
self.m.StubOutWithMock(neutronclient.Client,
'associate_health_monitor')
self.m.StubOutWithMock(neutronclient.Client,
'disassociate_health_monitor')
self.m.StubOutWithMock(neutronclient.Client, 'create_vip')
self.m.StubOutWithMock(neutronclient.Client, 'delete_vip')
self.m.StubOutWithMock(neutronclient.Client, 'show_vip')
self.m.StubOutWithMock(clients.OpenStackClients, 'keystone')
utils.setup_dummy_db()
def create_pool(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').AndReturn(
{'pool': {'status': 'ACTIVE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'ACTIVE'}})
snippet = template_format.parse(pool_template)
stack = utils.parse_stack(snippet)
return loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
def test_create(self):
rsrc = self.create_pool()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_pending(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').AndReturn(
{'pool': {'status': 'PENDING_CREATE'}})
neutronclient.Client.show_pool('5678').MultipleTimes().AndReturn(
{'pool': {'status': 'ACTIVE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'PENDING_CREATE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'ACTIVE'}})
snippet = template_format.parse(pool_template)
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_failed_unexpected_status(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').AndReturn(
{'pool': {'status': 'ERROR', 'name': '5678'}})
snippet = template_format.parse(pool_template)
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
self.m.ReplayAll()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'Error: neutron report unexpected pool '
'resource[5678] status[ERROR]',
str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_create_failed_unexpected_vip_status(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').MultipleTimes().AndReturn(
{'pool': {'status': 'ACTIVE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'ERROR', 'name': 'xyz'}})
snippet = template_format.parse(pool_template)
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
self.m.ReplayAll()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'Error: neutron reported unexpected vip '
'resource[xyz] status[ERROR]',
str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_create_failed(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndRaise(loadbalancer.NeutronClientException())
self.m.ReplayAll()
snippet = template_format.parse(pool_template)
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_create_with_session_persistence(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80,
'session_persistence': {
'type': 'APP_COOKIE',
'cookie_name': 'cookie'}}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').AndReturn(
{'pool': {'status': 'ACTIVE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'ACTIVE'}})
snippet = template_format.parse(pool_with_session_persistence_template)
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_failing_validation_with_session_persistence(self):
msg = _('Property cookie_name is required, when '
'session_persistence type is set to APP_COOKIE.')
snippet = template_format.parse(pool_with_session_persistence_template)
pool = snippet['Resources']['pool']
persistence = pool['Properties']['vip']['session_persistence']
        # When persistence type is set to APP_COOKIE, cookie_name is required
persistence['type'] = 'APP_COOKIE'
persistence['cookie_name'] = None
resource = loadbalancer.Pool('pool', pool, utils.parse_stack(snippet))
error = self.assertRaises(exception.StackValidationFailed,
resource.validate)
self.assertEqual(msg, str(error))
def test_validation_not_failing_without_session_persistence(self):
snippet = template_format.parse(pool_template)
pool = snippet['Resources']['pool']
resource = loadbalancer.Pool('pool', pool, utils.parse_stack(snippet))
self.assertIsNone(resource.validate())
def test_properties_are_prepared_for_session_persistence(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80,
'session_persistence': {'type': 'HTTP_COOKIE'}}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').AndReturn(
{'pool': {'status': 'ACTIVE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'ACTIVE'}})
snippet = template_format.parse(pool_with_session_persistence_template)
pool = snippet['Resources']['pool']
persistence = pool['Properties']['vip']['session_persistence']
        # change persistence type to HTTP_COOKIE, which does not require cookie_name
persistence['type'] = 'HTTP_COOKIE'
del persistence['cookie_name']
resource = loadbalancer.Pool('pool', pool, utils.parse_stack(snippet))
        # assert that properties contain a cookie_name property with None value
persistence = resource.properties['vip']['session_persistence']
self.assertIn('cookie_name', persistence)
self.assertIsNone(persistence['cookie_name'])
self.m.ReplayAll()
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE), resource.state)
self.m.VerifyAll()
def test_delete(self):
rsrc = self.create_pool()
neutronclient.Client.delete_vip('xyz')
neutronclient.Client.show_vip('xyz').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
neutronclient.Client.delete_pool('5678')
neutronclient.Client.show_pool('5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_already_gone(self):
neutronclient.Client.delete_vip('xyz').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
neutronclient.Client.delete_pool('5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
rsrc = self.create_pool()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_vip_failed(self):
neutronclient.Client.delete_vip('xyz').AndRaise(
loadbalancer.NeutronClientException(status_code=400))
rsrc = self.create_pool()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_delete_failed(self):
neutronclient.Client.delete_vip('xyz').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
neutronclient.Client.delete_pool('5678').AndRaise(
loadbalancer.NeutronClientException(status_code=400))
rsrc = self.create_pool()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_attribute(self):
rsrc = self.create_pool()
neutronclient.Client.show_pool('5678').MultipleTimes(
).AndReturn(
{'pool': {'admin_state_up': True, 'lb_method': 'ROUND_ROBIN'}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertIs(True, rsrc.FnGetAtt('admin_state_up'))
self.assertEqual('ROUND_ROBIN', rsrc.FnGetAtt('lb_method'))
self.m.VerifyAll()
def test_vip_attribute(self):
rsrc = self.create_pool()
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'address': '10.0.0.3', 'name': 'xyz'}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual({'address': '10.0.0.3', 'name': 'xyz'},
rsrc.FnGetAtt('vip'))
self.m.VerifyAll()
def test_attribute_failed(self):
rsrc = self.create_pool()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'net_id')
self.assertEqual(
'The Referenced Attribute (pool net_id) is incorrect.',
str(error))
self.m.VerifyAll()
def test_update(self):
rsrc = self.create_pool()
neutronclient.Client.update_pool(
'5678', {'pool': {'admin_state_up': False}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['admin_state_up'] = False
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
def test_update_monitors(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.associate_health_monitor(
'5678', {'health_monitor': {'id': 'mon123'}})
neutronclient.Client.associate_health_monitor(
'5678', {'health_monitor': {'id': 'mon456'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').AndReturn(
{'pool': {'status': 'ACTIVE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'ACTIVE'}})
neutronclient.Client.disassociate_health_monitor(
'5678', {'health_monitor': {'id': 'mon456'}})
neutronclient.Client.associate_health_monitor(
'5678', {'health_monitor': {'id': 'mon789'}})
snippet = template_format.parse(pool_template)
stack = utils.parse_stack(snippet)
snippet['Resources']['pool']['Properties']['monitors'] = [
'mon123', 'mon456']
rsrc = loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['monitors'] = ['mon123', 'mon789']
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
@skipIf(neutronclient is None, 'neutronclient unavailable')
class PoolMemberTest(HeatTestCase):
def setUp(self):
super(PoolMemberTest, self).setUp()
self.fc = nova_fakes.FakeClient()
self.m.StubOutWithMock(neutronclient.Client, 'create_member')
self.m.StubOutWithMock(neutronclient.Client, 'delete_member')
self.m.StubOutWithMock(neutronclient.Client, 'update_member')
self.m.StubOutWithMock(neutronclient.Client, 'show_member')
self.m.StubOutWithMock(clients.OpenStackClients, 'keystone')
utils.setup_dummy_db()
def create_member(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_member({
'member': {
'pool_id': 'pool123', 'protocol_port': 8080,
'address': '1.2.3.4', 'admin_state_up': True}}
).AndReturn({'member': {'id': 'member5678'}})
snippet = template_format.parse(member_template)
stack = utils.parse_stack(snippet)
return loadbalancer.PoolMember(
'member', snippet['Resources']['member'], stack)
def test_create(self):
rsrc = self.create_member()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('member5678', rsrc.resource_id)
self.m.VerifyAll()
def test_create_optional_parameters(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_member({
'member': {
'pool_id': 'pool123', 'protocol_port': 8080,
'weight': 100, 'admin_state_up': False,
'address': '1.2.3.4'}}
).AndReturn({'member': {'id': 'member5678'}})
snippet = template_format.parse(member_template)
snippet['Resources']['member']['Properties']['admin_state_up'] = False
snippet['Resources']['member']['Properties']['weight'] = 100
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.PoolMember(
'member', snippet['Resources']['member'], stack)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('member5678', rsrc.resource_id)
self.m.VerifyAll()
def test_attribute(self):
rsrc = self.create_member()
neutronclient.Client.show_member('member5678').MultipleTimes(
).AndReturn(
{'member': {'admin_state_up': True, 'weight': 5}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertIs(True, rsrc.FnGetAtt('admin_state_up'))
self.assertEqual(5, rsrc.FnGetAtt('weight'))
self.m.VerifyAll()
def test_update(self):
rsrc = self.create_member()
neutronclient.Client.update_member(
'member5678', {'member': {'pool_id': 'pool456'}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['pool_id'] = 'pool456'
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
def test_delete(self):
rsrc = self.create_member()
neutronclient.Client.delete_member(u'member5678')
neutronclient.Client.show_member(u'member5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_missing_member(self):
rsrc = self.create_member()
neutronclient.Client.delete_member(u'member5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
@skipIf(neutronclient is None, 'neutronclient unavailable')
class LoadBalancerTest(HeatTestCase):
def setUp(self):
super(LoadBalancerTest, self).setUp()
self.fc = nova_fakes.FakeClient()
self.m.StubOutWithMock(neutronclient.Client, 'create_member')
self.m.StubOutWithMock(neutronclient.Client, 'delete_member')
self.m.StubOutWithMock(clients.OpenStackClients, 'keystone')
self.m.StubOutWithMock(clients.OpenStackClients, 'nova')
utils.setup_dummy_db()
def create_load_balancer(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
clients.OpenStackClients.nova("compute").MultipleTimes().AndReturn(
self.fc)
neutronclient.Client.create_member({
'member': {
'pool_id': 'pool123', 'protocol_port': 8080,
'address': '1.2.3.4'}}
).AndReturn({'member': {'id': 'member5678'}})
snippet = template_format.parse(lb_template)
stack = utils.parse_stack(snippet)
return loadbalancer.LoadBalancer(
'lb', snippet['Resources']['lb'], stack)
def test_create(self):
rsrc = self.create_load_balancer()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update(self):
rsrc = self.create_load_balancer()
neutronclient.Client.delete_member(u'member5678')
neutronclient.Client.create_member({
'member': {
'pool_id': 'pool123', 'protocol_port': 8080,
'address': '4.5.6.7'}}
).AndReturn({'member': {'id': 'memberxyz'}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['members'] = ['5678']
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
def test_update_missing_member(self):
rsrc = self.create_load_balancer()
neutronclient.Client.delete_member(u'member5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['members'] = []
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete(self):
rsrc = self.create_load_balancer()
neutronclient.Client.delete_member(u'member5678')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_missing_member(self):
rsrc = self.create_load_balancer()
neutronclient.Client.delete_member(u'member5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
|
ntt-sic/heat
|
heat/tests/test_neutron_loadbalancer.py
|
Python
|
apache-2.0
| 34,298 | 0.000087 |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.context_processors import csrf
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import CsrfViewMiddleware, CSRF_KEY_LENGTH
from django.template import RequestContext, Template
from django.test import TestCase
from django.views.decorators.csrf import csrf_exempt, requires_csrf_token, ensure_csrf_cookie
# Response/views used for CsrfResponseMiddleware and CsrfViewMiddleware tests
def post_form_response():
resp = HttpResponse(content=u"""
<html><body><h1>\u00a1Unicode!<form method="post"><input type="text" /></form></body></html>
""", mimetype="text/html")
return resp
def post_form_view(request):
"""A view that returns a POST form (without a token)"""
return post_form_response()
# Response/views used for template tag tests
def token_view(request):
"""A view that uses {% csrf_token %}"""
context = RequestContext(request, processors=[csrf])
template = Template("{% csrf_token %}")
return HttpResponse(template.render(context))
def non_token_view_using_request_processor(request):
"""
A view that doesn't use the token, but does use the csrf view processor.
"""
context = RequestContext(request, processors=[csrf])
template = Template("")
return HttpResponse(template.render(context))
class TestingHttpRequest(HttpRequest):
"""
A version of HttpRequest that allows us to change some things
more easily
"""
def is_secure(self):
return getattr(self, '_is_secure_override', False)
class CsrfViewMiddlewareTest(TestCase):
# The csrf token is potentially from an untrusted source, so could have
# characters that need dealing with.
_csrf_id_cookie = "<1>\xc2\xa1"
_csrf_id = "1"
def _get_GET_no_csrf_cookie_request(self):
return TestingHttpRequest()
def _get_GET_csrf_cookie_request(self):
req = TestingHttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie
return req
def _get_POST_csrf_cookie_request(self):
req = self._get_GET_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_no_csrf_cookie_request(self):
req = self._get_GET_no_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_request_with_token(self):
req = self._get_POST_csrf_cookie_request()
req.POST['csrfmiddlewaretoken'] = self._csrf_id
return req
def _check_token_present(self, response, csrf_id=None):
self.assertContains(response, "name='csrfmiddlewaretoken' value='%s'" % (csrf_id or self._csrf_id))
def test_process_view_token_too_long(self):
"""
Check that if the token is longer than expected, it is ignored and
a new token is created.
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = 'x' * 10000000
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(len(csrf_cookie.value), CSRF_KEY_LENGTH)
def test_process_response_get_token_used(self):
"""
When get_token is used, check that the cookie is created and headers
patched.
"""
req = self._get_GET_no_csrf_cookie_request()
# Put tests for CSRF_COOKIE_* settings here
with self.settings(CSRF_COOKIE_NAME='myname',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get('myname', False)
self.assertNotEqual(csrf_cookie, False)
self.assertEqual(csrf_cookie['domain'], '.example.com')
self.assertEqual(csrf_cookie['secure'], True)
self.assertEqual(csrf_cookie['path'], '/test/')
self.assertTrue('Cookie' in resp2.get('Vary',''))
def test_process_response_get_token_not_used(self):
"""
Check that if get_token() is not called, the view middleware does not
add a cookie.
"""
# This is important to make pages cacheable. Pages which do call
# get_token(), assuming they use the token, are not cacheable because
# the token is specific to the user
req = self._get_GET_no_csrf_cookie_request()
# non_token_view_using_request_processor does not call get_token(), but
# does use the csrf request processor. By using this, we are testing
# that the view processor is properly lazy and doesn't call get_token()
# until needed.
CsrfViewMiddleware().process_view(req, non_token_view_using_request_processor, (), {})
resp = non_token_view_using_request_processor(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(csrf_cookie, False)
# Check the request processing
def test_process_request_no_csrf_cookie(self):
"""
Check that if no CSRF cookies is present, the middleware rejects the
incoming request. This will stop login CSRF.
"""
req = self._get_POST_no_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_no_token(self):
"""
Check that if a CSRF cookie is present but no token, the middleware
rejects the incoming request.
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_and_token(self):
"""
Check that if both a cookie and a token is present, the middleware lets it through.
"""
req = self._get_POST_request_with_token()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
def test_process_request_csrf_cookie_no_token_exempt_view(self):
"""
Check that if a CSRF cookie is present and no token, but the csrf_exempt
decorator has been applied to the view, the middleware lets it through
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, csrf_exempt(post_form_view), (), {})
self.assertEqual(None, req2)
def test_csrf_token_in_header(self):
"""
Check that we can pass in the token in a header instead of in the form
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
def test_put_and_delete_rejected(self):
"""
Tests that HTTP PUT and DELETE methods have protection
"""
req = TestingHttpRequest()
req.method = 'PUT'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
req = TestingHttpRequest()
req.method = 'DELETE'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_put_and_delete_allowed(self):
"""
Tests that HTTP PUT and DELETE methods can get through with
X-CSRFToken and a cookie
"""
req = self._get_GET_csrf_cookie_request()
req.method = 'PUT'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
req = self._get_GET_csrf_cookie_request()
req.method = 'DELETE'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
# Tests for the template tag method
def test_token_node_no_csrf_cookie(self):
"""
Check that CsrfTokenNode works when no CSRF cookie is set
"""
req = self._get_GET_no_csrf_cookie_request()
resp = token_view(req)
self.assertEqual(u"", resp.content)
def test_token_node_empty_csrf_cookie(self):
"""
Check that we get a new token if the csrf_cookie is the empty string
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = ""
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
self.assertNotEqual(u"", resp.content)
def test_token_node_with_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is set
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_exempt_view(self):
"""
Check that get_token still works for a view decorated with 'csrf_exempt'.
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, csrf_exempt(token_view), (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_requires_csrf_token_view(self):
"""
Check that get_token works for a view decorated solely with requires_csrf_token
"""
req = self._get_GET_csrf_cookie_request()
resp = requires_csrf_token(token_view)(req)
self._check_token_present(resp)
def test_token_node_with_new_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is created by
the middleware (when one was not already present)
"""
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies[settings.CSRF_COOKIE_NAME]
self._check_token_present(resp, csrf_id=csrf_cookie.value)
def test_https_bad_referer(self):
"""
Test that a POST HTTPS request with a bad referer is rejected
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertNotEqual(None, req2)
self.assertEqual(403, req2.status_code)
def test_https_good_referer(self):
"""
Test that a POST HTTPS request with a good referer is accepted
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com/somepage'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
def test_https_good_referer_2(self):
"""
Test that a POST HTTPS request with a good referer is accepted
where the referer contains no trailing slash
"""
# See ticket #15617
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
def test_ensures_csrf_cookie_no_middleware(self):
"""
Tests that ensures_csrf_cookie decorator fulfils its promise
with no middleware
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
req = self._get_GET_no_csrf_cookie_request()
resp = view(req)
self.assertTrue(resp.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertTrue('Cookie' in resp.get('Vary',''))
def test_ensures_csrf_cookie_with_middleware(self):
"""
Tests that ensures_csrf_cookie decorator fulfils its promise
with the middleware enabled.
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, view, (), {})
resp = view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
self.assertTrue(resp2.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertTrue('Cookie' in resp2.get('Vary',''))
|
adrianholovaty/django
|
tests/regressiontests/csrf_tests/tests.py
|
Python
|
bsd-3-clause
| 13,454 | 0.002155 |
the_count = [1, 2, 3, 4, 5]
fruits = ['apple', 'oranges', 'pears', 'apricots',]
change = [1, 'pennies', 2, 'dimes', 3, 'quarters',]
# this first kind of for-loop goes through a list
for number in the_count:
print("This is count %d" % number)
# same as above
for fruit in fruits:
print("A fruit of type: %s" % fruit)
# also we can go through mixed lists too
# notice we have to use %r since we don't know what's in it
for i in change:
print("I got %r " % i)
# we can also build lists; first start with an empty one
elements = []
# then use the range function to do 0 to 5 counts
for i in range(0,6):
print("Adding %d to the list." % i)
# append is a function that lists understand
elements.append(i)
# now we can print them out too
for i in elements:
print("Element was: %d" % i)
|
sunrin92/LearnPython
|
1-lpthw/ex32.py
|
Python
|
mit
| 812 | 0.004926 |
from pydub import *
class AudioMerger:
voice_tags = ["one", "two", "three", "four", "five", "ten", "RUN", "relax", "completed"]
def __init__(self, music):
self.music = music
self.additionalGain = 8
self.voices={}
for voice in self.voice_tags:
sound = AudioSegment.from_file('voices/' + voice + '.wav')
sound += self.additionalGain
self.voices[voice] = sound
def addCountdown(self, startTime, isRun = True):
for i in range(1, 6):
voice = self.voices[self.voice_tags[i - 1]]
self.music = self.music.overlay(voice, position = (startTime - i) * 1000)
self.music = self.music.overlay(self.voices["ten"], position = (startTime - 10) * 1000)
voice = self.voices["RUN" if isRun else "relax"]
self.music = self.music.overlay(voice, position = startTime * 1000)
def addCompleted(self, startTimeSec):
self.music = self.music.overlay(self.voices["completed"], position = (startTimeSec * 1000))
def exportMusic(self, fname):
self.music.export(fname + ".mp3", format="mp3")
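# Example usage (illustrative sketch; "workout.mp3" and the cue times are
# hypothetical, and pydub needs ffmpeg/libav installed to decode mp3):
#
# music = AudioSegment.from_file("workout.mp3")
# merger = AudioMerger(music)
# merger.addCountdown(60, isRun=True)    # 10-second countdown, "RUN" at 60s
# merger.addCountdown(120, isRun=False)  # countdown ending in "relax" at 120s
# merger.addCompleted(180)               # "completed" voice at 180s
# merger.exportMusic("workout_with_cues")  # writes workout_with_cues.mp3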
|
gyimothilaszlo/interval-music-maker
|
AudioMerger.py
|
Python
|
mit
| 1,005 | 0.034826 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-03 16:13
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('books', '0003_book_owner'),
]
operations = [
migrations.RenameModel(
old_name='Book',
new_name='BookItem',
),
]
|
pankajlal/prabandh
|
books/migrations/0004_auto_20160703_2143.py
|
Python
|
apache-2.0
| 384 | 0 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from testtools import matchers
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesGetTestJSON(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(VolumesGetTestJSON, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(VolumesGetTestJSON, cls).setup_clients()
cls.client = cls.volumes_extensions_client
@test.idempotent_id('f10f25eb-9775-4d9d-9cbe-1cf54dae9d5f')
def test_volume_create_get_delete(self):
# CREATE, GET, DELETE Volume
volume = None
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
# Create volume
volume = self.client.create_volume(display_name=v_name,
metadata=metadata)
self.addCleanup(self.delete_volume, volume['id'])
self.assertIn('id', volume)
self.assertIn('displayName', volume)
self.assertEqual(volume['displayName'], v_name,
"The created volume name is not equal "
"to the requested name")
self.assertTrue(volume['id'] is not None,
"Field volume id is empty or not found.")
# Wait for Volume status to become ACTIVE
self.client.wait_for_volume_status(volume['id'], 'available')
# GET Volume
fetched_volume = self.client.show_volume(volume['id'])
# Verification of details of fetched Volume
self.assertEqual(v_name,
fetched_volume['displayName'],
'The fetched Volume is different '
'from the created Volume')
self.assertEqual(volume['id'],
fetched_volume['id'],
'The fetched Volume is different '
'from the created Volume')
self.assertThat(fetched_volume['metadata'].items(),
matchers.ContainsAll(metadata.items()),
'The fetched Volume metadata misses data '
'from the created Volume')
|
akash1808/tempest
|
tempest/api/compute/volumes/test_volumes_get.py
|
Python
|
apache-2.0
| 3,025 | 0 |
#!/usr/bin/python
#
# OpenStack Heat Plugin for interfacing with VMware Big Data Extensions
#
# Chris Mutchler - chris@virtualelephant.com
# http://www.VirtualElephant.com
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
import base64
import requests
import subprocess
import pyVmomi
from pyVim import connect
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vmodl, vim
from heat.engine import constraints, properties, resource
from heat.openstack.common import log as logging
from neutronclient.neutron import client
logger = logging.getLogger(__name__)
class BigDataExtensions(resource.Resource):
PROPERTIES = (
BDE_ENDPOINT, VCM_SERVER, USERNAME, PASSWORD,
CLUSTER_NAME, CLUSTER_TYPE, NETWORK, CLUSTER_PASSWORD, CLUSTER_RP,
VIO_CONFIG, BDE_CONFIG, SECURITY_GROUP, SUBNET
) = (
'bde_endpoint', 'vcm_server', 'username', 'password',
'cluster_name', 'cluster_type', 'network', 'cluster_password', 'cluster_rp',
'vio_config', 'bde_config', 'security_group', 'subnet'
)
properties_schema = {
BDE_ENDPOINT: properties.Schema(
properties.Schema.STRING,
required=True,
default='bde.localdomain'
),
VCM_SERVER: properties.Schema(
properties.Schema.STRING,
required=True,
default='vcenter.localdomain'
),
USERNAME: properties.Schema(
properties.Schema.STRING,
required=True,
default='administrator@vsphere.local'
),
PASSWORD: properties.Schema(
properties.Schema.STRING,
required=True,
default='password'
),
CLUSTER_NAME: properties.Schema(
properties.Schema.STRING,
required=True
),
CLUSTER_TYPE: properties.Schema(
properties.Schema.STRING,
required=True
),
NETWORK: properties.Schema(
properties.Schema.STRING,
required=True
),
CLUSTER_PASSWORD: properties.Schema(
properties.Schema.STRING,
required=False
),
CLUSTER_RP: properties.Schema(
properties.Schema.STRING,
required=True,
default='openstackRP'
),
VIO_CONFIG: properties.Schema(
properties.Schema.STRING,
required=True,
default='/usr/local/bin/etc/vio.config'
),
BDE_CONFIG: properties.Schema(
properties.Schema.STRING,
required=False,
default='/usr/local/bin/etc/bde.config'
),
SECURITY_GROUP: properties.Schema(
properties.Schema.STRING,
required=False,
default='9d3ecec8-e0e3-4088-8c71-8c35cd67dd8b'
),
SUBNET: properties.Schema(
properties.Schema.STRING,
required=True
)
}
def _open_connection(self):
bde_server = self.properties.get(self.BDE_ENDPOINT)
bde_user = self.properties.get(self.USERNAME)
bde_pass = self.properties.get(self.PASSWORD)
header = {'content-type': 'application/x-www-form-urlencoded'}
prefix = 'https://'
port = ':8443'
auth_string = "/serengeti/j_spring_security_check"
data = 'j_username=' + bde_user + '&j_password=' + bde_pass
s = requests.session()
url = prefix + bde_server + port + auth_string
r = s.post(url, data, headers=header, verify=False)
logger.info(_("VirtualElephant::VMware::BDE - Authentication status code %s") % r.json)
return s
def _close_connection(self):
bde_server = self.properties.get(self.BDE_ENDPOINT)
header = {'content-type': 'application/x-www-form-urlencoded'}
url = 'https://' + bde_server + ':8443/serengeti/j_spring_security_logout'
s = requests.session()
r = s.post(url, headers=header, verify=False)
logger.info(_("VirtualElephant::VMware::BDE - Log out status code %s") % r.json)
return
def _create_nsx_ports(self):
# Load VIO environment variables from /usr/local/etc/vio.config
in_file = "/usr/local/etc/vio.config"
f = open(in_file, "ro")
for line in f:
if "OS_AUTH_URL" in line:
trash, os_auth_url = map(str, line.split("="))
os_auth_url = os_auth_url.rstrip('\n')
logger.info(_("VirtualElephant::VMware::BDE - DEBUG os_auth_url %s") % os_auth_url)
elif "OS_TENANT_ID" in line:
trash, os_tenant_id = map(str,line.split("="))
os_tenant_id = os_tenant_id.rstrip('\n')
elif "OS_TENANT_NAME" in line:
trash, os_tenant_name = map(str, line.split("="))
os_tenant_name = os_tenant_name.rstrip('\n')
elif "OS_USERNAME" in line:
trash, os_username = map(str, line.split("="))
os_username = os_username.rstrip('\n')
elif "OS_PASSWORD" in line:
trash, os_password = map(str, line.split("="))
os_password = os_password.rstrip('\n')
elif "OS_URL" in line:
trash, os_url = map(str, line.split("="))
os_url = os_url.rstrip('\n')
elif "OS_TOKEN" in line:
trash, os_token = map(str, line.split("="))
os_token = os_token.rstrip('\n')
d = {}
d['username'] = os_username
d['password'] = os_password
d['auth_url'] = os_auth_url
d['tenant_name'] = os_tenant_name
d['token'] = os_token
d['url'] = os_url
logger.info(_("VirtualElephant::VMware::BDE - Loaded VIO credentials - %s") % d)
# Using BDE API and vSphere API return the MAC address
# for the virtual machines created by BDE.
bde_server = self.properties.get(self.BDE_ENDPOINT)
vcm_server = self.properties.get(self.VCM_SERVER)
admin_user = self.properties.get(self.USERNAME)
admin_pass = self.properties.get(self.PASSWORD)
cluster_name = self.properties.get(self.CLUSTER_NAME)
network_id = self.properties.get(self.NETWORK)
security_group = self.properties.get(self.SECURITY_GROUP)
prefix = 'https://'
port = ':8443'
logger.info(_("VirtualElephant::VMware::BDE - Creating NSX ports for network %s") % network_id)
# Get the node names for the cluster from BDE
curr = self._open_connection()
header = {'content-type': 'application/json'}
api_call = '/serengeti/api/cluster/' + cluster_name
url = prefix + bde_server + port + api_call
r = curr.get(url, headers=header, verify=False)
raw_json = json.loads(r.text)
cluster_data = raw_json["nodeGroups"]
# Open connect to the vSphere API
si = SmartConnect(host=vcm_server, user=admin_user, pwd=admin_pass, port=443)
search_index = si.content.searchIndex
root_folder = si.content.rootFolder
for ng in cluster_data:
nodes = ng["instances"]
for node in nodes:
logger.info(_("VirtualElephant::VMware::BDE - Creating NSX port for %s") % node.get("name"))
vm_name = node.get("name")
vm_moId = node.get("moId")
port_name = vm_name + "-port0"
                # moId is not in the format we need to match
(x,y,z) = vm_moId.split(":")
vm_moId = "'vim." + y + ":" + z + "'"
# Go through each DC one at a time, in case there are multiple in vCenter
for dc in root_folder.childEntity:
content = si.content
objView = content.viewManager.CreateContainerView(dc, [vim.VirtualMachine], True)
vm_list = objView.view
objView.Destroy()
for instance in vm_list:
# convert object to string so we can search
i = str(instance.summary.vm)
if vm_moId in i:
# Matched the VM in BDE and vCenter
logger.info(_("VirtualElephant::VMware::BDE - Match found for BDE node %s") % instance)
for device in instance.config.hardware.device:
if isinstance(device, vim.vm.device.VirtualEthernetCard):
mac_address = str(device.macAddress)
logger.info(_("VirtualElephant::VMware::BDE - Found MAC address %s") % mac_address)
# If the node is already trying to get an IP address,
# then a powercycle is required.
#logger.info(_("VirtualElephant::VMware::BDE - Powercycling the node %s") % node.get("name"))
#if instance.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
# task = instance.PowerOff()
# while task.info.state not in [vim.TaskInfo.State.success,
# vim.TaskInfo.State.error]:
# logger.info(_("VirtualElephant::VMware::BDE - Waiting for node power off %s") % node.get("name"))
# time.sleep(5)
# task = instance.PowerOn()
# while task.info.state not in [vim.TaskInfo.State.success,
# vim.TaskInfo.State.error]:
# logger.info(_("VirtualElephant::VMware::BDE - Waiting for node power on %s") % node.get("name"))
# time.sleep(5)
# Create a new port through Neutron
neutron = client.Client('2.0',
username=os_username,
password=os_password,
auth_url=os_auth_url,
tenant_name=os_tenant_name,
endpoint_url=os_url,
token=os_token)
port_info = {
"port": {
"admin_state_up": True,
"device_id": vm_name,
"name": port_name,
"mac_address": mac_address,
"network_id": network_id
}
}
logger.info(_("VirtualElephant::VMware::BDE - Neutron port string %s") % port_info)
response = neutron.create_port(body=port_info)
logger.info(_("VirtualElephant::VMware::BDE - NSX port creation response - %s") % response)
return
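    # Illustrative /usr/local/etc/vio.config layout assumed by
    # _create_nsx_ports() above -- one KEY=VALUE pair per line, and values
    # must not contain '=' (all values below are hypothetical):
    #
    # OS_AUTH_URL=http://keystone.localdomain:5000/v2.0
    # OS_TENANT_ID=abc123def456
    # OS_TENANT_NAME=demo
    # OS_USERNAME=admin
    # OS_PASSWORD=secret
    # OS_URL=http://neutron.localdomain:9696
    # OS_TOKEN=gAAAAABtoken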
    def handle_create(self):
        # REST API call to create a new VMware BDE cluster
        bde_server = self.properties.get(self.BDE_ENDPOINT)
        vcm_server = self.properties.get(self.VCM_SERVER)
        bde_user = self.properties.get(self.USERNAME)
        bde_pass = self.properties.get(self.PASSWORD)
        distro = self.properties.get(self.CLUSTER_TYPE)
        clusterName = self.properties.get(self.CLUSTER_NAME)
        network = self.properties.get(self.NETWORK)
        rp = self.properties.get(self.CLUSTER_RP)
        prefix = 'https://'
        port = ':8443'

        # Hack because Heat sends this call before the NSX network is
        # created/assigned
        #time.sleep(60)

        # Determine the actual NSX portgroup that was created by scraping the
        # vCenter MOB page (hack - regex in Python is not a strength)
        mob_string = '/mob/?moid=datacenter-2'
        curl_cmd = 'curl -k -u ' + bde_user + ':' + bde_pass + ' ' + prefix + vcm_server + mob_string
        grep_cmd = " | grep -oP '(?<=\(vxw).*(?=" + network + "\))' | grep -oE '[^\(]+$'"
        awk_cmd = " | awk '{print $0 \"" + network + "\"}'"
        full_cmd = curl_cmd + grep_cmd + awk_cmd

        p = subprocess.Popen(full_cmd, stdout=subprocess.PIPE, shell=True)
        (net_uid, err) = p.communicate()

        # Check to see if the network id is in the form we expect
        if 'vxw' in net_uid:
            network_id = net_uid
        else:
            network_id = "vxw" + net_uid
        network_id = network_id.rstrip('\n')

        # Authenticate in a requests.session to the BDE server
        curr = self._open_connection()

        # Check whether the network already exists as an available network.
        # This logs a big fat error message in /opt/serengeti/logs/serengeti.log
        # when the network doesn't exist.
        header = {'content-type': 'application/json'}
        api_call = '/serengeti/api/network/' + network
        url = prefix + bde_server + port + api_call
        r = curr.get(url, headers=header, verify=False)

        # Add the new network to BDE as an available network if the check fails
        payload = {"name": network, "portGroup": network_id, "isDhcp": "true"}
        api_call = '/serengeti/api/networks'
        url = prefix + bde_server + port + api_call
        r = curr.post(url, data=json.dumps(payload), headers=header, verify=False)
        logger.info(_("VirtualElephant::VMware::BDE - Network creation status code %s") % r.status_code)

        # Send the create cluster REST API call
        payload = {"name": clusterName, "distro": distro, "rpNames": [rp],
                   "networkConfig": {"MGT_NETWORK": [network]}}
        api_call = '/serengeti/api/clusters'
        url = prefix + bde_server + port + api_call
        r = curr.post(url, data=json.dumps(payload), headers=header, verify=False)
        logger.info(_("VirtualElephant::VMware::BDE - Create cluster status code %s") % r.status_code)

        # Arbitrary sleep value to allow time for the nodes to be cloned
        sleep = 180
        logger.info(_("VirtualElephant::VMware::BDE - Sleeping for %s seconds while BDE creates the nodes") % sleep)
        time.sleep(sleep)

        # Create ports for the BDE nodes on the NSX logical router
        nsx = self._create_nsx_ports()
        term = self._close_connection()
        return
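
    # The MOB-scraping shell pipeline and the fixed 180-second sleep in
    # handle_create() are both fragile. The two helpers below are minimal,
    # unwired sketches of alternatives; the method names are illustrative.

    # Sketch: fetch the MOB page and extract the 'vxw-...-<network>' portgroup
    # name with Python's re module instead of curl/grep/awk. It assumes the
    # portgroup name appears verbatim in the MOB HTML and that `requests` and
    # `re` are importable here.
    def _find_nsx_portgroup(self, vcm_server, user, password, network):
        import re
        import requests
        url = 'https://' + vcm_server + '/mob/?moid=datacenter-2'
        r = requests.get(url, auth=(user, password), verify=False)
        match = re.search(r'(vxw-[^\s"<>()]*' + re.escape(network) + r')', r.text)
        return match.group(1) if match else None

    # Sketch: poll the BDE cluster status until it leaves PROVISIONING rather
    # than sleeping a fixed 180 seconds. The 'status' field name is an
    # assumption about the Serengeti cluster API response.
    def _wait_for_cluster(self, curr, url, header, timeout=600, interval=15):
        deadline = time.time() + timeout
        while time.time() < deadline:
            r = curr.get(url, headers=header, verify=False)
            status = json.loads(r.text).get("status")
            if status and status != "PROVISIONING":
                return status
            time.sleep(interval)
        return None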
    def handle_suspend(self):
        # REST API call to shut down an existing VMware BDE cluster
        bde_server = self.properties.get(self.BDE_ENDPOINT)
        bde_user = self.properties.get(self.USERNAME)
        bde_pass = self.properties.get(self.PASSWORD)
        name = self.properties.get(self.CLUSTER_NAME)
        prefix = 'https://'
        port = ':8443'
        state = 'stop'

        curr = self._open_connection()
        header = {'content-type': 'application/json'}
        api_call = '/serengeti/api/cluster/' + name + '?state=' + state
        url = prefix + bde_server + port + api_call
        r = curr.post(url, headers=header, verify=False)
        logger.info(_("VirtualElephant::VMware::BDE - Stop cluster status code %s") % r.status_code)

        term = self._close_connection()
        return
    def handle_resume(self):
        # REST API call to start up an existing VMware BDE cluster
        bde_server = self.properties.get(self.BDE_ENDPOINT)
        bde_user = self.properties.get(self.USERNAME)
        bde_pass = self.properties.get(self.PASSWORD)
        name = self.properties.get(self.CLUSTER_NAME)
        prefix = 'https://'
        port = ':8443'
        state = 'start'

        curr = self._open_connection()
        header = {'content-type': 'application/json'}
        api_call = '/serengeti/api/cluster/' + name + '?state=' + state
        url = prefix + bde_server + port + api_call
        r = curr.post(url, headers=header, verify=False)
        logger.info(_("VirtualElephant::VMware::BDE - Start cluster status code %s") % r.status_code)

        term = self._close_connection()
        return
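
    # handle_suspend() and handle_resume() differ only in the ?state= value
    # they POST. A shared helper along these lines (a sketch, not wired in;
    # the name is illustrative) would remove the duplication:
    def _set_cluster_state(self, state):
        bde_server = self.properties.get(self.BDE_ENDPOINT)
        name = self.properties.get(self.CLUSTER_NAME)
        curr = self._open_connection()
        header = {'content-type': 'application/json'}
        url = 'https://' + bde_server + ':8443/serengeti/api/cluster/' + name + '?state=' + state
        r = curr.post(url, headers=header, verify=False)
        logger.info(_("VirtualElephant::VMware::BDE - %s cluster status code %s") % (state, r.status_code))
        self._close_connection()
        return r.status_code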
    def handle_delete(self):
        # REST API call to delete an existing VMware BDE cluster
        bde_server = self.properties.get(self.BDE_ENDPOINT)
        bde_user = self.properties.get(self.USERNAME)
        bde_pass = self.properties.get(self.PASSWORD)
        name = self.properties.get(self.CLUSTER_NAME)
        prefix = 'https://'
        port = ':8443'

        curr = self._open_connection()
        header = {'content-type': 'application/json'}
        api_call = '/serengeti/api/cluster/' + name
        url = prefix + bde_server + port + api_call
        r = curr.delete(url, headers=header, verify=False)
        logger.info(_("VirtualElephant::VMware::BDE - Delete cluster status code %s") % r.status_code)

        # TODO: the NSX ports created in _create_nsx_ports() should also be
        # deleted here for clean-up
        term = self._close_connection()
        return
def resource_mapping():
    return {'VirtualElephant::VMware::BDE': BigDataExtensions}
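
# Illustrative only: a Heat template would reference this plugin through the
# mapping above. The property keys below are assumptions based on the
# constants used in this file (BDE_ENDPOINT, VCM_SERVER, etc.); check the
# properties schema defined earlier in the plugin for the real names.
#
#   resources:
#     hadoop_cluster:
#       type: VirtualElephant::VMware::BDE
#       properties:
#         bde_endpoint: bde.example.com
#         vcm_server: vcenter.example.com
#         username: administrator
#         password: secret
#         cluster_type: hadoop
#         cluster_name: heat-cluster
#         cluster_rp: defaultRP
#         network: heat-network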
| virtualelephant/openstack-heat-bde-plugin | plugin/BigDataExtensions.py | Python | apache-2.0 | 17,510 | 0.003198 |