text (stringlengths 6–947k) | repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) |
---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from south.db import db
from django.db import models
from adm.application.models import *
class Migration:
def forwards(self, orm):
# Adding field 'SubmissionInfo.doc_reviewed_at'
db.add_column('application_submissioninfo', 'doc_reviewed_at', orm['application.submissioninfo:doc_reviewed_at'])
db.create_index('application_submissioninfo',['doc_reviewed_at'])
def backwards(self, orm):
# Deleting field 'SubmissionInfo.doc_reviewed_at'
db.delete_column('application_submissioninfo', 'doc_reviewed_at')
db.delete_index('application_submissioninfo',['doc_reviewed_at'])
models = {
'application.address': {
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'district': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'province': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'road': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'village_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'village_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'application.applicant': {
'activation_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'doc_submission_method': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'has_logged_in': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_related_model': ('IntegerListField', [], {'default': 'None'}),
'hashed_password': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_submitted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'application.applicantaddress': {
'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'address'", 'unique': 'True', 'to': "orm['application.Applicant']"}),
'contact_address': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'contact_owner'", 'unique': 'True', 'to': "orm['application.Address']"}),
'home_address': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'home_owner'", 'unique': 'True', 'to': "orm['application.Address']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'application.education': {
'anet': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'education'", 'unique': 'True', 'to': "orm['application.Applicant']"}),
'gat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'gat_date': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'gat_score_set'", 'null': 'True', 'to': "orm['application.GPExamDate']"}),
'gpax': ('django.db.models.fields.FloatField', [], {}),
'has_graduated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pat1': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pat1_date': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pat1_score_set'", 'null': 'True', 'to': "orm['application.GPExamDate']"}),
'pat3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pat3_date': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pat3_score_set'", 'null': 'True', 'to': "orm['application.GPExamDate']"}),
'school_city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'school_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'school_province': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'uses_gat_score': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'application.gpexamdate': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'month_year': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'application.major': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
'application.majorpreference': {
'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'preference'", 'unique': 'True', 'to': "orm['application.Applicant']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'majors': ('IntegerListField', [], {})
},
'application.passwordrequestlog': {
'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'password_request_log'", 'unique': 'True', 'to': "orm['application.Applicant']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_request_at': ('django.db.models.fields.DateTimeField', [], {}),
'num_requested_today': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'application.personalinfo': {
'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'personal_info'", 'unique': 'True', 'to': "orm['application.Applicant']"}),
'birth_date': ('django.db.models.fields.DateField', [], {}),
'ethnicity': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'national_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'nationality': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '35'})
},
'application.registration': {
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'registrations'", 'to': "orm['application.Applicant']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'registered_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'application.submissioninfo': {
'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'submission_info'", 'unique': 'True', 'to': "orm['application.Applicant']"}),
'applicantion_id': ('django.db.models.fields.AutoField', [], {'unique': 'True', 'primary_key': 'True'}),
'doc_received_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'doc_reviewed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'doc_reviewed_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_been_reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'salt': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
}
}
complete_apps = ['application']
| jittat/ku-eng-direct-admission | application/migrations/0030_add_doc_reviewed_at_to_submission_info.py | Python | agpl-3.0 | 9,283 | 0.009049 |
#!/usr/bin/env python
""" project creation and deletion check for v3 """
# We just want to see any exception that happens
# don't want the script to die under any circumstances
# script must try to clean itself up
# pylint: disable=broad-except
# pylint: disable=invalid-name
# pylint: disable=import-error
import argparse
import time
import logging
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ocutil = OCUtil()
commandDelay = 10 # seconds
def runOCcmd(cmd, base_cmd='oc'):
""" log commands through ocutil """
logger.info(base_cmd + " " + cmd)
oc_time = time.time()
oc_result = ocutil.run_user_cmd(cmd, base_cmd=base_cmd, )
logger.info("oc command took %s seconds", str(time.time() - oc_time))
return oc_result
def parse_args():
""" parse the args from the cli """
logger.debug("parse_args()")
parser = argparse.ArgumentParser(description='OpenShift project creation and deletion test')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
parser.add_argument('--namespace', default="ops-project-operation-check",
help='namespace (be careful of using existing namespaces)')
return parser.parse_args()
def send_metrics(status_code_create, status_code_delete):
""" send data to MetricSender"""
logger.debug("send_metrics()")
ms_time = time.time()
ms = MetricSender()
logger.info("Send data to MetricSender")
# 1 means create and delete the project failed
ms.add_metric({'openshift.master.project.create': status_code_create})
ms.add_metric({'openshift.master.project.delete': status_code_delete})
ms.send_metrics()
logger.info("Data sent to Zagg in %s seconds", str(time.time() - ms_time))
def check_project(config):
""" check create and delete project """
logger.info('check_project()')
logger.debug(config)
project = None
try:
project = runOCcmd("get project {}".format(config.namespace))
logger.debug(project)
except Exception:
pass # don't want exception if project not found
if project:
project_exist = 1 # project exists
else:
project_exist = 0 # project does not exist
return project_exist
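# Note: check_project() returns 1 when the namespace currently exists and 0 when
# it does not. main() reuses this value both as the creation status (1 = the
# project was created) and, after a delete, as the deletion status (1 = the
# project is still present, i.e. deletion failed).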
def create_project(config):
" create the project "
try:
runOCcmd("new-project {}".format(config.namespace), base_cmd='oc adm')
time.sleep(commandDelay)
except Exception:
logger.exception('error creating new project')
def delete_project(config):
" delete the project "
try:
runOCcmd("delete project {}".format(config.namespace), base_cmd='oc')
time.sleep(commandDelay)
except Exception:
logger.exception('error deleting project')
def main():
""" check the project operation status """
logger.debug("main()")
args = parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
ocutil.namespace = args.namespace
project_exists = check_project(args)
# project does not exist.
delete_project_code = 0
# delete the project first if it's already there
if project_exists == 1:
# the project already exists, try to delete it first.
delete_project(args)
delete_project_code = check_project(args)
if delete_project_code == 1:
# 1 means project deletion failed, the project still exists
# give the deletion a second chance: 10 more seconds to check the
# project while it is in terminating status
delete_project(args)
delete_project_code = check_project(args)
if delete_project_code == 1:
logger.info('project deletion failed in 20s')
# start the test
logger.info("project does not exist, going to create it")
create_project(args)
create_project_code = check_project(args)
if create_project_code == 0:
# 0 means project creation failed, no project was created
logger.info('project creation failed')
else:
# project creation succeed, then delete the project
delete_project(args)
delete_project_code = check_project(args)
if delete_project_code == 1:
# 1 means project deletion failed, the project still exists
# give the deletion a second chance: 10 more seconds to check the
# project while it is in terminating status
delete_project(args)
delete_project_code = check_project(args)
if delete_project_code == 1:
logger.info('project deletion failed in 20s')
else:
delete_project_code = 0
#logger.info("{} {}".format(create_project_code, delete_project_code))
if create_project_code == 1 and delete_project_code == 0:
logger.info('creation and deletion succeeded, no data was sent to zagg')
send_metrics(create_project_code, delete_project_code)
if __name__ == "__main__":
main()
| blrm/openshift-tools | scripts/monitoring/cron-send-project-operation.py | Python | apache-2.0 | 5,158 | 0.003296 |
from urllib import urlencode
from django import forms, template
from django.contrib.auth.admin import csrf_protect_m
from django.contrib.admin import helpers
from django.contrib.admin.util import unquote
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.forms.formsets import all_valid
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode
from django.contrib.admin import ModelAdmin
from filetransfers.api import prepare_upload
class FiletransferAdmin(ModelAdmin):
"""
Adds some things to the model admin that make working with
filetransfers a lot easier.
"""
add_form_template = "filetransfers/change_form.html"
change_form_template = "filetransfers/change_form.html"
def return_invalid_form(self, request):
"""
Google's blobstore upload api requires a redirect from the
view that processes the upload no matter what. To appease the
api but still show the user a proper failed form error message,
we'll return a get to the page we're on with the query string
of the posted data.
On the receiving side, if the view is a get, we check for the
failed_validation parameter to know to pass real data to the
form so validation will trigger when the user sees the form again.
"""
form_data = dict(request.POST.items(), failed_validation=True)
return HttpResponseRedirect('?' + urlencode(form_data))
def has_file_field(self, form, formsets):
# when using AppEngine, FileField is different...
# if you want to use the filetransfers in the admin, you'll
# be subclassing this model, so we're just returning True for
# now... the real code should be what is commented out below.
return True
# for field in form.fields:
# if isinstance(field, forms.FileField):
# return True
# for formset in formsets:
# for form in formset.forms:
# for field in form.fields:
# if isinstance(field, forms.FileField):
# return True
# return False
@csrf_protect_m
@transaction.commit_on_success
def add_view(self, request, form_url='', extra_context=None):
"The 'add' admin view for this model."
model = self.model
opts = model._meta
if not self.has_add_permission(request):
raise PermissionDenied
ModelForm = self.get_form(request)
formsets = []
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES)
if form.is_valid():
new_object = self.save_form(request, form, change=False)
form_validated = True
else:
form_validated = False
new_object = self.model()
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request), self.inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(data=request.POST, files=request.FILES,
instance=new_object,
save_as_new="_saveasnew" in request.POST,
prefix=prefix, queryset=inline.queryset(request))
formsets.append(formset)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, change=False)
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=False)
self.log_addition(request, new_object)
return self.response_add(request, new_object)
else:
return self.return_invalid_form(request)
else:
# Prepare the dict of initial data from the request.
# We have to special-case M2Ms as a list of comma-separated PKs.
initial = dict(request.GET.items())
for k in initial:
try:
f = opts.get_field(k)
except models.FieldDoesNotExist:
continue
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
data = request.GET if 'failed_validation' in initial else None
form = ModelForm(data, initial=initial)
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request),
self.inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(data, instance=self.model(), prefix=prefix,
queryset=inline.queryset(request))
formsets.append(formset)
adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)),
self.prepopulated_fields, self.get_readonly_fields(request),
model_admin=self)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(self.inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request))
readonly = list(inline.get_readonly_fields(request))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
context = {
'title': _('Add %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'is_popup': "_popup" in request.REQUEST,
'show_delete': False,
'media': mark_safe(media),
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'root_path': self.admin_site.root_path,
'app_label': opts.app_label,
}
context.update(extra_context or {})
has_file_field = self.has_file_field(form, formsets)
return self.render_change_form(request, context, form_url=form_url, add=True, has_file_field=has_file_field)
@csrf_protect_m
@transaction.commit_on_success
def change_view(self, request, object_id, extra_context=None):
"The 'change' admin view for this model."
model = self.model
opts = model._meta
obj = self.get_object(request, unquote(object_id))
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
if request.method == 'POST' and "_saveasnew" in request.POST:
return self.add_view(request, form_url='../add/')
ModelForm = self.get_form(request, obj)
formsets = []
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=True)
else:
form_validated = False
new_object = obj
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request, new_object),
self.inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(request.POST, request.FILES,
instance=new_object, prefix=prefix,
queryset=inline.queryset(request))
formsets.append(formset)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, change=True)
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=True)
change_message = self.construct_change_message(request, form, formsets)
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
return self.return_invalid_form(request)
else:
data = request.GET if 'failed_validation' in request.GET else None
form = ModelForm(data, instance=obj)
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request, obj), self.inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(data, instance=obj, prefix=prefix,
queryset=inline.queryset(request))
formsets.append(formset)
adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj),
self.prepopulated_fields, self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(self.inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
context = {
'title': _('Change %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'object_id': object_id,
'original': obj,
'is_popup': "_popup" in request.REQUEST,
'media': mark_safe(media),
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'root_path': self.admin_site.root_path,
'app_label': opts.app_label,
}
has_file_field = self.has_file_field(form, formsets)
context.update(extra_context or {})
return self.render_change_form(request, context, change=True, obj=obj, has_file_field=has_file_field)
def get_filetransfer_data(self, upload_data):
input_element = '<input type="hidden" name="%s" value="%s" />'
upload_data = (input_element % (escape(name), escape(value)) \
for name, value in upload_data.items())
return mark_safe(''.join(upload_data))
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None, has_file_field=True):
opts = self.model._meta
app_label = opts.app_label
ordered_objects = opts.get_ordered_objects()
if has_file_field:
if not form_url:
form_url = request.path
form_url, upload_data = prepare_upload(request, form_url)
context.update({
'file_upload_data': self.get_filetransfer_data(upload_data),
})
context.update({
'add': add,
'change': change,
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, obj),
'has_delete_permission': self.has_delete_permission(request, obj),
'has_file_field': has_file_field,
'has_absolute_url': hasattr(self.model, 'get_absolute_url'),
'ordered_objects': ordered_objects,
'form_url': mark_safe(form_url),
'opts': opts,
'content_type_id': ContentType.objects.get_for_model(self.model).id,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'root_path': self.admin_site.root_path,
})
if add and self.add_form_template is not None:
form_template = self.add_form_template
else:
form_template = self.change_form_template
context_instance = template.RequestContext(request, current_app=self.admin_site.name)
return render_to_response(form_template or [
"admin/%s/%s/change_form.html" % (app_label, opts.object_name.lower()),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
], context, context_instance=context_instance)
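# Usage sketch (hypothetical model and app names, not part of this module): a
# project would subclass FiletransferAdmin instead of ModelAdmin so that file
# uploads go through filetransfers' prepare_upload() and failed validation
# survives the blobstore-mandated redirect handled by return_invalid_form().
#
#     from django.contrib import admin
#     from myapp.models import Document
#
#     class DocumentAdmin(FiletransferAdmin):
#         list_display = ('title',)
#
#     admin.site.register(Document, DocumentAdmin)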
| thomasgilgenast/spqr-nonrel | filetransfers/admin.py | Python | bsd-3-clause | 13,542 | 0.001625 |
#!/usr/bin/env python
#
# $File: IfElseFixed.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
pop = sim.Population(size=1000, loci=1)
verbose = True
pop.evolve(
initOps=[
sim.InitSex(),
sim.InitGenotype(freq=[0.5, 0.5]),
],
matingScheme=sim.RandomMating(),
postOps=sim.IfElse(verbose,
ifOps=[
sim.Stat(alleleFreq=0),
sim.PyEval(r"'Gen: %3d, allele freq: %.3f\n' % (gen, alleleFreq[0][1])",
step=5)
],
begin=10),
gen = 30
)
| BoPeng/simuPOP | docs/IfElseFixed.py | Python | gpl-2.0 | 1,537 | 0.003904 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('builds', '0010_merge'),
]
operations = [
migrations.AlterField(
model_name='project',
name='approved',
field=models.BooleanField(default=False, db_index=True),
preserve_default=True,
),
]
| frigg/frigg-hq | frigg/builds/migrations/0011_auto_20150223_0442.py | Python | mit | 444 | 0 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyQtawesome(PythonPackage):
"""FontAwesome icons in PyQt and PySide applications"""
homepage = "https://github.com/spyder-ide/qtawesome"
url = "https://pypi.io/packages/source/Q/QtAwesome/QtAwesome-0.4.1.tar.gz"
version('0.4.1', 'bf93df612a31f3b501d751fc994c1b05')
version('0.3.3', '830677aa6ca4e7014e228147475183d3')
depends_on('py-setuptools', type='build')
depends_on('py-qtpy', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
| EmreAtes/spack | var/spack/repos/builtin/packages/py-qtawesome/package.py | Python | lgpl-2.1 | 1,760 | 0 |
"""
========================================
Release Highlights for scikit-learn 0.22
========================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 0.22, which comes
with many bug fixes and new features! We detail below a few of the major
features of this release. For an exhaustive list of all the changes, please
refer to the :ref:`release notes <changes_0_22>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# New plotting API
# ----------------
#
# A new plotting API is available for creating visualizations. This new API
# allows for quickly adjusting the visuals of a plot without involving any
# recomputation. It is also possible to add different plots to the same
# figure. The following example illustrates :class:`~metrics.plot_roc_curve`,
# but other plotting utilities are supported, such as
# :class:`~inspection.plot_partial_dependence`,
# :class:`~metrics.plot_precision_recall_curve`, and
# :class:`~metrics.plot_confusion_matrix`. Read more about this new API in the
# :ref:`User Guide <visualizations>`.
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import plot_roc_curve
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
import matplotlib.pyplot as plt
X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
svc = SVC(random_state=42)
svc.fit(X_train, y_train)
rfc = RandomForestClassifier(random_state=42)
rfc.fit(X_train, y_train)
svc_disp = plot_roc_curve(svc, X_test, y_test)
rfc_disp = plot_roc_curve(rfc, X_test, y_test, ax=svc_disp.ax_)
rfc_disp.figure_.suptitle("ROC curve comparison")
plt.show()
# %%
# Stacking Classifier and Regressor
# ---------------------------------
# :class:`~ensemble.StackingClassifier` and
# :class:`~ensemble.StackingRegressor`
# allow you to have a stack of estimators with a final classifier or
# a regressor.
# Stacked generalization consists of stacking the output of individual
# estimators and using a classifier to compute the final prediction. Stacking
# allows one to use the strength of each individual estimator by using their
# output as the input of a final estimator.
# Base estimators are fitted on the full ``X`` while
# the final estimator is trained using cross-validated predictions of the
# base estimators using ``cross_val_predict``.
#
# Read more in the :ref:`User Guide <stacking>`.
from sklearn.datasets import load_iris
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.model_selection import train_test_split
X, y = load_iris(return_X_y=True)
estimators = [
('rf', RandomForestClassifier(n_estimators=10, random_state=42)),
('svr', make_pipeline(StandardScaler(),
LinearSVC(random_state=42)))
]
clf = StackingClassifier(
estimators=estimators, final_estimator=LogisticRegression()
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=42
)
clf.fit(X_train, y_train).score(X_test, y_test)
# %%
# Permutation-based feature importance
# ------------------------------------
#
# The :func:`inspection.permutation_importance` can be used to get an
# estimate of the importance of each feature, for any fitted estimator:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
X, y = make_classification(random_state=0, n_features=5, n_informative=3)
feature_names = np.array([f'x_{i}' for i in range(X.shape[1])])
rf = RandomForestClassifier(random_state=0).fit(X, y)
result = permutation_importance(rf, X, y, n_repeats=10, random_state=0,
n_jobs=-1)
fig, ax = plt.subplots()
sorted_idx = result.importances_mean.argsort()
ax.boxplot(result.importances[sorted_idx].T,
vert=False, labels=feature_names[sorted_idx])
ax.set_title("Permutation Importance of each feature")
ax.set_ylabel("Features")
fig.tight_layout()
plt.show()
# %%
# Native support for missing values for gradient boosting
# -------------------------------------------------------
#
# The :class:`ensemble.HistGradientBoostingClassifier`
# and :class:`ensemble.HistGradientBoostingRegressor` now have native
# support for missing values (NaNs). This means that there is no need for
# imputing data when training or predicting.
from sklearn.ensemble import HistGradientBoostingClassifier
X = np.array([0, 1, 2, np.nan]).reshape(-1, 1)
y = [0, 0, 1, 1]
gbdt = HistGradientBoostingClassifier(min_samples_leaf=1).fit(X, y)
print(gbdt.predict(X))
# %%
# Precomputed sparse nearest neighbors graph
# ------------------------------------------
# Most estimators based on nearest neighbors graphs now accept precomputed
# sparse graphs as input, to reuse the same graph for multiple estimator fits.
# To use this feature in a pipeline, one can use the `memory` parameter, along
# with one of the two new transformers,
# :class:`neighbors.KNeighborsTransformer` and
# :class:`neighbors.RadiusNeighborsTransformer`. The precomputation
# can also be performed by custom estimators to use alternative
# implementations, such as approximate nearest neighbors methods.
# See more details in the :ref:`User Guide <neighbors_transformer>`.
from tempfile import TemporaryDirectory
from sklearn.neighbors import KNeighborsTransformer
from sklearn.manifold import Isomap
from sklearn.pipeline import make_pipeline
X, y = make_classification(random_state=0)
with TemporaryDirectory(prefix="sklearn_cache_") as tmpdir:
estimator = make_pipeline(
KNeighborsTransformer(n_neighbors=10, mode='distance'),
Isomap(n_neighbors=10, metric='precomputed'),
memory=tmpdir)
estimator.fit(X)
# We can decrease the number of neighbors and the graph will not be
# recomputed.
estimator.set_params(isomap__n_neighbors=5)
estimator.fit(X)
# %%
# KNN Based Imputation
# ------------------------------------
# We now support imputation for completing missing values using k-Nearest
# Neighbors.
#
# Each sample's missing values are imputed using the mean value from
# ``n_neighbors`` nearest neighbors found in the training set. Two samples are
# close if the features that neither is missing are close.
# By default, a euclidean distance metric
# that supports missing values,
# :func:`~metrics.nan_euclidean_distances`, is used to find the nearest
# neighbors.
#
# Read more in the :ref:`User Guide <knnimpute>`.
from sklearn.impute import KNNImputer
X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
imputer = KNNImputer(n_neighbors=2)
print(imputer.fit_transform(X))
# %%
# Tree pruning
# ------------
#
# It is now possible to prune most tree-based estimators once the trees are
# built. The pruning is based on minimal cost-complexity. Read more in the
# :ref:`User Guide <minimal_cost_complexity_pruning>` for details.
X, y = make_classification(random_state=0)
rf = RandomForestClassifier(random_state=0, ccp_alpha=0).fit(X, y)
print("Average number of nodes without pruning {:.1f}".format(
np.mean([e.tree_.node_count for e in rf.estimators_])))
rf = RandomForestClassifier(random_state=0, ccp_alpha=0.05).fit(X, y)
print("Average number of nodes with pruning {:.1f}".format(
np.mean([e.tree_.node_count for e in rf.estimators_])))
# %%
# Retrieve dataframes from OpenML
# -------------------------------
# :func:`datasets.fetch_openml` can now return pandas dataframe and thus
# properly handle datasets with heterogeneous data:
from sklearn.datasets import fetch_openml
titanic = fetch_openml('titanic', version=1, as_frame=True)
print(titanic.data.head()[['pclass', 'embarked']])
# %%
# Checking scikit-learn compatibility of an estimator
# ---------------------------------------------------
# Developers can check the compatibility of their scikit-learn compatible
# estimators using :func:`~utils.estimator_checks.check_estimator`. For
# instance, the ``check_estimator(LinearSVC())`` passes.
#
# We now provide a ``pytest`` specific decorator which allows ``pytest``
# to run all checks independently and report the checks that are failing.
#
# .. note::
# This entry was slightly updated in version 0.24, where passing classes
# isn't supported anymore: pass instances instead.
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.utils.estimator_checks import parametrize_with_checks
@parametrize_with_checks([LogisticRegression(), DecisionTreeRegressor()])
def test_sklearn_compatible_estimator(estimator, check):
check(estimator)
# %%
# ROC AUC now supports multiclass classification
# ----------------------------------------------
# The :func:`roc_auc_score` function can also be used in multi-class
# classification. Two averaging strategies are currently supported: the
# one-vs-one algorithm computes the average of the pairwise ROC AUC scores, and
# the one-vs-rest algorithm computes the average of the ROC AUC scores for each
# class against all other classes. In both cases, the multiclass ROC AUC scores
# are computed from the probability estimates that a sample belongs to a
# particular class according to the model. The OvO and OvR algorithms support
# weighting uniformly (``average='macro'``) and weighting by the prevalence
# (``average='weighted'``).
#
# Read more in the :ref:`User Guide <roc_metrics>`.
from sklearn.datasets import make_classification
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
X, y = make_classification(n_classes=4, n_informative=16)
clf = SVC(decision_function_shape='ovo', probability=True).fit(X, y)
print(roc_auc_score(y, clf.predict_proba(X), multi_class='ovo'))
| glemaitre/scikit-learn | examples/release_highlights/plot_release_highlights_0_22_0.py | Python | bsd-3-clause | 10,115 | 0.002472 |
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import unittest as ut
import espressomd
import espressomd.electrostatics
import espressomd.interactions
from espressomd import drude_helpers
class Drude(ut.TestCase):
@ut.skipIf(not espressomd.has_features("P3M", "THOLE", "LANGEVIN_PER_PARTICLE"), "Test needs P3M, THOLE and LANGEVIN_PER_PARTICLE")
def test(self):
"""
Sets up a BMIM PF6 pair separated in y-direction with fixed cores.
Adds the Drude particles and related features (intramolecular exclusion bonds, Thole screening)
via helper functions.
Calculates the induced dipole moment and the diagonals of the polarization tensor
and compares against reference results, which were reproduced with LAMMPS.
"""
box_l = 50
system = espressomd.System(box_l=[box_l, box_l, box_l])
system.seed = system.cell_system.get_state()['n_nodes'] * [12]
np.random.seed(12)
#Reference Results, reproduced with LAMMPS
#Dipole Moments
ref_mu0_pf6 = [0.00177594, 0.16480996, -0.01605161]
ref_mu0_c1 = [0.00076652, 0.15238767, 0.00135291]
ref_mu0_c2 = [-0.00020222, 0.11084197, 0.00135842]
ref_mu0_c3 = [0.00059177, 0.23949626, -0.05238468]
ref_mu0_bmim = [0.00115606, 0.5027259, -0.04967335]
#Polarisation Tensor diagonals
ref_pol_pf6 = [
4.5535698335873445, 4.7558611769477697, 4.5546580162000554]
ref_pol_bmim = [
13.126868394164262, 14.392582501485913, 16.824150151623762]
#TIMESTEP
fs_to_md_time = 1.0e-2
time_step_fs = 0.5
time_step_ns = time_step_fs * 1e-6
dt = time_step_fs * fs_to_md_time
#COM TEMPERATURE
#Global thermostat temperature, for com and langevin.
#LangevinPerParticle temperature is set to 0 for drude and core to properly account for com forces.
# That way, the Langevin thermostat can still be used for non-Drude
# particles
SI_temperature = 300.0
gamma_com = 1.0
kb_kjmol = 0.0083145
temperature_com = SI_temperature * kb_kjmol
# COULOMB PREFACTOR (elementary charge)^2 / (4*pi*epsilon_0) in
# Angstrom * kJ/mol
coulomb_prefactor = 1.67101e5 * kb_kjmol
#POLARIZATION
#polarization = 1.0 #In (Angstrom^3)_CGS
# alpha_SI = 4*Pi*eps_0 alpha_CGS;
# 4*Pi*epsilon_0*Angstrom^3/((elementary charge)^2*Angstrom^2*N_A/kJ)
conv_pol_CGS_SI = 7.197586e-4
#alpha = conv_pol_CGS_SI*args.polarization
#DRUDE/TOTAL MASS
#lamoureux03 used values 0.1-0.8 g/mol for drude mass
mass_drude = 0.8
mass_tot = 100.0
mass_core = mass_tot - mass_drude
mass_red_drude = mass_drude * mass_core / mass_tot
#SPRING CONSTANT DRUDE
#Used 1000kcal/mol/A^2 from lamoureux03a table 1 p 3031
k_drude = 4184.0
# in kJ/mol/A^2
T_spring = 2.0 * np.pi * np.sqrt(mass_drude / k_drude)
#T_spring_fs = T_spring/fs_to_md_time
#Period of free oscillation: T_spring = 2Pi/w; w = sqrt(k_d/m_d)
#TEMP DRUDE
# Used T* = 1K from lamoureux03a p 3031 (2) 'Cold drude oscillators
# regime'
SI_temperature_drude = 1.0
temperature_drude = SI_temperature_drude * kb_kjmol
#GAMMA DRUDE
#Thermostat relaxation time should be similar to T_spring
gamma_drude = mass_red_drude / T_spring
system.cell_system.skin = 0.4
system.time_step = dt
#Forcefield
types = {"PF6": 0, "BMIM_C1": 1, "BMIM_C2": 2, "BMIM_C3":
3, "BMIM_COM": 4, "PF6_D": 5, "BMIM_C1_D": 6, "BMIM_C2_D": 7, "BMIM_C3_D": 8}
charges = {"PF6": -0.78, "BMIM_C1": 0.4374,
"BMIM_C2": 0.1578, "BMIM_C3": 0.1848, "BMIM_COM": 0}
polarizations = {"PF6": 4.653, "BMIM_C1":
5.693, "BMIM_C2": 2.103, "BMIM_C3": 7.409}
masses = {"PF6": 144.96, "BMIM_C1": 67.07,
"BMIM_C2": 15.04, "BMIM_C3": 57.12, "BMIM_COM": 0}
masses["BMIM_COM"] = masses["BMIM_C1"] + \
masses["BMIM_C2"] + masses["BMIM_C3"]
box_center = 0.5 * np.array(3 * [box_l])
system.min_global_cut = 3.5
#Place Particles
dmol = 5.0
#Test Anion
pos_pf6 = box_center + np.array([0, dmol, 0])
system.part.add(id=0, type=types["PF6"], pos=pos_pf6, q=charges[
"PF6"], mass=masses["PF6"], fix=[1, 1, 1])
pos_com = box_center - np.array([0, dmol, 0])
system.part.add(id=2, type=types["BMIM_C1"], pos=pos_com + [
0, -0.527, 1.365], q=charges["BMIM_C1"], mass=masses["BMIM_C1"], fix=[1, 1, 1])
system.part.add(id=4, type=types["BMIM_C2"], pos=pos_com + [
0, 1.641, 2.987], q=charges["BMIM_C2"], mass=masses["BMIM_C2"], fix=[1, 1, 1])
system.part.add(id=6, type=types["BMIM_C3"], pos=pos_com + [
0, 0.187, -2.389], q=charges["BMIM_C3"], mass=masses["BMIM_C3"], fix=[1, 1, 1])
system.thermostat.set_langevin(kT=temperature_com, gamma=gamma_com)
p3m = espressomd.electrostatics.P3M(
prefactor=coulomb_prefactor, accuracy=1e-4, mesh=[18, 18, 18], cao=5)
system.actors.add(p3m)
#Drude related Bonds
thermalized_dist_bond = espressomd.interactions.ThermalizedBond(
temp_com=temperature_com, gamma_com=gamma_com, temp_distance=temperature_drude, gamma_distance=gamma_drude, r_cut=1.0)
harmonic_bond = espressomd.interactions.HarmonicBond(
k=k_drude, r_0=0.0, r_cut=1.0)
system.bonded_inter.add(thermalized_dist_bond)
system.bonded_inter.add(harmonic_bond)
drude_helpers.add_drude_particle_to_core(system, harmonic_bond, thermalized_dist_bond, system.part[
0], 1, types["PF6_D"], polarizations["PF6"], mass_drude, coulomb_prefactor, 2.0)
drude_helpers.add_drude_particle_to_core(system, harmonic_bond, thermalized_dist_bond, system.part[
2], 3, types["BMIM_C1_D"], polarizations["BMIM_C1"], mass_drude, coulomb_prefactor, 2.0)
drude_helpers.add_drude_particle_to_core(system, harmonic_bond, thermalized_dist_bond, system.part[
4], 5, types["BMIM_C2_D"], polarizations["BMIM_C2"], mass_drude, coulomb_prefactor, 2.0)
drude_helpers.add_drude_particle_to_core(system, harmonic_bond, thermalized_dist_bond, system.part[
6], 7, types["BMIM_C3_D"], polarizations["BMIM_C3"], mass_drude, coulomb_prefactor, 2.0)
#Setup and add Drude-Core SR exclusion bonds
drude_helpers.setup_and_add_drude_exclusion_bonds(system)
#Setup intramol SR exclusion bonds once
drude_helpers.setup_intramol_exclusion_bonds(
system, [6, 7, 8], [1, 2, 3], [charges["BMIM_C1"], charges["BMIM_C2"], charges["BMIM_C3"]])
#Add bonds per molecule
drude_helpers.add_intramol_exclusion_bonds(
system, [3, 5, 7], [2, 4, 6])
#Thole
drude_helpers.add_all_thole(system)
def dipole_moment(id_core, id_drude):
pc = system.part[id_core]
pd = system.part[id_drude]
v = pd.pos - pc.pos
return pd.q * v
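# The induced dipole of a core/Drude pair is mu = q_D * (r_D - r_C), in units of
# e*Angstrom here; the assertions below convert it to Debye via eA_to_Debye
# (1 e*Angstrom is roughly 4.803 Debye).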
def measure_dipole_moments():
dm_pf6 = []
dm_C1 = []
dm_C2 = []
dm_C3 = []
system.integrator.run(115)
for i in range(100):
system.integrator.run(1)
dm_pf6.append(dipole_moment(0, 1))
dm_C1.append(dipole_moment(2, 3))
dm_C2.append(dipole_moment(4, 5))
dm_C3.append(dipole_moment(6, 7))
dm_pf6_m = np.mean(dm_pf6, axis=0)
dm_C1_m = np.mean(dm_C1, axis=0)
dm_C2_m = np.mean(dm_C2, axis=0)
dm_C3_m = np.mean(dm_C3, axis=0)
dm_sum_bmim = dm_C1_m + dm_C2_m + dm_C3_m
res = dm_pf6_m, dm_C1_m, dm_C2_m, dm_C3_m, dm_sum_bmim
return res
def setElectricField(E):
E = np.array(E)
for p in system.part:
p.ext_force = p.q * E
def calc_pol(mu0, muE, E):
pol = (muE - mu0) / E / conv_pol_CGS_SI
return pol
def measure_pol(Es, dim):
E = [0.0, 0.0, 0.0]
E[dim] = Es
setElectricField(E)
mux_pf6, mux_c1, mux_c2, mux_c3, mux_bmim = measure_dipole_moments(
)
return calc_pol(mu0_pf6[dim], mux_pf6[dim], Es), calc_pol(mu0_bmim[dim], mux_bmim[dim], Es)
mu0_pf6, mu0_c1, mu0_c2, mu0_c3, mu0_bmim = measure_dipole_moments()
eA_to_Debye = 4.8032047
atol = 1e-2
rtol = 1e-2
np.testing.assert_allclose(
ref_mu0_pf6, eA_to_Debye * mu0_pf6, atol=atol, rtol=rtol)
np.testing.assert_allclose(
ref_mu0_c1, eA_to_Debye * mu0_c1, atol=atol, rtol=rtol)
np.testing.assert_allclose(
ref_mu0_c2, eA_to_Debye * mu0_c2, atol=atol, rtol=rtol)
np.testing.assert_allclose(
ref_mu0_c3, eA_to_Debye * mu0_c3, atol=atol, rtol=rtol)
np.testing.assert_allclose(
ref_mu0_bmim, eA_to_Debye * mu0_bmim, atol=atol, rtol=rtol)
pol_pf6 = []
pol_bmim = []
Efield = 96.48536 # = 1 V/A in kJ / (Avogadro Number) / Angstrom / elementary charge
res = measure_pol(Efield, 0)
pol_pf6.append(res[0])
pol_bmim.append(res[1])
res = measure_pol(Efield, 1)
pol_pf6.append(res[0])
pol_bmim.append(res[1])
res = measure_pol(Efield, 2)
pol_pf6.append(res[0])
pol_bmim.append(res[1])
np.testing.assert_allclose(
ref_pol_pf6, pol_pf6, atol=atol, rtol=rtol)
np.testing.assert_allclose(
ref_pol_bmim, pol_bmim, atol=atol, rtol=rtol)
if __name__ == "__main__":
ut.main()
| hmenke/espresso | testsuite/python/drude.py | Python | gpl-3.0 | 10,864 | 0.004602 |
from functional_tests import FunctionalTest, ROOT, USERS
import time
from selenium.webdriver.support.ui import WebDriverWait
#element = WebDriverWait(driver, 10).until(lambda driver : driver.find_element_by_id("createFolderCreateBtn"))
class TestRegisterPage (FunctionalTest):
def setUp(self):
self.url = ROOT + '/default/user/register'
get_browser=self.browser.get(self.url)
def test_can_view_register_page(self):
response_code = self.get_response_code(self.url)
self.assertEqual(response_code, 200)
def test_has_right_title(self):
title = self.browser.title
#self.assertEqual(u'Net Decision Making: Registration', title)
self.assertIn('Networked Decision Making', title)
def test_put_values_in_register_form(self):
#first_name = self.browser.find_element_by_name("first_name")
first_name = WebDriverWait(self, 10).until(lambda self : self.browser.find_element_by_name("first_name"))
first_name.send_keys(USERS['USER1'])
last_name = self.browser.find_element_by_name("last_name")
last_name.send_keys(USERS['USER1'])
email = self.browser.find_element_by_name("email")
email.send_keys("user1@user.com")
username = self.browser.find_element_by_name("username")
username.send_keys(USERS['USER1'])
password = self.browser.find_element_by_name("password")
password.send_keys(USERS['PASSWORD1'])
verify_password = self.browser.find_element_by_name("password_two")
verify_password.send_keys(USERS['PASSWORD1'])
register_button = self.browser.find_element_by_css_selector("#submit_record__row input")
register_button.click()
#hopefully to help gae get data sorted for next test
#time.sleep(10)
#element = WebDriverWait(driver, 10).until(lambda driver : driver.find_element_by_id("createFolderCreateBtn"))
body = WebDriverWait(self, 10).until(lambda self : self.browser.find_element_by_tag_name('body'))
#body = self.browser.find_element_by_tag_name('body')
self.assertIn('Welcome user1', body.text)
#self.assertIn('This email', body.text)
| NewGlobalStrategy/NetDecisionMaking | fts/test_0aregister.py | Python | mit | 2,323 | 0.021093 |
# DarkCoder
def sum_of_series(first_term, common_diff, num_of_terms):
"""
Find the sum of n terms in an arithmetic progression.
>>> sum_of_series(1, 1, 10)
55.0
>>> sum_of_series(1, 10, 100)
49600.0
"""
sum = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return sum
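# Worked check of the closed form S_n = n/2 * (2a + (n - 1)d) used above: for
# first_term=1, common_diff=1, num_of_terms=10 it gives 10/2 * (2 + 9*1) = 55.0,
# matching the first doctest.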
def main():
print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| TheAlgorithms/Python | maths/sum_of_arithmetic_series.py | Python | mit | 482 | 0.002075 |
import operator
import time
# Only the vertex list (and the hamming distance) is given
# Need to find all vertices which are within a hamming distance of 2
# That means for each vertex, generate a list of 300 other vertices (24 bits + 23 + 22 + ... + 1)
# which are within a hamming distance of 2 (generate vertices with 2 bits different)
# Check if those exist and if they do, merge them with this vertex
# When merge happens, update the union structure. Create the union structure as
# a dictionary of key (vertex in bits) and its parent
# For this reason, can we use the same union structure for both?
# How will we find out how many roots there are?
# For each vertex, after the above is over, check root.
# Create a list and add each root to list if not already there.
#
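# Small illustration of the neighbour generation (hypothetical 4-bit case, cf. the
# commented-out test at the bottom of this file): for vertex '1000' the distance-1
# neighbours are '0000', '1100', '1010', '1001', and flipping one further bit in
# each of those gives the distance-2 neighbours; for 24-bit vertices this yields
# 24 + C(24, 2) = 24 + 276 = 300 candidates per vertex, as noted above.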
vert_union1 = {} # vertexes and their unique id. Vertexes are the keys
vert_union_parent = {} # vertex id and their parent. Vertex id is from above and is the key
vert_list = [] # just the vertices
roots = []
bits = 0
vert_list_set = set() # this is same as vert_list but much faster to search a set than a list
# set search completes 100x faster since it is hash table based
def return_hamming_1_2_distance(vert):
h_list = []
for x in range(bits):
#print word[0] #get one char of the word
#print word[0:1] #get one char of the word (same as above)
#print word[0:3] #get the first three char
#print word[:3] #get the first three char
#print word[-3:] #get the last three char
#print word[3:] #get all but the three first char
#print word[:-3] #get all but the three last character
if vert[x] == '0':
vert1 = vert[:x] + '1' + vert[x+1:] # flip the first bit
elif vert[x] == '1':
vert1 = vert[:x] + '0' + vert[x+1:] # flip the first bit
h_list.append(vert1) # add to list (this is all the items with one bit flipped)
#print vert
for y in xrange(x+1,bits):
#print "y is %d" %y
vert2 = vert1[:]
if vert1[y] == '0':
vert2 = vert1[:y] + '1' + vert1[y+1:] # flip the second bit
elif vert1[y] == '1':
vert2 = vert1[:y] + '0' + vert1[y+1:] # flip the second bit
h_list.append(vert2) # this is all the items with the second bit also flipped
#print vert2
return h_list
def parent(key):
return vert_union_parent[key]
def root(key):
while (vert_union_parent[key] != key):
key = parent(key)
return vert_union_parent[key]
def union(key1, key2):
root_key2 = root(key2)
vert_union_parent[root_key2] = root(key1)
# Put the root of the first one as the parent of the second one's root
# this avoids the issue of union of two vertices whose parents are different from their roots
# for example, b is a's root and c is d's root. When we union a and c, we should not
# make c (or c's root) the parent of a, because then a loses its link with b;
# instead make b point to c's root. That way a's parent will still be b and b's will be c,
# which now becomes the root of b/a and continues to be the root of d, thus one cluster
return
if __name__ == '__main__':
i = 0
with open('clustering_big.txt') as f:
no_v, bits = [int(x) for x in f.readline().split()]
for line in f: # read rest of lines
temp = [str(x) for x in line]
temp = [num.strip() for num in temp]
temp = filter(None, temp)
temp = ''.join(map(str,temp))
# temp = temp.rstrip()
# temp1 = temp1.strip()
# temp1 = temp1.lstrip()
#print temp
vert_union1[temp] = i
vert_union_parent[i] = i
vert_list.append(temp) # we don't need vert_list since we replace it with vert_list_set
vert_list_set.add(temp)
i = i+1
#print vert_list
#print vert_union['1000100']
clusters = no_v
vertices_done = 0
print "Done input"
for v in vert_list_set:
# for all the vertices, find all its hamming sets (300 vertices)
# then see if these 300 are also in the vertex list.
# if they are, then union/cluster them together
print vertices_done
vertices_done = vertices_done + 1 # Not required. Just counting up the vertices
#start = time.clock()
h_list = return_hamming_1_2_distance(v) # convert this to do for all vertices
#end = time.clock()
#print end - start
#print "hlist size is %d" %len(h_list)
#start = time.clock()
for x in h_list: # hlist returns all the possible vertices with HD=2 or 1
#start = time.clock()
#if x in vert_list_set: # if x is there in the set, then merge the two
# i = i +1
#end = time.clock()
#print end - start
#print i
if x in vert_list_set: # if x is there in the set, then merge the two
#print vert_union1[x], ",", vert_union1[v]
#print "before ", root(vert_union1[x]), ",", root(vert_union1[v])
if root(vert_union1[x]) != root(vert_union1[v]):
# not required for algo. Just counting down the clusters
clusters = clusters - 1
#print clusters
#start = time.clock()
#print x
union(vert_union1[x],vert_union1[v])
#print "after ", root(vert_union1[x]), ",", root(vert_union1[v])
#end = time.clock()
#print end - start
#print "The roots should be same. Vert root", vert_union1[v], ",", root(vert_union1[v])
#print "The roots should be same. Vert root", vert_union1[x], ",", root(vert_union1[x])
#print "\n"
#end = time.clock()
#print end - start
# Error was happening because here the key is something like "1001000"
# In the earlier program it was something like 1:2, 2:3, 3:3, so we could say that a node
# is the root when the key is the same as its parent; in the case above 3=3, so 3 is the root.
# Here we cannot do the same. Hence we created two dicts, one with key = 1001000 and value is 1
# and then another dict where key is this value and value is the parent value.
# the second structure is the actual union find structure
#print "bits = %d" %bits
# now find how many clusters there are by going through all vertices and finding unique roots
#print "\n"
#start = time.clock()
print "Done hamming"
for v in vert_list_set:
# Now count the number of roots. We could have done it efficiently in the above loop too
if (root(vert_union1[v])) not in roots:
#print vert_union1[v], ",", root(vert_union1[v])
roots.append(root(vert_union1[v])) # vert_union1 contains the roots (thus clusters)
#end = time.clock()
#print end - start
print "Answer is %d" %len(roots)
print roots
#print "\n\n"
#testing
#bits = 4
#h_list = return_hamming_1_2_distance('1000') # convert this to do for all vertices
#print h_list
#print len(h_list)
| ajayhk/algorithms | greedy/k-clustering-hamming.py | Python | apache-2.0 | 6,457 | 0.038098 |
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# pygtail - a python "port" of logtail2
# Copyright (C) 2011 Brad Greenlee <brad@footle.org>
#
# Derived from logcheck <http://logcheck.org>
# Copyright (C) 2003 Jonathan Middleton <jjm@ixtab.org.uk>
# Copyright (C) 2001 Paul Slootman <paul@debian.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from os import stat
from os.path import exists
import sys
import glob
import string
from optparse import OptionParser
__version__ = '0.2.1'
class Pygtail(object):
"""
Creates an iterable object that returns only unread lines.
"""
def __init__(self, filename, offset_file=None, paranoid=False):
self.filename = filename
self.paranoid = paranoid
self._offset_file = offset_file or "%s.offset" % self.filename
self._offset_file_inode = 0
self._offset = 0
self._fh = None
self._rotated_logfile = None
# if offset file exists, open and parse it
if exists(self._offset_file):
offset_fh = open(self._offset_file, "r")
(self._offset_file_inode, self._offset) = \
[string.atoi(line.strip()) for line in offset_fh]
offset_fh.close()
if self._offset_file_inode != stat(self.filename).st_ino:
# The inode has changed, so the file might have been rotated.
# Look for the rotated file and process that if we find it.
self._rotated_logfile = self._determine_rotated_logfile()
def __iter__(self):
return self
def next(self):
"""
Return the next line in the file, updating the offset.
"""
try:
line = self._filehandle().next()
except StopIteration:
# we've reached the end of the file; if we're processing the
# rotated log file, we can continue with the actual file; otherwise
# update the offset file
if self._rotated_logfile:
self._rotated_logfile = None
self._fh.close()
self._offset = 0
# open up current logfile and continue
try:
line = self._filehandle().next()
except StopIteration: # oops, empty file
self._update_offset_file()
raise
else:
self._update_offset_file()
raise
if self.paranoid:
self._update_offset_file()
return line
def readlines(self):
"""
Read in all unread lines and return them as a list.
"""
return [line for line in self]
def read(self):
"""
Read in all unread lines and return them as a single string.
"""
lines = self.readlines()
if lines:
return ''.join(lines)
else:
return None
def _filehandle(self):
"""
Return a filehandle to the file being tailed, with the position set
to the current offset.
"""
if not self._fh or self._fh.closed:
filename = self._rotated_logfile or self.filename
self._fh = open(filename, "r")
self._fh.seek(self._offset)
return self._fh
def _update_offset_file(self):
"""
Update the offset file with the current inode and offset.
"""
offset = self._filehandle().tell()
inode = stat(self.filename).st_ino
fh = open(self._offset_file, "w")
fh.write("%s\n%s\n" % (inode, offset))
fh.close()
def _determine_rotated_logfile(self):
"""
We suspect the logfile has been rotated, so try to guess what the
rotated filename is, and return it.
"""
rotated_filename = self._check_rotated_filename_candidates()
if (rotated_filename and exists(rotated_filename) and
stat(rotated_filename).st_ino == self._offset_file_inode):
return rotated_filename
else:
return None
def _check_rotated_filename_candidates(self):
"""
Check for various rotated logfile filename patterns and return the first
match we find.
"""
# savelog(8)
candidate = "%s.0" % self.filename
if (exists(candidate) and exists("%s.1.gz" % self.filename) and
(stat(candidate).st_mtime > stat("%s.1.gz" % self.filename).st_mtime)):
return candidate
# logrotate(8)
candidate = "%s.1" % self.filename
if exists(candidate):
return candidate
# dateext rotation scheme
candidates = glob.glob("%s-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]" % self.filename)
if candidates:
candidates.sort()
return candidates[-1] # return most recent
# no match
return None
def main():
# command-line parsing
cmdline = OptionParser(usage="usage: %prog [options] logfile",
description="Print log file lines that have not been read.")
cmdline.add_option("--offset-file", "-o", action="store",
help="File to which offset data is written (default: <logfile>.offset).")
cmdline.add_option("--paranoid", "-p", action="store_true",
help="Update the offset file every time we read a line (as opposed to"
" only when we reach the end of the file).")
options, args = cmdline.parse_args()
if (len(args) != 1):
cmdline.error("Please provide a logfile to read.")
pygtail = Pygtail(args[0],
offset_file=options.offset_file,
paranoid=options.paranoid)
for line in pygtail:
sys.stdout.write(line)
if __name__ == "__main__":
main()
|
mariodebian/server-stats-system-agent
|
sssa/pygtail.py
|
Python
|
gpl-2.0
| 6,429 | 0.002489 |
# -*- coding: utf-8 -*-
"""
A function f is defined by the rule that f(n) = n if n < 3 and f(n) = f(n - 1) + 2f(n - 2) + 3f(n - 3) if n >= 3.
Write a procedure that computes f by means of a recursive process. Write a procedure that computes f by means of an
iterative process.
"""
from operator import lt, sub, add, mul
def f_recursive(n):
if lt(n, 3):
return n
r1 = f_recursive(sub(n, 1))
r2 = f_recursive(sub(n, 2))
r3 = f_recursive(sub(n, 3))
return add(
add(r1, mul(2, r2)),
mul(3, r3)
)
def f_iterative(n):
def f_iter(a, b, c, count):
if count == 0:
return c
return f_iter(
add(
add(a, mul(2, b)),
mul(3, c)
),
a,
b,
sub(count, 1),
)
return f_iter(2, 1, 0, n)
def run_the_magic():
N = 5
from timeit import Timer
for n in range(N + 1):
print('n = %(n)s' % locals())
print('(f-recursive %(n)s)' % locals(), f_recursive(n), sep='\n')
print('(f-iterative %(n)s)' % locals(), f_iterative(n), sep='\n')
timer_rec = Timer(stmt="f_recursive(%(n)s)" % locals(), setup="from Chapter1.exercise1_11 import f_recursive")
timer_iter = Timer(stmt="f_iterative(%(n)s)" % locals(), setup="from Chapter1.exercise1_11 import f_iterative")
print(
'Mean execution time:',
'\t-(f-recursive %(n)s): {}'.format(timer_rec.timeit()) % locals(),
'\t-(f-iterative %(n)s): {}'.format(timer_iter.timeit()) % locals(),
sep='\n',
)
print('-' * 20)
if __name__ == '__main__':
run_the_magic()
|
aoyono/sicpy
|
Chapter1/exercises/exercise1_11.py
|
Python
|
mit
| 1,681 | 0.002974 |
__author__ = 'ronalddekker'
# imports
from xml.dom.pulldom import START_ELEMENT, parse
from xml.dom.minidom import NamedNodeMap
def segmentate_xml_file(xml_file):
# parse the file, XML event after XML event
# NOTE: the variable name 'doc' is not optimal (the variable is only a pointer to a stream of events)
doc = parse(xml_file)
print(str(doc)) # doc = DOMEventStream
segment = 0
for event, node in doc:
if event == START_ELEMENT and node.localName == "div" and node.attributes["type"].value == "act":
print(event, node.localName, node.attributes["type"].value, node.attributes.items())
segment += 1
yield(event, node, doc, segment)
# We define a function which tells the computer what to do when an act is encountered.
def segmentate_xml_file_and_write_segments_to_disc(xml_file,
filename="/Users/ronalddekker/Desktop/CollateX/Elisa Files/1823_act_"):
for (event, node, doc, segment) in segmentate_xml_file(xml_file):
doc.expandNode(node)
# print("Act "+str(act)+" contains: "+str(node.toxml()))
segment_xml_file = open(filename +str(segment)+".xml", mode="w")
segment_xml_file.write(node.toxml())
segment_xml_file.close()
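# Illustrative usage (the input file name is hypothetical):
#   segmentate_xml_file_and_write_segments_to_disc("1823_play.xml")
# Every <div type="act"> element is expanded in full and written to
# <filename prefix><segment number>.xml, using the hard-coded default path above.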
|
DiXiT-eu/collatex-tutorial
|
unit8/sydney-material/integration/xml_segmentation.py
|
Python
|
gpl-3.0
| 1,298 | 0.006163 |
import unittest
from clandestined import RendezvousHash
class RendezvousHashTestCase(unittest.TestCase):
def test_init_no_options(self):
rendezvous = RendezvousHash()
self.assertEqual(0, len(rendezvous.nodes))
self.assertEqual(1361238019, rendezvous.hash_function('6666'))
def test_init(self):
nodes = ['0', '1', '2']
rendezvous = RendezvousHash(nodes=nodes)
self.assertEqual(3, len(rendezvous.nodes))
self.assertEqual(1361238019, rendezvous.hash_function('6666'))
def test_seed(self):
rendezvous = RendezvousHash(seed=10)
self.assertEqual(2981722772, rendezvous.hash_function('6666'))
def test_add_node(self):
rendezvous = RendezvousHash()
rendezvous.add_node('1')
self.assertEqual(1, len(rendezvous.nodes))
rendezvous.add_node('1')
self.assertEqual(1, len(rendezvous.nodes))
rendezvous.add_node('2')
self.assertEqual(2, len(rendezvous.nodes))
rendezvous.add_node('1')
self.assertEqual(2, len(rendezvous.nodes))
def test_remove_node(self):
nodes = ['0', '1', '2']
rendezvous = RendezvousHash(nodes=nodes)
rendezvous.remove_node('2')
self.assertEqual(2, len(rendezvous.nodes))
self.assertRaises(ValueError, rendezvous.remove_node, '2')
self.assertEqual(2, len(rendezvous.nodes))
rendezvous.remove_node('1')
self.assertEqual(1, len(rendezvous.nodes))
rendezvous.remove_node('0')
self.assertEqual(0, len(rendezvous.nodes))
def test_find_node(self):
nodes = ['0', '1', '2']
rendezvous = RendezvousHash(nodes=nodes)
self.assertEqual('0', rendezvous.find_node('ok'))
self.assertEqual('1', rendezvous.find_node('mykey'))
self.assertEqual('2', rendezvous.find_node('wat'))
def test_find_node_after_removal(self):
nodes = ['0', '1', '2']
rendezvous = RendezvousHash(nodes=nodes)
rendezvous.remove_node('1')
self.assertEqual('0', rendezvous.find_node('ok'))
self.assertEqual('0', rendezvous.find_node('mykey'))
self.assertEqual('2', rendezvous.find_node('wat'))
def test_find_node_after_addition(self):
nodes = ['0', '1', '2']
rendezvous = RendezvousHash(nodes=nodes)
self.assertEqual('0', rendezvous.find_node('ok'))
self.assertEqual('1', rendezvous.find_node('mykey'))
self.assertEqual('2', rendezvous.find_node('wat'))
self.assertEqual('2', rendezvous.find_node('lol'))
rendezvous.add_node('3')
self.assertEqual('0', rendezvous.find_node('ok'))
self.assertEqual('1', rendezvous.find_node('mykey'))
self.assertEqual('2', rendezvous.find_node('wat'))
self.assertEqual('3', rendezvous.find_node('lol'))
class RendezvousHashIntegrationTestCase(unittest.TestCase):
def test_grow(self):
rendezvous = RendezvousHash()
placements = {}
for i in range(10):
rendezvous.add_node(str(i))
placements[str(i)] = []
for i in range(1000):
node = rendezvous.find_node(str(i))
placements[node].append(i)
new_placements = {}
for i in range(20):
rendezvous.add_node(str(i))
new_placements[str(i)] = []
for i in range(1000):
node = rendezvous.find_node(str(i))
new_placements[node].append(i)
keys = [k for sublist in placements.values() for k in sublist]
new_keys = [k for sublist in new_placements.values() for k in sublist]
self.assertEqual(sorted(keys), sorted(new_keys))
added = 0
removed = 0
for node, assignments in new_placements.items():
after = set(assignments)
before = set(placements.get(node, []))
removed += len(before.difference(after))
added += len(after.difference(before))
self.assertEqual(added, removed)
self.assertEqual(1062, (added + removed))
def test_shrink(self):
rendezvous = RendezvousHash()
placements = {}
for i in range(10):
rendezvous.add_node(str(i))
placements[str(i)] = []
for i in range(1000):
node = rendezvous.find_node(str(i))
placements[node].append(i)
rendezvous.remove_node('9')
new_placements = {}
for i in range(9):
new_placements[str(i)] = []
for i in range(1000):
node = rendezvous.find_node(str(i))
new_placements[node].append(i)
keys = [k for sublist in placements.values() for k in sublist]
new_keys = [k for sublist in new_placements.values() for k in sublist]
self.assertEqual(sorted(keys), sorted(new_keys))
added = 0
removed = 0
for node, assignments in placements.items():
after = set(assignments)
before = set(new_placements.get(node, []))
removed += len(before.difference(after))
added += len(after.difference(before))
self.assertEqual(added, removed)
self.assertEqual(202, (added + removed))
if __name__ == '__main__':
unittest.main()
|
ewdurbin/clandestined-python
|
clandestined/test/test_rendezvous_hash.py
|
Python
|
mit
| 5,256 | 0.00019 |
#!/usr/bin/env python
import ast
import atexit
from codecs import open
from distutils.spawn import find_executable
import os
import sys
import subprocess
import setuptools
import setuptools.command.sdist
from setuptools.command.test import test
HERE = os.path.abspath(os.path.dirname(__file__))
setuptools.command.sdist.READMES = tuple(list(getattr(setuptools.command.sdist, 'READMES', ())) + ['README.md'])
NAME = 'jira-context'
NAME_FILE = NAME.replace('-', '_')
PACKAGE = False
def get_metadata(main_file):
"""Get metadata about the package/module.
Positional arguments:
main_file -- python file path within `HERE` which has __author__ and the others defined as global variables.
Returns:
Dictionary to be passed into setuptools.setup().
"""
with open(os.path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
with open(os.path.join(HERE, main_file), encoding='utf-8') as f:
lines = [l.strip() for l in f if l.startswith('__')]
metadata = ast.literal_eval("{'" + ", '".join([l.replace(' = ', "': ") for l in lines]) + '}')
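        # Illustration of the string built above (hypothetical module contents):
        #   lines == ["__author__ = 'Jane Doe'", "__license__ = 'MIT'", "__version__ = '1.2.3'"]
        #   built == "{'__author__': 'Jane Doe', '__license__': 'MIT', '__version__': '1.2.3'}"
        # ast.literal_eval() then turns that string into a plain dict.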
__author__, __license__, __version__ = [metadata[k] for k in ('__author__', '__license__', '__version__')]
everything = dict(version=__version__, long_description=long_description, author=__author__, license=__license__)
if not all(everything.values()):
raise ValueError('Failed to obtain metadata from package/module.')
return everything
class PyTest(test):
TEST_ARGS = ['--cov-report', 'term-missing', '--cov', NAME_FILE, 'tests']
def finalize_options(self):
test.finalize_options(self)
setattr(self, 'test_args', self.TEST_ARGS)
setattr(self, 'test_suite', True)
def run_tests(self):
# Import here, cause outside the eggs aren't loaded.
pytest = __import__('pytest')
err_no = pytest.main(self.test_args)
sys.exit(err_no)
class PyTestPdb(PyTest):
TEST_ARGS = ['--pdb', 'tests']
class PyTestCovWeb(PyTest):
TEST_ARGS = ['--cov-report', 'html', '--cov', NAME_FILE, 'tests']
def run_tests(self):
if find_executable('open'):
atexit.register(lambda: subprocess.call(['open', os.path.join(HERE, 'htmlcov', 'index.html')]))
PyTest.run_tests(self)
class CmdStyle(setuptools.Command):
user_options = []
CMD_ARGS = ['flake8', '--max-line-length', '120', '--statistics', NAME_FILE + ('' if PACKAGE else '.py')]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
subprocess.call(self.CMD_ARGS)
class CmdLint(CmdStyle):
CMD_ARGS = ['pylint', '--max-line-length', '120', NAME_FILE + ('' if PACKAGE else '.py')]
ALL_DATA = dict(
name=NAME,
description='Cache JIRA basic authentication sessions to disk for console apps.',
url='https://github.com/Robpol86/{0}'.format(NAME),
author_email='robpol86@gmail.com',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: MacOS X',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries',
'Topic :: Text Processing :: Markup',
],
keywords='jira',
py_modules=[NAME_FILE],
zip_safe=True,
install_requires=['jira'],
tests_require=['pytest', 'pytest-cov', 'pytest-httpretty'],
cmdclass=dict(test=PyTest, testpdb=PyTestPdb, testcovweb=PyTestCovWeb, style=CmdStyle, lint=CmdLint),
# Pass the rest from get_metadata().
**get_metadata(os.path.join(NAME_FILE + ('/__init__.py' if PACKAGE else '.py')))
)
if __name__ == '__main__':
setuptools.setup(**ALL_DATA)
|
Robpol86/jira-context
|
setup.py
|
Python
|
mit
| 4,088 | 0.00318 |
from datetime import date
from rest_framework import status as status_code
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from app.models import Passphrase, SlackUser
from pantry.models import Pantry
class PantryViewSet(viewsets.ViewSet):
"""
A simple ViewSet for accessing Pantry details
"""
queryset = Pantry.objects.all()
@action(methods=["post"], url_path="tap", detail=False)
def pantrytap(self, request):
"""
A method that taps a user via an NFC card for the pantry service
---
parameters:
- name: slackUserId
description: slack ID
required: true
type: string
paramType: form
"""
slack_id = request.POST.get("slackUserId")
if not slack_id:
content = {"message": "You're unauthorized to make this request"}
return Response(content, status=status_code.HTTP_401_UNAUTHORIZED)
user = SlackUser.objects.filter(slack_id=slack_id).first()
if not user:
content = {"message": "The user doesnt exist on waitress"}
return Response(content, status=status_code.HTTP_404_NOT_FOUND)
user_tapped = Pantry.is_tapped(user.id)
content = {"firstname": user.firstname, "lastname": user.lastname}
if not user.is_active:
content[
"message"
] = f"{user.firstname} has been deactivated. Contact the Ops team."
return Response(content, status=status_code.HTTP_400_BAD_REQUEST)
if user_tapped:
content["message"] = f"{user.firstname} has tapped already."
return Response(content, status=status_code.HTTP_406_NOT_ACCEPTABLE)
content["message"] = f"{user.firstname} has successfully tapped."
user_pantry_session = Pantry(user=user)
user_pantry_session.save()
return Response(content, status=status_code.HTTP_200_OK)
@action(methods=["post"], url_path="auth", detail=False)
def auth(self, request):
passphrase = request.POST.get("passphrase", "")
if not passphrase:
content = {"status": "failed", "message": "Passphrase not supplied"}
return Response(content, status=status_code.HTTP_400_BAD_REQUEST)
exists = Passphrase.exists(passphrase)
if not exists.status:
content = {
"status": "failed",
"message": "Invalid Passphrase. Reach out to Ops!",
}
return Response(content, status=status_code.HTTP_401_UNAUTHORIZED)
content = {"status": "success", "message": "Successfully authenticated."}
return Response(content, status=status_code.HTTP_200_OK)
@action(methods=["get"], url_path="report", detail=False)
def report(self, request):
reportDate = request.GET.get("date", date.today())
queryset = self.queryset.filter(date=reportDate).order_by("date")
content = {
"status": "success",
"data": {"date": reportDate, "count": queryset.count()},
}
return Response(content, status=status_code.HTTP_200_OK)
|
waitress-andela/waitress
|
waitress/pantry/viewsets.py
|
Python
|
mit
| 3,247 | 0.000924 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.394491
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/mobile/channels.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class channels(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(channels, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<html>\r
<head>\r
\t<title>OpenWebif</title>\r
\t<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
\t<meta name="viewport" content="user-scalable=no, width=device-width"/>\r
\t<meta name="apple-mobile-web-app-capable" content="yes" />\r
\t<link rel="stylesheet" type="text/css" href="/css/jquery.mobile-1.0.min.css" media="screen"/>\r
\t<link rel="stylesheet" type="text/css" href="/css/iphone.css" media="screen"/>\r
\t<script src="/js/jquery-1.6.2.min.js"></script>\r
\t<script src="/js/jquery.mobile-1.0.min.js"></script>\r
</head>\r
<body> \r
\t<div data-role="page">\r
\r
\t\t<div id="header">\r
\t\t\t<div class="button" onClick="history.back()">''')
_v = VFFSL(SL,"tstrings",True)['back'] # u"$tstrings['back']" on line 17, col 49
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['back']")) # from line 17, col 49.
write(u'''</div>\r
\t\t\t<h1><a style="color:#FFF;text-decoration:none;" href=\'/mobile\'>OpenWebif</a></h1>
\t\t</div>\r
\r
\t\t<div id="contentContainer">\r
\t\t\t<ul data-role="listview" data-inset="true" data-theme="d">\r
\t\t\t\t<li data-role="list-divider" role="heading" data-theme="b">''')
_v = VFFSL(SL,"tstrings",True)['channels'] # u"$tstrings['channels']" on line 23, col 64
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['channels']")) # from line 23, col 64.
write(u'''</li>\r
''')
for channel in VFFSL(SL,"channels",True): # generated from line 24, col 5
write(u'''\t\t\t\t<li>\r
\t\t\t\t<a href="/mobile/channelinfo?sref=''')
_v = VFFSL(SL,"channel.ref",True) # u'$channel.ref' on line 26, col 39
if _v is not None: write(_filter(_v, rawExpr=u'$channel.ref')) # from line 26, col 39.
write(u'''" style="padding: 3px;">\r
\t\t\t\t<span class="ui-li-heading" style="margin-top: 0px; margin-bottom: 3px;">''')
_v = VFFSL(SL,"channel.name",True) # u'$channel.name' on line 27, col 78
if _v is not None: write(_filter(_v, rawExpr=u'$channel.name')) # from line 27, col 78.
write(u'''</span>\r
''')
if VFN(VFFSL(SL,"channel",True),"has_key",False)('now_title'): # generated from line 28, col 5
write(u'''\t\t\t\t<span class="ui-li-desc" style="margin-bottom: 0px;">''')
_v = VFFSL(SL,"channel.now_title",True) # u'$channel.now_title' on line 29, col 58
if _v is not None: write(_filter(_v, rawExpr=u'$channel.now_title')) # from line 29, col 58.
write(u'''</span>\r
''')
write(u'''\t\t\t\t</a>\r
\t\t\t\t</li>\r
''')
write(u'''\t\t\t</ul>\r
\t\t</div>\r
\r
\t\t<div id="footer">\r
\t\t\t<p>OpenWebif Mobile</p>\r
\t\t\t<a onclick="document.location.href=\'/index?mode=fullpage\';return false;" href="#">''')
_v = VFFSL(SL,"tstrings",True)['show_full_openwebif'] # u"$tstrings['show_full_openwebif']" on line 39, col 86
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['show_full_openwebif']")) # from line 39, col 86.
write(u'''</a>\r
\t\t</div>\r
\t\t\r
\t</div>\r
</body>\r
</html>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_channels= 'respond'
## END CLASS DEFINITION
if not hasattr(channels, '_initCheetahAttributes'):
templateAPIClass = getattr(channels, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(channels)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=channels()).run()
|
pli3/e2-openwbif
|
plugin/controllers/views/mobile/channels.py
|
Python
|
gpl-2.0
| 7,404 | 0.013236 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-29 13:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0054_add_field_user_to_productionform'),
]
operations = [
migrations.AddField(
model_name='applicationform',
name='requires_development',
field=models.BooleanField(default=False, verbose_name='requires_development'),
),
]
|
efornal/pulmo
|
app/migrations/0055_applicationform_requires_development.py
|
Python
|
gpl-3.0
| 523 | 0.001912 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import pwd
import stat
import time
import os.path
import logging
def root_dir():
root_dir = os.path.split(os.path.realpath(__file__))[0]
return root_dir
def get_logger(name):
def local_date():
return str(time.strftime("%Y-%m-%d", time.localtime()))
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=os.path.join(root_dir(), 'log', '%s-%s.log' % (name, local_date())),
filemode='a+')
logger = logging.getLogger(name)
return logger
def elapsetime(func):
    '''Measure and print the elapsed execution time of the decorated function.
    '''
def wapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
print("execute %s used time:%.2f s" % (func.__name__, (end - start)))
return result
return wapper
def elapsetime(ostream=sys.stdout):
    '''Decorator factory: measure elapsed time and write it to the given stream.'''
def decorator(func):
def wapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
print >> ostream, "execute %s used time:%.2f s" % (func.__name__, (end - start))
return result
return wapper
return decorator
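# NOTE: the parameterized decorator factory above shadows the simpler elapsetime
# defined earlier in this module, so only the factory form remains usable.
# Usage sketch (the stream argument is illustrative):
#   @elapsetime(sys.stderr)
#   def work():
#       ...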
def is_readable(path, user):
user_info = pwd.getpwnam(user)
uid = user_info.pw_uid
gid = user_info.pw_gid
s = os.stat(path)
mode = s[stat.ST_MODE]
return (((s[stat.ST_UID] == uid) and (mode & stat.S_IRUSR > 0)) or \
((s[stat.ST_GID] == gid) and (mode & stat.S_IRGRP > 0)) or \
(mode & stat.S_IROTH > 0))
def is_writable(path, user):
user_info = pwd.getpwnam(user)
uid = user_info.pw_uid
gid = user_info.pw_gid
s = os.stat(path)
mode = s[stat.ST_MODE]
return (((s[stat.ST_UID] == uid) and (mode & stat.S_IWUSR > 0)) or \
((s[stat.ST_GID] == gid) and (mode & stat.S_IWGRP > 0)) or \
(mode & stat.S_IWOTH > 0))
def is_executable(path, user):
user_info = pwd.getpwnam(user)
uid = user_info.pw_uid
gid = user_info.pw_gid
s = os.stat(path)
mode = s[stat.ST_MODE]
return (((s[stat.ST_UID] == uid) and (mode & stat.S_IXUSR > 0)) or \
((s[stat.ST_GID] == gid) and (mode & stat.S_IXGRP > 0)) or \
(mode & stat.S_IXOTH > 0))
a = open('log.txt', 'a+')
@elapsetime(a)
def test():
print "hello"
if __name__ == '__main__':
test()
|
cwlseu/recipes
|
pyrecipes/utils.py
|
Python
|
gpl-3.0
| 2,631 | 0.009502 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import db
from datetime import datetime
class User(db.Model):
__table_args__ = {
'mysql_engine': 'InnoDB',
'mysql_charset': 'utf8mb4'
}
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
openid = db.Column(db.String(32), unique=True, nullable=False)
nickname = db.Column(db.String(32), nullable=True)
realname = db.Column(db.String(32), nullable=True)
classname = db.Column(db.String(32), nullable=True)
sex = db.Column(db.SmallInteger, default=0, nullable=False)
province = db.Column(db.String(20), nullable=True)
city = db.Column(db.String(20), nullable=True)
country = db.Column(db.String(20), nullable=True)
headimgurl = db.Column(db.String(150), nullable=True)
regtime = db.Column(db.DateTime, default=datetime.now, nullable=False)
def __init__(self, openid, nickname=None, realname=None,
classname=None, sex=None, province=None, city=None,
country=None, headimgurl=None, regtime=None):
self.openid = openid
self.nickname = nickname
self.realname = realname
self.classname = classname
self.sex = sex
self.province = province
self.city = city
self.country = country
self.headimgurl = headimgurl
self.regtime = regtime
def __repr__(self):
return '<openid %r>' % self.openid
def save(self):
db.session.add(self)
db.session.commit()
return self
def update(self):
db.session.commit()
return self
|
15klli/WeChat-Clone
|
main/models/user.py
|
Python
|
mit
| 1,617 | 0 |
# -*- coding: utf-8 -*-
# (c) 2015 Andreas Motl, Elmyra UG <andreas.motl@elmyra.de>
from kotori.version import __VERSION__
from pyramid.config import Configurator
def main(global_config, **settings):
"""This function returns a Pyramid WSGI application."""
settings['SOFTWARE_VERSION'] = __VERSION__
config = Configurator(settings=settings)
# Addons
config.include('pyramid_jinja2')
# http://docs.pylonsproject.org/projects/pyramid-jinja2/en/latest/#adding-or-overriding-a-renderer
config.add_jinja2_renderer('.html')
config.include('cornice')
# Views and routes
config.add_static_view('static/app', 'static/app', cache_max_age=0)
config.add_static_view('static/lib', 'static/lib', cache_max_age=60 * 24)
config.add_route('index', '/')
config.scan()
return config.make_wsgi_app()
|
daq-tools/kotori
|
kotori/frontend/app.py
|
Python
|
agpl-3.0
| 843 | 0 |
import logging
import os
import re
import sys
import time
import warnings
import ConfigParser
import StringIO
import nose.case
from nose.plugins import Plugin
from sqlalchemy import util, log as sqla_log
from sqlalchemy.test import testing, config, requires
from sqlalchemy.test.config import (
_create_testing_engine, _engine_pool, _engine_strategy, _engine_uri, _list_dbs, _log,
_prep_testing_database, _require, _reverse_topological, _server_side_cursors,
_set_table_options, base_config, db, db_label, db_url, file_config, post_configure)
log = logging.getLogger('nose.plugins.sqlalchemy')
class NoseSQLAlchemy(Plugin):
"""
Handles the setup and extra properties required for testing SQLAlchemy
"""
enabled = True
name = 'sqlalchemy'
score = 100
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
opt = parser.add_option
opt("--log-info", action="callback", type="string", callback=_log,
help="turn on info logging for <LOG> (multiple OK)")
opt("--log-debug", action="callback", type="string", callback=_log,
help="turn on debug logging for <LOG> (multiple OK)")
opt("--require", action="append", dest="require", default=[],
help="require a particular driver or module version (multiple OK)")
opt("--db", action="store", dest="db", default="sqlite",
help="Use prefab database uri")
opt('--dbs', action='callback', callback=_list_dbs,
help="List available prefab dbs")
opt("--dburi", action="store", dest="dburi",
help="Database uri (overrides --db)")
opt("--dropfirst", action="store_true", dest="dropfirst",
help="Drop all tables in the target database first (use with caution on Oracle, "
"MS-SQL)")
opt("--mockpool", action="store_true", dest="mockpool",
help="Use mock pool (asserts only one connection used)")
opt("--enginestrategy", action="callback", type="string",
callback=_engine_strategy,
help="Engine strategy (plain or threadlocal, defaults to plain)")
opt("--reversetop", action="store_true", dest="reversetop", default=False,
help="Reverse the collection ordering for topological sorts (helps "
"reveal dependency issues)")
opt("--unhashable", action="store_true", dest="unhashable", default=False,
help="Disallow SQLAlchemy from performing a hash() on mapped test objects.")
opt("--noncomparable", action="store_true", dest="noncomparable", default=False,
help="Disallow SQLAlchemy from performing == on mapped test objects.")
opt("--truthless", action="store_true", dest="truthless", default=False,
help="Disallow SQLAlchemy from truth-evaluating mapped test objects.")
opt("--serverside", action="callback", callback=_server_side_cursors,
help="Turn on server side cursors for PG")
opt("--mysql-engine", action="store", dest="mysql_engine", default=None,
help="Use the specified MySQL storage engine for all tables, default is "
"a db-default/InnoDB combo.")
opt("--table-option", action="append", dest="tableopts", default=[],
help="Add a dialect-specific table option, key=value")
global file_config
file_config = ConfigParser.ConfigParser()
file_config.readfp(StringIO.StringIO(base_config))
file_config.read(['test.cfg', os.path.expanduser('~/.satest.cfg')])
config.file_config = file_config
def configure(self, options, conf):
Plugin.configure(self, options, conf)
self.options = options
def begin(self):
testing.db = db
testing.requires = requires
# Lazy setup of other options (post coverage)
for fn in post_configure:
fn(self.options, file_config)
def describeTest(self, test):
return ""
def wantClass(self, cls):
"""Return true if you want the main test selector to collect
tests from this class, false if you don't, and None if you don't
care.
:Parameters:
cls : class
The class being examined by the selector
"""
if not issubclass(cls, testing.TestBase):
return False
else:
if (hasattr(cls, '__whitelist__') and testing.db.name in cls.__whitelist__):
return True
else:
return not self.__should_skip_for(cls)
def __should_skip_for(self, cls):
if hasattr(cls, '__requires__'):
def test_suite(): return 'ok'
test_suite.__name__ = cls.__name__
for requirement in cls.__requires__:
check = getattr(requires, requirement)
if check(test_suite)() != 'ok':
# The requirement will perform messaging.
return True
if cls.__unsupported_on__:
spec = testing.db_spec(*cls.__unsupported_on__)
if spec(testing.db):
print "'%s' unsupported on DB implementation '%s'" % (
cls.__class__.__name__, testing.db.name)
return True
if getattr(cls, '__only_on__', None):
spec = testing.db_spec(*util.to_list(cls.__only_on__))
if not spec(testing.db):
print "'%s' unsupported on DB implementation '%s'" % (
cls.__class__.__name__, testing.db.name)
return True
if getattr(cls, '__skip_if__', False):
for c in getattr(cls, '__skip_if__'):
if c():
print "'%s' skipped by %s" % (
cls.__class__.__name__, c.__name__)
return True
for rule in getattr(cls, '__excluded_on__', ()):
if testing._is_excluded(*rule):
print "'%s' unsupported on DB %s version %s" % (
cls.__class__.__name__, testing.db.name,
_server_version())
return True
return False
def beforeTest(self, test):
testing.resetwarnings()
def afterTest(self, test):
testing.resetwarnings()
def afterContext(self):
testing.global_cleanup_assertions()
#def handleError(self, test, err):
#pass
#def finalize(self, result=None):
#pass
|
obeattie/sqlalchemy
|
lib/sqlalchemy/test/noseplugin.py
|
Python
|
mit
| 6,603 | 0.004695 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.addons.hr_holidays.tests.common import TestHrHolidaysBase
from openerp.exceptions import AccessError
from openerp.exceptions import ValidationError
from openerp.tools import mute_logger
class TestHolidaysFlow(TestHrHolidaysBase):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_leave_request_flow(self):
""" Testing leave request flow """
cr, uid = self.cr, self.uid
def _check_holidays_status(holiday_status, ml, lt, rl, vrl):
self.assertEqual(holiday_status.max_leaves, ml,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.leaves_taken, lt,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.remaining_leaves, rl,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.virtual_remaining_leaves, vrl,
'hr_holidays: wrong type days computation')
# HrUser creates some holiday statuses -> crash because only HrManagers should do this
with self.assertRaises(AccessError):
self.holidays_status_dummy = self.hr_holidays_status.create(cr, self.user_hruser_id, {
'name': 'UserCheats',
'limit': True,
})
# HrManager creates some holiday statuses
self.holidays_status_0 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
'name': 'WithMeetingType',
'limit': True,
'categ_id': self.registry('calendar.event.type').create(cr, self.user_hrmanager_id, {'name': 'NotLimitedMeetingType'}),
})
self.holidays_status_1 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
'name': 'NotLimited',
'limit': True,
})
self.holidays_status_2 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
'name': 'Limited',
'limit': False,
'double_validation': True,
})
# --------------------------------------------------
# Case1: unlimited type of leave request
# --------------------------------------------------
# Employee creates a leave request for another employee -> should crash
with self.assertRaises(ValidationError):
self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol10',
'employee_id': self.employee_hruser_id,
'holiday_status_id': self.holidays_status_1,
'date_from': (datetime.today() - relativedelta(days=1)),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
ids = self.hr_holidays.search(cr, uid, [('name', '=', 'Hol10')])
self.hr_holidays.unlink(cr, uid, ids)
# Employee creates a leave request in a no-limit category
hol1_id = self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol11',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_1,
'date_from': (datetime.today() - relativedelta(days=1)),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
hol1 = self.hr_holidays.browse(cr, self.user_hruser_id, hol1_id)
self.assertEqual(hol1.state, 'confirm', 'hr_holidays: newly created leave request should be in confirm state')
# Employee validates its leave request -> should not work
self.hr_holidays.signal_workflow(cr, self.user_employee_id, [hol1_id], 'validate')
hol1.refresh()
self.assertEqual(hol1.state, 'confirm', 'hr_holidays: employee should not be able to validate its own leave request')
# HrUser validates the employee leave request
self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol1_id], 'validate')
hol1.refresh()
self.assertEqual(hol1.state, 'validate', 'hr_holidays: validates leave request should be in validate state')
# --------------------------------------------------
# Case2: limited type of leave request
# --------------------------------------------------
        # Employee creates a new leave request at the same time -> crash, overlapping leaves are not allowed
with self.assertRaises(ValidationError):
self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol21',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_1,
'date_from': (datetime.today() - relativedelta(days=1)).strftime('%Y-%m-%d %H:%M'),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
# Employee creates a leave request in a limited category -> crash, not enough days left
with self.assertRaises(ValidationError):
self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol22',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2,
'date_from': (datetime.today() + relativedelta(days=0)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=1)),
'number_of_days_temp': 1,
})
# Clean transaction
self.hr_holidays.unlink(cr, uid, self.hr_holidays.search(cr, uid, [('name', 'in', ['Hol21', 'Hol22'])]))
# HrUser allocates some leaves to the employee
aloc1_id = self.hr_holidays.create(cr, self.user_hruser_id, {
'name': 'Days for limited category',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2,
'type': 'add',
'number_of_days_temp': 2,
})
# HrUser validates the allocation request
self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [aloc1_id], 'validate')
self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [aloc1_id], 'second_validate')
# Checks Employee has effectively some days left
hol_status_2 = self.hr_holidays_status.browse(cr, self.user_employee_id, self.holidays_status_2)
_check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)
# Employee creates a leave request in the limited category, now that he has some days left
hol2_id = self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol22',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2,
'date_from': (datetime.today() + relativedelta(days=2)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=3)),
'number_of_days_temp': 1,
})
hol2 = self.hr_holidays.browse(cr, self.user_hruser_id, hol2_id)
# Check left days: - 1 virtual remaining day
hol_status_2.refresh()
_check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 1.0)
# HrUser validates the first step
self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [hol2_id], 'validate')
hol2.refresh()
self.assertEqual(hol2.state, 'validate1',
'hr_holidays: first validation should lead to validate1 state')
# HrUser validates the second step
self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [hol2_id], 'second_validate')
hol2.refresh()
self.assertEqual(hol2.state, 'validate',
'hr_holidays: second validation should lead to validate state')
# Check left days: - 1 day taken
hol_status_2.refresh()
_check_holidays_status(hol_status_2, 2.0, 1.0, 1.0, 1.0)
# HrManager finds an error: he refuses the leave request
self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol2_id], 'refuse')
hol2.refresh()
self.assertEqual(hol2.state, 'refuse',
'hr_holidays: refuse should lead to refuse state')
# Check left days: 2 days left again
hol_status_2.refresh()
_check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)
# Annoyed, HrUser tries to fix its error and tries to reset the leave request -> does not work, only HrManager
self.hr_holidays.signal_workflow(cr, self.user_hruser_id, [hol2_id], 'reset')
self.assertEqual(hol2.state, 'refuse',
'hr_holidays: hr_user should not be able to reset a refused leave request')
# HrManager resets the request
self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol2_id], 'reset')
hol2.refresh()
self.assertEqual(hol2.state, 'draft',
'hr_holidays: resetting should lead to draft state')
        # HrManager changes the dates and puts in too many days -> crash when confirming
self.hr_holidays.write(cr, self.user_hrmanager_id, [hol2_id], {
'date_from': (datetime.today() + relativedelta(days=4)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=7)),
'number_of_days_temp': 4,
})
with self.assertRaises(ValidationError):
self.hr_holidays.signal_workflow(cr, self.user_hrmanager_id, [hol2_id], 'confirm')
|
odoousers2014/odoo
|
addons/hr_holidays/tests/test_holidays_flow.py
|
Python
|
agpl-3.0
| 10,541 | 0.003226 |
# -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to enter the connection parameters.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from PyQt5.QtSql import QSqlDatabase
from E5Gui.E5Completers import E5FileCompleter
from E5Gui import E5FileDialog
from .Ui_SqlConnectionDialog import Ui_SqlConnectionDialog
import Utilities
import UI.PixmapCache
class SqlConnectionDialog(QDialog, Ui_SqlConnectionDialog):
"""
Class implementing a dialog to enter the connection parameters.
"""
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget (QWidget)
"""
super(SqlConnectionDialog, self).__init__(parent)
self.setupUi(self)
self.databaseFileButton.setIcon(UI.PixmapCache.getIcon("open.png"))
self.databaseFileCompleter = E5FileCompleter()
self.okButton = self.buttonBox.button(QDialogButtonBox.Ok)
drivers = QSqlDatabase.drivers()
# remove compatibility names
if "QMYSQL3" in drivers:
drivers.remove("QMYSQL3")
if "QOCI8" in drivers:
drivers.remove("QOCI8")
if "QODBC3" in drivers:
drivers.remove("QODBC3")
if "QPSQL7" in drivers:
drivers.remove("QPSQL7")
if "QTDS7" in drivers:
drivers.remove("QTDS7")
self.driverCombo.addItems(drivers)
self.__updateDialog()
msh = self.minimumSizeHint()
self.resize(max(self.width(), msh.width()), msh.height())
def __updateDialog(self):
"""
Private slot to update the dialog depending on its contents.
"""
driver = self.driverCombo.currentText()
if driver.startswith("QSQLITE"):
self.databaseEdit.setCompleter(self.databaseFileCompleter)
self.databaseFileButton.setEnabled(True)
else:
self.databaseEdit.setCompleter(None)
self.databaseFileButton.setEnabled(False)
if self.databaseEdit.text() == "" or driver == "":
self.okButton.setEnabled(False)
else:
self.okButton.setEnabled(True)
@pyqtSlot(str)
def on_driverCombo_activated(self, txt):
"""
Private slot handling the selection of a database driver.
@param txt text of the driver combo (string)
"""
self.__updateDialog()
@pyqtSlot(str)
def on_databaseEdit_textChanged(self, txt):
"""
Private slot handling the change of the database name.
@param txt text of the edit (string)
"""
self.__updateDialog()
@pyqtSlot()
def on_databaseFileButton_clicked(self):
"""
Private slot to open a database file via a file selection dialog.
"""
startdir = self.databaseEdit.text()
dbFile = E5FileDialog.getOpenFileName(
self,
self.tr("Select Database File"),
startdir,
self.tr("All Files (*)"))
if dbFile:
self.databaseEdit.setText(Utilities.toNativeSeparators(dbFile))
def getData(self):
"""
Public method to retrieve the connection data.
@return tuple giving the driver name (string), the database name
(string), the user name (string), the password (string), the
host name (string) and the port (integer)
"""
return (
self.driverCombo.currentText(),
self.databaseEdit.text(),
self.usernameEdit.text(),
self.passwordEdit.text(),
self.hostnameEdit.text(),
self.portSpinBox.value(),
)
|
davy39/eric
|
SqlBrowser/SqlConnectionDialog.py
|
Python
|
gpl-3.0
| 3,950 | 0.00481 |
from werkzeug.exceptions import NotFound
from werkzeug.utils import redirect
from .models import URL
from .utils import expose
from .utils import Pagination
from .utils import render_template
from .utils import url_for
from .utils import validate_url
@expose("/")
def new(request):
error = url = ""
if request.method == "POST":
url = request.form.get("url")
alias = request.form.get("alias")
if not validate_url(url):
error = "I'm sorry but you cannot shorten this URL."
elif alias:
if len(alias) > 140:
error = "Your alias is too long"
elif "/" in alias:
error = "Your alias might not include a slash"
elif URL.load(alias):
error = "The alias you have requested exists already"
if not error:
url = URL(
target=url,
public="private" not in request.form,
shorty_id=alias if alias else None,
)
url.store()
uid = url.id
return redirect(url_for("display", uid=uid))
return render_template("new.html", error=error, url=url)
@expose("/display/<uid>")
def display(request, uid):
url = URL.load(uid)
if not url:
raise NotFound()
return render_template("display.html", url=url)
@expose("/u/<uid>")
def link(request, uid):
url = URL.load(uid)
if not url:
raise NotFound()
return redirect(url.target, 301)
@expose("/list/", defaults={"page": 1})
@expose("/list/<int:page>")
def list(request, page):
def wrap(doc):
data = doc.value
data["_id"] = doc.id
return URL.wrap(data)
code = """function(doc) { if (doc.public){ map([doc._id], doc); }}"""
docResults = URL.query(code)
results = [wrap(doc) for doc in docResults]
pagination = Pagination(results, 1, page, "list")
if pagination.page > 1 and not pagination.entries:
raise NotFound()
return render_template("list.html", pagination=pagination)
def not_found(request):
return render_template("not_found.html")
|
mitsuhiko/werkzeug
|
examples/couchy/views.py
|
Python
|
bsd-3-clause
| 2,120 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
cc_plugin_ncei/ncei_trajectory.py
'''
from compliance_checker.base import BaseCheck
from cc_plugin_ncei.ncei_base import TestCtx, NCEI1_1Check, NCEI2_0Check
from cc_plugin_ncei import util
from isodate import parse_duration
class NCEITrajectoryBase(BaseCheck):
_cc_spec = 'ncei-trajectory'
valid_feature_types = [
'trajectory',
'trajectory_id'
]
def check_dimensions(self, dataset):
'''
        Checks that the feature types of this dataset are consistent with a trajectory dataset
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are trajectory feature types')
message = ("{} must be a valid trajectory feature type. It must have dimensions of (trajectoryID, time)."
" And all coordinates must have dimensions (trajectoryID, time)")
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_cf_trajectory(dataset, variable)
is_valid = is_valid or util.is_single_trajectory(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable)
)
results.append(required_ctx.to_result())
return results
def check_trajectory_id(self, dataset):
'''
Checks that if a variable exists for the trajectory id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
exists_ctx = TestCtx(BaseCheck.MEDIUM, 'Variable defining "trajectory_id" exists')
trajectory_ids = dataset.get_variables_by_attributes(cf_role='trajectory_id')
# No need to check
exists_ctx.assert_true(trajectory_ids, 'variable defining cf_role="trajectory_id" exists')
if not trajectory_ids:
return exists_ctx.to_result()
results.append(exists_ctx.to_result())
test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the {} variable'.format(trajectory_ids[0].name))
test_ctx.assert_true(
getattr(trajectory_ids[0], 'long_name', '') != "",
"long_name attribute should exist and not be empty"
)
results.append(test_ctx.to_result())
return results
class NCEITrajectory1_1(NCEI1_1Check, NCEITrajectoryBase):
register_checker = True
_cc_spec_version = '1.1'
_cc_description = (
'This test checks the selected file against the NCEI netCDF trajectory Incomplete '
'template version 1.1 (found at https://www.nodc.noaa.gov/data/formats/netcdf/v1.1/'
'trajectoryIncomplete.cdl). The NCEI version 1.1 templates are based on “feature types”, '
'as identified by Unidata and CF, and conform to ACDD version 1.0 and CF version 1.6. You '
'can find more information about the version 1.1 templates at https://www.nodc.noaa.gov/'
'data/formats/netcdf/v1.1/. This test is specifically for the trajectory feature type '
'in an Incomplete multidimensional array representation. This representation is typically '
'used for a series of data points along a path through space with monotonically '
'increasing times.')
_cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v1.1/trajectoryIncomplete.cdl'
_cc_authors = 'Luke Campbell, Dan Maher'
_cc_checker_version = '2.1.0'
valid_templates = [
"NODC_NetCDF_Trajectory_Template_v1.1"
]
@classmethod
def beliefs(cls):
'''
Not applicable for gliders
'''
return {}
def check_required_attributes(self, dataset):
'''
Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Trajectory dataset')
required_ctx.assert_true(
getattr(dataset, 'nodc_template_version', '').lower() == self.valid_templates[0].lower(),
'nodc_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Trajectory',
'cdm_data_type attribute must be set to Trajectory'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'trajectory',
'featureType attribute must be set to trajectory'
)
results.append(required_ctx.to_result())
return results
class NCEITrajectory2_0(NCEI2_0Check, NCEITrajectoryBase):
register_checker = True
_cc_spec_version = '2.0'
_cc_description = (
'This test checks the selected file against the NCEI netCDF trajectory Incomplete '
'template version 2.0 (found at https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/'
'trajectoryIncomplete.cdl). The NCEI version 2.0 templates are based on “feature types”, '
'as identified by Unidata and CF, and conform to ACDD version 1.3 and CF version 1.6. You '
'can find more information about the version 2.0 templates at https://www.nodc.noaa.gov/'
'data/formats/netcdf/v2.0/. This test is specifically for the trajectory feature type '
'in an Incomplete multidimensional array representation. This representation is typically '
'used for a series of data points along a path through space with monotonically '
'increasing times.')
_cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v2.0/trajectoryIncomplete.cdl'
_cc_authors = 'Luke Campbell, Dan Maher'
_cc_checker_version = '2.3.0'
valid_templates = [
"NCEI_NetCDF_Trajectory_Template_v2.0"
]
def check_required_attributes(self, dataset):
'''
Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Trajectory dataset')
required_ctx.assert_true(
getattr(dataset, 'ncei_template_version', '').lower() == self.valid_templates[0].lower(),
'ncei_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Trajectory',
'cdm_data_type attribute must be set to Trajectory'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'trajectory',
'featureType attribute must be set to trajectory'
)
results.append(required_ctx.to_result())
return results
def check_recommended_attributes(self, dataset):
'''
Feature type specific check of global recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
# Check time_coverage_duration and resolution
for attr in ['time_coverage_duration', 'time_coverage_resolution']:
attr_value = getattr(dataset, attr, '')
try:
parse_duration(attr_value)
recommended_ctx.assert_true(True, '') # Score it True!
except Exception:
recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
results.append(recommended_ctx.to_result())
return results
|
ioos/cc-plugin-ncei
|
cc_plugin_ncei/ncei_trajectory.py
|
Python
|
apache-2.0
| 7,802 | 0.004747 |
from selenium.common.exceptions import NoSuchElementException
from .base import FunctionalTest, login_test_user_with_browser
class EditPostTest(FunctionalTest):
@login_test_user_with_browser
def test_modify_post(self):
self.browser.get(self.live_server_url)
self.move_to_default_board()
        # Jihun writes a post about 'django'.
self.add_post('pjango', 'Hello pjango')
        # He spots a typo in the post and decides to fix it.
        # He clicks the post to open it, then presses the edit button.
table = self.browser.find_element_by_id('id_post_list_table')
rows = table.find_elements_by_css_selector('tbody > tr > td > a')
rows[0].click()
self.browser.find_element_by_id('id_edit_post_button').click()
        # The web page title and header display 'Edit Post'.
header_text = self.browser.find_element_by_tag_name('h3').text
self.assertIn('글 수정', self.browser.title)
self.assertIn('글 수정', header_text)
        # The existing post title, 'pjango', is displayed.
titlebox = self.browser.find_element_by_id('id_post_title')
self.assertEqual(titlebox.get_attribute('value'), 'pjango')
        # He corrects it to 'django'.
titlebox.clear()
titlebox.send_keys('django')
        # The post content is shown as 'Hello pjango'.
contentbox = self.get_contentbox()
self.assertEqual(contentbox.text, 'Hello pjango')
        # He corrects it to 'Hello django'.
contentbox.clear()
contentbox.send_keys('Hello django')
self.browser.switch_to.default_content()
        # By mistake he presses the cancel button.
self.browser.find_element_by_id('id_cancel_button').click()
        # The title and content remain unchanged.
titlebox = self.browser.find_element_by_class_name('panel-title')
self.assertEqual(titlebox.text, 'pjango')
content = self.browser.find_element_by_class_name('panel-body')
self.assertIn('Hello pjango', content.text)
        # He clicks the edit button again and fixes the content.
edit_button = self.browser.find_element_by_id('id_edit_post_button')
edit_button.click()
titlebox = self.browser.find_element_by_id('id_post_title')
titlebox.clear()
titlebox.send_keys('django')
contentbox = self.get_contentbox()
contentbox.clear()
contentbox.send_keys('Hello django')
self.browser.switch_to.default_content()
        # This time he presses the confirm button properly.
self.click_submit_button()
        # The content now shows 'Hello django'.
body = self.browser.find_element_by_class_name('panel-body')
self.assertIn('Hello django', body.text)
        # He presses the edit-history button.
history_button = self.browser.find_element_by_css_selector('#post_history > a')
history_button.click()
        # A single revision entry is shown.
panel_list = self.browser.find_elements_by_css_selector('.post-history')
self.assertEqual(len(panel_list), 1)
        # There is a history entry titled 'pjango'.
history_title = self.browser.find_element_by_class_name('panel-title')
self.assertIn('pjango', history_title.text)
        # That entry has the content 'Hello pjango'.
history_body = self.browser.find_element_by_class_name('panel-body')
self.assertIn('Hello pjango', history_body.text)
        # Having checked the history, Jihun presses the view-post button.
back_button = self.browser.find_element_by_id('back_to_view_post_button')
back_button.click()
        # The original post is shown again.
panel_body = self.browser.find_element_by_class_name('panel-body')
self.assertIn('Hello django', panel_body.text)
|
cjh5414/kboard
|
kboard/functional_test/test_post_edit.py
|
Python
|
mit
| 4,015 | 0.00086 |
from ..widget import Widget
def category_widget(value, title=None, description=None, footer=None, read_only=False, weight=1):
"""Helper function for quickly creating a category widget.
Args:
value (str): Column name of the category value.
title (str, optional): Title of widget.
description (str, optional): Description text widget placed under widget title.
footer (str, optional): Footer text placed on the widget bottom.
        read_only (boolean, optional): When True, the widget is display-only and selecting a
            category does not interactively filter the data. Set to "False" by default.
weight (int, optional): Weight of the category widget. Default value is 1.
Returns:
cartoframes.viz.widget.Widget
Example:
>>> category_widget(
... 'column_name',
... title='Widget title',
... description='Widget description',
... footer='Widget footer')
"""
return Widget('category', value, title, description, footer,
read_only=read_only, weight=weight)
|
CartoDB/cartoframes
|
cartoframes/viz/widgets/category_widget.py
|
Python
|
bsd-3-clause
| 1,071 | 0.003735 |
class UserInfoModel(object):
PartenaireID = 0
Mail = ""
CodeUtilisateur = ""
TypeAbonnement = ""
DateExpiration = ""
DateSouscription = ""
AccountExist = False
def __init__(self, **kwargs):
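        # Copy any keyword arguments straight onto the instance, so a (hypothetical)
        # UserInfoModel(Mail='user@example.com', AccountExist=True) sets those
        # attributes and leaves the class-level defaults for everything else.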
self.__dict__.update(kwargs)
def create_dummy_model(self):
self.Mail = "dummy@gmail.com"
self.CodeUtilisateur = "dummy1234"
self.AccountExist = True
self.PartenaireID = 0
|
NextINpact/LaPresseLibreSDK
|
python_django/sdk_lpl/models/UserInfosModel.py
|
Python
|
mit
| 439 | 0 |
from __future__ import absolute_import
import re
__all__ = [
'_SGML_AVAILABLE',
'sgmllib',
'charref',
'tagfind',
'attrfind',
'entityref',
'incomplete',
'interesting',
'shorttag',
'shorttagopen',
'starttagopen',
'endbracket',
]
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose ill-formed XML parsing and content sanitizing
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
# Returning a new object in the calling thread's context
            # resolves a thread-safety issue.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
|
eleonrk/SickRage
|
lib/feedparser/sgml.py
|
Python
|
gpl-3.0
| 2,683 | 0.003727 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify the settings that cause a set of programs to be created in
a specific build directory, and that no intermediate built files
get created outside of that build directory hierarchy even when
referred to with deeply-nested ../../.. paths.
"""
import TestGyp
# TODO(mmoss): Make only supports (theoretically) a single, global build
# directory (through GYP_GENERATOR_FLAGS 'output_dir'), rather than
# gyp-file-specific settings (e.g. the stuff in builddir.gypi) that the other
# generators support, so this doesn't work yet for make.
# TODO(mmoss) Make also has the issue that the top-level Makefile is written to
# the "--depth" location, which is one level above 'src', but then this test
# moves 'src' somewhere else, leaving the Makefile behind, so make can't find
# its sources. I'm not sure if make is wrong for writing outside the current
# directory, or if the test is wrong for assuming everything generated is under
# the current directory.
# Ninja and CMake do not support setting the build directory.
test = TestGyp.TestGyp(formats=['!make', '!ninja', '!cmake'])
test.run_gyp('prog1.gyp', '--depth=..', chdir='src')
if test.format == 'msvs':
if test.uses_msbuild:
test.must_contain('src/prog1.vcxproj',
'<OutDir>..\\builddir\\Default\\</OutDir>')
else:
test.must_contain('src/prog1.vcproj',
'OutputDirectory="..\\builddir\\Default\\"')
test.relocate('src', 'relocate/src')
test.subdir('relocate/builddir')
# Make sure that all the built ../../etc. files only get put under builddir,
# by making all of relocate read-only and then making only builddir writable.
test.writable('relocate', False)
test.writable('relocate/builddir', True)
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', SYMROOT=None, chdir='relocate/src')
expect1 = """\
Hello from prog1.c
Hello from func1.c
"""
expect2 = """\
Hello from subdir2/prog2.c
Hello from func2.c
"""
expect3 = """\
Hello from subdir2/subdir3/prog3.c
Hello from func3.c
"""
expect4 = """\
Hello from subdir2/subdir3/subdir4/prog4.c
Hello from func4.c
"""
expect5 = """\
Hello from subdir2/subdir3/subdir4/subdir5/prog5.c
Hello from func5.c
"""
def run_builddir(prog, expect):
dir = 'relocate/builddir/Default/'
test.run(program=test.workpath(dir + prog), stdout=expect)
run_builddir('prog1', expect1)
run_builddir('prog2', expect2)
run_builddir('prog3', expect3)
run_builddir('prog4', expect4)
run_builddir('prog5', expect5)
test.pass_test()
|
Jet-Streaming/gyp
|
test/builddir/gyptest-default.py
|
Python
|
bsd-3-clause
| 2,759 | 0.004712 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Basic tests for Cerebrum.Entity.EntitySpread. """
import pytest
@pytest.fixture
def entity_spread(Spread, entity_type):
code = Spread('f303846618175b16',
entity_type,
description='Test spread for entity_type')
code.insert()
return code
@pytest.fixture
def entity_spread_alt(Spread, entity_type):
code = Spread('b36b563d6a4db0e5',
entity_type,
description='Second test spread for entity_type')
code.insert()
return code
@pytest.fixture
def Entity(entity_module):
u""" Branch and test each subtype of Entity. """
return getattr(entity_module, 'EntitySpread')
@pytest.fixture
def entity_obj(database, Entity):
u""" An instance of Entity, with database. """
return Entity(database)
@pytest.fixture
def entity_simple(entity_obj, entity_type):
u""" entity_obj, but populated. """
entity_obj.populate(entity_type)
entity_obj.write_db()
return entity_obj
@pytest.fixture
def entity(entity_simple, entity_spread, entity_spread_alt):
u""" entity_simple, but with spreads. """
entity_simple.add_spread(entity_spread)
entity_simple.add_spread(entity_spread_alt)
return entity_simple
@pytest.fixture
def entities(entity_obj, entity_type, entity_spread, entity_spread_alt):
u""" Entity info on four entities with different sets of spreads. """
entities = list()
spread_dist = [
(),
(entity_spread, ),
(entity_spread, entity_spread_alt, ),
(entity_spread_alt, ), ]
for spreads in spread_dist:
try:
entry = dict()
entity_obj.populate(entity_type)
entity_obj.write_db()
for spread in spreads:
entity_obj.add_spread(spread)
entry = {
'entity_id': entity_obj.entity_id,
'entity_type': entity_obj.entity_type,
'spreads': spreads, }
entities.append(entry)
except Exception:
entity_obj._db.rollback()
raise
finally:
entity_obj.clear()
return entities
def test_delete_with_spread(entity):
from Cerebrum.Errors import NotFoundError
entity_id = entity.entity_id
entity.delete()
entity.clear()
with pytest.raises(NotFoundError):
entity.find(entity_id)
def test_get_spread(entity, entity_spread, entity_spread_alt):
spreads = [row['spread'] for row in entity.get_spread()]
assert all(int(spread) in spreads
for spread in (entity_spread, entity_spread_alt))
def test_has_spread(entity_simple, entity_spread, entity_spread_alt):
entity_simple.add_spread(entity_spread_alt)
assert entity_simple.has_spread(entity_spread_alt)
assert not entity_simple.has_spread(entity_spread)
entity_simple.add_spread(entity_spread)
assert entity_simple.has_spread(entity_spread)
def test_delete_spread(entity, entity_spread, entity_spread_alt):
entity.delete_spread(entity_spread)
assert not entity.has_spread(entity_spread)
assert entity.has_spread(entity_spread_alt)
def test_list_spreads(entity, entity_type, entity_spread, entity_spread_alt):
columns = ['spread_code', 'spread', 'description', 'entity_type',
'entity_type_str']
all_spreads = entity.list_spreads()
assert len(all_spreads) >= len((entity_spread, entity_spread_alt))
for col in columns:
assert col in dict(all_spreads[0])
# 'entity_spread' and 'entity_spread_alt' should be the only spreads that
# apply to 'entity_type'
entity_spreads = entity.list_spreads(entity_types=entity_type)
assert len(entity_spreads) == len((entity_spread, entity_spread_alt))
assert entity_spread.description in [r['description'] for r in
entity_spreads]
assert str(entity_spread_alt) in [r['spread'] for r in entity_spreads]
def test_list_all_with_spread(entity_obj, entities):
spreads = {spread for ent in entities for spread in ent['spreads']}
result = entity_obj.list_all_with_spread(spreads=spreads)
result_ids = {r['entity_id'] for r in result}
for entry in entities:
if entry['spreads']:
assert entry['entity_id'] in result_ids
else:
assert entry['entity_id'] not in result_ids
def test_list_entity_spreads(entity_obj, entities, entity_type):
expected = [(long(ent['entity_id']), long(int(spread)))
for ent in entities
for spread in ent['spreads']]
entity_types = {ent['entity_type'] for ent in entities}
all_results = entity_obj.list_entity_spreads()
assert len(all_results) >= len(expected)
results = entity_obj.list_entity_spreads(entity_types=entity_types)
assert list(tuple(r) for r in results) == expected
|
unioslo/cerebrum
|
testsuite/tests/test_core/test_core_Entity/test_EntitySpread.py
|
Python
|
gpl-2.0
| 4,894 | 0 |
import sys
import errno
import argparse
parser = argparse.ArgumentParser(description='''
Phase haplotypes from phased pairs.
''')
parser.add_argument('pairs', nargs=1,
help='List of phased pairs (use - for stdin).')
parser.add_argument('--buffer',
default=1000, action='store', type=int,
help='''
Number of pairs to read in before processing a batch of connected
                    components. The default should be a good choice for most purposes.
''')
args = parser.parse_args()
# FIXME: handle missing files more gracefully
if args.pairs[0] == '-':
infile = sys.stdin
else:
infile = open(args.pairs[0])
class node(object):
def __init__(self, chrom, pos):
self.chrom = chrom
self.pos = pos
self.in_edges = []
self.out_edges = []
self.component = None
# FIXME: Doesn't handle switches from one chromosome to another!!!
def collect_buffer_of_nodes(buffer, max_src = 0):
'''Collect a number of pairs into a buffer to be processed.
Parameter *buffer* is the left-overs from the last buffer processed.
This is the buffer that will be extended.
Parameter *max_src* is the largest source node seen in the previous
call.
Returns the new buffer, the largest source node seen, and a status
flag indicating if there are more pairs in the input file.
'''
number_inserted = 0
for line in infile:
chrom, pos1, pos2, phase1, _, phase2, _ = line.split()
pos1, pos2 = int(pos1), int(pos2)
try:
src = buffer[(chrom,pos1)]
except:
src = node(chrom,pos1)
buffer[(chrom,pos1)] = src
try:
dst = buffer[(chrom,pos2)]
except:
dst = node(chrom,pos2)
buffer[(chrom,pos2)] = dst
src.out_edges.append( (phase1,phase2,dst) )
dst.in_edges.append( (phase1,phase2,src) )
if src.pos > max_src:
max_src = src.pos
number_inserted += 1
if number_inserted >= args.buffer:
return buffer, max_src, True
return buffer, max_src, False
def split_in_components(nodes):
'''Split a buffer of nodes into connected components.'''
def assign_components(node, component):
def dfs(n):
if n.component is not None:
assert n.component == component
else:
n.component = component
for _,_,src in n.in_edges:
dfs(src)
for _,_,dst in n.out_edges:
dfs(dst)
dfs(node)
loci = nodes.keys()
loci.sort()
component = 0
for locus in loci:
node = nodes[locus]
if node.component is None:
assign_components(node,component)
component += 1
components = [ list() for i in xrange(component) ]
for locus in loci:
node = nodes[locus]
components[node.component].append(node)
return components
def split_components(components, max_src):
'''Split a list of components in those that are done
and those that are potentially still incomplete (based on *max_src*).
Returns the finished components as a list and the non-finished
as nodes in a dictionary matching the buffer format.
'''
finished_components = []
new_buffer = {}
for component in components:
max_pos = max(n.pos for n in component)
if max_pos < max_src:
finished_components.append(component)
else:
for n in component:
n.component = None # don't save the assignment for next time
new_buffer[(n.chrom,n.pos)] = n
return finished_components, new_buffer
## Phase a connected component
class InconsistentComponent:
pass # FIXME: give warning message here rather than to stderr
def phase_component(graph):
'''Phase a finished component and write the phase to stdout.'''
for idx,node in enumerate(graph):
out_allele_1 = [phase1[0] for phase1,phase2,n in node.out_edges]
out_allele_2 = [phase2[0] for phase1,phase2,n in node.out_edges]
in_allele_1 = [phase1[1] for phase1,phase2,n in node.in_edges]
in_allele_2 = [phase2[1] for phase1,phase2,n in node.in_edges]
alleles = set(out_allele_1+out_allele_2+in_allele_1+in_allele_2)
if len(alleles) != 2:
print >> sys.stderr, "Non biallelic", alleles
raise InconsistentComponent
node.alleles = tuple(alleles)
node.phased = False
def dfs(node):
assert node.phased
for phase1,phase2,n in node.out_edges:
if node.alleles[0] == phase1[0]:
n_phase = phase1[1],phase2[1]
else:
n_phase = phase2[1],phase1[1]
if n.phased:
if n.alleles != n_phase:
print >> sys.stderr, "Inconsistent phasing:",
print >> sys.stderr, n.alleles, "!=",
print >> sys.stderr, n_phase
print >> sys.stderr, graph[0].chrom,
print >> sys.stderr, [x.pos for x in graph]
raise InconsistentComponent
else:
n.alleles = n_phase
n.phased = True
dfs(n)
for phase1,phase2,n in node.in_edges:
if node.alleles[0] == phase1[1]:
n_phase = phase1[0],phase2[0]
else:
n_phase = phase2[0],phase1[0]
if n.phased:
if n.alleles != n_phase:
print >> sys.stderr, "Inconsistent phasing:",
print >> sys.stderr, n.alleles, "!=",
print >> sys.stderr, n_phase
print >> sys.stderr, graph[0].chrom,
print >> sys.stderr, [x.pos for x in graph]
raise InconsistentComponent
else:
n.alleles = n_phase
n.phased = True
dfs(n)
first = graph[0]
first.phased = True # arbitrary phase
dfs(first)
last = graph[-1]
indices = [n.pos for n in graph]
hap1 = [n.alleles[0] for n in graph]
hap2 = [n.alleles[1] for n in graph]
print "%s\t%d\t%d\t%s\t%s\t%s" % \
(first.chrom, first.pos, last.pos,
','.join(map(str,indices)),
''.join(hap1), ''.join(hap2))
def phase_finished_components(buffer, max_src, flush = False):
'''Build components, phase those that are completely read in,
and return an updated buffer with the nodes that are not completed.'''
components = split_in_components(buffer)
if flush:
finished, new_buffer = components, dict()
else:
finished, new_buffer = split_components(components, max_src)
for component in finished:
try:
phase_component(component)
except InconsistentComponent:
pass
return new_buffer
def process_file(infile):
## MAIN LOOP, READING IN PAIRS AND PROCESSING BUFFERS
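    ## Pairs are accumulated into a buffer of graph nodes keyed by (chrom, pos).
    ## Every args.buffer insertions, connected components whose rightmost position
    ## lies strictly before the furthest source position seen so far are treated as
    ## complete, phased and printed; switching chromosomes flushes the whole buffer.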
buffer = {}
max_src = 0
number_inserted = 0
last_chrom = None
for line in infile:
chrom, pos1, pos2, phase1, _, phase2, _ = line.split()
pos1, pos2 = int(pos1), int(pos2)
if chrom != last_chrom:
# flush the buffer when we switch chromosome
buffer = phase_finished_components(buffer, max_src, True)
last_chrom = chrom
max_src = pos1
try:
src = buffer[(chrom,pos1)]
except:
src = node(chrom,pos1)
buffer[(chrom,pos1)] = src
try:
dst = buffer[(chrom,pos2)]
except:
dst = node(chrom,pos2)
buffer[(chrom,pos2)] = dst
src.out_edges.append( (phase1,phase2,dst) )
dst.in_edges.append( (phase1,phase2,src) )
if src.pos > max_src:
max_src = src.pos
number_inserted += 1
if number_inserted >= args.buffer:
buffer = phase_finished_components(buffer, max_src, False)
number_inserted = 0
# flush last buffer
phase_finished_components(buffer, max_src, True)
try:
process_file(infile)
except IOError, e:
    # A broken pipe (e.g. output piped into head) is not an error; re-raise anything else.
    if e.errno != errno.EPIPE:
        raise
|
mailund/read-phaser
|
phase_haplotypes.py
|
Python
|
gpl-3.0
| 8,417 | 0.011049 |
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.core.urlresolvers import reverse
class TestHomePage(TestCase):
def test_uses_index_template(self):
response = self.client.get(reverse("home"))
self.assertTemplateUsed(response, "home/index.html")
def test_uses_base_template(self):
response = self.client.get(reverse("home"))
self.assertTemplateUsed(response, "base.html")
|
janusnic/dj-21v
|
unit_02/mysite/home/test.py
|
Python
|
mit
| 439 | 0.009112 |
def extract17LiterarycornerWordpressCom(item):
'''
Parser for '17literarycorner.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('King Of Hell\'s Genius Pampered Wife', 'King Of Hell\'s Genius Pampered Wife', 'translated'),
('KOH', 'King Of Hell\'s Genius Pampered Wife', 'translated'),
('Addicted to Boundlessly Pampering You', 'Addicted to Boundlessly Pampering You', 'translated'),
('ATBPY', 'Addicted to Boundlessly Pampering You', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
if item['tags'] == ['Uncategorized']:
titlemap = [
('KOH Chapter ', 'King Of Hell\'s Genius Pampered Wife', 'translated'),
('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'),
('Master of Dungeon', 'Master of Dungeon', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extract17LiterarycornerWordpressCom.py
|
Python
|
bsd-3-clause
| 1,613 | 0.022939 |
#!/usr/bin/env python2
# Copyright (c) 2013-2014 The Bitcredit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
BUILDDIR="/root/2.0/dragos/bitcredit-2.0"
EXEEXT=".exe"
# These will turn into comments if they were disabled when configuring.
ENABLE_WALLET=1
ENABLE_UTILS=1
ENABLE_BITCREDITD=1
#ENABLE_ZMQ=1
|
dragosbdi/bitcredit-2.0
|
qa/pull-tester/tests_config.py
|
Python
|
mit
| 413 | 0.016949 |
#!/usr/bin/python
#Covered by GPL V2.0
from encoders import *
from payloads import *
# generate_dictio evolution
class dictionary:
def __init__(self,dicc=None):
if dicc:
self.__payload=dicc.getpayload()
self.__encoder=dicc.getencoder()
else:
self.__payload=payload()
self.__encoder = [lambda x: encoder().encode(x)]
self.restart()
def count (self):
return self.__payload.count() * len(self.__encoder)
def setpayload(self,payl):
self.__payload = payl
self.restart()
def setencoder(self,encd):
self.__encoder=encd
self.generator = self.gen()
def getpayload (self):
return self.__payload
def getencoder (self):
return self.__encoder
def generate_all(self):
dicc=[]
for i in self.__payload:
dicc.append(self.__encoder.encode(i))
return dicc
def __iter__(self):
self.restart()
return self
def gen(self):
while 1:
pl=self.iter.next()
for encode in self.__encoder:
yield encode(pl)
def next(self):
return self.generator.next()
def restart(self):
self.iter=self.__payload.__iter__()
self.generator = self.gen()
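# Minimal usage sketch (hypothetical encoders; payload() comes from payloads above):
# iterating a dictionary yields every payload run through every configured encoder.
#   d = dictionary()
#   d.setencoder([lambda x: x, lambda x: x[::-1]])
#   for candidate in d:
#       print candidate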
|
GHubgenius/wfuzz-1
|
dictio.py
|
Python
|
gpl-2.0
| 1,098 | 0.054645 |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param a ListNode
# @return a ListNode
def swapPairs(self, head):
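        # Walk the list prepending nodes to a small temp list; every two nodes the
        # temp pair (now reversed) is appended to the result, so 1->2->3->4 becomes
        # 2->1->4->3. A trailing odd node is appended unchanged.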
if head is None:
return head
res = None
res_end = None
temp = None
temp_end = None
i = 1
while head is not None:
next_node = head.next
# Append current node to temp list
if temp is None:
temp_end = head
head.next = temp
temp = head
if i % 2 == 0:
# Append temp to res
if res is None:
res = temp
res_end = temp_end
else:
res_end.next = temp
res_end = temp_end
temp = None
i += 1
head = next_node
if temp is not None:
if res is None:
res = temp
res_end = temp_end
else:
res_end.next = temp
res_end = temp_end
return res
|
huanqi/leetcode-python
|
swap_nodes_in_pairs/solution2.py
|
Python
|
bsd-2-clause
| 1,177 | 0 |
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class ChangesResponse:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'ChangesResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'int'
}
self.result = None # ChangesResult
self.status = None # str
self.error_message = None # str
self.composedOn = None # int
|
liosha2007/temporary-groupdocs-python3-sdk
|
groupdocs/models/ChangesResponse.py
|
Python
|
apache-2.0
| 1,137 | 0.007916 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Akretion
# (<http://www.akretion.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import pooler, SUPERUSER_ID
from openerp.openupgrade import openupgrade, openupgrade_80
@openupgrade.migrate()
def migrate(cr, version):
pool = pooler.get_pool(cr.dbname)
uid = SUPERUSER_ID
openupgrade_80.set_message_last_post(
cr, uid, pool, ['account.analytic.account']
)
|
bealdav/OpenUpgrade
|
addons/analytic/migrations/8.0.1.1/pre-migration.py
|
Python
|
agpl-3.0
| 1,314 | 0 |
from PySide import QtCore, QtGui
class MakinFrame(QtGui.QFrame):
mousegeser = QtCore.Signal(int,int)
def __init__(self,parent=None):
super(MakinFrame,self).__init__(parent)
self.setMouseTracking(True)
def setMouseTracking(self, flag):
def recursive_set(parent):
for child in parent.findChildren(QtCore.QObject):
try:
child.setMouseTracking(flag)
except:
pass
recursive_set(child)
QtGui.QWidget.setMouseTracking(self,flag)
recursive_set(self)
def mouseMoveEvent(self, me):
a = QtGui.QFrame.mouseMoveEvent(self,me)
self.mousegeser.emit(me.x(), me.y())
return a
|
imakin/PersonalAssistant
|
GameBot/src_py/makinreusable/makinframe.py
|
Python
|
mit
| 610 | 0.04918 |
#!/usr/bin/env python2
# Copyright (C) 2013-:
# Gabes Jean, naparuba@gmail.com
# Pasche Sebastien, sebastien.pasche@leshop.ch
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
'''
This script is a check that monitors disk space consumption
'''
import os
import sys
# Try to add our own directory to the path so we can load the plugin utils.
my_dir = os.path.dirname(__file__)
sys.path.insert(0, my_dir)
try:
import schecks
except ImportError:
print "ERROR : this plugin needs the local schecks.py lib. Please install it"
sys.exit(2)
VERSION = "0.1"
DEFAULT_WARNING = '75%'
DEFAULT_CRITICAL = '90%'
MOUNTS = None
UNITS= {'B': 0,
'KB': 1,
'MB': 2,
'GB': 3,
'TB': 4
}
def convert_to(unit, value):
power = 0
if unit in UNITS:
power = UNITS[unit]
return round(float(value)/(1024**power), power)
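# Worked example: convert_to('MB', 1048576) divides by 1024**2 and rounds to two
# decimals, giving 1.0; note the rounding precision is the unit's power, so 'B'
# values are rounded to zero decimal places.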
def get_df(client):
# We are looking for a line like
#Filesystem Type 1K-blocks Used Available Use% Mounted on
#/dev/sda2 ext3 28834744 21802888 5567132 80% /
#udev devtmpfs 1021660 4 1021656 1% /dev
#tmpfs tmpfs 412972 1040 411932 1% /run
#none tmpfs 5120 4 5116 1% /run/lock
#none tmpfs 1032428 13916 1018512 2% /run/shm
#none tmpfs 102400 8 102392 1% /run/user
#/dev/sda5 fuseblk 251536380 184620432 66915948 74% /media/ntfs
#/dev/sdb1 ext3 961432072 833808328 78785744 92% /media/bigdata
# Beware of the export!
stdin, stdout, stderr = client.exec_command('export LC_LANG=C && unset LANG && df -l -T -k -P')
dfs = {}
for line in stdout:
line = line.strip()
        # Skip the header line, we already know about it
if not line or line.startswith('Filesystem'):
continue
        # Only keep non-empty elements
tmp = [s for s in line.split(' ') if s]
_type = tmp[1]
        # Maybe we got a none, iso9660 or devtmpfs filesystem; if so, bail out
if _type in ['devtmpfs', 'iso9660']:
continue
# Exclude /dev/ /sys/ and /run/ folder
mounted = ' '.join(tmp[6:])
if mounted.startswith('/run/') or mounted.startswith('/sys/') or mounted.startswith('/dev/'):
continue
        # If we specified a list of mount points to check, verify that the current line is in the list
to_check = True
if MOUNTS:
to_check = False
for mnt in MOUNTS:
if tmp[6].startswith(mnt):
to_check = True
# Maybe this mount point did not match any required mount point
if not to_check:
continue
# Ok now grep values
fs = tmp[0]
size = int(tmp[2])*1024
used = int(tmp[3])*1024
avail = int(tmp[4])*1024
used_pct = int(tmp[5][:-1]) # we remove the %
dfs[mounted] = {'fs':fs, 'size':size, 'used':used, 'avail':avail, 'used_pct':used_pct}
# Before return, close the client
client.close()
return dfs
parser = schecks.get_parser()
## Specific options
parser.add_option('-w', '--warning',
dest="warning",
                  help='Warning value for disk usage. In percent. Default : 75%')
parser.add_option('-c', '--critical',
dest="critical",
                  help='Critical value for disk usage. In percent. Must be '
                       'greater than the warning value. Default : 90%')
parser.add_option('-m', '--mount-points',
dest="mounts",
help='comma separated list of mountpoints to check. Default all mount '
'points except if mounted in /dev, /sys and /run')
parser.add_option('-U', '--unit',
dest="unit", help='Unit of Disk Space. B, KB, GB, TB. Default : B')
if __name__ == '__main__':
# Ok first job : parse args
opts, args = parser.parse_args()
if opts.mounts:
mounts = opts.mounts.split(',')
MOUNTS=mounts
    # Try to get numeric warning/critical values
s_warning = opts.warning or DEFAULT_WARNING
s_critical = opts.critical or DEFAULT_CRITICAL
warning, critical = schecks.get_warn_crit(s_warning, s_critical)
# Get Unit
s_unit = opts.unit or 'B'
    # Now get a client object that links to our destination host
client = schecks.get_client(opts)
## And get real data
dfs = get_df(client)
# Maybe we failed at getting data
if not dfs:
print "Error : cannot fetch disks values from host"
sys.exit(2)
perfdata = ''
status = 0 # all is green until it is no more ok :)
bad_volumes = []
for (mount, df) in dfs.iteritems():
size = convert_to(s_unit,df['size'])
used = convert_to(s_unit,df['used'])
used_pct = df['used_pct']
        # First dump the perfdata
_size_warn = convert_to(s_unit,df['size'] * float(warning)/100)
_size_crit = convert_to(s_unit,df['size'] * float(critical)/100)
perfdata += '"%s_used_pct"=%s%%;%s%%;%s%%;0%%;100%% "%s_used"=%s%s;%s;%s;0;%s ' % (mount, used_pct, warning, critical, mount, used, s_unit, _size_warn, _size_crit, size)
# And compare to limits
        if used_pct >= critical:
            status = 2
            bad_volumes.append( (mount, used_pct) )
        elif used_pct >= warning:
            # A later warning must not downgrade an already-critical status
            if status == 0:
                status = 1
            bad_volumes.append( (mount, used_pct) )
if status == 0:
print "Ok: all disks are in the limits | %s" % (perfdata)
sys.exit(0)
if status == 1:
print "Warning: some disks are not good : %s | %s" % (','.join( ["%s:%s%%" % (mount, used_pct) for (mount, used_pct) in bad_volumes]), perfdata)
sys.exit(1)
if status == 2:
print "Critical: some disks are not good : %s | %s" % (','.join( ["%s:%s%%" % (mount, used_pct) for (mount, used_pct) in bad_volumes]), perfdata)
sys.exit(2)
|
naparuba/check-linux-by-ssh
|
check_disks_by_ssh.py
|
Python
|
mit
| 7,141 | 0.009803 |
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2016, Marcos Salomão.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import webtest
import logging
import datetime
from google.appengine.ext import ndb
from protorpc.remote import protojson
from protorpc import message_types
import mock
from mock import Mock
from mock import MagicMock
from mock import PropertyMock
from google.appengine.ext import testbed
from google.appengine.api import users
from app.purchase.services import PurchaseService
from app.purchase.messages import PurchasePostMessage
from app.purchase.messages import PurchaseGetMessage
from app.purchase.messages import PurchaseKeyMessage
from app.purchase.messages import PurchaseCollectionMessage
from app.exceptions import NotFoundEntityException
from app.purchase import models as purchaseModel
from app.product import models as productModel
class PurchaseTestCase(unittest.TestCase):
""" Test Case for Purchases.
"""
def test_purchases_statistics_by_products(self):
""" Unit test to get purchases statistics by products.
"""
        # Purchases list
        purchasesList = []
        # Mock purchase models and product models
purchasesMock = [{
'id': 1,
'product_id': 1,
'cost': 5,
'quantity': 7
}, {
'id': 2,
'product_id': 2,
'cost': 3,
'quantity': 20
}, {
'id': 3,
'product_id': 1,
'cost': 15,
'quantity': 8
}, {
'id': 4,
'product_id': 3,
'cost': 1,
'quantity': 1
}, {
'id': 5,
'product_id': 2,
'cost': 9,
'quantity': 40
}]
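        # Expected aggregates (checked below), e.g. for product 1: quantities 7+8=15,
        # cost sum 5+15=20, plain average (5+15)/2=10 and weighted average
        # (5*7 + 15*8)/15 = 155/15 ~= 10.33.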
# Iterate purchases mock
for x in purchasesMock:
# Create purchase mock
purchase = Mock(spec_set=purchaseModel.PurchaseModel())
purchase.key = Mock(spec_set=ndb.Key('PurchaseModel', x['id']))
# Create product mock
purchase.product = Mock(spec_set=ndb.Key('ProductModel', x['product_id']))
purchase.product.id = Mock(return_value=x['product_id'])
purchase.product.get = Mock(return_value=purchase.product)
            # Cost and quantity for this purchase
purchase.cost = x['cost']
purchase.quantity = x ['quantity']
# Append to list
purchasesList.append(purchase)
# Mock list method
purchaseModel.list = MagicMock(return_value=purchasesList)
        # Call get_stats_by_products method
result = purchaseModel.get_stats_by_products()
        # Must have length == 3
self.assertEqual(len(result), 3)
# Verify quantity
self.assertEqual(15, result[0]['sum_quantity'])
self.assertEqual(60, result[1]['sum_quantity'])
self.assertEqual(1, result[2]['sum_quantity'])
# Verify sum cost
self.assertEqual(20, result[0]['sum_cost'])
self.assertEqual(12, result[1]['sum_cost'])
self.assertEqual(1, result[2]['sum_cost'])
        # Verify average cost
self.assertEqual(10, result[0]['avg_cost'])
self.assertEqual(6, result[1]['avg_cost'])
self.assertEqual(1, result[2]['avg_cost'])
        # Verify weighted average cost
self.assertEqual(10.33, round(result[0]['weighted_avg_cost'], 2))
self.assertEqual(7, result[1]['weighted_avg_cost'])
self.assertEqual(1, result[2]['weighted_avg_cost'])
|
salomax/livremarketplace
|
app_test/purchase_test.py
|
Python
|
apache-2.0
| 4,115 | 0.002674 |
from sympy.core.numbers import comp, Rational
from sympy.physics.optics.utils import (refraction_angle, fresnel_coefficients,
deviation, brewster_angle, critical_angle, lens_makers_formula,
mirror_formula, lens_formula, hyperfocal_distance,
transverse_magnification)
from sympy.physics.optics.medium import Medium
from sympy.physics.units import e0
from sympy import symbols, sqrt, Matrix, oo
from sympy.geometry.point import Point3D
from sympy.geometry.line import Ray3D
from sympy.geometry.plane import Plane
from sympy.utilities.pytest import raises
ae = lambda a, b, n: comp(a, b, 10**-n)
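# ae(a, b, n): approximate-equality helper, True when a and b agree to within a
# tolerance of 10**-n (via sympy's comp), used for the numeric assertions below.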
def test_refraction_angle():
n1, n2 = symbols('n1, n2')
m1 = Medium('m1')
m2 = Medium('m2')
r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))
i = Matrix([1, 1, 1])
n = Matrix([0, 0, 1])
normal_ray = Ray3D(Point3D(0, 0, 0), Point3D(0, 0, 1))
P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1])
assert refraction_angle(r1, 1, 1, n) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle([1, 1, 1], 1, 1, n) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle((1, 1, 1), 1, 1, n) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle(i, 1, 1, [0, 0, 1]) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle(i, 1, 1, (0, 0, 1)) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle(i, 1, 1, normal_ray) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle(i, 1, 1, plane=P) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle(r1, 1, 1, plane=P) == \
Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1))
assert refraction_angle(r1, m1, 1.33, plane=P) == \
Ray3D(Point3D(0, 0, 0), Point3D(Rational(100, 133), Rational(100, 133), -789378201649271*sqrt(3)/1000000000000000))
assert refraction_angle(r1, 1, m2, plane=P) == \
Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1))
assert refraction_angle(r1, n1, n2, plane=P) == \
Ray3D(Point3D(0, 0, 0), Point3D(n1/n2, n1/n2, -sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1)))
assert refraction_angle(r1, 1.33, 1, plane=P) == 0 # TIR
assert refraction_angle(r1, 1, 1, normal_ray) == \
Ray3D(Point3D(0, 0, 0), direction_ratio=[1, 1, -1])
assert ae(refraction_angle(0.5, 1, 2), 0.24207, 5)
assert ae(refraction_angle(0.5, 2, 1), 1.28293, 5)
raises(ValueError, lambda: refraction_angle(r1, m1, m2, normal_ray, P))
raises(TypeError, lambda: refraction_angle(m1, m1, m2)) # can add other values for arg[0]
raises(TypeError, lambda: refraction_angle(r1, m1, m2, None, i))
raises(TypeError, lambda: refraction_angle(r1, m1, m2, m2))
def test_fresnel_coefficients():
assert all(ae(i, j, 5) for i, j in zip(
fresnel_coefficients(0.5, 1, 1.33),
[0.11163, -0.17138, 0.83581, 0.82862]))
assert all(ae(i, j, 5) for i, j in zip(
fresnel_coefficients(0.5, 1.33, 1),
[-0.07726, 0.20482, 1.22724, 1.20482]))
m1 = Medium('m1')
m2 = Medium('m2', n=2)
assert all(ae(i, j, 5) for i, j in zip(
fresnel_coefficients(0.3, m1, m2),
[0.31784, -0.34865, 0.65892, 0.65135]))
ans = [[-0.23563, -0.97184], [0.81648, -0.57738]]
got = fresnel_coefficients(0.6, m2, m1)
for i, j in zip(got, ans):
for a, b in zip(i.as_real_imag(), j):
assert ae(a, b, 5)
def test_deviation():
n1, n2 = symbols('n1, n2')
r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))
n = Matrix([0, 0, 1])
i = Matrix([-1, -1, -1])
normal_ray = Ray3D(Point3D(0, 0, 0), Point3D(0, 0, 1))
P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1])
assert deviation(r1, 1, 1, normal=n) == 0
assert deviation(r1, 1, 1, plane=P) == 0
assert deviation(r1, 1, 1.1, plane=P).evalf(3) + 0.119 < 1e-3
assert deviation(i, 1, 1.1, normal=normal_ray).evalf(3) + 0.119 < 1e-3
assert deviation(r1, 1.33, 1, plane=P) is None # TIR
assert deviation(r1, 1, 1, normal=[0, 0, 1]) == 0
assert deviation([-1, -1, -1], 1, 1, normal=[0, 0, 1]) == 0
assert ae(deviation(0.5, 1, 2), -0.25793, 5)
assert ae(deviation(0.5, 2, 1), 0.78293, 5)
def test_brewster_angle():
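    # Brewster's angle is atan(n2/n1); for n1=1, n2=1.33 that is atan(1.33) ~= 0.93 rad.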
m1 = Medium('m1', n=1)
m2 = Medium('m2', n=1.33)
assert ae(brewster_angle(m1, m2), 0.93, 2)
m1 = Medium('m1', permittivity=e0, n=1)
m2 = Medium('m2', permittivity=e0, n=1.33)
assert ae(brewster_angle(m1, m2), 0.93, 2)
assert ae(brewster_angle(1, 1.33), 0.93, 2)
def test_critical_angle():
m1 = Medium('m1', n=1)
m2 = Medium('m2', n=1.33)
assert ae(critical_angle(m2, m1), 0.85, 2)
def test_lens_makers_formula():
n1, n2 = symbols('n1, n2')
m1 = Medium('m1', permittivity=e0, n=1)
m2 = Medium('m2', permittivity=e0, n=1.33)
assert lens_makers_formula(n1, n2, 10, -10) == 5*n2/(n1 - n2)
assert ae(lens_makers_formula(m1, m2, 10, -10), -20.15, 2)
assert ae(lens_makers_formula(1.33, 1, 10, -10), 15.15, 2)
def test_mirror_formula():
u, v, f = symbols('u, v, f')
assert mirror_formula(focal_length=f, u=u) == f*u/(-f + u)
assert mirror_formula(focal_length=f, v=v) == f*v/(-f + v)
assert mirror_formula(u=u, v=v) == u*v/(u + v)
assert mirror_formula(u=oo, v=v) == v
assert mirror_formula(u=oo, v=oo) is oo
assert mirror_formula(focal_length=oo, u=u) == -u
assert mirror_formula(u=u, v=oo) == u
assert mirror_formula(focal_length=oo, v=oo) is oo
assert mirror_formula(focal_length=f, v=oo) == f
assert mirror_formula(focal_length=oo, v=v) == -v
assert mirror_formula(focal_length=oo, u=oo) is oo
assert mirror_formula(focal_length=f, u=oo) == f
assert mirror_formula(focal_length=oo, u=u) == -u
raises(ValueError, lambda: mirror_formula(focal_length=f, u=u, v=v))
def test_lens_formula():
u, v, f = symbols('u, v, f')
assert lens_formula(focal_length=f, u=u) == f*u/(f + u)
assert lens_formula(focal_length=f, v=v) == f*v/(f - v)
assert lens_formula(u=u, v=v) == u*v/(u - v)
assert lens_formula(u=oo, v=v) == v
assert lens_formula(u=oo, v=oo) is oo
assert lens_formula(focal_length=oo, u=u) == u
assert lens_formula(u=u, v=oo) == -u
assert lens_formula(focal_length=oo, v=oo) is -oo
assert lens_formula(focal_length=oo, v=v) == v
assert lens_formula(focal_length=f, v=oo) == -f
assert lens_formula(focal_length=oo, u=oo) is oo
assert lens_formula(focal_length=oo, u=u) == u
assert lens_formula(focal_length=f, u=oo) == f
raises(ValueError, lambda: lens_formula(focal_length=f, u=u, v=v))
def test_hyperfocal_distance():
f, N, c = symbols('f, N, c')
assert hyperfocal_distance(f=f, N=N, c=c) == f**2/(N*c)
assert ae(hyperfocal_distance(f=0.5, N=8, c=0.0033), 9.47, 2)
def test_transverse_magnification():
si, so = symbols('si, so')
assert transverse_magnification(si, so) == -si/so
assert transverse_magnification(30, 15) == -2
|
kaushik94/sympy
|
sympy/physics/optics/tests/test_utils.py
|
Python
|
bsd-3-clause
| 7,792 | 0.003208 |
import os
import numpy as np
from csv import reader
from collections import defaultdict
from common import Plate
from pprint import pprint
def hung_ji_adapter():
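    # Assumed CSV layout (inferred from the unpacking below): one header row, then
    # one row per measurement with columns dose, time, col, row, intensity; the
    # exact row/column encoding is whatever the Plate helper in `common` expects.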
folder_ = 'C:/Users/ank/Desktop'
file_ = 'HJT_fittness.csv'
dose_curves = {}
with open(os.path.join(folder_, file_)) as source_file:
source_ = reader(source_file)
source_.next()
dose2data_frame = defaultdict(lambda:defaultdict(Plate))
for line in source_:
dose, time, col, row, intensity = line
plate = dose2data_frame[dose][time]
plate.set_elt(row, col, intensity)
for dose, timedict in dose2data_frame.iteritems():
bound_tuple = [[], []]
dose_curves[dose] = bound_tuple
for time in sorted(timedict.keys()):
bound_tuple[0].append(time)
bound_tuple[1].append(timedict[time].np_array)
bound_tuple[0] = np.array(bound_tuple[0]).astype(np.float)
bound_tuple[1] = np.array(bound_tuple[1]).astype(np.float)
return dose_curves
if __name__ == "__main__":
pprint(hung_ji_adapter())
|
chiffa/TcanAnalyzer
|
src/adapters.py
|
Python
|
bsd-3-clause
| 1,131 | 0.001768 |
from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
|
RPGOne/Skynet
|
scikit-learn-0.18.1/benchmarks/bench_20newsgroups.py
|
Python
|
bsd-3-clause
| 3,555 | 0 |
'''
SAPI 5+ driver.
Copyright (c) 2009 Peter Parente
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
#import comtypes.client
import win32com.client
import pythoncom
import time
import math
import weakref
from ..voice import Voice
# common voices
MSSAM = 'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\MSSam'
MSMARY = 'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\MSMary'
MSMIKE = 'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\MSMike'
# coeffs for wpm conversion
E_REG = {MSSAM : (137.89, 1.11),
MSMARY : (156.63, 1.11),
MSMIKE : (154.37, 1.11)}
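# The (a, b) pairs map words-per-minute to the SAPI rate via rate = log(wpm / a, b),
# e.g. for MSMary at 200 wpm: log(200 / 156.63, 1.11) ~= 2.3, truncated to 2.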
def buildDriver(proxy):
return SAPI5Driver(proxy)
class SAPI5Driver(object):
def __init__(self, proxy):
self._tts = win32com.client.Dispatch('SAPI.SPVoice')
#self._tts = comtypes.client.CreateObject('SAPI.SPVoice')
# all events
self._tts.EventInterests = 33790
self._advise = win32com.client.WithEvents(self._tts,
SAPI5DriverEventSink)
self._advise.setDriver(weakref.proxy(self))
#self._debug = comtypes.client.ShowEvents(self._tts)
#self._advise = comtypes.client.GetEvents(self._tts, self)
self._proxy = proxy
self._looping = False
self._speaking = False
self._stopping = False
# initial rate
self._rateWpm = 200
self.setProperty('voice', self.getProperty('voice'))
def destroy(self):
self._tts.EventInterests = 0
def say(self, text):
self._proxy.setBusy(True)
self._proxy.notify('started-utterance')
self._speaking = True
self._tts.Speak(unicode(text), 19)
def stop(self):
if not self._speaking:
return
self._proxy.setBusy(True)
self._stopping = True
self._tts.Speak('', 3)
def _toVoice(self, attr):
return Voice(attr.Id, attr.GetDescription())
def _tokenFromId(self, id):
tokens = self._tts.GetVoices()
for token in tokens:
if token.Id == id: return token
raise ValueError('unknown voice id %s', id)
def getProperty(self, name):
if name == 'voices':
return [self._toVoice(attr) for attr in self._tts.GetVoices()]
elif name == 'voice':
return self._tts.Voice.Id
elif name == 'rate':
return self._rateWpm
elif name == 'volume':
return self._tts.Volume/100.0
else:
raise KeyError('unknown property %s' % name)
def setProperty(self, name, value):
if name == 'voice':
token = self._tokenFromId(value)
self._tts.Voice = token
a, b = E_REG.get(value, E_REG[MSMARY])
self._tts.Rate = int(math.log(self._rateWpm/a, b))
elif name == 'rate':
id = self._tts.Voice.Id
a, b = E_REG.get(id, E_REG[MSMARY])
try:
self._tts.Rate = int(math.log(value/a, b))
except TypeError, e:
raise ValueError(str(e))
self._rateWpm = value
elif name == 'volume':
try:
self._tts.Volume = int(round(value*100, 2))
except TypeError, e:
raise ValueError(str(e))
else:
raise KeyError('unknown property %s' % name)
def startLoop(self):
first = True
self._looping = True
while self._looping:
if first:
self._proxy.setBusy(False)
first = False
pythoncom.PumpWaitingMessages()
time.sleep(0.05)
def endLoop(self):
self._looping = False
def iterate(self):
self._proxy.setBusy(False)
while 1:
pythoncom.PumpWaitingMessages()
yield
class SAPI5DriverEventSink(object):
def __init__(self):
self._driver = None
def setDriver(self, driver):
self._driver = driver
def OnWord(self, stream, pos, char, length):
self._driver._proxy.notify('started-word', location=char, length=length)
def OnEndStream(self, stream, pos):
d = self._driver
if d._speaking:
d._proxy.notify('finished-utterance', completed=not d._stopping)
d._speaking = False
d._stopping = False
d._proxy.setBusy(False)
|
thisismyrobot/gedit-pytts
|
src/pyttsx/drivers/sapi5.py
|
Python
|
bsd-2-clause
| 5,041 | 0.005356 |
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231,E0202
from numpy import nan
from pandas.compat import lmap
from pandas import compat
import numpy as np
from pandas.types.missing import isnull, notnull
from pandas.types.cast import _maybe_upcast
from pandas.types.common import _ensure_platform_int
from pandas.core.common import _try_sort
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.series import Series
from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray,
_default_index)
import pandas.core.algorithms as algos
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays)
import pandas.core.generic as generic
from pandas.sparse.series import SparseSeries, SparseArray
from pandas.util.decorators import Appender
import pandas.core.ops as ops
_shared_doc_kwargs = dict(klass='SparseDataFrame')
class SparseDataFrame(DataFrame):
"""
DataFrame containing sparse floating point data in the form of SparseSeries
objects
Parameters
----------
data : same types as can be passed to DataFrame
index : array-like, optional
column : array-like, optional
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries. Will not
override SparseSeries passed in
"""
_constructor_sliced = SparseSeries
_subtyp = 'sparse_frame'
def __init__(self, data=None, index=None, columns=None, default_kind=None,
default_fill_value=None, dtype=None, copy=False):
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
index = data.index
if columns is None:
columns = data.columns
if default_fill_value is None:
default_fill_value = data.default_fill_value
if default_kind is None:
default_kind = data.default_kind
elif isinstance(data, (SparseSeries, SparseArray)):
if index is None:
index = data.index
if default_fill_value is None:
default_fill_value = data.fill_value
if columns is None and hasattr(data, 'name'):
columns = [data.name]
if columns is None:
raise Exception("cannot pass a series w/o a name or columns")
data = {columns[0]: data}
if default_fill_value is None:
default_fill_value = np.nan
if default_kind is None:
default_kind = 'block'
self._default_kind = default_kind
self._default_fill_value = default_fill_value
if isinstance(data, dict):
mgr = self._init_dict(data, index, columns)
if dtype is not None:
mgr = mgr.astype(dtype)
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, index, columns)
if dtype is not None:
mgr = mgr.astype(dtype)
elif isinstance(data, SparseDataFrame):
mgr = self._init_mgr(data._data,
dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns)
if dtype is not None:
mgr = mgr.astype(dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif data is None:
data = DataFrame()
if index is None:
index = Index([])
else:
index = _ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
data[c] = SparseArray(np.nan, index=index,
kind=self._default_kind,
fill_value=self._default_fill_value)
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
generic.NDFrame.__init__(self, mgr)
@property
def _constructor(self):
return SparseDataFrame
_constructor_sliced = SparseSeries
def _init_dict(self, data, index, columns, dtype=None):
# pre-filter out columns if we passed it
if columns is not None:
columns = _ensure_index(columns)
data = dict((k, v) for k, v in compat.iteritems(data)
if k in columns)
else:
columns = Index(_try_sort(list(data.keys())))
if index is None:
index = extract_index(list(data.values()))
sp_maker = lambda x: SparseArray(x, kind=self._default_kind,
fill_value=self._default_fill_value,
copy=True)
sdict = DataFrame()
for k, v in compat.iteritems(data):
if isinstance(v, Series):
# Force alignment, no copy necessary
if not v.index.equals(index):
v = v.reindex(index)
if not isinstance(v, SparseSeries):
v = sp_maker(v.values)
elif isinstance(v, SparseArray):
v = v.copy()
else:
if isinstance(v, dict):
v = [v.get(i, nan) for i in index]
v = sp_maker(v)
sdict[k] = v
# TODO: figure out how to handle this case, all nan's?
# add in any other columns we want to have (completeness)
nan_vec = np.empty(len(index))
nan_vec.fill(nan)
for c in columns:
if c not in sdict:
sdict[c] = sp_maker(nan_vec)
return to_manager(sdict, columns, index)
def _init_matrix(self, data, index, columns, dtype=None):
data = _prep_ndarray(data, copy=False)
N, K = data.shape
if index is None:
index = _default_index(N)
if columns is None:
columns = _default_index(K)
if len(columns) != K:
raise ValueError('Column length mismatch: %d vs. %d' %
(len(columns), K))
if len(index) != N:
raise ValueError('Index length mismatch: %d vs. %d' %
(len(index), N))
data = dict([(idx, data[:, i]) for i, idx in enumerate(columns)])
return self._init_dict(data, index, columns, dtype)
def __array_wrap__(self, result):
return self._constructor(
result, index=self.index, columns=self.columns,
default_kind=self._default_kind,
default_fill_value=self._default_fill_value).__finalize__(self)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
_default_fill_value=self._default_fill_value,
_default_kind=self._default_kind)
def _unpickle_sparse_frame_compat(self, state):
""" original pickle format """
series, cols, idx, fv, kind = state
if not isinstance(cols, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
columns = _unpickle_array(cols)
else:
columns = cols
if not isinstance(idx, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
index = _unpickle_array(idx)
else:
index = idx
series_dict = DataFrame()
for col, (sp_index, sp_values) in compat.iteritems(series):
series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
fill_value=fv)
self._data = to_manager(series_dict, columns, index)
self._default_fill_value = fv
self._default_kind = kind
def to_dense(self):
"""
Convert to dense DataFrame
Returns
-------
df : DataFrame
"""
data = dict((k, v.to_dense()) for k, v in compat.iteritems(self))
return DataFrame(data, index=self.index, columns=self.columns)
def _apply_columns(self, func):
""" get new SparseDataFrame applying func to each columns """
new_data = {}
for col, series in compat.iteritems(self):
new_data[col] = func(series)
return self._constructor(
data=new_data, index=self.index, columns=self.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
def astype(self, dtype):
return self._apply_columns(lambda x: x.astype(dtype))
def copy(self, deep=True):
"""
Make a copy of this SparseDataFrame
"""
result = super(SparseDataFrame, self).copy(deep=deep)
result._default_fill_value = self._default_fill_value
result._default_kind = self._default_kind
return result
@property
def default_fill_value(self):
return self._default_fill_value
@property
def default_kind(self):
return self._default_kind
@property
def density(self):
"""
Ratio of non-sparse points to total (dense) data points
represented in the frame
"""
tot_nonsparse = sum([ser.sp_index.npoints
for _, ser in compat.iteritems(self)])
tot = len(self.index) * len(self.columns)
return tot_nonsparse / float(tot)
def fillna(self, value=None, method=None, axis=0, inplace=False,
limit=None, downcast=None):
new_self = super(SparseDataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast)
if not inplace:
self = new_self
# set the fill value if we are filling as a scalar with nothing special
# going on
if (value is not None and value == value and method is None and
limit is None):
self._default_fill_value = value
if not inplace:
return self
# ----------------------------------------------------------------------
# Support different internal representation of SparseDataFrame
def _sanitize_column(self, key, value, **kwargs):
"""
Creates a new SparseArray from the input value.
Parameters
----------
key : object
value : scalar, Series, or array-like
kwargs : dict
Returns
-------
sanitized_column : SparseArray
"""
sp_maker = lambda x, index=None: SparseArray(
x, index=index, fill_value=self._default_fill_value,
kind=self._default_kind)
if isinstance(value, SparseSeries):
clean = value.reindex(self.index).as_sparse_array(
fill_value=self._default_fill_value, kind=self._default_kind)
elif isinstance(value, SparseArray):
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = value
elif hasattr(value, '__iter__'):
if isinstance(value, Series):
clean = value.reindex(self.index)
if not isinstance(value, SparseSeries):
clean = sp_maker(clean)
else:
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = sp_maker(value)
# Scalar
else:
clean = sp_maker(value, self.index)
# always return a SparseArray!
return clean
def __getitem__(self, key):
"""
Retrieve column or slice from DataFrame
"""
if isinstance(key, slice):
date_rng = self.index[key]
return self.reindex(date_rng)
elif isinstance(key, (np.ndarray, list, Series)):
return self._getitem_array(key)
else:
return self._get_item_cache(key)
@Appender(DataFrame.get_value.__doc__, indents=0)
def get_value(self, index, col, takeable=False):
if takeable is True:
series = self._iget_item_cache(col)
else:
series = self._get_item_cache(col)
return series.get_value(index, takeable=takeable)
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Notes
-----
This method *always* returns a new object. It is currently not
particularly efficient (and potentially very expensive) but is provided
for API compatibility with DataFrame
Returns
-------
frame : DataFrame
"""
dense = self.to_dense().set_value(index, col, value, takeable=takeable)
return dense.to_sparse(kind=self._default_kind,
fill_value=self._default_fill_value)
def _slice(self, slobj, axis=0, kind=None):
if axis == 0:
new_index = self.index[slobj]
new_columns = self.columns
else:
new_index = self.index
new_columns = self.columns[slobj]
return self.reindex(index=new_index, columns=new_columns)
def xs(self, key, axis=0, copy=False):
"""
Returns a row (cross-section) from the SparseDataFrame as a Series
object.
Parameters
----------
key : some index contained in the index
Returns
-------
xs : Series
"""
if axis == 1:
data = self[key]
return data
i = self.index.get_loc(key)
data = self.take([i]).get_values()[0]
return Series(data, index=self.columns)
# ----------------------------------------------------------------------
# Arithmetic-related methods
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
if level is not None:
raise NotImplementedError("'level' argument is not supported")
if self.empty and other.empty:
return self._constructor(index=new_index).__finalize__(self)
new_data = {}
new_fill_value = None
if fill_value is not None:
# TODO: be a bit more intelligent here
for col in new_columns:
if col in this and col in other:
dleft = this[col].to_dense()
dright = other[col].to_dense()
result = dleft._binop(dright, func, fill_value=fill_value)
result = result.to_sparse(fill_value=this[col].fill_value)
new_data[col] = result
else:
for col in new_columns:
if col in this and col in other:
new_data[col] = func(this[col], other[col])
# if the fill values are the same use them? or use a valid one
other_fill_value = getattr(other, 'default_fill_value', np.nan)
if self.default_fill_value == other_fill_value:
new_fill_value = self.default_fill_value
elif np.isnan(self.default_fill_value) and not np.isnan(
other_fill_value):
new_fill_value = other_fill_value
elif not np.isnan(self.default_fill_value) and np.isnan(
other_fill_value):
new_fill_value = self.default_fill_value
return self._constructor(data=new_data, index=new_index,
columns=new_columns,
default_fill_value=new_fill_value
).__finalize__(self)
def _combine_match_index(self, other, func, level=None, fill_value=None):
new_data = {}
if fill_value is not None:
raise NotImplementedError("'fill_value' argument is not supported")
if level is not None:
raise NotImplementedError("'level' argument is not supported")
new_index = self.index.union(other.index)
this = self
if self.index is not new_index:
this = self.reindex(new_index)
if other.index is not new_index:
other = other.reindex(new_index)
for col, series in compat.iteritems(this):
new_data[col] = func(series.values, other.values)
# fill_value is a function of our operator
if isnull(other.fill_value) or isnull(self.default_fill_value):
fill_value = np.nan
else:
fill_value = func(np.float64(self.default_fill_value),
np.float64(other.fill_value))
return self._constructor(
new_data, index=new_index, columns=self.columns,
default_fill_value=fill_value).__finalize__(self)
def _combine_match_columns(self, other, func, level=None, fill_value=None):
# patched version of DataFrame._combine_match_columns to account for
# NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,
# where 3.0 is numpy.float64 and series is a SparseSeries. Still
# possible for this to happen, which is bothersome
if fill_value is not None:
raise NotImplementedError("'fill_value' argument is not supported")
if level is not None:
raise NotImplementedError("'level' argument is not supported")
new_data = {}
union = intersection = self.columns
if not union.equals(other.index):
union = other.index.union(self.columns)
intersection = other.index.intersection(self.columns)
for col in intersection:
new_data[col] = func(self[col], float(other[col]))
return self._constructor(
new_data, index=self.index, columns=union,
default_fill_value=self.default_fill_value).__finalize__(self)
def _combine_const(self, other, func, raise_on_error=True):
return self._apply_columns(lambda x: func(x, other))
def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
limit=None, takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if self.index.equals(index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
return self._constructor(
index=index, columns=self.columns).__finalize__(self)
indexer = self.index.get_indexer(index, method, limit=limit)
indexer = _ensure_platform_int(indexer)
mask = indexer == -1
need_mask = mask.any()
new_series = {}
for col, series in self.iteritems():
if mask.all():
continue
values = series.values
# .take returns SparseArray
new = values.take(indexer)
if need_mask:
new = new.values
# convert integer to float if necessary. need to do a lot
# more than that, handle boolean etc also
new, fill_value = _maybe_upcast(new, fill_value=fill_value)
np.putmask(new, mask, fill_value)
new_series[col] = new
return self._constructor(
new_series, index=index, columns=self.columns,
default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_columns(self, columns, copy, level, fill_value, limit=None,
takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if notnull(fill_value):
raise NotImplementedError("'fill_value' argument is not supported")
if limit:
raise NotImplementedError("'limit' argument is not supported")
# TODO: fill value handling
sdict = dict((k, v) for k, v in compat.iteritems(self) if k in columns)
return self._constructor(
sdict, index=self.index, columns=columns,
default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,
limit=None, copy=False, allow_dups=False):
if method is not None or limit is not None:
raise NotImplementedError("cannot reindex with a method or limit "
"with sparse")
if fill_value is None:
fill_value = np.nan
index, row_indexer = reindexers.get(0, (None, None))
columns, col_indexer = reindexers.get(1, (None, None))
if columns is None:
columns = self.columns
new_arrays = {}
for col in columns:
if col not in self:
continue
if row_indexer is not None:
new_arrays[col] = algos.take_1d(self[col].get_values(),
row_indexer,
fill_value=fill_value)
else:
new_arrays[col] = self[col]
return self._constructor(new_arrays, index=index,
columns=columns).__finalize__(self)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
if on is not None:
raise NotImplementedError("'on' keyword parameter is not yet "
"implemented")
return self._join_index(other, how, lsuffix, rsuffix)
def _join_index(self, other, how, lsuffix, rsuffix):
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = SparseDataFrame(
{other.name: other},
default_fill_value=self._default_fill_value)
join_index = self.index.join(other.index, how=how)
this = self.reindex(join_index)
other = other.reindex(join_index)
this, other = this._maybe_rename_join(other, lsuffix, rsuffix)
from pandas import concat
return concat([this, other], axis=1, verify_integrity=True)
def _maybe_rename_join(self, other, lsuffix, rsuffix):
to_rename = self.columns.intersection(other.columns)
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s'
% to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
this = self.rename(columns=lrenamer)
other = other.rename(columns=rrenamer)
else:
this = self
return this, other
def transpose(self, *args, **kwargs):
"""
Returns a DataFrame with the rows/columns switched.
"""
nv.validate_transpose(args, kwargs)
return self._constructor(
self.values.T, index=self.columns, columns=self.index,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
T = property(transpose)
@Appender(DataFrame.count.__doc__)
def count(self, axis=0, **kwds):
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.count(), axis=axis)
def cumsum(self, axis=0, *args, **kwargs):
"""
Return SparseDataFrame of cumulative sums over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
Returns
-------
y : SparseDataFrame
"""
nv.validate_cumsum(args, kwargs)
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.cumsum(), axis=axis)
@Appender(generic._shared_docs['isnull'])
def isnull(self):
return self._apply_columns(lambda x: x.isnull())
@Appender(generic._shared_docs['isnotnull'])
def isnotnull(self):
return self._apply_columns(lambda x: x.isnotnull())
def apply(self, func, axis=0, broadcast=False, reduce=False):
"""
Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
Returns
-------
applied : Series or SparseDataFrame
"""
if not len(self.columns):
return self
axis = self._get_axis_number(axis)
if isinstance(func, np.ufunc):
new_series = {}
for k, v in compat.iteritems(self):
applied = func(v)
applied.fill_value = func(v.fill_value)
new_series[k] = applied
return self._constructor(
new_series, index=self.index, columns=self.columns,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
else:
if not broadcast:
return self._apply_standard(func, axis, reduce=reduce)
else:
return self._apply_broadcast(func, axis)
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Returns
-------
applied : DataFrame
"""
return self.apply(lambda x: lmap(func, x))
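# Hedged usage sketch (added for illustration, not part of pandas): building a
# small SparseDataFrame and reading the to_dense()/density members defined
# above. Assumes the public constructor accepts a dict of lists, as DataFrame
# does; the column data are made-up placeholder values.
def _sparse_frame_example():  # pragma: no cover - illustrative only
    sdf = SparseDataFrame({'a': [1.0, nan, 3.0], 'b': [nan, nan, 2.0]},
                          default_fill_value=nan)
    # three stored points over a 3x2 frame, so density == 3 / 6 == 0.5
    return sdf.density, sdf.to_dense()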
def to_manager(sdf, columns, index):
""" create and return the block manager from a dataframe of series,
columns, index
"""
# from BlockManager perspective
axes = [_ensure_index(columns), _ensure_index(index)]
return create_block_manager_from_arrays(
[sdf[c] for c in columns], columns, axes)
def stack_sparse_frame(frame):
"""
Only makes sense when fill_value is NaN
"""
lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
nobs = sum(lengths)
# this is pretty fast
minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)
inds_to_concat = []
vals_to_concat = []
# TODO: Figure out whether this can be reached.
# I think this currently can't be reached because you can't build a
# SparseDataFrame with a non-np.NaN fill value (fails earlier).
for _, series in compat.iteritems(frame):
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
vals_to_concat.append(series.sp_values)
major_labels = np.concatenate(inds_to_concat)
stacked_values = np.concatenate(vals_to_concat)
index = MultiIndex(levels=[frame.index, frame.columns],
labels=[major_labels, minor_labels],
verify_integrity=False)
lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,
columns=['foo'])
return lp.sortlevel(level=0)
def homogenize(series_dict):
"""
Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
corresponding to the locations where they all have data
Parameters
----------
series_dict : dict or DataFrame
Notes
-----
Using the dumbest algorithm I could think of. Should put some more thought
into this
Returns
-------
homogenized : dict of SparseSeries
"""
index = None
need_reindex = False
for _, series in compat.iteritems(series_dict):
if not np.isnan(series.fill_value):
raise TypeError('this method is only valid with NaN fill values')
if index is None:
index = series.sp_index
elif not series.sp_index.equals(index):
need_reindex = True
index = index.intersect(series.sp_index)
if need_reindex:
output = {}
for name, series in compat.iteritems(series_dict):
if not series.sp_index.equals(index):
series = series.sparse_reindex(index)
output[name] = series
else:
output = series_dict
return output
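# Hedged usage sketch (added for illustration, not part of pandas): homogenize
# above conforms NaN-filled SparseSeries to the SparseIndex where they all have
# data. Assumes SparseSeries accepts a plain list, as elsewhere in this module.
def _homogenize_example():  # pragma: no cover - illustrative only
    s1 = SparseSeries([1.0, nan, 3.0], fill_value=nan)
    s2 = SparseSeries([nan, 2.0, 4.0], fill_value=nan)
    # s1 holds data at positions 0 and 2, s2 at 1 and 2; the conformed output
    # shares the sparse index of their common data location (position 2 only)
    return homogenize({'a': s1, 'b': s2})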
# use unaccelerated ops for sparse objects
ops.add_flex_arithmetic_methods(SparseDataFrame, use_numexpr=False,
**ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(SparseDataFrame, use_numexpr=False,
**ops.frame_special_funcs)
| andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/sparse/frame.py | Python | apache-2.0 | 30,372 | 0.000099 |
#https://raw.githubusercontent.com/AaronJiang/ProjectEuler/master/py/problem072.py
"""
Consider the fraction, n/d, where n and d are positive integers.
If n<d and HCF(n,d)=1, it is called a reduced proper fraction.
If we list the set of reduced proper fractions for d <= 8 in
ascending order of size, we get:
1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7,
3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8
It can be seen that there are 21 elements in this set.
How many elements would be contained in the set of reduced proper
fractions for d <= 1,000,000?
"""
# Euler totient function phi(n):
# counts the number of positive integers less than or equal
# to n that are relatively prime to n
# phi(p) = p -1, for prime number p
# phi(n) = n * (1-1/p1) * (1- 1/p2) *...*(1-1/pk)
# p1,p2,..pk are prime factors of n
# this problem equals to: find the sum of phi(2), phi(3), ...phi(1000000)
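# Illustrative cross-check (added, not in the original script): phi(n) computed
# directly from the product formula above, e.g. phi(12) = 12*(1-1/2)*(1-1/3) = 4,
# matching the four residues {1, 5, 7, 11} that are coprime to 12.
def phi_direct(n):
    result, m, p = n, n, 2
    while p * p <= m:
        if m % p == 0:
            while m % p == 0:
                m //= p
            result -= result // p
        p += 1
    if m > 1:
        result -= result // m
    return result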
from Helper import isPrime
limit = 1000001
totients = range(limit) # [0,1,2,..,1000000]
for i in range(2, limit): # for (i=2; i<limit; i++)
if isPrime(i):
        totients[i] = i - 1
for j in range(2*i, limit, i):
totients[j] *= (1.0 - 1.0 / i)
print sum(totients) - 1
| paulmcquad/projecteuler | 0-100/problem72.py | Python | gpl-3.0 | 1,185 | 0.014346 |
"""
.. moduleauthor:: Chris Dusold <DriveLink@chrisdusold.com>
A module containing general purpose, cross instance hashing.
This module intends to make storage and cache checking stable across instances.
"""
from drivelink.hash._hasher import hash
from drivelink.hash._hasher import frozen_hash
from drivelink.hash._hasher import Deterministic_Hashable
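# Hedged usage sketch (added for illustration, not part of the package): the
# exported hash/frozen_hash are intended to return the same digest for equal
# inputs across separate interpreter runs, unlike the builtin hash() under
# PYTHONHASHSEED randomization. Treating hash(obj) as accepting arbitrary
# objects is an assumption here.
def _example_stable_hash():  # pragma: no cover - illustrative only
    return hash(("key", 1, 2.5))  # expected to be identical on every run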
| cdusold/DriveLink | drivelink/hash/__init__.py | Python | mit | 357 | 0.002801 |
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
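# Illustrative variant (added, not part of the original tests): a monitor can
# also implement a convergence check. This assumes est.train_score_ is already
# populated up to stage ``i`` when the monitor runs; it uses the same
# (i, est, locals) signature as early_stopping_monitor above.
def plateau_monitor(i, est, locals):
    """Stop once the training loss improves by less than 1e-4."""
    if i == 0:
        return False
    return abs(est.train_score_[i - 1] - est.train_score_[i]) < 1e-4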
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
if isinstance(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| zaxtax/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | Python | bsd-3-clause | 39,945 | 0.000075 |
# -*- coding: utf-8 -*-
#
# 2017-01-23 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Avoid XML bombs
# 2016-07-17 Cornelius Kölbel <cornelius.koelbel@netknights.it>
#            Add GPG encrypted import
# 2016-01-16 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add PSKC import with pre shared key
# 2015-05-28 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add PSKC import
# 2014-12-11 Cornelius Kölbel <cornelius@privacyidea.org>
# code cleanup during flask migration
# 2014-10-27 Cornelius Kölbel <cornelius@privacyidea.org>
# add parsePSKCdata
# 2014-05-08 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''This file is part of the privacyidea service
It is used for importing SafeNet (former Aladdin)
XML files, that hold the OTP secrets for eToken PASS.
'''
import defusedxml.ElementTree as etree
import re
import binascii
import base64
from privacyidea.lib.utils import modhex_decode
from privacyidea.lib.utils import modhex_encode
from privacyidea.lib.log import log_with
from privacyidea.lib.crypto import aes_decrypt
from Crypto.Cipher import AES
from bs4 import BeautifulSoup
import traceback
from passlib.utils.pbkdf2 import pbkdf2
from privacyidea.lib.utils import to_utf8
import gnupg
import logging
log = logging.getLogger(__name__)
def _create_static_password(key_hex):
'''
According to yubikey manual 5.5.5 the static-ticket is the same
algorithm with no moving factors.
The msg_hex that is encoded with the AES key is
'000000000000ffffffffffffffff0f2e'
'''
msg_hex = "000000000000ffffffffffffffff0f2e"
msg_bin = binascii.unhexlify(msg_hex)
aes = AES.new(binascii.unhexlify(key_hex), AES.MODE_ECB)
password_bin = aes.encrypt(msg_bin)
password = modhex_encode(password_bin)
return password
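# Hedged usage sketch (added, not part of the original module): with a 16-byte
# AES key written as 32 hex characters, the helper above returns a 32-character
# modhex string; there are no moving factors, so the output depends only on the
# key. The key below is a made-up placeholder.
def _example_static_password():  # pragma: no cover - illustrative only
    return _create_static_password("0123456789abcdef0123456789abcdef")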
class ImportException(Exception):
def __init__(self, description):
self.description = description
def __str__(self):
return ('{0!s}'.format(self.description))
def getTagName(elem):
match = re.match("^({.*?})(.*)$", elem.tag)
if match:
return match.group(2)
else:
return elem.tag
@log_with(log)
def parseOATHcsv(csv):
'''
(#653)
This function parses CSV data for oath token.
The file format is
serial, key, [hotp,totp], [6,8], [30|60],
serial, key, ocra, [ocra-suite]
    It imports sha1 hotp or totp tokens.
    It can also import ocra tokens.
The default is hotp
if totp is set, the default seconds are 30
if ocra is set, an ocra-suite is required, otherwise the default
ocra-suite is used.
It returns a dictionary:
{
serial: { 'type' : xxxx,
'otpkey' : xxxx,
'timeStep' : xxxx,
'otplen' : xxx,
'ocrasuite' : xxx }
}
'''
TOKENS = {}
csv_array = csv.split('\n')
log.debug("the file contains {0:d} tokens.".format(len(csv_array)))
for line in csv_array:
l = line.split(',')
serial = ""
key = ""
ttype = "hotp"
seconds = 30
otplen = 6
hashlib = "sha1"
ocrasuite = ""
serial = l[0].strip()
# check for empty line
if len(serial) > 0 and not serial.startswith('#'):
if len(l) >= 2:
key = l[1].strip()
if len(key) == 32:
hashlib = "sha256"
else:
log.error("the line {0!s} did not contain a hotp key".format(line))
continue
# ttype
if len(l) >= 3:
ttype = l[2].strip().lower()
# otplen or ocrasuite
if len(l) >= 4:
if ttype != "ocra":
otplen = int(l[3].strip())
elif ttype == "ocra":
ocrasuite = l[3].strip()
# timeStep
if len(l) >= 5:
seconds = int(l[4].strip())
log.debug("read the line |{0!s}|{1!s}|{2!s}|{3:d} {4!s}|{5:d}|".format(serial, key, ttype, otplen, ocrasuite, seconds))
TOKENS[serial] = {'type': ttype,
'otpkey': key,
'timeStep': seconds,
'otplen': otplen,
'hashlib': hashlib,
'ocrasuite': ocrasuite
}
return TOKENS
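# Hedged usage sketch (added, not part of the original module): one CSV line in
# the format documented above and, roughly, the dictionary parseOATHcsv() would
# build from it. Serial and key are made-up placeholder values.
_EXAMPLE_OATH_CSV = "tok001, 3132333435363738393031323334353637383930, totp, 6, 30"
# parseOATHcsv(_EXAMPLE_OATH_CSV) ->
# {'tok001': {'type': 'totp', 'otpkey': '3132...3930', 'timeStep': 30,
#             'otplen': 6, 'hashlib': 'sha1', 'ocrasuite': ''}}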
@log_with(log)
def parseYubicoCSV(csv):
'''
This function reads the CSV data as created by the Yubico personalization
GUI.
Traditional Format:
Yubico OTP,12/11/2013 11:10,1,vvgutbiedkvi,
ab86c04de6a3,d26a7c0f85fdda28bd816e406342b214,,,0,0,0,0,0,0,0,0,0,0
OATH-HOTP,11.12.13 18:55,1,cccccccccccc,,
916821d3a138bf855e70069605559a206ba854cd,,,0,0,0,6,0,0,0,0,0,0
Static Password,11.12.13 19:08,1,,d5a3d50327dc,
0e8e37b0e38b314a56748c030f58d21d,,,0,0,0,0,0,0,0,0,0,0
Yubico Format:
# OATH mode
508326,,0,69cfb9202438ca68964ec3244bfa4843d073a43b,,2013-12-12T08:41:07,
1382042,,0,bf7efc1c8b6f23604930a9ce693bdd6c3265be00,,2013-12-12T08:41:17,
# Yubico mode
508326,cccccccccccc,83cebdfb7b93,a47c5bf9c152202f577be6721c0113af,,
2013-12-12T08:43:17,
# static mode
508326,,,9e2fd386224a7f77e9b5aee775464033,,2013-12-12T08:44:34,
column 0: serial
column 1: public ID in yubico mode
column 2: private ID in yubico mode, 0 in OATH mode, blank in static mode
column 3: AES key
    BUMMER: The Yubico Format does not contain the information
    which slot of the token was written.
    If no public ID or serial is given, we can not import the token, as the
returned dictionary needs the token serial as a key.
It returns a dictionary with the new tokens to be created:
{
serial: { 'type' : yubico,
'otpkey' : xxxx,
'otplen' : xxx,
'description' : xxx
}
}
'''
TOKENS = {}
csv_array = csv.split('\n')
log.debug("the file contains {0:d} tokens.".format(len(csv_array)))
for line in csv_array:
l = line.split(',')
serial = ""
key = ""
otplen = 32
public_id = ""
slot = ""
if len(l) >= 6:
first_column = l[0].strip()
if first_column.lower() in ["yubico otp",
"oath-hotp",
"static password"]:
# traditional format
typ = l[0].strip()
slot = l[2].strip()
public_id = l[3].strip()
key = l[5].strip()
if public_id == "":
# Usually a "static password" does not have a public ID!
# So we would bail out here for static passwords.
log.warning("No public ID in line {0!r}".format(line))
continue
serial_int = int(binascii.hexlify(modhex_decode(public_id)),
16)
if typ.lower() == "yubico otp":
ttype = "yubikey"
otplen = 32 + len(public_id)
serial = "UBAM{0:08d}_{1!s}".format(serial_int, slot)
TOKENS[serial] = {'type': ttype,
'otpkey': key,
'otplen': otplen,
'description': public_id
}
elif typ.lower() == "oath-hotp":
'''
WARNING: this does not work out at the moment, since the
Yubico GUI either
1. creates a serial in the CSV, but then the serial is
always prefixed! We can not authenticate with this!
2. if it does not prefix the serial there is no serial in
the CSV! We can not import and assign the token!
'''
ttype = "hotp"
otplen = 6
serial = "UBOM{0:08d}_{1!s}".format(serial_int, slot)
TOKENS[serial] = {'type': ttype,
'otpkey': key,
'otplen': otplen,
'description': public_id
}
else:
                    log.warning("at the moment we only support Yubico OTP"
" and HOTP: %r" % line)
continue
elif first_column.isdigit():
# first column is a number, (serial number), so we are
# in the yubico format
serial = first_column
# the yubico format does not specify a slot
slot = "X"
key = l[3].strip()
if l[2].strip() == "0":
# HOTP
typ = "hotp"
serial = "UBOM{0!s}_{1!s}".format(serial, slot)
otplen = 6
elif l[2].strip() == "":
# Static
typ = "pw"
serial = "UBSM{0!s}_{1!s}".format(serial, slot)
key = _create_static_password(key)
otplen = len(key)
                    log.warning("We can not enroll a static mode, since we do"
                                " not know the private identity and so we do"
                                " not know the static password.")
continue
else:
# Yubico
typ = "yubikey"
serial = "UBAM{0!s}_{1!s}".format(serial, slot)
public_id = l[1].strip()
otplen = 32 + len(public_id)
TOKENS[serial] = {'type': typ,
'otpkey': key,
'otplen': otplen,
'description': public_id
}
else:
                log.warning("the line {0!r} did not contain enough values".format(line))
continue
return TOKENS
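# Hedged example (not from the original source; the IDs and AES key are taken
# from the sample line in the docstring above): a single line in the
# traditional "Yubico OTP" format yields one token whose serial is built from
# the modhex-decoded public ID and the slot number.
#
#   line = ("Yubico OTP,12/11/2013 11:10,1,vvgutbiedkvi,"
#           "ab86c04de6a3,d26a7c0f85fdda28bd816e406342b214,,,0,0,0,0,0,0,0,0,0,0")
#   parseYubicoCSV(line)
#   # -> {'UBAM<decimal public id>_1': {'type': 'yubikey', 'otpkey': ..., ...}}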
@log_with(log)
def parseSafeNetXML(xml):
"""
    This function parses XML data of an Aladdin/SafeNet XML
file for eToken PASS
It returns a dictionary of
serial : { otpkey , counter, type }
"""
TOKENS = {}
elem_tokencontainer = etree.fromstring(xml)
if getTagName(elem_tokencontainer) != "Tokens":
raise ImportException("No toplevel element Tokens")
for elem_token in list(elem_tokencontainer):
SERIAL = None
COUNTER = None
HMAC = None
DESCRIPTION = None
if getTagName(elem_token) == "Token":
SERIAL = elem_token.get("serial")
log.debug("Found token with serial {0!s}".format(SERIAL))
for elem_tdata in list(elem_token):
tag = getTagName(elem_tdata)
if "ProductName" == tag:
DESCRIPTION = elem_tdata.text
log.debug("The Token with the serial %s has the "
"productname %s" % (SERIAL, DESCRIPTION))
if "Applications" == tag:
for elem_apps in elem_tdata:
if getTagName(elem_apps) == "Application":
for elem_app in elem_apps:
tag = getTagName(elem_app)
if "Seed" == tag:
HMAC = elem_app.text
if "MovingFactor" == tag:
COUNTER = elem_app.text
if not SERIAL:
log.error("Found token without a serial")
else:
if HMAC:
hashlib = "sha1"
if len(HMAC) == 64:
hashlib = "sha256"
TOKENS[SERIAL] = {'otpkey': HMAC,
'counter': COUNTER,
'type': 'hotp',
'hashlib': hashlib
}
else:
                log.error("Found token {0!s} without an element 'Seed'".format(
SERIAL))
return TOKENS
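# Hedged example (not from the original source): a minimal eToken PASS file
# with a single token would look roughly like
#   <Tokens><Token serial="0001"><Applications><Application>
#     <Seed>1234...</Seed><MovingFactor>0</MovingFactor>
#   </Application></Applications></Token></Tokens>
# and parseSafeNetXML would then return
#   {'0001': {'otpkey': '1234...', 'counter': '0', 'type': 'hotp',
#             'hashlib': 'sha1'}}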
def strip_prefix_from_soup(xml_soup):
"""
We strip prefixes from the XML tags.
<pskc:encryption>
</pskc:encryption>
results in:
<encryption>
</encryption>
:param xml_soup: Beautiful Soup XML with tags with prefixes
:type xml_soup: Beautiful Soup object
:return: Beautiful Soup without prefixes in the tags
"""
# strip the prefixes from the tags!
for tag in xml_soup.findAll():
if tag.name.find(":") >= 1:
prefix, name = tag.name.split(":")
tag.name = name
return xml_soup
def derive_key(xml, password):
"""
Derive the encryption key from the password with the parameters given
in the XML soup.
:param xml: The XML
:param password: the password
:return: The derived key, hexlified
"""
if not password:
raise ImportException("The XML KeyContainer specifies a derived "
"encryption key, but no password given!")
    keymeth = xml.keycontainer.encryptionkey.derivedkey.keyderivationmethod
derivation_algo = keymeth["algorithm"].split("#")[-1]
if derivation_algo.lower() != "pbkdf2":
raise ImportException("We only support PBKDF2 as Key derivation "
"function!")
salt = keymeth.find("salt").text.strip()
keylength = keymeth.find("keylength").text.strip()
rounds = keymeth.find("iterationcount").text.strip()
r = pbkdf2(to_utf8(password), base64.b64decode(salt), int(rounds),
int(keylength))
return binascii.hexlify(r)
@log_with(log)
def parsePSKCdata(xml_data,
preshared_key_hex=None,
password=None,
do_checkserial=False):
"""
    This function parses XML data of a PSKC file (RFC 6030).
It can read
* AES-128-CBC encrypted (preshared_key_bin) data
* password based encrypted data
* plain text data
:param xml_data: The XML data
:type xml_data: basestring
:param preshared_key_hex: The preshared key, hexlified
:param password: The password that encrypted the keys
:param do_checkserial: Check if the serial numbers conform to the OATH
specification (not yet implemented)
:return: a dictionary of token dictionaries
{ serial : { otpkey , counter, .... }}
"""
tokens = {}
#xml = BeautifulSoup(xml_data, "lxml")
xml = strip_prefix_from_soup(BeautifulSoup(xml_data, "lxml"))
if xml.keycontainer.encryptionkey and \
xml.keycontainer.encryptionkey.derivedkey:
# If we have a password we also need a tag EncryptionKey in the
# KeyContainer
preshared_key_hex = derive_key(xml, password)
key_packages = xml.keycontainer.findAll("keypackage")
for key_package in key_packages:
token = {}
key = key_package.key
try:
token["description"] = key_package.deviceinfo.manufacturer.string
except Exception as exx:
log.debug("Can not get manufacturer string {0!s}".format(exx))
serial = key["id"]
try:
serial = key_package.deviceinfo.serialno.string
except Exception as exx:
log.debug("Can not get serial string from device info {0!s}".format(exx))
algo = key["algorithm"]
token["type"] = algo[-4:].lower()
parameters = key.algorithmparameters
token["otplen"] = parameters.responseformat["length"] or 6
try:
if key.data.secret.plainvalue:
secret = key.data.secret.plainvalue.string
token["otpkey"] = binascii.hexlify(base64.b64decode(secret))
elif key.data.secret.encryptedvalue:
encryptionmethod = key.data.secret.encryptedvalue.encryptionmethod
enc_algorithm = encryptionmethod["algorithm"].split("#")[-1]
if enc_algorithm.lower() != "aes128-cbc":
raise ImportException("We only import PSKC files with "
"AES128-CBC.")
enc_data = key.data.secret.encryptedvalue.ciphervalue.text
enc_data = base64.b64decode(enc_data.strip())
enc_iv = enc_data[:16]
enc_cipher = enc_data[16:]
secret = aes_decrypt(binascii.unhexlify(preshared_key_hex),
enc_iv, enc_cipher)
token["otpkey"] = binascii.hexlify(secret)
except Exception as exx:
log.error("Failed to import tokendata: {0!s}".format(exx))
log.debug(traceback.format_exc())
raise ImportException("Failed to import tokendata. Wrong "
"encryption key? %s" % exx)
if token["type"] == "hotp" and key.data.counter:
token["counter"] = key.data.counter.text.strip()
elif token["type"] == "totp" and key.data.timeinterval:
token["timeStep"] = key.data.timeinterval.text.strip()
tokens[serial] = token
return tokens
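# Hedged usage sketch (not part of the original module; the file name and
# password are placeholders): for a password protected PSKC file the parser
# would typically be called like
#
#   with open("tokens.pskc") as f:
#       tokens = parsePSKCdata(f.read(), password="secret")
#   # tokens maps each serial to {'type', 'otpkey', 'otplen', 'counter'/'timeStep', ...}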
class GPGImport(object):
"""
This class is used to decrypt GPG encrypted import files.
    The decrypt method returns the unencrypted files.
Create the keypair like this:
GNUPGHOME=/etc/privacyidea/gpg gpg --gen-key
"""
def __init__(self, config=None):
self.config = config or {}
self.gnupg_home = self.config.get("PI_GNUPG_HOME",
"/etc/privacyidea/gpg")
self.gpg = gnupg.GPG(gnupghome=self.gnupg_home)
self.private_keys = self.gpg.list_keys(True)
def get_publickeys(self):
"""
This returns the public GPG key to be displayed in the Import Dialog.
The administrator can send this public key to his token vendor and
the token vendor can use this public key to encrypt the token import
file.
:return: a dictionary of public keys with fingerprint
"""
public_keys = {}
keys = self.gpg.list_keys(secret=True)
for key in keys:
ascii_armored_public_key = self.gpg.export_keys(key.get("keyid"))
public_keys[key.get("keyid")] = {"armor": ascii_armored_public_key,
"fingerprint": key.get(
"fingerprint")}
return public_keys
def decrypt(self, input_data):
"""
Decrypts the input data with one of the private keys
:param input_data:
:return:
"""
decrypted = self.gpg.decrypt(message=input_data)
if not decrypted.ok:
            log.error(u"Decryption failed: {0!s}. {1!s}".format(
decrypted.status, decrypted.stderr))
raise Exception(decrypted.stderr)
return decrypted.data
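# Hedged usage sketch (not part of the original module; the GNUPGHOME path is
# the module default and the file name is a placeholder): decrypting a vendor
# supplied import file would look roughly like
#
#   gpg = GPGImport({"PI_GNUPG_HOME": "/etc/privacyidea/gpg"})
#   with open("tokens.csv.gpg", "rb") as f:
#       cleartext = gpg.decrypt(f.read())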
| wheldom01/privacyidea | privacyidea/lib/importotp.py | Python | agpl-3.0 | 20,360 | 0.00059 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 20 20:41:02 2017
@author: jacoblashner
"""
import numpy as np
import matplotlib.pyplot as plt
ae = lambda nu : 1.47*10**(-7) * nu**(2.2)
ao = lambda nu : 8.7*10**(-5)*nu+3.1*10**(-7)*nu**2 + 3.0*10**(-10)*nu**3
d = .3 #cm
ee = lambda nu : (1 - np.exp(ae(nu) * d))
eo = lambda nu : (1 - np.exp(ao(nu) * d))
freqs = np.linspace(60, 240)
plt.plot(freqs, map(ae, freqs))
plt.plot(freqs, map(ao, freqs))
plt.show()
plt.plot(freqs, map(ee, freqs))
plt.plot(freqs, map(eo, freqs))
plt.show()
print "ae: ", ae(145)
print "ao: ", ao(145)
print "eps_pol: ", .5*(ee(145)**2 - eo(145)**2)
| MillerCMBLabUSC/lab_analysis | apps/4f_model/Memos/A2/plots.py | Python | gpl-2.0 | 658 | 0.018237 |
import sys, os
from tempfile import TemporaryDirectory
import pytest
import multiprocessing
from spinalcordtoolbox.utils import sct_test_path, sct_dir_local_path
sys.path.append(sct_dir_local_path('scripts'))
from spinalcordtoolbox import resampling
import spinalcordtoolbox.reports.qc as qc
from spinalcordtoolbox.image import Image
import spinalcordtoolbox.reports.slice as qcslice
def gen_qc(args):
i, path_qc = args
t2_image = sct_test_path('t2', 't2.nii.gz')
t2_seg = sct_test_path('t2', 't2_seg-manual.nii.gz')
qc.generate_qc(fname_in1=t2_image, fname_seg=t2_seg, path_qc=path_qc, process="sct_deepseg_gm")
return True
def test_many_qc():
"""Test many qc images can be made in parallel"""
if multiprocessing.cpu_count() < 2:
pytest.skip("Can't test parallel behaviour")
with TemporaryDirectory(prefix="sct-qc-") as tmpdir:
# install: sct_download_data -d sct_testing_data
with multiprocessing.Pool(2) as p:
p.map(gen_qc, ((i, tmpdir) for i in range(5)))
| neuropoly/spinalcordtoolbox | testing/api/test_qc_parallel.py | Python | mit | 1,038 | 0.00578 |
#!/usr/bin/env python
# encoding: utf-8
from .user import *
from .upload import *
from .post import *
from .system import *
def all():
result = []
models = []
for m in models:
result += m.__all__
return result
__all__ = all()
| luke0922/MarkdownEditor | application/models/__init__.py | Python | gpl-2.0 | 255 | 0.003922 |
from PIL import Image
import sys
def resize(img, baseheight, newname):
hpercent = (baseheight / float(img.size[1]))
wsize = int((float(img.size[0]) * float(hpercent)))
img = img.resize((wsize, baseheight), Image.ANTIALIAS)
img.save(newname)
def makethumbnails(fname):
img = Image.open(fname)
x1 = fname.replace('.png', 'x1.png')
resize(img, 200, x1)
x15 = fname.replace('.png', 'x1.5.png')
resize(img, 300, x15)
x2 = fname.replace('.png', 'x2.png')
resize(img, 400, x2)
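# Hedged usage sketch (an addition, not part of the original script): generate
# the three thumbnail sizes for a PNG file given on the command line.
if __name__ == '__main__':
    makethumbnails(sys.argv[1])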
| ejegg/FractalEditorSite | util/image.py | Python | gpl-3.0 | 519 | 0.001927 |
"""
Empirical Likelihood Linear Regression Inference
The script contains the function that is optimized over nuisance parameters to
conduct inference on linear regression parameters. It is called by eltest
in OLSResults.
General References
-----------------
Owen, A.B.(2001). Empirical Likelihood. Chapman and Hall
"""
import numpy as np
from statsmodels.emplike.descriptive import _OptFuncts
class _ELRegOpts(_OptFuncts):
"""
A class that holds functions to be optimized over when conducting
hypothesis tests and calculating confidence intervals.
Parameters
----------
OLSResults : Results instance
A fitted OLS result
"""
def __init__(self):
pass
def _opt_nuis_regress(self, nuisance_params, param_nums=None,
endog=None, exog=None,
nobs=None, nvar=None, params=None, b0_vals=None,
stochastic_exog=None):
"""
A function that is optimized over nuisance parameters to conduct a
hypothesis test for the parameters of interest
Parameters
----------
nuisance_params: 1darray
Parameters to be optimized over
Returns
-------
llr : float
-2 x the log-likelihood of the nuisance parameters and the
hypothesized value of the parameter(s) of interest.
"""
params[param_nums] = b0_vals
nuis_param_index = np.int_(np.delete(np.arange(nvar),
param_nums))
params[nuis_param_index] = nuisance_params
new_params = params.reshape(nvar, 1)
self.new_params = new_params
est_vect = exog * \
(endog - np.squeeze(np.dot(exog, new_params))).reshape(nobs, 1)
if not stochastic_exog:
exog_means = np.mean(exog, axis=0)[1:]
exog_mom2 = (np.sum(exog * exog, axis=0))[1:]\
/ nobs
mean_est_vect = exog[:, 1:] - exog_means
mom2_est_vect = (exog * exog)[:, 1:] - exog_mom2
regressor_est_vect = np.concatenate((mean_est_vect, mom2_est_vect),
axis=1)
est_vect = np.concatenate((est_vect, regressor_est_vect),
axis=1)
wts = np.ones(nobs) * (1. / nobs)
x0 = np.zeros(est_vect.shape[1]).reshape(-1, 1)
try:
eta_star = self._modif_newton(x0, est_vect, wts)
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
# the following commented out code is to verify weights
# see open issue #1845
#self.new_weights /= self.new_weights.sum()
#if not np.allclose(self.new_weights.sum(), 1., rtol=0, atol=1e-10):
# raise RuntimeError('weights do not sum to 1')
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr
except np.linalg.linalg.LinAlgError:
return np.inf
| DonBeo/statsmodels | statsmodels/emplike/elregress.py | Python | bsd-3-clause | 3,091 | 0.002588 |
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="side",
parent_name="scattergeo.marker.colorbar.title",
**kwargs
):
super(SideValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["right", "top", "bottom"]),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/scattergeo/marker/colorbar/title/_side.py | Python | mit | 569 | 0 |
from __future__ import absolute_import
from __future__ import division
# Copyright (c) 2010-2017 openpyxl
import math
#constants
DEFAULT_ROW_HEIGHT = 15. # Default row height measured in point size.
BASE_COL_WIDTH = 13 # in characters
DEFAULT_COLUMN_WIDTH = 51.85 # in points, should be characters
DEFAULT_LEFT_MARGIN = 0.7 # in inches, = right margin
DEFAULT_TOP_MARGIN = 0.7874 # in inches = bottom margin
DEFAULT_HEADER = 0.3 # in inches
# Conversion functions
"""
From the ECMA Spec (4th Edition part 1)
Page setup: "Left Page Margin in inches" p. 1647
Docs from
http://startbigthinksmall.wordpress.com/2010/01/04/points-inches-and-emus-measuring-units-in-office-open-xml/
See also http://msdn.microsoft.com/en-us/library/dd560821(v=office.12).aspx
dxa: The main unit in OOXML is a twentieth of a point. Also called twips.
pt: point. In Excel there are 72 points to an inch
hp: half-points are used to specify font sizes. A font-size of 12pt equals 24 half points
pct: Half-points are used to specify font sizes. A font-size of 12pt equals 24 half points
EMU: English Metric Unit, EMUs are used for coordinates in vector-based
drawings and embedded pictures. One inch equates to 914400 EMUs and a
centimeter is 360000. For bitmaps the default resolution is 96 dpi (known as
PixelsPerInch in Excel). Spec p. 1122
For radial geometry Excel uses integer units of 1/60000th of a degree.
"""
def inch_to_dxa(value):
"""1 inch = 72 * 20 dxa"""
return int(value * 20 * 72)
def dxa_to_inch(value):
return value / 72 / 20
def dxa_to_cm(value):
return 2.54 * dxa_to_inch(value)
def cm_to_dxa(value):
emu = cm_to_EMU(value)
inch = EMU_to_inch(emu)
return inch_to_dxa(inch)
def pixels_to_EMU(value):
"""1 pixel = 9525 EMUs"""
return int(value * 9525)
def EMU_to_pixels(value):
return round(value / 9525)
def cm_to_EMU(value):
"""1 cm = 360000 EMUs"""
return int(value * 360000)
def EMU_to_cm(value):
return round(value / 360000, 4)
def inch_to_EMU(value):
"""1 inch = 914400 EMUs"""
return int(value * 914400)
def EMU_to_inch(value):
return round(value / 914400, 4)
def pixels_to_points(value, dpi=96):
    """96 dpi, 72 points per inch"""
return value * 72 / dpi
def points_to_pixels(value, dpi=96):
return int(math.ceil(value * dpi / 72))
def degrees_to_angle(value):
"""1 degree = 60000 angles"""
return int(round(value * 60000))
def angle_to_degrees(value):
return round(value / 60000, 2)
def short_color(color):
""" format a color to its short size """
if len(color) > 6:
return color[2:]
return color
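# Hedged sketch (an addition, not part of openpyxl): a few spot checks of the
# conversion helpers above, runnable directly. The expected values follow from
# the constants documented in the docstring (72 pt per inch, 9525 EMU per
# pixel, 60000 angle units per degree).
if __name__ == "__main__":
    assert inch_to_dxa(1) == 1440          # 72 pt * 20 dxa per pt
    assert pixels_to_EMU(2) == 19050       # 2 px * 9525 EMU per px
    assert points_to_pixels(72) == 96      # at the default 96 dpi
    assert degrees_to_angle(1.5) == 90000
    assert short_color("FF00FF00") == "00FF00"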
| 171121130/SWI | venv/Lib/site-packages/openpyxl/utils/units.py | Python | mit | 2,629 | 0.005325 |
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .base import IBaseCap, CapBaseObject, Field, IntField, DecimalField, \
StringField, BytesField, DateField
__all__ = ['HousingPhoto', 'Housing', 'Query', 'City', 'ICapHousing']
class HousingPhoto(CapBaseObject):
"""
Photo of a housing.
"""
url = StringField('Direct URL to photo')
data = BytesField('Data of photo')
def __init__(self, url):
CapBaseObject.__init__(self, url.split('/')[-1])
self.url = url
def __iscomplete__(self):
return self.data
def __str__(self):
return self.url
def __repr__(self):
return u'<HousingPhoto "%s" data=%do>' % (self.id, len(self.data) if self.data else 0)
class Housing(CapBaseObject):
"""
Content of a housing.
"""
title = StringField('Title of housing')
area = DecimalField('Area of housing, in m2')
cost = DecimalField('Cost of housing')
currency = StringField('Currency of cost')
date = DateField('Date when the housing has been published')
location = StringField('Location of housing')
station = StringField('What metro/bus station next to housing')
text = StringField('Text of the housing')
phone = StringField('Phone number to contact')
photos = Field('List of photos', list)
details = Field('Key/values of details', dict)
class Query(CapBaseObject):
"""
Query to find housings.
"""
TYPE_RENT = 0
TYPE_SALE = 1
type = IntField('Type of housing to find (TYPE_* constants)')
cities = Field('List of cities to search in', list, tuple)
area_min = IntField('Minimal area (in m2)')
area_max = IntField('Maximal area (in m2)')
cost_min = IntField('Minimal cost')
cost_max = IntField('Maximal cost')
nb_rooms = IntField('Number of rooms')
def __init__(self):
CapBaseObject.__init__(self, '')
class City(CapBaseObject):
"""
City.
"""
name = StringField('Name of city')
class ICapHousing(IBaseCap):
"""
Capability of websites to search housings.
"""
def search_housings(self, query):
"""
Search housings.
:param query: search query
:type query: :class:`Query`
:rtype: iter[:class:`Housing`]
"""
raise NotImplementedError()
def get_housing(self, housing):
"""
Get an housing from an ID.
:param housing: ID of the housing
:type housing: str
:rtype: :class:`Housing` or None if not found.
"""
raise NotImplementedError()
def search_city(self, pattern):
"""
Search a city from a pattern.
:param pattern: pattern to search
:type pattern: str
:rtype: iter[:class:`City`]
"""
raise NotImplementedError()
| eirmag/weboob | weboob/capabilities/housing.py | Python | agpl-3.0 | 3,610 | 0.007202 |
# -*- encoding:utf8 -*-
"""
Use MongoDB as the cache store.
Test the local cache.
"""
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import json
from pymongo import MongoClient
from datetime import datetime, timedelta
from bson.binary import Binary
import zlib
import time
class MongoCache:
def __init__(self, client=None, expires=timedelta(days=30)):
self.client = client or MongoClient(connect=False)
        # use 'cache' as the cache collection
self.db = self.client.cache
        # cache entries are removed automatically after they expire
self.db.webpage.create_index('timestamp', expireAfterSeconds=expires.total_seconds())
def __contains__(self, url):
try:
self[url]
except KeyError:
return False
else:
return True
def __getitem__(self, url):
result = self.db.webpage.find_one({'_id': url})
if result:
result['html'] = zlib.decompress(result['html'])
return result
else:
            raise KeyError(url + ' does not exist')
pass
def __setitem__(self, url, result):
result['html'] = Binary(zlib.compress(result['html']))
self.db.webpage.replace_one({'_id': url}, result, upsert=True)
result['html'] = zlib.decompress(result['html'])
def clear(self):
self.db.webpage.drop()
def test(timesleep=60):
cache = MongoCache(expires=timedelta())
time.sleep(timesleep)
cache['http://www.baidu.com'] = {'html': '<p>asd</p>', 'timestamp': str(datetime.utcnow())}
print cache['http://www.baidu.com']
if __name__ == '__main__':
from link_crawler import link_crawler
link_crawler('http://example.webscraping.com/', delay=3, link_regex='/(index|view)',
max_urls=-1, cache=MongoCache())
| basicworld/pycrawler | mongo_cache.py | Python | mit | 1,791 | 0.006908 |
"""
"""
import traceback
from AnyQt.QtWidgets import QWidget, QPlainTextEdit, QVBoxLayout, QSizePolicy
from AnyQt.QtGui import QTextCursor, QTextCharFormat, QFont
from AnyQt.QtCore import Qt, QObject, QCoreApplication, QThread, QSize
from AnyQt.QtCore import pyqtSignal as Signal
class TerminalView(QPlainTextEdit):
def __init__(self, *args, **kwargs):
QPlainTextEdit.__init__(self, *args, **kwargs)
self.setFrameStyle(QPlainTextEdit.NoFrame)
self.setTextInteractionFlags(Qt.TextBrowserInteraction)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
font = self.font()
font.setStyleHint(QFont.Monospace)
font.setFamily("Monospace")
self.setFont(font)
def sizeHint(self):
metrics = self.fontMetrics()
width = metrics.boundingRect("_" * 81).width()
height = metrics.lineSpacing()
scroll_width = self.verticalScrollBar().width()
size = QSize(width + scroll_width, height * 25)
return size
class OutputView(QWidget):
def __init__(self, parent=None, **kwargs):
QWidget.__init__(self, parent, **kwargs)
self.__lines = 5000
self.setLayout(QVBoxLayout())
self.layout().setContentsMargins(0, 0, 0, 0)
self.__text = TerminalView()
self.__currentCharFormat = self.__text.currentCharFormat()
self.layout().addWidget(self.__text)
def setMaximumLines(self, lines):
"""
Set the maximum number of lines to keep displayed.
"""
if self.__lines != lines:
self.__lines = lines
self.__text.setMaximumBlockCount(lines)
def maximumLines(self):
"""
Return the maximum number of lines in the display.
"""
return self.__lines
def clear(self):
"""
Clear the displayed text.
"""
self.__text.clear()
def setCurrentCharFormat(self, charformat):
"""Set the QTextCharFormat to be used when writing.
"""
if self.__currentCharFormat != charformat:
self.__currentCharFormat = charformat
def currentCharFormat(self):
return self.__currentCharFormat
def toPlainText(self):
"""
Return the full contents of the output view.
"""
return self.__text.toPlainText()
# A file like interface.
def write(self, string):
self.__text.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
self.__text.setCurrentCharFormat(self.__currentCharFormat)
self.__text.insertPlainText(string)
def writelines(self, lines):
self.write("".join(lines))
def flush(self):
pass
def writeWithFormat(self, string, charformat):
self.__text.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
self.__text.setCurrentCharFormat(charformat)
self.__text.insertPlainText(string)
def writelinesWithFormat(self, lines, charformat):
self.writeWithFormat("".join(lines), charformat)
def formated(self, color=None, background=None, weight=None,
italic=None, underline=None, font=None):
"""
        Return a formatted file-like object proxy.
"""
charformat = update_char_format(
self.currentCharFormat(), color, background, weight,
italic, underline, font
)
return formater(self, charformat)
def update_char_format(baseformat, color=None, background=None, weight=None,
italic=None, underline=None, font=None):
"""
Return a copy of `baseformat` :class:`QTextCharFormat` with
updated color, weight, background and font properties.
"""
charformat = QTextCharFormat(baseformat)
if color is not None:
charformat.setForeground(color)
if background is not None:
charformat.setBackground(background)
if font is not None:
charformat.setFont(font)
else:
font = update_font(baseformat.font(), weight, italic, underline)
charformat.setFont(font)
return charformat
def update_font(basefont, weight=None, italic=None, underline=None,
pixelSize=None, pointSize=None):
"""
Return a copy of `basefont` :class:`QFont` with updated properties.
"""
font = QFont(basefont)
if weight is not None:
font.setWeight(weight)
if italic is not None:
font.setItalic(italic)
if underline is not None:
font.setUnderline(underline)
if pixelSize is not None:
font.setPixelSize(pixelSize)
if pointSize is not None:
font.setPointSize(pointSize)
return font
class formater(object):
def __init__(self, outputview, charformat):
self.outputview = outputview
self.charformat = charformat
def write(self, string):
self.outputview.writeWithFormat(string, self.charformat)
def writelines(self, lines):
self.outputview.writelines(lines, self.charformat)
def flush(self):
self.outputview.flush()
def formated(self, color=None, background=None, weight=None,
italic=None, underline=None, font=None):
charformat = update_char_format(self.charformat, color, background,
weight, italic, underline, font)
return formater(self.outputview, charformat)
def __enter__(self):
return self
def __exit__(self, *args):
self.outputview = None
self.charformat = None
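# Hedged usage sketch (not part of the original module): inside a running Qt
# application one would typically write coloured text through the proxy, e.g.
#
#   view = OutputView()
#   err = view.formated(color=Qt.red)
#   err.write("something went wrong\n")   # rendered with the red char format
#
# The QApplication setup is omitted here and assumed to exist.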
class TextStream(QObject):
stream = Signal(str)
flushed = Signal()
def __init__(self, parent=None):
QObject.__init__(self, parent)
def write(self, string):
self.stream.emit(string)
def writelines(self, lines):
self.stream.emit("".join(lines))
def flush(self):
self.flushed.emit()
class ExceptHook(QObject):
handledException = Signal(object)
def __init__(self, parent=None, stream=None, canvas=None, **kwargs):
QObject.__init__(self, parent, **kwargs)
self._stream = stream
self._canvas = canvas
def __call__(self, exc_type, exc_value, tb):
if self._stream:
header = exc_type.__name__ + ' Exception'
if QThread.currentThread() != QCoreApplication.instance().thread():
header += " (in non-GUI thread)"
text = traceback.format_exception(exc_type, exc_value, tb)
text.insert(0, '{:-^79}\n'.format(' ' + header + ' '))
text.append('-' * 79 + '\n')
self._stream.writelines(text)
self.handledException.emit(((exc_type, exc_value, tb), self._canvas))
| cheral/orange3 | Orange/canvas/application/outputview.py | Python | bsd-2-clause | 6,738 | 0 |
import numpy as np
import load_data
from generative_alg import *
from keras.utils.generic_utils import Progbar
from load_data import load_word_indices
from keras.preprocessing.sequence import pad_sequences
import pandas as pa
import augment
def test_points(premises, labels, noises, gtest, cmodel, hypo_len):
p = Progbar(len(premises))
hypos = []
bs = 64
for i in range(len(labels) / bs):
words, _ = generative_predict_beam(gtest, premises[i * bs: (i+1)*bs],
noises[i * bs: (i+1)*bs,None,:], labels[i * bs: (i+1)*bs], True, hypo_len)
hypos.append(words)
p.add(len(words))
hypos = np.vstack(hypos)
cpreds = cmodel.evaluate([premises[:len(hypos)], hypos], labels[:len(hypos)])
print cpreds
def print_hypos(premise, label, gen_test, beam_size, hypo_len, noise_size, wi):
words = single_generate(premise, label, gen_test, beam_size, hypo_len, noise_size)
batch_size = gen_test[0].input_layers[0].input_shape[0]
per_batch = batch_size / beam_size
premises = [premise] * per_batch
noise_input = np.random.normal(scale=0.11, size=(per_batch, 1, noise_size))
class_indices = np.ones(per_batch) * label
class_indices = load_data.convert_to_one_hot(class_indices, 3)
words, loss = generative_predict_beam(gen_test, premises, noise_input,
class_indices, True, hypo_len)
print 'Premise:', wi.print_seq(premise)
print 'Label:', load_data.LABEL_LIST[label]
print
print 'Hypotheses:'
for h in words:
print wi.print_seq(h)
def load_sentence(string, wi, len = 25):
tokens = string.split()
tokens = load_word_indices(tokens, wi.index)
return pad_sequences([tokens], maxlen = len, padding = 'pre')[0]
def find_true_examples():
models = ['8-150-2', '8-150-4', '8-150-8', '8-150-16', '8-150-32', '8-150-147', '6-150-8', '7-150-8' ,'9-226-8']
final_premises = set()
subset = {}
for model in models:
data = pa.read_csv('models/real' + model + '/dev1')
data = data[data['ctrue']]
neutr = data[data['label'] == 'neutral']
contr = data[data['label'] == 'contradiction']
entail = data[data['label'] == 'entailment']
subset[model] = [neutr, contr, entail]
premises = set(neutr['premise']) & set(contr['premise']) & set(entail['premise'])
if len(final_premises) == 0:
final_premises = premises
else:
final_premises &= premises
final_premises = list(final_premises)
with open('results/ext_examples.txt', 'w') as fi:
for i in range(len(final_premises)):
premise = final_premises[i]
fi.write(premise + '\n')
for m in models:
fi.write(m + '\n')
for l in range(3):
filtered = subset[m][l][subset[m][l]['premise'] == premise]
for f in range(len(filtered)):
hypo = filtered['hypo'].iloc[f]
label = filtered['label'].iloc[f][:4]
fi.write(label + '\t' + hypo + '\n')
fi.write('\n')
| jstarc/deep_reasoning | visualize.py | Python | mit | 3,189 | 0.014111 |
# -*- coding: utf-8 -*-
#
# GromacsWrapper documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 23 19:38:56 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = u'GromacsWrapper'
copyright = u'2009-2018, The Authors of GromacsWrapper (see AUTHORS)'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# Dynamically calculate the version (uses versioneer)
packageversion = __import__('gromacs').__version__
# The short X.Y version.
version = '.'.join(packageversion.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = packageversion
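# For example, a packageversion of "0.8.1" (a hypothetical value, purely for
# illustration) yields version "0.8" and release "0.8.1".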
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "logos/GromacsWrapper_logo_200x200.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "logos/GromacsWrapper_logo_32x32.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'GromacsWrapperdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'GromacsWrapper.tex', u'GromacsWrapper Documentation',
u'Oliver Beckstein', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Options for ext.intersphinx
# ---------------------------
# intersphinx: reference standard lib and RecSQL
# http://sphinx.pocoo.org/latest/ext/intersphinx.html
intersphinx_mapping = {'https://docs.python.org/': None,
'https://docs.scipy.org/doc/numpy/': None,
'https://docs.scipy.org/doc/scipy/reference/': None,
}
# Options for ext.autodoc
# -----------------------
# see http://sphinx.pocoo.org/ext/autodoc.html
# This value selects what content will be inserted into the main body of an autoclass directive.
# "class", "init", "both"
autoclass_content = "both"
| PicoCentauri/GromacsWrapper | doc/sphinx/source/conf.py | Python | gpl-3.0 | 6,547 | 0.005346 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import jmespath
from chart.tests.helm_template_generator import render_chart
class CreateUserJobTest(unittest.TestCase):
def test_should_run_by_default(self):
docs = render_chart(show_only=["templates/jobs/create-user-job.yaml"])
assert "Job" == docs[0]["kind"]
assert "create-user" == jmespath.search("spec.template.spec.containers[0].name", docs[0])
assert 50000 == jmespath.search("spec.template.spec.securityContext.runAsUser", docs[0])
def test_should_support_annotations(self):
docs = render_chart(
values={"createUserJob": {"annotations": {"foo": "bar"}, "jobAnnotations": {"fiz": "fuz"}}},
show_only=["templates/jobs/create-user-job.yaml"],
)
annotations = jmespath.search("spec.template.metadata.annotations", docs[0])
assert "foo" in annotations
assert "bar" == annotations["foo"]
job_annotations = jmespath.search("metadata.annotations", docs[0])
assert "fiz" in job_annotations
assert "fuz" == job_annotations["fiz"]
def test_should_create_valid_affinity_tolerations_and_node_selector(self):
docs = render_chart(
values={
"createUserJob": {
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
}
},
show_only=["templates/jobs/create-user-job.yaml"],
)
assert "Job" == jmespath.search("kind", docs[0])
assert "foo" == jmespath.search(
"spec.template.spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
assert "ssd" == jmespath.search(
"spec.template.spec.nodeSelector.diskType",
docs[0],
)
assert "dynamic-pods" == jmespath.search(
"spec.template.spec.tolerations[0].key",
docs[0],
)
| dhuang/incubator-airflow | chart/tests/test_create_user_job.py | Python | apache-2.0 | 3,564 | 0.001684 |
from .google import GoogleSpeaker
from .watson import WatsonSpeaker
"""
alfred
~~~~~~~~~~~~~~~~
    Google and Watson TTS speakers.
"""
__all__ = [
'GoogleSpeaker',
'WatsonSpeaker'
]
| lowdev/alfred | speaker/tts/__init__.py | Python | gpl-3.0 | 169 | 0 |
from textwrap import dedent
from unittest import TestCase
from lxml import etree
from pcs_test.tools.assertions import AssertPcsMixin
from pcs_test.tools.cib import get_assert_pcs_effect_mixin
from pcs_test.tools.misc import get_test_resource as rc
from pcs_test.tools.misc import (
get_tmp_file,
skip_unless_crm_rule,
skip_unless_pacemaker_supports_rsc_and_op_rules,
write_data_to_tmpfile,
write_file_to_tmpfile,
)
from pcs_test.tools.pcs_runner import PcsRunner
from pcs_test.tools.xml import XmlManipulation
empty_cib = rc("cib-empty.xml")
empty_cib_rules = rc("cib-empty-3.4.xml")
class TestDefaultsMixin:
def setUp(self):
# pylint: disable=invalid-name
self.temp_cib = get_tmp_file("tier1_cib_options")
self.pcs_runner = PcsRunner(self.temp_cib.name)
def tearDown(self):
# pylint: disable=invalid-name
self.temp_cib.close()
class DefaultsConfigMixin(TestDefaultsMixin, AssertPcsMixin):
cli_command = []
prefix = ""
def test_success(self):
xml_rsc = """
<rsc_defaults>
<meta_attributes id="rsc-set1" score="10">
<nvpair id="rsc-set1-nv1" name="name1" value="rsc1"/>
<nvpair id="rsc-set1-nv2" name="name2" value="rsc2"/>
</meta_attributes>
<meta_attributes id="rsc-setA">
<nvpair id="rsc-setA-nv1" name="name1" value="rscA"/>
<nvpair id="rsc-setA-nv2" name="name2" value="rscB"/>
</meta_attributes>
</rsc_defaults>
"""
xml_op = """
<op_defaults>
<meta_attributes id="op-set1" score="10">
<nvpair id="op-set1-nv1" name="name1" value="op1"/>
<nvpair id="op-set1-nv2" name="name2" value="op2"/>
</meta_attributes>
<meta_attributes id="op-setA">
<nvpair id="op-setA-nv1" name="name1" value="opA"/>
<nvpair id="op-setA-nv2" name="name2" value="opB"/>
</meta_attributes>
</op_defaults>
"""
xml_manip = XmlManipulation.from_file(empty_cib)
xml_manip.append_to_first_tag_name("configuration", xml_rsc, xml_op)
write_data_to_tmpfile(str(xml_manip), self.temp_cib)
self.assert_pcs_success(
self.cli_command,
stdout_full=dedent(
f"""\
Meta Attrs: {self.prefix}-set1 score=10
name1={self.prefix}1
name2={self.prefix}2
Meta Attrs: {self.prefix}-setA
name1={self.prefix}A
name2={self.prefix}B
"""
),
)
def test_success_rule(self):
xml_template = """<{tag}_defaults>
<meta_attributes id="{tag}-set1">
<rule id="{tag}-set1-rule" boolean-op="and" score="INFINITY">
<rule id="{tag}-set1-rule-rule" boolean-op="or" score="0">
<expression id="{tag}-set1-rule-rule-expr"
operation="defined" attribute="attr1"
/>
<expression id="{tag}-set1-rule-rule-expr-1"
attribute="attr2" operation="gte"
type="number" value="12"
/>
<expression id="{tag}-set1-rule-rule-expr-2"
attribute="attr3" operation="lt"
type="version" value="3.2.1"
/>
<expression id="{tag}-set1-rule-rule-expr-3"
attribute="attr4" operation="ne"
type="string" value="test"
/>
<expression id="{tag}-set1-rule-rule-expr-4"
attribute="attr5" operation="lt" value="3"
/>
</rule>
<rule id="{tag}-set1-rule-rule-1" boolean-op="or" score="0">
<date_expression id="{tag}-set1-rule-rule-1-expr"
operation="gt" start="2018-05-17T13:28:19"
/>
<date_expression id="{tag}-set1-rule-rule-1-expr-1"
operation="in_range"
start="2019-01-01" end="2019-03-15"
/>
<date_expression id="{tag}-set1-rule-rule-1-expr-2"
operation="in_range" start="2019-05-01"
>
<duration id="{tag}-set1-rule-rule-1-expr-2-duration"
months="2"
/>
</date_expression>
<date_expression id="{tag}-set1-rule-rule-1-expr-3"
operation="date_spec"
>
<date_spec id="{tag}-set1-rule-rule-1-expr-3-datespec"
months="7-8" weekdays="6-7" years="2019"
/>
</date_expression>
<date_expression id="{tag}-set1-rule-rule-1-expr-4"
operation="in_range" end="2019-12-15"
/>
</rule>
</rule>
<nvpair id="{tag}-set1-nam1" name="nam1" value="val1"/>
<nvpair id="{tag}-set1-nam2" name="nam2" value="val2"/>
</meta_attributes>
</{tag}_defaults>"""
xml_rsc = xml_template.format(tag="rsc")
xml_op = xml_template.format(tag="op")
xml_manip = XmlManipulation.from_file(empty_cib)
xml_manip.append_to_first_tag_name("configuration", xml_rsc, xml_op)
write_data_to_tmpfile(str(xml_manip), self.temp_cib)
self.assert_pcs_success(
self.cli_command,
stdout_full=dedent(
f"""\
Meta Attrs: {self.prefix}-set1
nam1=val1
nam2=val2
Rule: boolean-op=and score=INFINITY
Rule: boolean-op=or score=0
Expression: defined attr1
Expression: attr2 gte number 12
Expression: attr3 lt version 3.2.1
Expression: attr4 ne string test
Expression: attr5 lt 3
Rule: boolean-op=or score=0
Expression: date gt 2018-05-17T13:28:19
Expression: date in_range 2019-01-01 to 2019-03-15
Expression: date in_range 2019-05-01 to duration
Duration: months=2
Expression:
Date Spec: months=7-8 weekdays=6-7 years=2019
Expression: date in_range to 2019-12-15
"""
),
)
xml_expired_template = """<{tag}_defaults>
<meta_attributes id="{tag}-set1">
<rule id="{tag}-set1-rule" boolean-op="and" score="INFINITY">
<date_expression id="{tag}-set1-rule-expr"
operation="gt" start="3000-01-01"
/>
</rule>
<nvpair id="{tag}-set1-name" name="name1" value="value1"/>
</meta_attributes>
<meta_attributes id="{tag}-set2">
<rule id="{tag}-set2-rule" boolean-op="and" score="INFINITY">
<date_expression id="{tag}-set2-rule-expr"
operation="lt" end="1000-01-01"
/>
</rule>
<nvpair id="{tag}-set2-name" name="name2" value="value2"/>
</meta_attributes>
<meta_attributes id="{tag}-set3">
<rule id="{tag}-set3-rule" boolean-op="and" score="INFINITY">
<date_expression id="{tag}-set3-rule-expr"
operation="in_range" start="1000-01-01" end="3000-01-01"
/>
</rule>
<nvpair id="{tag}-set3-name" name="name3" value="value3"/>
</meta_attributes>
</{tag}_defaults>"""
@skip_unless_crm_rule()
def test_success_rule_expired(self):
xml_rsc = self.xml_expired_template.format(tag="rsc")
xml_op = self.xml_expired_template.format(tag="op")
xml_manip = XmlManipulation.from_file(empty_cib)
xml_manip.append_to_first_tag_name("configuration", xml_rsc, xml_op)
write_data_to_tmpfile(str(xml_manip), self.temp_cib)
self.assert_pcs_success(
self.cli_command,
stdout_full=dedent(
f"""\
Meta Attrs (not yet in effect): {self.prefix}-set1
name1=value1
Rule (not yet in effect): boolean-op=and score=INFINITY
Expression: date gt 3000-01-01
Meta Attrs: {self.prefix}-set3
name3=value3
Rule: boolean-op=and score=INFINITY
Expression: date in_range 1000-01-01 to 3000-01-01
"""
),
)
@skip_unless_crm_rule()
def test_success_rule_expired_all(self):
xml_rsc = self.xml_expired_template.format(tag="rsc")
xml_op = self.xml_expired_template.format(tag="op")
xml_manip = XmlManipulation.from_file(empty_cib)
xml_manip.append_to_first_tag_name("configuration", xml_rsc, xml_op)
write_data_to_tmpfile(str(xml_manip), self.temp_cib)
self.assert_pcs_success(
self.cli_command + ["--all"],
stdout_full=dedent(
f"""\
Meta Attrs (not yet in effect): {self.prefix}-set1
name1=value1
Rule (not yet in effect): boolean-op=and score=INFINITY
Expression: date gt 3000-01-01
Meta Attrs (expired): {self.prefix}-set2
name2=value2
Rule (expired): boolean-op=and score=INFINITY
Expression: date lt 1000-01-01
Meta Attrs: {self.prefix}-set3
name3=value3
Rule: boolean-op=and score=INFINITY
Expression: date in_range 1000-01-01 to 3000-01-01
"""
),
)
class RscDefaultsConfig(
DefaultsConfigMixin,
TestCase,
):
cli_command = ["resource", "defaults"]
prefix = "rsc"
@skip_unless_pacemaker_supports_rsc_and_op_rules()
def test_success_rules_rsc_op(self):
xml = """
<rsc_defaults>
<meta_attributes id="X">
<rule id="X-rule" boolean-op="and" score="INFINITY">
<rsc_expression id="X-rule-rsc-Dummy" type="Dummy"/>
</rule>
<nvpair id="X-nam1" name="nam1" value="val1"/>
</meta_attributes>
</rsc_defaults>
"""
xml_manip = XmlManipulation.from_file(empty_cib_rules)
xml_manip.append_to_first_tag_name("configuration", xml)
write_data_to_tmpfile(str(xml_manip), self.temp_cib)
self.assert_pcs_success(
self.cli_command,
stdout_full=dedent(
"""\
Meta Attrs: X
nam1=val1
Rule: boolean-op=and score=INFINITY
Expression: resource ::Dummy
"""
),
)
class OpDefaultsConfig(
DefaultsConfigMixin,
TestCase,
):
cli_command = ["resource", "op", "defaults"]
prefix = "op"
@skip_unless_pacemaker_supports_rsc_and_op_rules()
def test_success_rules_rsc_op(self):
xml = """
<op_defaults>
<meta_attributes id="X">
<rule id="X-rule" boolean-op="and" score="INFINITY">
<rsc_expression id="X-rule-rsc-Dummy" type="Dummy"/>
<op_expression id="X-rule-op-monitor" name="monitor"/>
</rule>
<nvpair id="X-nam1" name="nam1" value="val1"/>
</meta_attributes>
</op_defaults>
"""
xml_manip = XmlManipulation.from_file(empty_cib_rules)
xml_manip.append_to_first_tag_name("configuration", xml)
write_data_to_tmpfile(str(xml_manip), self.temp_cib)
self.assert_pcs_success(
self.cli_command,
stdout_full=dedent(
"""\
Meta Attrs: X
nam1=val1
Rule: boolean-op=and score=INFINITY
Expression: resource ::Dummy
Expression: op monitor
"""
),
)
class DefaultsSetCreateMixin(TestDefaultsMixin, AssertPcsMixin):
cli_command = []
cib_tag = ""
def setUp(self):
super().setUp()
write_file_to_tmpfile(empty_cib, self.temp_cib)
def test_no_args(self):
self.assert_effect(
self.cli_command + ["set", "create"],
dedent(
f"""\
<{self.cib_tag}>
<meta_attributes id="{self.cib_tag}-meta_attributes"/>
</{self.cib_tag}>
"""
),
output=(
"Warning: Defaults do not apply to resources which override "
"them with their own defined values\n"
),
)
def test_success(self):
self.assert_effect(
self.cli_command
+ "set create id=mine score=10 meta nam1=val1 nam2=val2 --force".split(),
dedent(
f"""\
<{self.cib_tag}>
<meta_attributes id="mine" score="10">
<nvpair id="mine-nam1" name="nam1" value="val1"/>
<nvpair id="mine-nam2" name="nam2" value="val2"/>
</meta_attributes>
</{self.cib_tag}>
"""
),
output=(
"Warning: Defaults do not apply to resources which override "
"them with their own defined values\n"
),
)
def test_success_rule(self):
self.assert_effect(
self.cli_command
+ (
"-- set create id=mine score=10 meta nam1=val1 nam2=val2 "
"rule (date gt 2018-05-17T13:28:19 or "
"date in_range 2019-01-01 to 2019-03-15 or "
"date in_range 2019-05-01 to duration months=2 or "
"date-spec years=2019 months=7-8 weekdays=6-7 or "
"date in_range to 2019-12-15)"
).split(),
dedent(
f"""\
<{self.cib_tag}>
<meta_attributes id="mine" score="10">
<rule id="mine-rule" boolean-op="or" score="INFINITY">
<date_expression id="mine-rule-expr"
operation="gt" start="2018-05-17T13:28:19"
/>
<date_expression id="mine-rule-expr-1"
operation="in_range"
start="2019-01-01" end="2019-03-15"
/>
<date_expression id="mine-rule-expr-2"
operation="in_range" start="2019-05-01"
>
<duration id="mine-rule-expr-2-duration"
months="2"
/>
</date_expression>
<date_expression id="mine-rule-expr-3"
operation="date_spec"
>
<date_spec
id="mine-rule-expr-3-datespec"
months="7-8" weekdays="6-7" years="2019"
/>
</date_expression>
<date_expression id="mine-rule-expr-4"
operation="in_range" end="2019-12-15"
/>
</rule>
<nvpair id="mine-nam1" name="nam1" value="val1"/>
<nvpair id="mine-nam2" name="nam2" value="val2"/>
</meta_attributes>
</{self.cib_tag}>
"""
),
output=(
"Warning: Defaults do not apply to resources which override "
"them with their own defined values\n"
),
)
def test_rule_error_messages(self):
self.assert_pcs_fail(
self.cli_command
+ (
"set create id=mine score=10 meta nam1=val1 nam2=val2 "
"rule (date gt 2018-05-1X or "
"date in_range 2019-03-05 to 2019-01-11 or "
"date in_range 2019-05-0X to duration months=2 months=3a x=y or "
"date-spec years=2019 months=7-X weekdays=7-6 years=202a x=y)"
).split(),
(
"Error: '2018-05-1X' is not a valid date value, use ISO 8601 date\n"
"Error: Since '2019-03-05' is not sooner than until '2019-01-11'\n"
"Error: '2019-05-0X' is not a valid date value, use ISO 8601 date\n"
"Error: '3a' is not a valid months value, use a positive integer\n"
"Error: invalid duration option 'x', allowed options are: "
"'hours', 'monthdays', 'months', 'moon', 'weekdays', "
"'weeks', 'weekyears', 'years', 'yearsdays'\n"
"Error: Duplicate options in a single (sub)expression: 'months'\n"
"Error: '7-X' is not a valid months value, use 1..12 or 1..11-2..12\n"
"Error: '7-6' is not a valid weekdays value, use 1..7 or 1..6-2..7\n"
"Error: '202a' is not a valid years value, use an integer or "
"integer-integer\n"
"Error: invalid datespec option 'x', allowed options are: "
"'hours', 'monthdays', 'months', 'moon', 'weekdays', "
"'weeks', 'weekyears', 'years', 'yearsdays'\n"
"Error: Duplicate options in a single (sub)expression: 'years'\n"
"Error: Errors have occurred, therefore pcs is unable to continue\n"
),
)
class RscDefaultsSetCreate(
get_assert_pcs_effect_mixin(
lambda cib: etree.tostring(
# pylint:disable=undefined-variable
etree.parse(cib).findall(".//rsc_defaults")[0]
)
),
DefaultsSetCreateMixin,
TestCase,
):
cli_command = ["resource", "defaults"]
cib_tag = "rsc_defaults"
@skip_unless_pacemaker_supports_rsc_and_op_rules()
def test_success_rules_rsc_op(self):
self.assert_effect(
self.cli_command
+ "set create id=X meta nam1=val1 rule resource ::Dummy".split(),
f"""\
<{self.cib_tag}>
<meta_attributes id="X">
<rule id="X-rule" boolean-op="and" score="INFINITY">
<rsc_expression id="X-rule-rsc-Dummy" type="Dummy"/>
</rule>
<nvpair id="X-nam1" name="nam1" value="val1"/>
</meta_attributes>
</{self.cib_tag}>
""",
output=(
"CIB has been upgraded to the latest schema version.\n"
"Warning: Defaults do not apply to resources which override "
"them with their own defined values\n"
),
)
def test_node_attr_expressions(self):
self.assert_pcs_fail(
self.cli_command + ("set create rule defined attr").split(),
(
"Error: Keywords 'defined', 'not_defined', 'eq', 'ne', 'gte', "
"'gt', 'lte' and 'lt' cannot be used in a rule in this command\n"
"Error: Errors have occurred, therefore pcs is unable to continue\n"
),
)
class OpDefaultsSetCreate(
get_assert_pcs_effect_mixin(
lambda cib: etree.tostring(
# pylint:disable=undefined-variable
etree.parse(cib).findall(".//op_defaults")[0]
)
),
DefaultsSetCreateMixin,
TestCase,
):
cli_command = ["resource", "op", "defaults"]
cib_tag = "op_defaults"
def test_rule_error_messages(self):
self.assert_pcs_fail(
self.cli_command
+ (
"set create rule defined attr1 or attr2 gte number 12a or "
"attr3 lt version 3.2.1a or attr4 ne string test or attr5 lt 3 "
).split(),
(
"Error: '12a' is not a valid number attribute value, use a "
"floating-point number\n"
"Error: '3.2.1a' is not a valid version attribute value, use "
"a version number (e.g. 1, 1.2, 1.23.45, ...)\n"
"Error: Errors have occurred, therefore pcs is unable to continue\n"
),
)
@skip_unless_pacemaker_supports_rsc_and_op_rules()
def test_success_rules_rsc_op(self):
self.assert_effect(
self.cli_command
+ (
"-- set create id=X meta nam1=val1 "
"rule resource ::Dummy and (op start or op stop) and "
"(defined attr1 or attr2 gte number -1.2 or "
"attr3 lt version 3.2.1 or attr4 ne string test or attr5 lt 3) "
).split(),
f"""\
<{self.cib_tag}>
<meta_attributes id="X">
<rule id="X-rule" boolean-op="and" score="INFINITY">
<rsc_expression id="X-rule-rsc-Dummy" type="Dummy"/>
<rule id="X-rule-rule" boolean-op="or" score="0">
<op_expression id="X-rule-rule-op-start"
name="start"
/>
<op_expression id="X-rule-rule-op-stop"
name="stop"
/>
</rule>
<rule id="X-rule-rule-1" boolean-op="or" score="0">
<expression id="X-rule-rule-1-expr"
operation="defined" attribute="attr1"
/>
<expression id="X-rule-rule-1-expr-1"
attribute="attr2" operation="gte"
type="number" value="-1.2"
/>
<expression id="X-rule-rule-1-expr-2"
attribute="attr3" operation="lt"
type="version" value="3.2.1"
/>
<expression id="X-rule-rule-1-expr-3"
attribute="attr4" operation="ne"
type="string" value="test"
/>
<expression id="X-rule-rule-1-expr-4"
attribute="attr5" operation="lt" value="3"
/>
</rule>
</rule>
<nvpair id="X-nam1" name="nam1" value="val1"/>
</meta_attributes>
</{self.cib_tag}>
""",
output=(
"CIB has been upgraded to the latest schema version.\n"
"Warning: Defaults do not apply to resources which override "
"them with their own defined values\n"
),
)
class DefaultsSetDeleteMixin(TestDefaultsMixin, AssertPcsMixin):
cli_command = []
prefix = ""
cib_tag = ""
def setUp(self):
super().setUp()
xml_rsc = """
<rsc_defaults>
<meta_attributes id="rsc-set1" />
<meta_attributes id="rsc-set2" />
<meta_attributes id="rsc-set3" />
<meta_attributes id="rsc-set4" />
</rsc_defaults>
"""
xml_op = """
<op_defaults>
<meta_attributes id="op-set1" />
<meta_attributes id="op-set2" />
<meta_attributes id="op-set3" />
<meta_attributes id="op-set4" />
</op_defaults>
"""
xml_manip = XmlManipulation.from_file(empty_cib)
xml_manip.append_to_first_tag_name("configuration", xml_rsc, xml_op)
write_data_to_tmpfile(str(xml_manip), self.temp_cib)
def test_success(self):
self.assert_effect(
[
self.cli_command
+ f"set delete {self.prefix}-set1 {self.prefix}-set3".split(),
self.cli_command
+ f"set remove {self.prefix}-set1 {self.prefix}-set3".split(),
],
dedent(
f"""\
<{self.cib_tag}>
<meta_attributes id="{self.prefix}-set2" />
<meta_attributes id="{self.prefix}-set4" />
</{self.cib_tag}>
"""
),
)
class RscDefaultsSetDelete(
get_assert_pcs_effect_mixin(
lambda cib: etree.tostring(
# pylint:disable=undefined-variable
etree.parse(cib).findall(".//rsc_defaults")[0]
)
),
DefaultsSetDeleteMixin,
TestCase,
):
cli_command = ["resource", "defaults"]
prefix = "rsc"
cib_tag = "rsc_defaults"
class OpDefaultsSetDelete(
get_assert_pcs_effect_mixin(
lambda cib: etree.tostring(
# pylint:disable=undefined-variable
etree.parse(cib).findall(".//op_defaults")[0]
)
),
DefaultsSetDeleteMixin,
TestCase,
):
cli_command = ["resource", "op", "defaults"]
prefix = "op"
cib_tag = "op_defaults"
class DefaultsSetUpdateMixin(TestDefaultsMixin, AssertPcsMixin):
cli_command = []
prefix = ""
cib_tag = ""
def test_success(self):
xml = f"""
<{self.cib_tag}>
<meta_attributes id="my-set">
<nvpair id="my-set-name1" name="name1" value="value1" />
<nvpair id="my-set-name2" name="name2" value="value2" />
<nvpair id="my-set-name3" name="name3" value="value3" />
</meta_attributes>
</{self.cib_tag}>
"""
xml_manip = XmlManipulation.from_file(empty_cib)
xml_manip.append_to_first_tag_name("configuration", xml)
write_data_to_tmpfile(str(xml_manip), self.temp_cib)
warnings = (
"Warning: Defaults do not apply to resources which override "
"them with their own defined values\n"
)
self.assert_effect(
self.cli_command
+ "set update my-set meta name2=value2A name3=".split(),
dedent(
f"""\
<{self.cib_tag}>
<meta_attributes id="my-set">
<nvpair id="my-set-name1" name="name1" value="value1" />
<nvpair id="my-set-name2" name="name2" value="value2A" />
</meta_attributes>
</{self.cib_tag}>
"""
),
output=warnings,
)
self.assert_effect(
self.cli_command + "set update my-set meta name1= name2=".split(),
dedent(
f"""\
<{self.cib_tag}>
<meta_attributes id="my-set" />
</{self.cib_tag}>
"""
),
output=warnings,
)
class RscDefaultsSetUpdate(
get_assert_pcs_effect_mixin(
lambda cib: etree.tostring(
# pylint:disable=undefined-variable
etree.parse(cib).findall(".//rsc_defaults")[0]
)
),
DefaultsSetUpdateMixin,
TestCase,
):
cli_command = ["resource", "defaults"]
prefix = "rsc"
cib_tag = "rsc_defaults"
class OpDefaultsSetUpdate(
get_assert_pcs_effect_mixin(
lambda cib: etree.tostring(
# pylint:disable=undefined-variable
etree.parse(cib).findall(".//op_defaults")[0]
)
),
DefaultsSetUpdateMixin,
TestCase,
):
cli_command = ["resource", "op", "defaults"]
prefix = "op"
cib_tag = "op_defaults"
class DefaultsSetUsageMixin(TestDefaultsMixin, AssertPcsMixin):
cli_command = []
def test_no_args(self):
self.assert_pcs_fail(
self.cli_command + ["set"],
stdout_start=f"\nUsage: pcs {' '.join(self.cli_command)} set...\n",
)
def test_bad_command(self):
self.assert_pcs_fail(
self.cli_command + ["set", "bad-command"],
stdout_start=f"\nUsage: pcs {' '.join(self.cli_command)} set ...\n",
)
class RscDefaultsSetUsage(
DefaultsSetUsageMixin,
TestCase,
):
cli_command = ["resource", "defaults"]
class OpDefaultsSetUsage(
DefaultsSetUsageMixin,
TestCase,
):
cli_command = ["resource", "op", "defaults"]
class DefaultsUpdateMixin(TestDefaultsMixin, AssertPcsMixin):
cli_command = []
prefix = ""
cib_tag = ""
def assert_success_legacy(self, update_keyword):
write_file_to_tmpfile(empty_cib, self.temp_cib)
warning_lines = []
if not update_keyword:
warning_lines.append(
f"Deprecation Warning: This command is deprecated and will be "
f"removed. Please use 'pcs {' '.join(self.cli_command)} "
f"update' instead.\n"
)
warning_lines.append(
"Warning: Defaults do not apply to resources which override "
"them with their own defined values\n"
)
warnings = "".join(warning_lines)
command = self.cli_command[:]
if update_keyword:
command.append("update")
self.assert_effect(
command + "name1=value1 name2=value2 name3=value3".split(),
dedent(
f"""\
<{self.cib_tag}>
<meta_attributes id="{self.cib_tag}-meta_attributes">
<nvpair id="{self.cib_tag}-meta_attributes-name1"
name="name1" value="value1"
/>
<nvpair id="{self.cib_tag}-meta_attributes-name2"
name="name2" value="value2"
/>
<nvpair id="{self.cib_tag}-meta_attributes-name3"
name="name3" value="value3"
/>
</meta_attributes>
</{self.cib_tag}>
"""
),
output=warnings,
)
self.assert_effect(
command + "name2=value2A name3=".split(),
dedent(
f"""\
<{self.cib_tag}>
<meta_attributes id="{self.cib_tag}-meta_attributes">
<nvpair id="{self.cib_tag}-meta_attributes-name1"
name="name1" value="value1"
/>
<nvpair id="{self.cib_tag}-meta_attributes-name2"
name="name2" value="value2A"
/>
</meta_attributes>
</{self.cib_tag}>
"""
),
output=warnings,
)
self.assert_effect(
command + "name1= name2=".split(),
dedent(
f"""\
<{self.cib_tag}>
<meta_attributes id="{self.cib_tag}-meta_attributes" />
</{self.cib_tag}>
"""
),
output=warnings,
)
def test_deprecated(self):
self.assert_success_legacy(False)
def test_legacy(self):
self.assert_success_legacy(True)
class RscDefaultsUpdate(
get_assert_pcs_effect_mixin(
lambda cib: etree.tostring(
# pylint:disable=undefined-variable
etree.parse(cib).findall(".//rsc_defaults")[0]
)
),
DefaultsUpdateMixin,
TestCase,
):
cli_command = ["resource", "defaults"]
prefix = "rsc"
cib_tag = "rsc_defaults"
class OpDefaultsUpdate(
get_assert_pcs_effect_mixin(
lambda cib: etree.tostring(
# pylint:disable=undefined-variable
etree.parse(cib).findall(".//op_defaults")[0]
)
),
DefaultsUpdateMixin,
TestCase,
):
cli_command = ["resource", "op", "defaults"]
prefix = "op"
cib_tag = "op_defaults"
|
tomjelinek/pcs
|
pcs_test/tier1/test_cib_options.py
|
Python
|
gpl-2.0
| 32,976 | 0.000667 |
# This doesn't actually do anything, it's just a GUI that looks like the Windows one (kinda)
from Tkinter import *
mGui=Tk()
mGui.geometry('213x240')
mGui.title('Calculator')
mGui["bg"]="#D9E3F6"
##set images
image1 = PhotoImage(file="images/mc.gif")
image2 = PhotoImage(file="images/mr.gif")
image3 = PhotoImage(file="images/ms.gif")
image4 = PhotoImage(file="images/m+.gif")
image5 = PhotoImage(file="images/m-.gif")
##row1
image6 = PhotoImage(file="images/arrow.gif")
image7 = PhotoImage(file="images/ce.gif")
image8 = PhotoImage(file="images/c.gif")
image9 = PhotoImage(file="images/+-.gif")
image10 = PhotoImage(file="images/check.gif")
##row2
image11 = PhotoImage(file="images/7.gif")
image12 = PhotoImage(file="images/8.gif")
image13 = PhotoImage(file="images/9.gif")
image14 = PhotoImage(file="images/div.gif")
image15 = PhotoImage(file="images/percent.gif")
##row3
image16 = PhotoImage(file="images/4.gif")
image17 = PhotoImage(file="images/5.gif")
image18 = PhotoImage(file="images/9.gif")
image19 = PhotoImage(file="images/mult.gif")
image20 = PhotoImage(file="images/ricp.gif")
##row4
image21 = PhotoImage(file="images/1.gif")
image22 = PhotoImage(file="images/2.gif")
image23 = PhotoImage(file="images/3.gif")
image24 = PhotoImage(file="images/-.gif")
##row5
image26 = PhotoImage(file="images/0.gif")
image27 = PhotoImage(file="images/decpt.gif")
image28 = PhotoImage(file="images/plus.gif")
image29 = PhotoImage(file="images/=.gif")
image30 = PhotoImage(file="images/mc.gif")
##gui
##row
c = Label(mGui,text="maybe",width=20,height=3,bg="#FFFFFF")
c.grid(row=1,column=1,columnspan=5)
mbutton1 = Button(image=image1,bd=0).grid(row=5,column=1,padx=2,pady=2)
mbutton2 = Button(image=image2,bd=0).grid(row=5,column=2,padx=2,pady=2)
mbutton3 = Button(image=image3,bd=0).grid(row=5,column=3,padx=2,pady=2)
mbutton3 = Button(image=image4,bd=0).grid(row=5,column=4,padx=2,pady=2)
mbutton3 = Button(image=image5,bd=0).grid(row=5,column=5,padx=2,pady=2)
##row 2
mbutton4 = Button(image=image6,bd=0).grid(row=6,column=1,padx=2,pady=2)
mbutton5 = Button(image=image7,bd=0).grid(row=6,column=2,padx=2,pady=2)
mbutton6 = Button(image=image8,bd=0).grid(row=6,column=3,padx=2,pady=2)
mbutton7 = Button(image=image9,bd=0).grid(row=6,column=4,padx=2,pady=2)
mbutton8 = Button(image=image10,bd=0).grid(row=6,column=5,padx=2,pady=2)
##row 3
mbutton9 = Button(image=image11,bd=0).grid(row=7,column=1,padx=2,pady=2)
mbutton10 = Button(image=image12,bd=0).grid(row=7,column=2,padx=2,pady=2)
mbutton11 = Button(image=image13,bd=0).grid(row=7,column=3,padx=2,pady=2)
mbutton12 = Button(image=image14,bd=0).grid(row=7,column=4,padx=2,pady=2)
mbutton13 = Button(image=image15,bd=0).grid(row=7,column=5,padx=2,pady=2)
###row4
mbutton14 = Button(image=image16,bd=0).grid(row=8,column=1,padx=2,pady=2)
mbutton15 = Button(image=image17,bd=0).grid(row=8,column=2,padx=2,pady=2)
mbutton16 = Button(image=image18,bd=0).grid(row=8,column=3,padx=2,pady=2)
mbutton17 = Button(image=image19,bd=0).grid(row=8,column=4,padx=2,pady=2)
mbutton18 = Button(image=image20,bd=0).grid(row=8,column=5,padx=2,pady=2)
####row 5
mbutton19 = Button(image=image21,bd=0).grid(row=9,column=1,padx=2,pady=2)
mbutton20 = Button(image=image22,bd=0).grid(row=9,column=2,padx=2,pady=2)
mbutton21 = Button(image=image23,bd=0).grid(row=9,column=3,padx=2,pady=2)
mbutton23 = Button(image=image24,bd=0).grid(row=9,column=4,padx=2,pady=2)
mbutton28 = Button(image=image29,bd=0).grid(row=9,column=5,rowspan=2,padx=2,pady=2)
####row 6
mbutton25 = Button(image=image26,bd=0).grid(row=10,column=1,columnspan=2,padx=2,pady=2)
mbutton26 = Button(image=image27,bd=0).grid(row=10,column=3,padx=2,pady=2)
mbutton27 = Button(image=image28,bd=0).grid(row=10,column=4,padx=2,pady=2)
##menu
menubar=Menu(mGui)
filemenu= Menu(menubar)
editmenu= Menu(menubar)
helpmenu= Menu(menubar)
##
filemenu = Menu(menubar)
filemenu.add_command(label="Standard")
filemenu.add_command(label="Basic")
filemenu.add_command(label="History")
menubar.add_cascade(label="View",menu=filemenu)
##
editmenu.add_command(label="Copy")
editmenu.add_command(label="Paste")
menubar.add_cascade(label="Edit",menu=editmenu)
##
helpmenu.add_command(label="View Help")
helpmenu.add_command(label="About Calculator")
menubar.add_cascade(label="Help",menu=helpmenu)
mGui.config(menu=menubar)
mGui.mainloop()
|
covxx/Random-Python-Stuff
|
WindowsCalc.py
|
Python
|
mit
| 4,314 | 0.034771 |
# # define Line class
import math
class Line(object):
def __init__(self, p1,p2):
self.p1 = p1
self.p2 = p2
def getP1(self):
return self.p1
def getP2(self):
return self.p2
def getDistance(self):
euclidean_dist = math.sqrt((self.p1.getXCoord() - self.p2.getXCoord())**2 + \
(self.p1.getYCoord() - self.p2.getYCoord())**2)
return euclidean_dist
def __str__(self):
return "("+str(self.getP1()) + ', ' + \
str(self.getP2()) + ', ' + \
'distance: '+ self.getDistance()+")"
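# Minimal usage sketch (assumes a Point class exposing getXCoord()/getYCoord()
# and __str__, which this module does not define):
#     p1, p2 = Point(0, 0), Point(3, 4)
#     print(Line(p1, p2).getDistance())  # 5.0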
|
jskye/car-classifier-research
|
src/hyp.verification.tools/py/Line.py
|
Python
|
mit
| 564 | 0.019504 |
DEBUG = 0
# cardinal diretions
directions = ("left","up","right","down")
# logic
maxExamined = 75000 # maximum number of tries when solving
maxMoves = 19 # maximum number of moves
cullFrequency = 75000 # number of tries per cull update
cullCutoff = 1.2 # fraction of average to cull
# grid size
gridRows = 5
gridColumns = 6
# text strings
textCalculateCurrentCombos = "Calculate Damage"
textClose = "Close"
textDamageDisplayAmount = "Total: "
textChoosePaint = "Choose a color to paint:"
textSolve = "Solve"
textTitle = "Puzzle and Dragons Helper"
# orbs
orbDefault = "light"
orbDefaultConfig = ("heal","light","wood","wood","fire","light","dark","heal","wood","water","heal","dark","fire","light","light","fire","fire","wood","heal","wood","dark","wood","water","light","light","dark","heal","heal","fire","dark")
orbDefaultStrength = 100
orbList = ("heal","fire","water","wood","light","dark")
# orb image URLs
orbImageURL = dict(light="img/light.png",
dark="img/dark.png",
fire="img/fire.png",
water="img/water.png",
wood="img/wood.png",
heal="img/heal.png",
bg="img/bgOrb.png"
);
# TKinter styles
tkButtonInactive = "flat"
tkButtonActive = "groove"
tkButtonBorder = 3
tkOrbStrengthEntryWidth = 7
|
discomethod/pad-helper
|
constants.py
|
Python
|
gpl-2.0
| 1,240 | 0.046774 |
# -*- coding: utf-8 -*-
#
# EAV-Django is a reusable Django application which implements EAV data model
# Copyright © 2009—2010 Andrey Mikhaylenko
#
# This file is part of EAV-Django.
#
# EAV-Django is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EAV-Django is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with EAV-Django. If not, see <http://gnu.org/licenses/>.
# python
from copy import deepcopy
# django
from django.forms import (BooleanField, CharField, CheckboxSelectMultiple,
DateField, FloatField, ModelForm, ModelMultipleChoiceField, #MultipleChoiceField,
ValidationError)
from django.contrib.admin.widgets import AdminDateWidget, FilteredSelectMultiple #, RelatedFieldWidgetWrapper
from django.utils.translation import ugettext_lazy as _
__all__ = ['BaseSchemaForm', 'BaseDynamicEntityForm']
class BaseSchemaForm(ModelForm):
def clean_name(self):
"Avoid name clashes between static and dynamic attributes."
name = self.cleaned_data['name']
reserved_names = self._meta.model._meta.get_all_field_names()
if name not in reserved_names:
return name
raise ValidationError(_('Attribute name must not clash with reserved names'
' ("%s")') % '", "'.join(reserved_names))
class BaseDynamicEntityForm(ModelForm):
"""
ModelForm for entity with support for EAV attributes. Form fields are created
on the fly depending on Schema defined for given entity instance. If no schema
is defined (i.e. the entity instance has not been saved yet), only static
fields are used. However, on form validation the schema will be retrieved
and EAV fields dynamically added to the form, so when the validation is
actually done, all EAV fields are present in it (unless Rubric is not defined).
"""
FIELD_CLASSES = {
'text': CharField,
'float': FloatField,
'date': DateField,
'bool': BooleanField,
'many': ModelMultipleChoiceField, #RelatedFieldWidgetWrapper(MultipleChoiceField),
}
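    # Extra keyword arguments per datatype; a value may be a plain dict or a
    # callable taking the schema (the 'many' entry picks a widget depending on
    # how many choices the schema offers).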
FIELD_EXTRA = {
'date': {'widget': AdminDateWidget},
'many': lambda schema: {
'widget': CheckboxSelectMultiple
if len(schema.get_choices()) <= 5 else
FilteredSelectMultiple(schema.title, is_stacked=False)
},
}
def __init__(self, data=None, *args, **kwargs):
super(BaseDynamicEntityForm, self).__init__(data, *args, **kwargs)
self._build_dynamic_fields()
def check_eav_allowed(self):
"""
Returns True if dynamic attributes can be added to this form.
If False is returned, only normal fields will be displayed.
"""
return bool(self.instance)# and self.instance.check_eav_allowed()) # XXX would break form where stuff is _being_ defined
def _build_dynamic_fields(self):
# reset form fields
self.fields = deepcopy(self.base_fields)
        # do not display dynamic fields if EAV attributes are not allowed yet for this instance
if not self.check_eav_allowed():
return
for schema in self.instance.get_schemata():
defaults = {
'label': schema.title.capitalize(),
'required': schema.required,
'help_text': schema.help_text,
}
datatype = schema.datatype
if datatype == schema.TYPE_MANY:
choices = getattr(self.instance, schema.name)
defaults.update({'queryset': schema.get_choices(),
'initial': [x.pk for x in choices]})
extra = self.FIELD_EXTRA.get(datatype, {})
if hasattr(extra, '__call__'):
extra = extra(schema)
defaults.update(extra)
MappedField = self.FIELD_CLASSES[datatype]
self.fields[schema.name] = MappedField(**defaults)
# fill initial data (if attribute was already defined)
value = getattr(self.instance, schema.name)
if value and not datatype == schema.TYPE_MANY: # m2m is already done above
self.initial[schema.name] = value
def save(self, commit=True):
"""
Saves this ``form``'s cleaned_data into model instance ``self.instance``
and related EAV attributes.
Returns ``instance``.
"""
if self.errors:
raise ValueError("The %s could not be saved because the data didn't"
" validate." % self.instance._meta.object_name)
# create entity instance, don't save yet
instance = super(BaseDynamicEntityForm, self).save(commit=False)
# assign attributes
for name in instance.get_schema_names():
value = self.cleaned_data.get(name)
setattr(instance, name, value)
# save entity and its attributes
if commit:
instance.save()
return instance
save.alters_data = True
def save_m2m(self, *a, **kw):
# stub for admin TODO: check if we don't need to super() if entity indeed has m2m
pass
|
omusico/eav-django
|
eav/forms.py
|
Python
|
lgpl-3.0
| 5,690 | 0.003517 |
#!/bin/python3.1
# ##### BEGIN GPL LICENSE BLOCK ##### #
# lolblender - Python addon to use League of Legends files into blender
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from io_scene_lol import lolMesh, lolSkeleton
def prettyPrintSkl(filename, start=0, stop=-1, returnStr=True, **options):
header, boneList = lolSkeleton.importSKL(filename)
headerStr = ""
if(options['PRINT_HEADER']):
headerStr += \
"Filetype:%s\nnumObjects:%d\nskeletonHash:%d\nnumElements:%d\n\n" % (header.fileType,
header.numObjects, header.skeletonHash, header.numElements)
boneStr = ""
if(options['PRINT_BONES']):
if stop == -1:
stop = len(boneList)
for id in range(start,stop):
bone = boneList[id]
if bone.parent != -1:
parentName = boneList[bone.parent].name
else:
parentName = "None"
boneStr += "%d\t%s\tparent id:%d\t(%s)\n" %(id, bone.name,
bone.parent, parentName)
boneStr += "\tscale: %f\n" %(bone.scale,)
boneStr += "\tmatrix:\t %7.4f %7.4f %7.4f %7.4f\n" %(bone.matrix[0][0],
bone.matrix[0][1], bone.matrix[0][2], bone.matrix[0][3])
boneStr += "\t\t %7.4f %7.4f %7.4f %7.4f\n" %(bone.matrix[1][0],
bone.matrix[1][1],bone.matrix[1][2],bone.matrix[1][3])
boneStr += "\t\t %7.4f %7.4f %7.4f %7.4f\n\n" %(bone.matrix[2][0],
bone.matrix[2][1], bone.matrix[2][2], bone.matrix[2][3])
if returnStr == True:
return headerStr+boneStr
else:
print(headerStr+boneStr)
def prettyPrintSkn(filename, start=0, stop=-1, returnStr=True, **options):
header, materials, indices, vertices = lolMesh.importSKN(filename)
headerStr = ""
if(options['PRINT_HEADER']):
headerStr += "magic:%d\nmatHeader:%d\nnumObjects:%d\nnumMaterials:%d\n\n" % (header.magic,
header.matHeader, header.numObjects, len(materials))
materialStr = ""
if(options['PRINT_MATERIALS']):
if header.matHeader == 0:
materialStr +="No material blocks present\n\n"
else:
for material in materials:
materialStr += \
"name:%s\nstartVertex:%d\tnumVertices:%d\nstartIndex:%d\tnumIndices:%d\n\n" %\
(bytes.decode(material.name).strip('\x00'),material.startVertex, \
material.numVertices, material.startIndex, material.numIndices)
indexStr = ""
if(options['PRINT_INDICES']):
for indx in indices:
indexStr += "%d\n" %(indx[0],)
vertexStr = ""
if(options['PRINT_VERTICES']):
for indx, vtx in enumerate(vertices[start:stop]):
vertexStr += \
"%d\tpos:(%f,%f,%f)\tboneIndx:(%d,%d,%d,%d)\n"%(start+indx,
vtx.position[0], vtx.position[1],vtx.position[2],
vtx.boneIndex[0],vtx.boneIndex[1],vtx.boneIndex[2],vtx.boneIndex[3])
vertexStr += \
"\tnorm:(%f,%f,%f)\tweights:(%f,%f,%f,%f)\n"%\
(vtx.normal[0],vtx.normal[1],vtx.normal[2],\
vtx.weights[0],vtx.weights[1],vtx.weights[2],vtx.weights[3])
vertexStr += "\tuvs:(%f, %f)\n"%(vtx.texcoords[0],vtx.texcoords[1])
if returnStr == True:
return headerStr+materialStr+indexStr+vertexStr
else:
print(headerStr+materialStr+indexStr+vertexStr)
def cvsPrintSkl(filename, start=0, stop=-1, returnStr=True, **options):
header, boneList = lolSkeleton.importSKL(filename)
headerStr = ""
if(options['PRINT_HEADER']):
headerStr += "#fileType, numObjects, skeletonHash, numElements\n"
headerStr += \
"%s,%d,%d,%d\n" % (header.fileType,
header.numObjects, header.skeletonHash, header.numElements)
boneStr = ""
if(options['PRINT_BONES']):
boneStr+="#boneID, name, parentID, scale,"
boneStr+="matrix[0][0], matrix[0][1], matrix[0][2], matrix[0][3],"
boneStr+="matrix[1][0], matrix[1][1], matrix[1][2], matrix[1][3],"
boneStr+="matrix[2][0], matrix[2][1], matrix[2][2], matrix[2][3]\n"
        if stop == -1:
            stop = len(boneList)
for id in range(start,stop):
bone = boneList[id]
if bone.parent != -1:
parentName = boneList[bone.parent].name
else:
parentName = "None"
boneStr += "%d,%s,%d," %(id, bone.name,bone.parent)
boneStr += "%f," %(bone.scale,)
boneStr += "%e,%e,%e,%e," %(bone.matrix[0][0],
bone.matrix[0][1], bone.matrix[0][2], bone.matrix[0][3])
boneStr += "%e,%e,%e,%e," %(bone.matrix[1][0],
bone.matrix[1][1],bone.matrix[1][2],bone.matrix[1][3])
boneStr += "%e,%e,%e,%e\n" %(bone.matrix[2][0],
bone.matrix[2][1], bone.matrix[2][2], bone.matrix[2][3])
if returnStr == True:
return headerStr+boneStr
else:
print(headerStr+boneStr)
def cvsPrintSkn(filename, start=0, stop=-1, returnStr=True, **options):
header, materials, indices, vertices = lolMesh.importSKN(filename)
headerStr = ""
if(options['PRINT_HEADER']):
headerStr+="#magic, matHeader, numObjects\n"
headerStr += "%d,%d,%d\n" % (header['magic'],
header['matHeader'], header['numObjects'])
materialStr = ""
if(options['PRINT_MATERIALS']):
materialStr += "#numMaterials, name, startVertex, numVertices,"
materialStr += "startIndex, numIndices\n"
for material in materials:
materialStr += \
"%d,%s,%d,%d,%d,%d\n" %\
(material['numMaterials'], bytes.decode(material['name']).strip('\x00'),material['startVertex'], \
material['numVertices'], material['startIndex'], material['numIndices'])
indexStr = ""
if(options['PRINT_INDICES']):
indexStr+="#Index list"
for indx in indices:
indexStr += "%d," %(indx[0],)
indexStr+="\n"
vertexStr = ""
if(options['PRINT_VERTICES']):
vertexStr+="#pos_x, pos_y, pos_z,"
vertexStr+="boneIndex_0, boneIndex_1, boneIndex_2, boneIndex_3,"
vertexStr+="norm_x, norm_y, norm_z,"
vertexStr+="boneWeight_0, boneWeight_1, boneWeight_2, boneWeight_3,"
vertexStr+="uv_u, uv_v\n"
for indx, vtx in enumerate(vertices[start:stop]):
vertexStr += \
"%d,%f,%f,%f,%d,%d,%d,%d,"%(start+indx,
vtx['position'][0], vtx['position'][1],vtx['position'][2],
vtx['boneIndex'][0],vtx['boneIndex'][1],vtx['boneIndex'][2],vtx['boneIndex'][3])
vertexStr += \
"%f,%f,%f,%f,%f,%f,%f,"%\
(vtx['normal'][0],vtx['normal'][1],vtx['normal'][2],\
vtx['weights'][0],vtx['weights'][1],vtx['weights'][2],vtx['weights'][3])
vertexStr += "%f,%f\n"%(vtx['texcoords'][0],vtx['texcoords'][1])
if returnStr == True:
return headerStr+materialStr+indexStr+vertexStr
else:
print(headerStr+materialStr+indexStr+vertexStr)
if __name__ == '__main__':
from optparse import OptionParser
from os import path
parser = OptionParser()
parser.add_option("","--csv", dest="csv", help="Output as CSV fields",
default=False, action="store_true")
parser.add_option("-r","--range", dest="range", help="data subset",
default="", action="store", type="string")
parser.add_option("-v","--by-vertex-range",dest="vertex_range", help="subset by vertex order",
default="False", action="store_true")
parser.add_option("","--header", dest="PRINT_HEADER", help="print header info",
default=False, action="store_true")
parser.add_option("","--indices", dest="PRINT_INDICES", help="print indices",
default=False, action="store_true")
parser.add_option("","--vertices", dest="PRINT_VERTICES", help="print vertices",
default=False, action="store_true")
parser.add_option("","--materials", dest="PRINT_MATERIALS", help="print materials",
default=False, action="store_true")
parser.add_option("","--bones", dest="PRINT_BONES", help="print bones",
default=False, action="store_true")
(options, args) = parser.parse_args()
#filename = '/var/tmp/downloads/lol/Wolfman/Wolfman.skl'
fileExt = path.splitext(args[0])[-1]
if fileExt.lower() == '.skl':
if options.csv:
printFunc = cvsPrintSkl
else:
printFunc = prettyPrintSkl
elif fileExt.lower() == '.skn':
if options.csv:
printFunc = cvsPrintSkn
else:
printFunc = prettyPrintSkn
else:
print('%s file format not recognized. Enter a .skl or .skn file' %(fileExt,))
if any([options.PRINT_HEADER, options.PRINT_INDICES,
options.PRINT_VERTICES, options.PRINT_MATERIALS,
options.PRINT_BONES]):
pass
else:
options.PRINT_HEADER = True
options.PRINT_INDICES = True
options.PRINT_VERTICES = True
options.PRINT_MATERIALS = True
options.PRINT_BONES = True
    # Parse the -r/--range option: "N" (single entry), "N:M", "N:" or ":M".
    # Default to the full range so start/stop are always defined.
    start = 0
    stop = -1
    indexRange = options.range.split(':')
    if len(indexRange) == 1 and indexRange[0] != '':
        start = int(indexRange[0])
        stop = start + 1
    elif len(indexRange) == 2:
        if indexRange[0] != '':
            start = int(indexRange[0])
        if indexRange[1] != '':
            stop = int(indexRange[1])
printFunc(args[0], start=start, stop=stop, returnStr=False, **vars(options))
#dumpMesh(args[0], printFunc, False, **vars(options))
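# Illustrative invocations (paths are examples only):
#     python dumpContents.py --header --bones /path/to/Wolfman.skl
#     python dumpContents.py --csv --vertices -r 0:10 /path/to/Wolfman.skn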
|
lispascal/lolblender
|
dumpContents.py
|
Python
|
gpl-3.0
| 10,499 | 0.015049 |
# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Package that handles non-debug, non-file output for run-webkit-tests."""
import math
import optparse
from webkitpy.tool import grammar
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationParser
from webkitpy.layout_tests.views.metered_stream import MeteredStream
NUM_SLOW_TESTS_TO_LOG = 10
def print_options():
return [
optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
help='print timestamps and debug information for run-webkit-tests itself'),
optparse.make_option('--details', action='store_true', default=False,
help='print detailed results for every test'),
optparse.make_option('-q', '--quiet', action='store_true', default=False,
help='run quietly (errors, warnings, and progress only)'),
optparse.make_option('--timing', action='store_true', default=False,
help='display test times (summary plus per-test w/ --verbose)'),
optparse.make_option('-v', '--verbose', action='store_true', default=False,
help='print a summarized result for every test (one line per test)'),
]
class Printer(object):
"""Class handling all non-debug-logging printing done by run-webkit-tests."""
def __init__(self, port, options, regular_output, logger=None):
self.num_completed = 0
self.num_tests = 0
self._port = port
self._options = options
self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
number_of_columns=self._port.host.platform.terminal_width())
self._running_tests = []
self._completed_tests = []
def cleanup(self):
self._meter.cleanup()
def __del__(self):
self.cleanup()
def print_config(self, results_directory):
self._print_default("Using port '%s'" % self._port.name())
self._print_default("Test configuration: %s" % self._port.test_configuration())
self._print_default("View the test results at file://%s/results.html" % results_directory)
if self._options.enable_versioned_results:
self._print_default("View the archived results dashboard at file://%s/dashboard.html" % results_directory)
# FIXME: should these options be in printing_options?
if self._options.new_baseline:
self._print_default("Placing new baselines in %s" % self._port.baseline_path())
fs = self._port.host.filesystem
fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
self._print_default("Using %s build" % self._options.configuration)
if self._options.pixel_tests:
self._print_default("Pixel tests enabled")
else:
self._print_default("Pixel tests disabled")
self._print_default("Regular timeout: %s, slow test timeout: %s" %
(self._options.time_out_ms, self._options.slow_time_out_ms))
self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
self._print_default('')
def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
if repeat_each * iterations > 1:
found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
found_str += ', skipping %d' % (num_all_test_files - num_to_run)
self._print_default(found_str + '.')
def print_expected(self, run_results, tests_with_result_type_callback):
self._print_expected_results_of_type(run_results, test_expectations.PASS, "passes", tests_with_result_type_callback)
self._print_expected_results_of_type(run_results, test_expectations.FAIL, "failures", tests_with_result_type_callback)
self._print_expected_results_of_type(run_results, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
self._print_debug('')
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
driver_name = self._port.driver_name()
if num_workers == 1:
self._print_default("Running 1 %s." % driver_name)
self._print_debug("(%s)." % grammar.pluralize('shard', num_shards))
else:
self._print_default("Running %d %ss in parallel." % (num_workers, driver_name))
self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards))
self._print_default('')
def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
tests = tests_with_result_type_callback(result_type)
now = run_results.tests_by_timeline[test_expectations.NOW]
wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]
# We use a fancy format string in order to print the data out in a
# nicely-aligned table.
fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
% (self._num_digits(now), self._num_digits(wontfix)))
self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
def _num_digits(self, num):
ndigits = 1
if len(num):
ndigits = int(math.log10(len(num))) + 1
return ndigits
def print_results(self, run_time, run_results, summarized_results):
self._print_timing_statistics(run_time, run_results)
self._print_one_line_summary(run_time, run_results)
def _print_timing_statistics(self, total_time, run_results):
self._print_debug("Test timing:")
self._print_debug(" %6.2f total testing time" % total_time)
self._print_debug("")
self._print_worker_statistics(run_results, int(self._options.child_processes))
self._print_aggregate_test_statistics(run_results)
self._print_individual_test_times(run_results)
self._print_directory_timings(run_results)
def _print_worker_statistics(self, run_results, num_workers):
self._print_debug("Thread timing:")
stats = {}
cuml_time = 0
for result in run_results.results_by_name.values():
stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0})
stats[result.worker_name]['num_tests'] += 1
stats[result.worker_name]['total_time'] += result.total_run_time
cuml_time += result.total_run_time
for worker_name in stats:
self._print_debug(" %10s: %5d tests, %6.2f secs" % (worker_name, stats[worker_name]['num_tests'], stats[worker_name]['total_time']))
self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers))
self._print_debug("")
def _print_aggregate_test_statistics(self, run_results):
times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()]
self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)
def _print_individual_test_times(self, run_results):
# Reverse-sort by the time spent in the driver.
individual_test_timings = sorted(run_results.results_by_name.values(), key=lambda result: result.test_run_time, reverse=True)
num_printed = 0
slow_tests = []
timeout_or_crash_tests = []
unexpected_slow_tests = []
for test_tuple in individual_test_timings:
test_name = test_tuple.test_name
is_timeout_crash_or_slow = False
if test_name in run_results.slow_tests:
is_timeout_crash_or_slow = True
slow_tests.append(test_tuple)
if test_name in run_results.failures_by_name:
result = run_results.results_by_name[test_name].type
if (result == test_expectations.TIMEOUT or
result == test_expectations.CRASH):
is_timeout_crash_or_slow = True
timeout_or_crash_tests.append(test_tuple)
if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
num_printed = num_printed + 1
unexpected_slow_tests.append(test_tuple)
self._print_debug("")
if unexpected_slow_tests:
self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
self._print_debug("")
if slow_tests:
self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
self._print_debug("")
if timeout_or_crash_tests:
self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
self._print_debug("")
def _print_test_list_timing(self, title, test_list):
self._print_debug(title)
for test_tuple in test_list:
test_run_time = round(test_tuple.test_run_time, 1)
self._print_debug(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
def _print_directory_timings(self, run_results):
stats = {}
for result in run_results.results_by_name.values():
stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
stats[result.shard_name]['num_tests'] += 1
stats[result.shard_name]['total_time'] += result.total_run_time
min_seconds_to_print = 15
timings = []
for directory in stats:
rounded_time = round(stats[directory]['total_time'], 1)
if rounded_time > min_seconds_to_print:
timings.append((directory, rounded_time, stats[directory]['num_tests']))
if not timings:
return
timings.sort()
self._print_debug("Time to process slowest subdirectories:")
for timing in timings:
self._print_debug(" %s took %s seconds to run %s tests." % timing)
self._print_debug("")
def _print_statistics_for_test_timings(self, title, timings):
self._print_debug(title)
timings.sort()
num_tests = len(timings)
if not num_tests:
return
percentile90 = timings[int(.9 * num_tests)]
percentile99 = timings[int(.99 * num_tests)]
if num_tests % 2 == 1:
median = timings[((num_tests - 1) / 2) - 1]
else:
lower = timings[num_tests / 2 - 1]
upper = timings[num_tests / 2]
median = (float(lower + upper)) / 2
mean = sum(timings) / num_tests
        sum_of_deviations = 0
        for timing in timings:
            sum_of_deviations += math.pow(timing - mean, 2)
        std_deviation = math.sqrt(sum_of_deviations / num_tests)
self._print_debug(" Median: %6.3f" % median)
self._print_debug(" Mean: %6.3f" % mean)
self._print_debug(" 90th percentile: %6.3f" % percentile90)
self._print_debug(" 99th percentile: %6.3f" % percentile99)
self._print_debug(" Standard dev: %6.3f" % std_deviation)
self._print_debug("")
def _print_one_line_summary(self, total_time, run_results):
if self._options.timing:
parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values())
# There is serial overhead in layout_test_runner.run() that we can't easily account for when
# really running in parallel, but taking the min() ensures that in the worst case
# (if parallel time is less than run_time) we do account for it.
serial_time = total_time - min(run_results.run_time, parallel_time)
speedup = (parallel_time + serial_time) / total_time
timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup)
else:
timing_summary = ''
total = run_results.total - run_results.expected_skips
expected = run_results.expected - run_results.expected_skips
unexpected = run_results.unexpected
incomplete = total - expected - unexpected
incomplete_str = ''
if incomplete:
self._print_default("")
incomplete_str = " (%d didn't run)" % incomplete
if self._options.verbose or self._options.debug_rwt_logging or unexpected:
self.writeln("")
expected_summary_str = ''
if run_results.expected_failures > 0:
expected_summary_str = " (%d passed, %d didn't)" % (expected - run_results.expected_failures, run_results.expected_failures)
summary = ''
if unexpected == 0:
if expected == total:
if expected > 1:
summary = "All %d tests ran as expected%s%s." % (expected, expected_summary_str, timing_summary)
else:
summary = "The test ran as expected%s%s." % (expected_summary_str, timing_summary)
else:
summary = "%s ran as expected%s%s%s." % (grammar.pluralize('test', expected), expected_summary_str, incomplete_str, timing_summary)
else:
summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluralize('test', expected), expected_summary_str, unexpected, incomplete_str, timing_summary)
self._print_quiet(summary)
self._print_quiet("")
def _test_status_line(self, test_name, suffix):
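        # Builds "[completed/total] test_name suffix"; when the line is wider
        # than the terminal, the middle of the test name is elided (or only
        # the bare filename is shown if even that would not fit).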
format_string = '[%d/%d] %s%s'
status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
if len(status_line) > self._meter.number_of_columns():
overflow_columns = len(status_line) - self._meter.number_of_columns()
ellipsis = '...'
if len(test_name) < overflow_columns + len(ellipsis) + 2:
# We don't have enough space even if we elide, just show the test filename.
fs = self._port.host.filesystem
test_name = fs.split(test_name)[1]
else:
new_length = len(test_name) - overflow_columns - len(ellipsis)
prefix = int(new_length / 2)
test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
return format_string % (self.num_completed, self.num_tests, test_name, suffix)
def print_started_test(self, test_name):
self._running_tests.append(test_name)
if len(self._running_tests) > 1:
suffix = ' (+%d)' % (len(self._running_tests) - 1)
else:
suffix = ''
if self._options.verbose:
write = self._meter.write_update
else:
write = self._meter.write_throttled_update
write(self._test_status_line(test_name, suffix))
def print_finished_test(self, result, expected, exp_str, got_str):
self.num_completed += 1
test_name = result.test_name
result_message = self._result_message(result.type, result.failures, expected,
self._options.timing, result.test_run_time)
if self._options.details:
self._print_test_trace(result, exp_str, got_str)
elif self._options.verbose or not expected:
self.writeln(self._test_status_line(test_name, result_message))
elif self.num_completed == self.num_tests:
self._meter.write_update('')
else:
if test_name == self._running_tests[0]:
self._completed_tests.insert(0, [test_name, result_message])
else:
self._completed_tests.append([test_name, result_message])
for test_name, result_message in self._completed_tests:
self._meter.write_throttled_update(self._test_status_line(test_name, result_message))
self._completed_tests = []
self._running_tests.remove(test_name)
def _result_message(self, result_type, failures, expected, timing, test_run_time):
exp_string = ' unexpectedly' if not expected else ''
timing_string = ' %.4fs' % test_run_time if timing else ''
if result_type == test_expectations.PASS:
return ' passed%s%s' % (exp_string, timing_string)
else:
return ' failed%s (%s)%s' % (exp_string, ', '.join(failure.message() for failure in failures), timing_string)
def _print_test_trace(self, result, exp_str, got_str):
test_name = result.test_name
self._print_default(self._test_status_line(test_name, ''))
base = self._port.lookup_virtual_test_base(test_name)
if base:
args = ' '.join(self._port.lookup_virtual_test_args(test_name))
self._print_default(' base: %s' % base)
self._print_default(' args: %s' % args)
references = self._port.reference_files(test_name)
if references:
for _, filename in references:
self._print_default(' ref: %s' % self._port.relative_test_filename(filename))
else:
for extension in ('.txt', '.png', '.wav'):
self._print_baseline(test_name, extension)
self._print_default(' exp: %s' % exp_str)
self._print_default(' got: %s' % got_str)
self._print_default(' took: %-.3f' % result.test_run_time)
self._print_default('')
def _print_baseline(self, test_name, extension):
baseline = self._port.expected_filename(test_name, extension)
if self._port._filesystem.exists(baseline):
relpath = self._port.relative_test_filename(baseline)
else:
relpath = '<none>'
self._print_default(' %s: %s' % (extension[1:], relpath))
def _print_quiet(self, msg):
self.writeln(msg)
def _print_default(self, msg):
if not self._options.quiet:
self.writeln(msg)
def _print_debug(self, msg):
if self._options.debug_rwt_logging:
self.writeln(msg)
def write_throttled_update(self, msg):
self._meter.write_throttled_update(msg)
def write_update(self, msg):
self._meter.write_update(msg)
def writeln(self, msg):
self._meter.writeln(msg)
def flush(self):
self._meter.flush()
|
hgl888/crosswalk-android-extensions
|
build/idl-generator/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/views/printing.py
|
Python
|
bsd-3-clause
| 20,171 | 0.003322 |
#!/usr/bin/python
from distutils.core import setup
# Remember to change in reroute/__init__.py as well!
VERSION = '1.1.1'
setup(
name='django-reroute',
version=VERSION,
description="A drop-in replacement for django.conf.urls.defaults which supports HTTP verb dispatch and view wrapping.",
long_description=open('README.rst').read(),
author='Mark Sandstrom',
author_email='mark@deliciouslynerdy.com',
url='http://github.com/dnerdy/django-reroute',
keywords=['reroute', 'django', 'http', 'rest', 'route', 'routing', 'dispatch', 'wrapper'],
packages=['reroute'],
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Framework :: Django',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
dnerdy/django-reroute
|
setup.py
|
Python
|
mit
| 982 | 0.002037 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A library for managing flags-like configuration that update dynamically.
"""
import logging
import os
import re
import time
try:
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.api import validation
from google.appengine.api import yaml_object
except:
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import validation
from google.appengine.ext import yaml_object
DATASTORE_DEADLINE = 1.5
RESERVED_MARKER = 'ah__conf__'
NAMESPACE = '_' + RESERVED_MARKER
CONFIG_KIND = '_AppEngine_Config'
ACTIVE_KEY_NAME = 'active'
FILENAMES = ['conf.yaml', 'conf.yml']
PARAMETERS = 'parameters'
PARAMETER_NAME_REGEX = '[a-zA-Z][a-zA-Z0-9_]*'
_cached_config = None
class Config(db.Expando):
"""The representation of a config in the datastore and memcache."""
ah__conf__version = db.IntegerProperty(default=0, required=True)
@classmethod
def kind(cls):
"""Override the kind name to prevent collisions with users."""
return CONFIG_KIND
def ah__conf__load_from_yaml(self, parsed_config):
"""Loads all the params from a YAMLConfiguration into expando fields.
We set these expando properties with a special name prefix 'p_' to
keep them separate from the static attributes of Config. That way we
don't have to check elsewhere to make sure the user doesn't stomp on
our built in properties.
Args:
      parsed_config: A YAMLConfiguration.
"""
for key, value in parsed_config.parameters.iteritems():
setattr(self, key, value)
class _ValidParameterName(validation.Validator):
"""Validator to check if a value is a valid config parameter name.
We only allow valid python attribute names without leading underscores
that also do not collide with reserved words in the datastore models.
"""
def __init__(self):
self.regex = validation.Regex(PARAMETER_NAME_REGEX)
def Validate(self, value, key):
"""Check that all parameter names are valid.
This is used as a validator when parsing conf.yaml.
Args:
value: the value to check.
key: A description of the context for which this value is being
validated.
Returns:
The validated value.
"""
value = self.regex.Validate(value, key)
try:
db.check_reserved_word(value)
except db.ReservedWordError:
raise validation.ValidationError(
'The config parameter name %.100r is reserved by db.Model see: '
'https://developers.google.com/appengine/docs/python/datastore/'
'modelclass#Disallowed_Property_Names for details.' % value)
if value.startswith(RESERVED_MARKER):
raise validation.ValidationError(
'The config parameter name %.100r is reserved, as are all names '
'beginning with \'%s\', please choose a different name.' % (
value, RESERVED_MARKER))
return value
class _Scalar(validation.Validator):
"""Validator to check if a value is a simple scalar type.
We only allow scalars that are well supported by both the datastore and YAML.
"""
ALLOWED_PARAMETER_VALUE_TYPES = frozenset(
[bool, int, long, float, str, unicode])
def Validate(self, value, key):
"""Check that all parameters are scalar values.
This is used as a validator when parsing conf.yaml
Args:
value: the value to check.
key: the name of parameter corresponding to this value.
Returns:
We just return value unchanged.
"""
if type(value) not in self.ALLOWED_PARAMETER_VALUE_TYPES:
raise validation.ValidationError(
'Expected scalar value for parameter: %s, but found %.100r which '
'is type %s' % (key, value, type(value).__name__))
return value
class _ParameterDict(validation.ValidatedDict):
"""This class validates the parameters dictionary in YAMLConfiguration.
Keys must look like non-private python identifiers and values
must be a supported scalar. See the class comment for YAMLConfiguration.
"""
KEY_VALIDATOR = _ValidParameterName()
VALUE_VALIDATOR = _Scalar()
class YAMLConfiguration(validation.Validated):
"""This class describes the structure of a conf.yaml file.
  At the top level the file should have a parameters attribute which is a mapping
from strings to scalars. For example:
parameters:
background_color: 'red'
message_size: 1024
boolean_valued_param: true
"""
ATTRIBUTES = {PARAMETERS: _ParameterDict}
def LoadSingleConf(stream):
"""Load a conf.yaml file or string and return a YAMLConfiguration object.
Args:
stream: a file object corresponding to a conf.yaml file, or its contents
as a string.
Returns:
A YAMLConfiguration instance
"""
return yaml_object.BuildSingleObject(YAMLConfiguration, stream)
def _find_yaml_path():
"""Traverse directory trees to find conf.yaml file.
  Begins with the current working directory and then moves up the
  directory structure until the file is found.
Returns:
the path of conf.yaml file or None if not found.
"""
current, last = os.getcwd(), None
while current != last:
for yaml_name in FILENAMES:
yaml_path = os.path.join(current, yaml_name)
if os.path.exists(yaml_path):
return yaml_path
last = current
current, last = os.path.dirname(current), current
return None
def _fetch_from_local_file(pathfinder=_find_yaml_path, fileopener=open):
"""Get the configuration that was uploaded with this version.
Args:
pathfinder: a callable to use for finding the path of the conf.yaml
file. This is only for use in testing.
fileopener: a callable to use for opening a named file. This is
only for use in testing.
Returns:
A config class instance for the options that were uploaded. If there
is no config file, return None
"""
yaml_path = pathfinder()
if yaml_path:
config = Config()
config.ah__conf__load_from_yaml(LoadSingleConf(fileopener(yaml_path)))
logging.debug('Loaded conf parameters from conf.yaml.')
return config
return None
def _get_active_config_key(app_version):
"""Generate the key for the active config record belonging to app_version.
Args:
app_version: the major version you want configuration data for.
Returns:
The key for the active Config record for the given app_version.
"""
return db.Key.from_path(
CONFIG_KIND,
'%s/%s' % (app_version, ACTIVE_KEY_NAME),
namespace=NAMESPACE)
def _fetch_latest_from_datastore(app_version):
"""Get the latest configuration data for this app-version from the datastore.
Args:
app_version: the major version you want configuration data for.
Side Effects:
We populate memcache with whatever we find in the datastore.
Returns:
A config class instance for most recently set options or None if the
query could not complete due to a datastore exception.
"""
rpc = db.create_rpc(deadline=DATASTORE_DEADLINE,
read_policy=db.EVENTUAL_CONSISTENCY)
key = _get_active_config_key(app_version)
config = None
try:
config = Config.get(key, rpc=rpc)
logging.debug('Loaded most recent conf data from datastore.')
except:
logging.warning('Tried but failed to fetch latest conf data from the '
'datastore.')
if config:
memcache.set(app_version, db.model_to_protobuf(config).Encode(),
namespace=NAMESPACE)
logging.debug('Wrote most recent conf data into memcache.')
return config
def _fetch_latest_from_memcache(app_version):
"""Get the latest configuration data for this app-version from memcache.
Args:
app_version: the major version you want configuration data for.
Returns:
A Config class instance for most recently set options or None if none
could be found in memcache.
"""
proto_string = memcache.get(app_version, namespace=NAMESPACE)
if proto_string:
logging.debug('Loaded most recent conf data from memcache.')
return db.model_from_protobuf(proto_string)
logging.debug('Tried to load conf data from memcache, but found nothing.')
return None
def _inspect_environment():
"""Return relevant information from the cgi environment.
This is mostly split out to simplify testing.
Returns:
A tuple: (app_version, conf_version, development)
app_version: the major version of the current application.
conf_version: the current configuration version.
development: a boolean, True if we're running under devappserver.
"""
app_version = os.environ['CURRENT_VERSION_ID'].rsplit('.', 1)[0]
conf_version = int(os.environ.get('CURRENT_CONFIGURATION_VERSION', '0'))
development = os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')
return (app_version, conf_version, development)
def refresh():
"""Update the local config cache from memcache/datastore.
Normally configuration parameters are only refreshed at the start of a
new request. If you have a very long running request, or you just need
the freshest data for some reason, you can call this function to force
a refresh.
"""
app_version, _, _ = _inspect_environment()
global _cached_config
new_config = _fetch_latest_from_memcache(app_version)
if not new_config:
new_config = _fetch_latest_from_datastore(app_version)
if new_config:
_cached_config = new_config
def _new_request():
"""Test if this is the first call to this function in the current request.
This function will return True exactly once for each request
Subsequent calls in the same request will return False.
Returns:
True if this is the first call in a given request, False otherwise.
"""
if RESERVED_MARKER in os.environ:
return False
os.environ[RESERVED_MARKER] = RESERVED_MARKER
return True
def _get_config():
"""Check if the current cached config is stale, and if so update it."""
app_version, current_config_version, development = _inspect_environment()
global _cached_config
if (development and _new_request()) or not _cached_config:
_cached_config = _fetch_from_local_file() or Config()
if _cached_config.ah__conf__version < current_config_version:
newconfig = _fetch_latest_from_memcache(app_version)
if not newconfig or newconfig.ah__conf__version < current_config_version:
newconfig = _fetch_latest_from_datastore(app_version)
_cached_config = newconfig or _cached_config
return _cached_config
def get(name, default=None):
"""Get the value of a configuration parameter.
This function is guaranteed to return the same value for every call
during a single request.
Args:
name: The name of the configuration parameter you want a value for.
default: A default value to return if the named parameter doesn't exist.
Returns:
The string value of the configuration parameter.
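
  Example (illustrative; parameter names follow the sample conf.yaml above):
    color = get('background_color', 'blue')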
"""
return getattr(_get_config(), name, default)
def get_all():
"""Return an object with an attribute for each conf parameter.
Returns:
An object with an attribute for each conf parameter.
"""
return _get_config()
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/google/appengine/api/conf.py
|
Python
|
bsd-3-clause
| 11,771 | 0.008156 |
from flask import session, Blueprint
from lexos.managers import session_manager
from lexos.helpers import constants
from lexos.models.consensus_tree_model import BCTModel
from lexos.views.base import render
consensus_tree_blueprint = Blueprint("consensus-tree", __name__)
@consensus_tree_blueprint.route("/consensus-tree", methods=["GET"])
def consensus_tree() -> str:
"""Gets the consensus tree page.
:return: The consensus tree page.
"""
# Set the default options
if "analyoption" not in session:
session["analyoption"] = constants.DEFAULT_ANALYZE_OPTIONS
if "bctoption" not in session:
session["bctoption"] = constants.DEFAULT_BCT_OPTIONS
# Return the consensus tree page
return render("consensus-tree.html")
@consensus_tree_blueprint.route("/consensus-tree/graph", methods=["POST"])
def graph() -> str:
"""Gets the consensus tree graph.
:return: The consensus tree graph.
"""
# Cache the options
session_manager.cache_bct_option()
session_manager.cache_analysis_option()
# Return the bootstrap consensus tree
return BCTModel().get_bootstrap_consensus_tree_plot_decoded()
|
WheatonCS/Lexos
|
lexos/views/consensus_tree.py
|
Python
|
mit
| 1,164 | 0 |
"""
A wrap python class of 'pbsnodes -N "note" node' command
The purpose of this class is to provide a simple API
to write some attribute and its value pairs to note attribute of cluster nodes.
"""
from __future__ import print_function
from sh import ssh
from ast import literal_eval
from types import *
from copy import deepcopy
from cloudmesh.pbs.pbs import PBS
from cloudmesh.inventory import Inventory
import json
from cloudmesh_base.logger import LOGGER
# ----------------------------------------------------------------------
# SETTING UP A LOGGER
# ----------------------------------------------------------------------
log = LOGGER(__file__)
class pbs_note_builder:
def __init__(self, user, host):
self.username = user
self.hostname = host
self.inventory = Inventory()
self.fetch_pbs_nodes_info()
self.pbs_nodes_info = None
# get recent pbsnodes info
def fetch_pbs_nodes_info(self):
pbs = PBS(self.username, self.hostname)
self.pbs_nodes_info = pbs.pbsnodes()
# print self.pbs_nodes_info
def check_node_validation(self, node):
node_id_label = self.inventory.get_host_id_label(node)
berror = False
if node_id_label is None:
berror = True
else:
(node_id, node_label) = node_id_label
if not berror:
if node_label not in self.pbs_nodes_info.keys():
berror = True
if berror:
raise NameError(
"pbs_note_builder: '{0}' is NOT a valid or existed node.".format(node))
return node_id_label
def get_note(self, node):
(node_id, node_label) = self.check_node_validation(node)
print("{0}-note: {1}".format(node_id, self.pbs_nodes_info[node_label]["note"]))
# node is the server name, e.g., i129, i15
# note is a dict, {"attr1": "value1", "attr2": "value2"}
# setNote doesn't check the correctness of the attribute-value pair
def set_note(self, node, note):
(node_id, node_label) = self.check_node_validation(node)
# ["note"] ONLY has two type: dict or string
prev_note = self.pbs_nodes_info[node_label]["note"]
if type(prev_note) is dict:
curr_note = deepcopy(prev_note)
else:
# assume the default note is for 'service'
curr_note = {"service": deepcopy(prev_note)}
# now curr_note already is a dict
        # to keep consistency, the keys in note should be lower case
        note = dict((str(k).lower(), v) for k, v in note.items())
curr_note.update(note)
# convert the dict to a unique string
# e.g., "'other': 'test', 'temperature': '10.2', 'service': 'hpc'"
# kvpair_list = ", ".join([": ".join(map(lambda x: "'".join(["", str(x), ""]), [key, prev_note[key]])) for key in sorted(prev_note.keys())])
# snote = "".join(['{', kvpair_list, '}'])
# sshnote = '"'.join(["", snote, ""])
# try get the dict string with json dumps
sshnote = json.dumps(curr_note)
# update the note attribute in memory to real node
command = " ".join(["pbsnodes -N", sshnote, node_label])
str_ssh = "@".join([self.username, self.hostname])
log.debug("pbs_note_builder: command ready to execute is: \n > ssh {0} {1}\n".format(
str_ssh, command))
# This operation NEED authorization ...
# ssh(str_ssh, command)
# set server's temperature
# a shortcut of set_note
def set_temperature_note(self, node, temp):
self.set_one_note(node, "temperature", temp)
# set server's service type
# a shortcut of set_note
def set_service_note(self, node, service):
self.set_one_note(node, "service", service)
def set_one_note(self, node, attr, value):
self.set_note(node, {attr: value})
# test only
if __name__ == "__main__":
# only used for test
username = "change me"
hostname = "change me"
pbsnote = pbs_note_builder(username, "india")
try:
pbsnote.get_note(hostname)
# test temperature
pbsnote.set_temperature_note(hostname, 99.2)
# test service type
pbsnote.set_service_note(hostname, "down")
# test setNote
note = {"service": "down, offline",
"temperature": "-100.12", "test": "debug", 0: 12}
pbsnote.set_note(hostname, note)
    except NameError as ne:
print("My exception info: ")
print(str(ne))
|
rajpushkar83/cloudmesh
|
cloudmesh/pbs/pbs_note.py
|
Python
|
apache-2.0
| 4,485 | 0.001115 |
# coding: utf8
# commentinput.py
# 5/28/2014 jichi
__all__ = 'CommentInputDialog',
if __name__ == '__main__':
import sys
sys.path.append('..')
import debug
debug.initenv()
from Qt5 import QtWidgets
from PySide.QtCore import Qt
from sakurakit import skqss
#from sakurakit.skclass import memoizedproperty
#from sakurakit.skdebug import dprint
from sakurakit.sktr import tr_
from mytr import mytr_
def create_label(text=""): # unicode -> QLabel
ret = QtWidgets.QLabel()
if text:
ret.setText(text + ":")
ret.setAlignment(Qt.AlignRight|Qt.AlignVCenter)
return ret
class CommentInputDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super(CommentInputDialog, self).__init__(parent)
skqss.class_(self, 'texture')
self.setWindowTitle(mytr_("Update reason"))
#self.setWindowIcon(rc.icon('window-shortcuts'))
self.__d = _CommentInputDialog(self)
#self.resize(300, 250)
#self.statusBar() # show status bar
#def __del__(self):
# """@reimp"""
# dprint("pass")
def text(self): return self.__d.edit.text()
def setText(self, v): self.__d.edit.setText(v)
def type(self): # -> str
return (
'updateComment' if self.__d.updateCommentButton.isChecked() else
'comment' if self.__d.commentButton.isChecked() else
'')
def method(self): # -> str
return (
'append' if self.__d.appendButton.isChecked() else
'overwrite' if self.__d.overwriteButton.isChecked() else
'')
def get(self, default=""):
"""
@param parent QWidget
@param default str
@return bool ok, unicode comment, {type:str, append:bool}
"""
if default:
self.setText(default)
r = self.exec_()
return bool(r), self.text(), {
'type': self.type(),
'method': self.method(),
}
@classmethod
def getComment(cls, parent=None, default=""):
"""
@param parent QWidget
@param default str
@return bool ok, unicode comment, {type:str, append:bool}
"""
w = cls(parent)
r = w.get(default)
if parent:
w.setParent(None)
return r
#@Q_Q
class _CommentInputDialog(object):
def __init__(self, q):
self._createUi(q)
def _createUi(self, q):
self.edit = QtWidgets.QLineEdit()
skqss.class_(self.edit, 'normal')
grid = QtWidgets.QGridLayout()
r = 0
self.updateCommentButton = QtWidgets.QRadioButton(mytr_("Update reason"))
self.updateCommentButton.setChecked(True)
self.commentButton = QtWidgets.QRadioButton(tr_("Comment"))
g = QtWidgets.QButtonGroup(q)
g.addButton(self.updateCommentButton)
g.addButton(self.commentButton)
grid.addWidget(create_label(tr_("Property")), r, 0)
for i,b in enumerate(g.buttons()):
grid.addWidget(b, r, i+1)
r += 1
self.appendButton = QtWidgets.QRadioButton(tr_("Append"))
self.appendButton.setChecked(True)
self.overwriteButton = QtWidgets.QRadioButton(tr_("Overwrite"))
g = QtWidgets.QButtonGroup(q)
g.addButton(self.appendButton)
g.addButton(self.overwriteButton)
grid.addWidget(create_label(tr_("Method")), r, 0)
for i,b in enumerate(g.buttons()):
grid.addWidget(b, r, i+1)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.edit)
optionGroup = QtWidgets.QGroupBox(tr_("Option"))
optionGroup.setLayout(grid)
layout.addWidget(optionGroup)
buttonBox = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok|
QtWidgets.QDialogButtonBox.Cancel)
layout.addWidget(buttonBox)
buttonBox.accepted.connect(q.accept)
buttonBox.rejected.connect(q.reject)
okButton = buttonBox.button(buttonBox.Ok)
okButton.setDefault(True)
skqss.class_(okButton, 'btn btn-primary')
#cancelButton = buttonBox.button(buttonBox.Cancel)
q.setLayout(layout)
if __name__ == '__main__':
a = debug.app()
r = CommentInputDialog.getComment()
#a.exec_()
# EOF
|
Dangetsu/vnr
|
Frameworks/Sakura/py/apps/reader/dialogs/commentinput.py
|
Python
|
gpl-3.0
| 3,917 | 0.012254 |
from datetime import datetime
import time
import os, json
import requests
import urllib
from . import app
from app.cloudant_db import cloudant_client
from app.redis_db import get_next_user_id
from typing import List, Dict, Optional
from cloudant.document import Document
from cloudant.database import CloudantDatabase
CL_URL = app.config['CL_URL']
CL_MOVIEDB = app.config['CL_MOVIEDB']
CL_AUTHDB = app.config['CL_AUTHDB']
CL_RATINGDB = app.config['CL_RATINGDB']
CL_RECOMMENDDB = app.config['CL_RECOMMENDDB']
class RecommendationsNotGeneratedException(Exception):
pass
class RecommendationsNotGeneratedForUserException(Exception):
pass
class MovieDAO:
@staticmethod
def get_movie_names(movie_ids: List[int]) -> Dict[int, str]:
"""Retrieve the movie names from Cloudant.
Args:
movie_ids (List[int]): The movie ids to lookup the movie names for.
Returns:
Dict[int, str]: Returns a dict with { movie_id: movie_name, ... }.
An empty dict will be returned if no movies are found for the ids.
"""
# The movie_ids in cloudant are stored as strings so convert to correct format
# for querying
movie_ids = [ str(id) for id in movie_ids ]
db = cloudant_client[CL_MOVIEDB]
args = {
"keys" : movie_ids,
"include_docs" : True
}
movie_data = db.all_docs(**args)
movie_names = {}
if 'rows' in movie_data:
for row in movie_data['rows']:
if 'doc' in row:
movie_id = int(row['key'])
movie_name = row['doc']['name']
movie_names[movie_id] = movie_name
return movie_names
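    # Usage sketch (the ids and titles below are hypothetical; requires the
    # configured Cloudant databases to exist):
    #   MovieDAO.get_movie_names([1, 2])  # -> {1: "Movie One", 2: "Movie Two"}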
@staticmethod
def find_movies(search_string: str) -> Dict[int, str]:
"""Find movies in Cloudant database.
Args:
search_string (str): Search string for movie.
Returns:
Dict[int, str]: Returns a dict with { movie_id: movie_name, ... }.
An empty dict will be returned if no movies are found.
"""
movie_db = cloudant_client[CL_MOVIEDB]
index_name = 'movie-search-index'
end_point = '{0}/{1}/_design/{2}/_search/{2}'.format ( CL_URL, CL_MOVIEDB, index_name )
data = {
"q": "name:" + search_string,
"limit": 25
}
headers = { "Content-Type": "application/json" }
response = cloudant_client.r_session.post(end_point, data=json.dumps(data), headers=headers)
movies = {}
movie_data = json.loads(response.text)
if 'rows' in movie_data:
for row in movie_data['rows']:
movie_id = row['id']
movie_name = row['fields']['name']
movies[movie_id] = movie_name
return movies
class RatingDAO:
@staticmethod
def get_ratings(user_id: str, movie_ids: List[int] = None) -> Dict[int, float]:
"""Retrieve user's rated movies.
Args:
user_id (str): The user_id whose movie ratings you require.
movie_ids (List[int]): If a list of movie_ids is provided, only return a rating
if it is for a movie in this list.
Returns:
Dict[int, float]: Returns a dict with { movie_id: rating, ... }.
An empty dict will be returned if no movies have been rated
by the user.
"""
db = cloudant_client[CL_RATINGDB]
args = {
"startkey" : 'user_{0}'.format(user_id),
"endkey" : 'user_{0}/ufff0'.format(user_id),
"include_docs" : True
}
user_ratings = db.all_docs(**args)
ratings = {}
if 'rows' in user_ratings:
for row in user_ratings['rows']:
movie_id = int(row['doc']['_id'].split('/')[1].split('_')[1])
rating = float(row['doc']['rating'])
if movie_ids is None:
# movie_ids filter wasn't provided so return all ratings
ratings[movie_id] = rating
else:
if movie_id in movie_ids:
# movie_ids filter was provided so only return the rating
# if it is in the movie_ids list
ratings[movie_id] = rating
return ratings
@staticmethod
def save_rating(movie_id: int, user_id: str, rating: Optional[float]):
"""Save user's rated movie
Args:
movie_ids (int): The movie id that was rated
user_ids (str): The user id rating the movie
rating (Optional[float]): The movie rating
If the rating argument is not None:
- If the rating doesn't exist in the database it will be created
- If the rating does exist in the database it will be updated
If the rating argument is None:
- If the rating doesn't exist in the database no operation will be performed
- If the rating does exist in the database it will be deleted
"""
db = cloudant_client[CL_RATINGDB]
current_milli_time = lambda: int(round(time.time() * 1000))
id = 'user_{0}/movie_{1}'.format(user_id, movie_id)
with Document(db, id) as document:
if rating:
document.update( { 'rating': rating, 'timestamp': current_milli_time() })
print('saved/updated rating', id)
else:
if document.exists():
document.update( { '_deleted': True } )
print('deleted rating', id)
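    # Usage sketch (ids are hypothetical; requires the configured Cloudant databases):
    #   RatingDAO.save_rating(movie_id=1, user_id="42", rating=4.5)   # create or update
    #   RatingDAO.save_rating(movie_id=1, user_id="42", rating=None)  # delete if present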
class RecommendationDAO:
@staticmethod
def get_latest_recommendation_timestamp() -> datetime:
"""Get the timestamp that the latest recommendations were generated
Returns:
datetime: Returns the UTC timestamp
"""
db = cloudant_client[CL_RECOMMENDDB]
# get recommendation_metadata document with last run details
try:
doc = db['recommendation_metadata']
doc.fetch()
except KeyError:
print('recommendation_metadata doc not found in', CL_RECOMMENDDB)
raise RecommendationsNotGeneratedException
timestamp_str = doc['timestamp_utc']
import dateutil.parser
return dateutil.parser.parse(timestamp_str)
@staticmethod
def get_recommendations_or_product_features(user_id: str) -> Dict:
"""Get the timestamp that the latest recommendations were generated
Returns:
Dict:
If the user had rated some movies:
{
'type' : 'als_recommendations',
'recommendations' : { movie_id, rating }
}
or, if user had not rated any movies:
{
'type' : 'als_product_features',
'pf_vals' : product_feature_values,
'pf_keys' : product_feature_keys
}
"""
# get recommendation_metadata document with last run details
try:
meta_db = cloudant_client[CL_RECOMMENDDB]
meta_doc = meta_db['recommendation_metadata']
meta_doc.fetch()
except KeyError:
print('recommendation_metadata doc not found in', CL_RECOMMENDDB)
raise RecommendationsNotGeneratedException
# get name of db for latest recommendations
try:
latest_recommendations_db = meta_doc['latest_db']
recommendations_db = cloudant_client[latest_recommendations_db]
except KeyError:
print('recommendationsdb not found', latest_recommendations_db)
raise RecommendationsNotGeneratedException
# get recommendations for user
try:
recommendations_doc = recommendations_db[user_id]
# If the above ran without KeyError, recommendations were generated
# when the ALS model was trained and the recommendations were saved
# to Cloudant
recommendations = {}
for rec in recommendations_doc['recommendations']:
movie_id = int(rec[1])
predicted_rating = float(rec[2])
recommendations[movie_id] = predicted_rating
return { 'type' : "als_recommendations",
'recommendations' : recommendations }
except KeyError:
# no recommendations were generated for the user - they probably hadn't
# rated any movies by the time the ALS model was trained
pf_keys = json.loads(
meta_doc.get_attachment('product_feature_keys', attachment_type='text')
)
pf_vals = json.loads(
meta_doc.get_attachment('product_feature_vals', attachment_type='text')
)
return { 'type' : "als_product_features",
'pf_keys' : pf_keys,
'pf_vals' : pf_vals }
class UserDAO:
@staticmethod
def load_user(user_id: str) -> Dict[str, str]:
"""Load user details
Args:
user_ids (str): The user id to load
Returns:
Dict[str, str]: Returns the user dict with the following fields:
{
'email': str
'password_hash': str
}
"""
db = cloudant_client[CL_AUTHDB]
user_dict = {}
try:
doc = db[user_id]
doc.fetch()
if doc.exists():
user_dict['email'] = doc['email']
user_dict['password_hash'] = doc['password_hash']
except KeyError:
pass
return user_dict
@staticmethod
def find_by_email(email: str) -> Dict[str, str]:
"""Load user details
Args:
email (str): The user email address
Returns:
Dict[str, str]: Returns the user dict with the following fields:
{
'user_id': str
'password_hash': str
}
"""
# FIXME - convert this to python-cloudant api
auth_db = cloudant_client[CL_AUTHDB]
key = urllib.parse.quote_plus(email)
view_name = 'authdb-email-index'
template = '{0}/{1}/_design/{2}/_view/{2}?key="{3}"&include_docs=true'
endpoint = template.format (
CL_URL,
CL_AUTHDB,
view_name,
key
)
response = cloudant_client.r_session.get(endpoint)
user_dict = {}
if response.status_code == 200:
rows = response.json()['rows']
if len(rows) > 0:
user_dict['password_hash'] = rows[0]['doc']['password_hash']
user_dict['user_id'] = rows[0]['doc']['_id']
print("User found for email", email)
else:
print("User not found for email", email)
return user_dict
@staticmethod
def create_user(email: str, password_hash: str) -> str:
"""Create new user
Args:
email (str): The user's email address
password_hash (str): The user's password_hash
Returns:
str: The generated user id for the new user
"""
db = cloudant_client[CL_AUTHDB]
if app.config['REDIS_ENABLED'] == True:
from app.redis_db import get_next_user_id
id = get_next_user_id()
data = {
"_id" : str(id),
"email" : email,
"password_hash" : password_hash
}
else:
# allow cloudant to generate a uuid
data = {
"email" : email,
"password_hash" : password_hash
}
doc = db.create_document(data)
if not doc.exists():
            raise BaseException("Could not save user: " + str(data))
return doc['_id']
|
snowch/movie-recommender-demo
|
web_app/app/dao.py
|
Python
|
apache-2.0
| 12,507 | 0.006796 |
# -*- coding: utf-8 -*-
import base64
import json
from unicodedata import normalize
import requests
from bs4 import BeautifulSoup
from .utils import normalize_key
class Institution:
"""
    Class responsible for collecting all of the institution's data from the e-MEC website.
    Scrapes the site for detailed data about the institution and the courses at each campus.
"""
    def __init__(self, code_ies: int):
        """Class constructor.
        Args:
            code_ies (int): Code of the educational institution in the MEC database.
"""
self.data_ies = {}
self.code_ies = code_ies
    def set_code_ies(self, code_ies: int):
        """Sets the institution (IES) code.
        Args:
            code_ies (int): Code of the educational institution in the MEC database.
"""
self.data_ies = {}
self.code_ies = code_ies
    def parse(self) -> None:
        """Performs the full parse of all of the institution's data."""
if self.code_ies == None or self.code_ies == 0:
print("informe o codigo da ies")
return
self.__parse_institution_details()
self.__parse_campus()
self.__parse_courses()
def __parse_institution_details(self) -> dict:
"""
        Parses all of the institution's data, including its maintaining
        organization and the MEC concept (quality) scores.
        Returns:
            dict: detailed data about the institution.
"""
ies_code = str(self.code_ies).encode("utf-8")
ies_b64 = base64.b64encode(ies_code).decode("utf-8")
URL = f"https://emec.mec.gov.br/emec/consulta-ies/index/d96957f455f6405d14c6542552b0f6eb/{ies_b64}"
try:
response = requests.get(URL)
except Exception as error:
print(f"{error}")
return
soup = BeautifulSoup(response.content, "html.parser")
fields_ies = soup.find_all("tr", "avalLinhaCampos")
for fields in fields_ies:
key = ""
for f in fields.find_all("td"):
aux = f.get_text(strip=True)
if not aux:
continue
if "tituloCampos" in f["class"]:
key = normalize_key(aux).decode("UTF-8")
continue
self.data_ies[key] = aux
        # insert the institution code
self.data_ies["code_ies"] = self.code_ies
        # fetch the MEC concept scores
table = soup.find(id="listar-ies-cadastro")
if table is not None and table.tbody is not None:
index = table.tbody.find_all("td")
if len(index) == 9:
item = {
"ci": index[1].get_text(strip=True),
"year_ci": index[2].get_text(strip=True),
"igc": index[4].get_text(strip=True),
"year_igc": index[5].get_text(strip=True),
"igcc": index[7].get_text(strip=True),
"year_igcc": index[8].get_text(strip=True),
}
self.data_ies["conceito"] = item
return self.data_ies
def __parse_campus(self):
"""
        Parses all of the campuses belonging to the institution.
"""
campus = []
ies_code = str(self.code_ies).encode("utf-8")
ies_b64 = base64.b64encode(ies_code).decode("utf-8")
URL = (
f"http://emec.mec.gov.br/emec/consulta-ies/listar-endereco/"
f"d96957f455f6405d14c6542552b0f6eb/{ies_b64}/list/1000"
)
response = requests.get(URL)
soup = BeautifulSoup(response.content, "html.parser")
table = soup.find(id="listar-ies-cadastro")
if table is None or table.tbody is None:
return
rows = table.find_all("tr", "corDetalhe2")
for r in rows:
cells = r.find_all("td")
if len(cells) > 1:
item = {
"code": cells[0].get_text(strip=True),
"city": cells[4].get_text(strip=True),
"uf": cells[5].get_text(strip=True),
}
campus.append(item)
self.data_ies["campus"] = campus
def __parse_courses(self) -> list:
"""
        Parses all of the course data.
        Returns:
            list: list with the course data as JSON.
"""
ies_code = str(self.code_ies).encode("utf-8")
ies_b64 = base64.b64encode(ies_code).decode("utf-8")
URL = (
f"http://emec.mec.gov.br/emec/consulta-ies/listar-curso-agrupado/"
f"d96957f455f6405d14c6542552b0f6eb/{ies_b64}/list/1000?no_curso="
)
try:
response = requests.get(URL)
except Exception as error:
print(f"{error}")
return False
soup = BeautifulSoup(response.content, "html.parser")
table = soup.find(id="listar-ies-cadastro")
if table is None or table.tbody is None:
return
courses = []
rows = table.tbody.find_all("tr")
if rows is None:
return
for r in rows:
if r.td.a:
url_list = r.td.a["href"].split("/")
code_course = url_list[len(url_list) - 1]
course_details = self.__parse_course_details(code_course)
if course_details:
courses += course_details
self.data_ies["courses"] = courses
return courses
def __parse_course_details(self, code_course: int) -> list:
"""
        Parses the detailed data of each course.
        Args:
            code_course (int): Code of the course in the MEC database.
        Returns:
            list: list with the detailed data of each course as JSON.
"""
ies_code = str(self.code_ies).encode("utf-8")
ies_b64 = base64.b64encode(ies_code).decode("utf-8")
        # decode the code_course (received from the page), which is ISO-encoded
decode_course = base64.b64decode(code_course).decode("iso-8859-1")
        # turn the returned string into a bytes-like object for conversion
course_obj = str(decode_course).encode("utf-8")
course_code = base64.b64encode(course_obj).decode("utf-8")
URL = (
f"https://emec.mec.gov.br/emec/consulta-curso/listar-curso-desagrupado/"
f"9f1aa921d96ca1df24a34474cc171f61/0/d96957f455f6405d14c6542552b0f6eb/"
f"{ies_b64}/c1b85ea4d704f246bcced664fdaeddb6/{course_code}/list/1000"
)
try:
response = requests.get(URL)
except Exception as error:
print(f"{error}")
return False
soup = BeautifulSoup(response.content, "html.parser")
table = soup.find(id="listar-ies-cadastro")
if table is None or table.tbody is None:
return
courses_details = []
rows = table.tbody.find_all("tr")
if rows is None:
return
for r in rows:
cells = r.find_all("td")
if len(cells) >= 9:
# print(cells[3].get_text(strip=True).encode("utf-8").capitalize())
item = {
"codigo": cells[0].get_text(strip=True),
"modalidade": cells[1].get_text(strip=True),
"grau": cells[2].get_text(strip=True),
"curso": normalize(
"NFKD", cells[3].get_text(strip=True)
).capitalize(),
"uf": cells[4].get_text(strip=True),
"municipio": cells[5].get_text(strip=True),
"enade": cells[6].get_text(strip=True),
"cpc": cells[7].get_text(strip=True),
"cc": cells[8].get_text(strip=True),
}
courses_details.append(item)
return courses_details
def get_full_data(self) -> dict:
"""
        Returns the institution's complete data.
        Returns:
            dict: JSON object with all of the institution's data.
"""
return self.data_ies
    def write_json(self, filename: str):
        """Writes the JSON file to disk.
        Args:
            filename (str): full path of the output file.
"""
with open(filename, "a", encoding="utf-8") as outfile:
json.dump(self.data_ies, outfile, indent=4, ensure_ascii=False)
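# Usage sketch (the institution code below is hypothetical):
#   institution = Institution(322)
#   institution.parse()
#   data = institution.get_full_data()
#   institution.write_json("institution_322.json")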
|
pavanad/emec-api
|
emec/emec.py
|
Python
|
mit
| 8,481 | 0.001061 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_garm_bel_iblis.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/mobile/shared_dressed_garm_bel_iblis.py
|
Python
|
mit
| 447 | 0.04698 |
# -*- coding: utf-8 -*-
"""
Data about OCA Projects, with a few helper functions.
OCA_PROJECTS: dictionary of OCA Projects mapped to the list of related
repository names, based on
https://community.odoo.com/page/website.projects_index
OCA_REPOSITORY_NAMES: list of OCA repository names
"""
from github_login import login
ALL = ['OCA_PROJECTS', 'OCA_REPOSITORY_NAMES', 'url']
OCA_PROJECTS = {
'accounting': ['account-analytic',
'account-budgeting',
'account-closing',
'account-consolidation',
'account-financial-tools',
'account-financial-reporting',
'account-invoice-reporting',
'account-invoicing',
'account-fiscal-rule',
],
# 'backport': ['OCB',
# ],
'banking': ['banking',
'bank-statement-reconcile',
'account-payment',
],
'community': ['maintainers-tools',
'maintainer-quality-tools',
'runbot-addons',
],
'connector': ['connector',
'connector-ecommerce',
],
'account edge connector': ['connector-accountedge'],
'connector LIMS': ['connector-lims'],
'connector CMIS': ['connector-cmis'],
'connector magento': ['connector-magento'],
'connector prestashop': ['connector-prestashop'],
'connector sage': ['connector-sage-50'],
'crm sales marketing': ['sale-workflow',
'crm',
'partner-contact',
'sale-financial',
'sale-reporting',
'commission',
'event',
'survey',
],
'document': ['knowledge'],
'ecommerce': ['e-commerce'],
'financial control': ['margin-analysis'],
'geospatial': ['geospatial'],
'hr': ['hr-timesheet',
'hr',
'department',
],
'intercompany': ['multi-company'],
'l10n-argentina': ['l10n-argentina'],
'l10n-belgium': ['l10n-belgium'],
'l10n-brazil': ['l10n-brazil'],
'l10n-canada': ['l10n-canada'],
'l10n-china': ['l10n-china'],
'l10n-colombia': ['l10n-colombia'],
'l10n-costa-rica': ['l10n-costa-rica'],
'l10n-finland': ['l10n-finland'],
'l10n-france': ['l10n-france'],
'l10n-germany': ['l10n-germany'],
'l10n-india': ['l10n-india'],
'l10n-iran': ['l10n-iran'],
'l10n-ireland': ['l10n-ireland'],
'l10n-italy': ['l10n-italy'],
'l10n-luxemburg': ['l10n-luxemburg'],
'l10n-mexico': ['l10n-mexico'],
'l10n-netherlands': ['l10n-netherlands'],
'l10n-norway': ['l10n-norway'],
'l10n-portugal': ['l10n-portugal'],
'l10n-romania': ['l10n-romania'],
'l10n-spain': ['l10n-spain'],
'l10n-switzerland': ['l10n-switzerland'],
'l10n-taiwan': ['l10n-taiwan'],
'l10n-usa': ['l10n-usa'],
'l10n-venezuela': ['l10n-venezuela'],
'logistics': ['carrier-delivery',
'stock-logistics-barcode',
'stock-logistics-workflow',
'stock-logistics-tracking',
'stock-logistics-warehouse',
'stock-logistics-reporting',
'rma',
],
'manufacturing': ['manufacture',
'manufacture-reporting',
],
'management system': ['management-system'],
'purchase': ['purchase-workflow',
'purchase-reporting',
],
'product': ['product-attribute',
'product-kitting',
'product-variant',
],
'project / services': ['project-reporting',
'project-service',
'contract',
'program',
],
'tools': ['reporting-engine',
'report-print-send',
'webkit-tools',
'server-tools',
'community-data-files',
],
'vertical hotel': ['vertical-hotel'],
'vertical ISP': ['vertical-isp'],
'vertical editing': ['vertical-editing'],
'vertical medical': ['vertical-medical'],
'vertical NGO': ['vertical-ngo',
# XXX
],
'vertical construction': ['vertical-construction'],
'vertical travel': ['vertical-travel'],
'web': ['web'],
}
def get_repositories():
ignored = set([
'odoo-community.org',
'community-data-files',
'contribute-md-template',
'website',
])
gh = login()
all_repos = [repo.name for repo in gh.iter_user_repos('OCA')
                 if repo.name not in ignored]
return all_repos
try:
OCA_REPOSITORY_NAMES = get_repositories()
except Exception as exc:
print exc
OCA_REPOSITORY_NAMES = []
for repos in OCA_PROJECTS.itervalues():
OCA_REPOSITORY_NAMES += repos
OCA_REPOSITORY_NAMES.sort()
_OCA_REPOSITORY_NAMES = set(OCA_REPOSITORY_NAMES)
_URL_MAPPINGS = {'git': 'git@github.com:%s/%s.git',
'https': 'https://github.com/%s/%s.git',
}
def url(project_name, protocol='git', org_name='OCA'):
"""get the URL for an OCA project repository"""
if project_name not in _OCA_REPOSITORY_NAMES:
raise ValueError('Unknown project', project_name)
return _URL_MAPPINGS[protocol] % (org_name, project_name)
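# Usage sketch for url(); the expected outputs are shown for illustration:
#   url('web')                    -> 'git@github.com:OCA/web.git'
#   url('web', protocol='https')  -> 'https://github.com/OCA/web.git'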
|
osvalr/maintainer-tools
|
tools/oca_projects.py
|
Python
|
agpl-3.0
| 5,555 | 0.00018 |
"""
:mod: ReqClient
.. module: ReqClient
:synopsis: implementation of client for RequestDB using DISET framework
"""
import os
import time
import random
import json
import datetime
# # from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.List import randomize, fromChar
from DIRAC.Core.Utilities.JEncode import strToIntDict
from DIRAC.Core.Utilities.DEncode import ignoreEncodeWarning
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.Base.Client import Client, createClient
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.WorkloadManagementSystem.Client import JobStatus
from DIRAC.WorkloadManagementSystem.Client import JobMinorStatus
from DIRAC.WorkloadManagementSystem.Client.JobStateUpdateClient import JobStateUpdateClient
from DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient import JobMonitoringClient
@createClient("RequestManagement/ReqManager")
class ReqClient(Client):
"""ReqClient is a class manipulating and operation on Requests."""
__requestProxiesDict = {}
__requestValidator = None
def __init__(self, url=None, **kwargs):
"""c'tor
:param self: self reference
:param url: url of the ReqManager
:param kwargs: forwarded to the Base Client class
"""
super(ReqClient, self).__init__(**kwargs)
self.serverURL = "RequestManagement/ReqManager" if not url else url
self.log = gLogger.getSubLogger("RequestManagement/ReqClient/pid_%s" % (os.getpid()))
def requestProxies(self, timeout=120):
"""get request proxies dict"""
# Forward all the connection options to the requestClient
# (e.g. the userDN to use)
kwargs = self.getClientKWArgs()
kwargs["timeout"] = timeout
if not self.__requestProxiesDict:
self.__requestProxiesDict = {}
proxiesURLs = fromChar(PathFinder.getServiceURL("RequestManagement/ReqProxyURLs"))
if not proxiesURLs:
self.log.warn("CS option RequestManagement/ReqProxyURLs is not set!")
for proxyURL in proxiesURLs:
self.log.debug("creating RequestProxy for url = %s" % proxyURL)
pc = Client(**kwargs)
pc.setServer(proxyURL)
self.__requestProxiesDict[proxyURL] = pc
return self.__requestProxiesDict
def requestValidator(self):
"""get request validator"""
if not self.__requestValidator:
self.__requestValidator = RequestValidator()
return self.__requestValidator
def putRequest(self, request, useFailoverProxy=True, retryMainService=0):
"""Put request to RequestManager
:param self: self reference
:param ~Request.Request request: Request instance
:param bool useFailoverProxy: if False, will not attempt to forward the request to ReqProxies
:param int retryMainService: Amount of time we retry on the main ReqHandler in case of failures
:return: S_OK/S_ERROR
"""
errorsDict = {"OK": False}
valid = self.requestValidator().validate(request)
if not valid["OK"]:
self.log.error("putRequest: request not valid", "%s" % valid["Message"])
return valid
# # dump to json
requestJSON = request.toJSON()
if not requestJSON["OK"]:
return requestJSON
requestJSON = requestJSON["Value"]
retryMainService += 1
while retryMainService:
retryMainService -= 1
setRequestMgr = self._getRPC().putRequest(requestJSON)
if setRequestMgr["OK"]:
return setRequestMgr
errorsDict["RequestManager"] = setRequestMgr["Message"]
# sleep a bit
time.sleep(random.randint(1, 5))
self.log.warn(
"putRequest: unable to set request '%s' at RequestManager" % request.RequestName, setRequestMgr["Message"]
)
proxies = self.requestProxies() if useFailoverProxy else {}
for proxyURL in randomize(proxies.keys()):
proxyClient = proxies[proxyURL]
self.log.debug("putRequest: trying RequestProxy at %s" % proxyURL)
setRequestProxy = proxyClient.putRequest(requestJSON)
if setRequestProxy["OK"]:
if setRequestProxy["Value"]["set"]:
self.log.info(
"putRequest: request '%s' successfully set using RequestProxy %s"
% (request.RequestName, proxyURL)
)
elif setRequestProxy["Value"]["saved"]:
self.log.info(
"putRequest: request '%s' successfully forwarded to RequestProxy %s"
% (request.RequestName, proxyURL)
)
return setRequestProxy
else:
self.log.warn(
"putRequest: unable to set request using RequestProxy %s: %s"
% (proxyURL, setRequestProxy["Message"])
)
errorsDict["RequestProxy(%s)" % proxyURL] = setRequestProxy["Message"]
# # if we're here neither requestManager nor requestProxy were successful
self.log.error("putRequest: unable to set request", "'%s'" % request.RequestName)
errorsDict["Message"] = "ReqClient.putRequest: unable to set request '%s'" % request.RequestName
return errorsDict
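    # Usage sketch (assumes a configured DIRAC client installation; the request
    # name below is illustrative, and a real Request also needs Operations and
    # Files added before it passes validation):
    #   req = Request()               # Request is imported at the top of this module
    #   req.RequestName = "exampleRequest"
    #   # ... add Operations and Files to req ...
    #   res = ReqClient().putRequest(req)
    #   if not res["OK"]:
    #       print(res["Message"])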
def getRequest(self, requestID=0):
"""Get request from RequestDB
:param self: self reference
:param int requestID: ID of the request. If 0, choice is made for you
:return: S_OK( Request instance ) or S_OK() or S_ERROR
"""
self.log.debug("getRequest: attempting to get request.")
getRequest = self._getRPC().getRequest(requestID)
if not getRequest["OK"]:
self.log.error("getRequest: unable to get request", "'%s' %s" % (requestID, getRequest["Message"]))
return getRequest
if not getRequest["Value"]:
return getRequest
return S_OK(Request(getRequest["Value"]))
@ignoreEncodeWarning
def getBulkRequests(self, numberOfRequest=10, assigned=True):
"""get bulk requests from RequestDB
:param self: self reference
:param str numberOfRequest: size of the bulk (default 10)
:return: S_OK( Successful : { requestID, RequestInstance }, Failed : message ) or S_ERROR
"""
self.log.debug("getRequests: attempting to get request.")
getRequests = self._getRPC().getBulkRequests(numberOfRequest, assigned)
if not getRequests["OK"]:
self.log.error("getRequests: unable to get '%s' requests: %s" % (numberOfRequest, getRequests["Message"]))
return getRequests
# No Request returned
if not getRequests["Value"]:
return getRequests
# No successful Request
if not getRequests["Value"]["Successful"]:
return getRequests
jsonReq = getRequests["Value"]["Successful"]
# Do not forget to cast back str keys to int
reqInstances = {int(rId): Request(jsonReq[rId]) for rId in jsonReq}
failed = strToIntDict(getRequests["Value"]["Failed"])
return S_OK({"Successful": reqInstances, "Failed": failed})
def peekRequest(self, requestID):
"""peek request"""
self.log.debug("peekRequest: attempting to get request.")
peekRequest = self._getRPC().peekRequest(int(requestID))
if not peekRequest["OK"]:
self.log.error(
"peekRequest: unable to peek request", "request: '%s' %s" % (requestID, peekRequest["Message"])
)
return peekRequest
if not peekRequest["Value"]:
return peekRequest
return S_OK(Request(peekRequest["Value"]))
def deleteRequest(self, requestID):
"""delete request given it's ID
:param self: self reference
:param str requestID: request ID
"""
requestID = int(requestID)
self.log.debug("deleteRequest: attempt to delete '%s' request" % requestID)
deleteRequest = self._getRPC().deleteRequest(requestID)
if not deleteRequest["OK"]:
self.log.error(
"deleteRequest: unable to delete request", "'%s' request: %s" % (requestID, deleteRequest["Message"])
)
return deleteRequest
def getRequestIDsList(self, statusList=None, limit=None, since=None, until=None, getJobID=False):
"""get at most :limit: request ids with statuses in :statusList:"""
statusList = statusList if statusList else list(Request.FINAL_STATES)
limit = limit if limit else 100
since = since.strftime("%Y-%m-%d") if since else ""
until = until.strftime("%Y-%m-%d") if until else ""
return self._getRPC().getRequestIDsList(statusList, limit, since, until, getJobID)
def getScheduledRequest(self, operationID):
"""get scheduled request given its scheduled OperationID"""
self.log.debug("getScheduledRequest: attempt to get scheduled request...")
scheduled = self._getRPC().getScheduledRequest(operationID)
if not scheduled["OK"]:
self.log.error("getScheduledRequest failed", scheduled["Message"])
return scheduled
if scheduled["Value"]:
return S_OK(Request(scheduled["Value"]))
return scheduled
def getDBSummary(self):
"""Get the summary of requests in the RequestDBs."""
self.log.debug("getDBSummary: attempting to get RequestDB summary.")
dbSummary = self._getRPC().getDBSummary()
if not dbSummary["OK"]:
self.log.error("getDBSummary: unable to get RequestDB summary", dbSummary["Message"])
return dbSummary
def getDigest(self, requestID):
"""Get the request digest given a request ID.
:param self: self reference
:param str requestID: request id
"""
self.log.debug("getDigest: attempting to get digest for '%s' request." % requestID)
digest = self._getRPC().getDigest(int(requestID))
if not digest["OK"]:
self.log.error(
"getDigest: unable to get digest for request", "request: '%s' %s" % (requestID, digest["Message"])
)
return digest
def getRequestStatus(self, requestID):
"""Get the request status given a request id.
:param self: self reference
:param int requestID: id of the request
"""
if isinstance(requestID, str):
requestID = int(requestID)
self.log.debug("getRequestStatus: attempting to get status for '%d' request." % requestID)
requestStatus = self._getRPC().getRequestStatus(requestID)
if not requestStatus["OK"]:
self.log.error(
"getRequestStatus: unable to get status for request",
": '%d' %s" % (requestID, requestStatus["Message"]),
)
return requestStatus
# def getRequestName( self, requestID ):
# """ get request name for a given requestID """
# return self._getRPC().getRequestName( requestID )
def getRequestInfo(self, requestID):
"""The the request info given a request id.
:param self: self reference
:param int requestID: request nid
"""
self.log.debug("getRequestInfo: attempting to get info for '%s' request." % requestID)
requestInfo = self._getRPC().getRequestInfo(int(requestID))
if not requestInfo["OK"]:
self.log.error(
"getRequestInfo: unable to get status for request",
"request: '%s' %s" % (requestID, requestInfo["Message"]),
)
return requestInfo
def getRequestFileStatus(self, requestID, lfns):
"""Get file status for request given a request id.
:param self: self reference
:param int requestID: request id
:param lfns: list of LFNs
:type lfns: python:list
"""
self.log.debug("getRequestFileStatus: attempting to get file statuses for '%s' request." % requestID)
fileStatus = self._getRPC().getRequestFileStatus(int(requestID), lfns)
if not fileStatus["OK"]:
self.log.verbose(
"getRequestFileStatus: unable to get file status for request",
"request: '%s' %s" % (requestID, fileStatus["Message"]),
)
return fileStatus
def finalizeRequest(self, requestID, jobID, useCertificates=True):
"""check request status and perform finalization if necessary
update the request status and the corresponding job parameter
:param self: self reference
:param str requestID: request id
:param int jobID: job id
"""
stateServer = JobStateUpdateClient(useCertificates=useCertificates)
# Checking if to update the job status - we should fail here, so it will be re-tried later
# Checking the state, first
res = self.getRequestStatus(requestID)
if not res["OK"]:
self.log.error(
"finalizeRequest: failed to get request", "request: %s status: %s" % (requestID, res["Message"])
)
return res
if res["Value"] != "Done":
return S_ERROR(
"The request %s isn't 'Done' but '%s', this should never happen, why are we here?"
% (requestID, res["Value"])
)
# The request is 'Done', let's update the job status. If we fail, we should re-try later
monitorServer = JobMonitoringClient(useCertificates=useCertificates)
res = monitorServer.getJobSummary(int(jobID))
if not res["OK"]:
self.log.error("finalizeRequest: Failed to get job status", "JobID: %d" % jobID)
return res
elif not res["Value"]:
self.log.info("finalizeRequest: job %d does not exist (anymore): finalizing" % jobID)
return S_OK()
else:
jobStatus = res["Value"]["Status"]
jobMinorStatus = res["Value"]["MinorStatus"]
jobAppStatus = ""
newJobStatus = ""
if jobStatus == JobStatus.STALLED:
# If job is stalled, find the previous status from the logging info
res = monitorServer.getJobLoggingInfo(int(jobID))
if not res["OK"]:
self.log.error("finalizeRequest: Failed to get job logging info", "JobID: %d" % jobID)
return res
# Check the last status was Stalled and get the one before
if len(res["Value"]) >= 2 and res["Value"][-1][0] == JobStatus.STALLED:
jobStatus, jobMinorStatus, jobAppStatus = res["Value"][-2][:3]
newJobStatus = jobStatus
# update the job pending request digest in any case since it is modified
self.log.info("finalizeRequest: Updating request digest for job %d" % jobID)
digest = self.getDigest(requestID)
if digest["OK"]:
digest = digest["Value"]
self.log.verbose(digest)
res = stateServer.setJobParameter(jobID, "PendingRequest", digest)
if not res["OK"]:
self.log.info("finalizeRequest: Failed to set job %d parameter: %s" % (jobID, res["Message"]))
return res
else:
self.log.error(
"finalizeRequest: Failed to get request digest for %s: %s" % (requestID, digest["Message"])
)
if jobStatus == JobStatus.COMPLETED:
# What to do? Depends on what we have in the minorStatus
if jobMinorStatus == JobMinorStatus.PENDING_REQUESTS:
newJobStatus = JobStatus.DONE
elif jobMinorStatus == JobMinorStatus.APP_ERRORS:
newJobStatus = JobStatus.FAILED
elif jobMinorStatus == JobMinorStatus.MARKED_FOR_TERMINATION:
# If the job has been Killed, set it Killed
newJobStatus = JobStatus.KILLED
else:
self.log.error(
"finalizeRequest: Unexpected jobMinorStatus", "for %d (got %s)" % (jobID, jobMinorStatus)
)
return S_ERROR("Unexpected jobMinorStatus")
if newJobStatus:
self.log.info(
"finalizeRequest: Updating job status",
"for %d to '%s/%s'" % (jobID, newJobStatus, JobMinorStatus.REQUESTS_DONE),
)
else:
self.log.info(
"finalizeRequest: Updating job minor status",
"for %d to '%s' (current status is %s)" % (jobID, JobMinorStatus.REQUESTS_DONE, jobStatus),
)
stateUpdate = stateServer.setJobStatus(jobID, newJobStatus, JobMinorStatus.REQUESTS_DONE, "RMS")
if jobAppStatus and stateUpdate["OK"]:
stateUpdate = stateServer.setJobApplicationStatus(jobID, jobAppStatus, "RMS")
if not stateUpdate["OK"]:
self.log.error(
"finalizeRequest: Failed to set job status",
"JobID: %d, error: %s" % (jobID, stateUpdate["Message"]),
)
return stateUpdate
return S_OK(newJobStatus)
@ignoreEncodeWarning
def getRequestIDsForJobs(self, jobIDs):
"""get the request ids for the supplied jobIDs.
:param self: self reference
:param list jobIDs: list of job IDs (integers)
:return: S_ERROR or S_OK( "Successful": { jobID1: reqID1, jobID2: requID2, ... },
"Failed" : { jobIDn: errMsg, jobIDm: errMsg, ...} )
"""
self.log.verbose("getRequestIDsForJobs: attempt to get request(s) for jobs", "(n=%d)" % len(jobIDs))
res = self._getRPC().getRequestIDsForJobs(jobIDs)
if not res["OK"]:
self.log.error(
"getRequestIDsForJobs: unable to get request(s) for jobs", "%s: %s" % (jobIDs, res["Message"])
)
return res
# Cast the JobIDs back to int
successful = strToIntDict(res["Value"]["Successful"])
failed = strToIntDict(res["Value"]["Failed"])
return S_OK({"Successful": successful, "Failed": failed})
@ignoreEncodeWarning
def readRequestsForJobs(self, jobIDs):
"""read requests for jobs
:param jobIDs: list with jobIDs
:type jobIDs: python:list
:return: S_OK( { "Successful" : { jobID1 : Request, ... },
"Failed" : { jobIDn : "Fail reason" } } )
"""
readReqsForJobs = self._getRPC().readRequestsForJobs(jobIDs)
if not readReqsForJobs["OK"]:
return readReqsForJobs
ret = readReqsForJobs["Value"]
# # create Requests out of JSONs for successful reads
# Do not forget to cast back str keys to int
successful = {int(jobID): Request(jsonReq) for jobID, jsonReq in ret["Successful"].items()}
failed = strToIntDict(ret["Failed"])
return S_OK({"Successful": successful, "Failed": failed})
def resetFailedRequest(self, requestID, allR=False):
"""Reset a failed request to "Waiting" status"""
# # we can safely only peek the request as it is Failed and therefore not owned by an agent
res = self.peekRequest(requestID)
if not res["OK"]:
return res
req = res["Value"]
if allR or recoverableRequest(req):
# Only reset requests that can be recovered
if req.Status != "Failed":
gLogger.notice("Reset NotBefore time, was %s" % str(req.NotBefore))
else:
for i, op in enumerate(req):
op.Error = ""
if op.Status == "Failed":
printOperation((i, op), onlyFailed=True)
for fi in op:
if fi.Status == "Failed":
fi.Attempt = 1
fi.Error = ""
fi.Status = "Waiting"
if op.Status == "Failed":
op.Status = "Waiting"
# Reset also NotBefore
req.NotBefore = datetime.datetime.utcnow().replace(microsecond=0)
return self.putRequest(req)
return S_OK("Not reset")
# ============= Some useful functions to be shared ===========
output = ""
def prettyPrint(mainItem, key="", offset=0):
global output
if key:
key += ": "
blanks = offset * " "
if mainItem and isinstance(mainItem, dict):
output += "%s%s%s\n" % (blanks, key, "{") if blanks or key else ""
for key in sorted(mainItem):
prettyPrint(mainItem[key], key=key, offset=offset)
output += "%s%s\n" % (blanks, "}") if blanks else ""
elif mainItem and isinstance(mainItem, list) or isinstance(mainItem, tuple):
output += "%s%s%s\n" % (blanks, key, "[" if isinstance(mainItem, list) else "(")
for item in mainItem:
prettyPrint(item, offset=offset + 2)
output += "%s%s\n" % (blanks, "]" if isinstance(mainItem, list) else ")")
elif isinstance(mainItem, str):
if "\n" in mainItem:
prettyPrint(mainItem.strip("\n").split("\n"), offset=offset)
else:
output += "%s%s'%s'\n" % (blanks, key, mainItem)
else:
output += "%s%s%s\n" % (blanks, key, str(mainItem))
output = (
output.replace("[\n%s{" % blanks, "[{")
.replace("}\n%s]" % blanks, "}]")
.replace("(\n%s{" % blanks, "({")
.replace("}\n%s)" % blanks, "})")
.replace("(\n%s(" % blanks, "((")
.replace(")\n%s)" % blanks, "))")
.replace("(\n%s[" % blanks, "[")
.replace("]\n%s)" % blanks, "]")
)
def printFTSJobs(request):
"""Prints the FTSJobs associated to a request
:param request: Request object
"""
try:
if request.RequestID:
# We try first the new FTS3 system
from DIRAC.DataManagementSystem.Client.FTS3Client import FTS3Client
fts3Client = FTS3Client()
res = fts3Client.ping()
if res["OK"]:
associatedFTS3Jobs = []
for op in request:
res = fts3Client.getOperationsFromRMSOpID(op.OperationID)
if res["OK"]:
for fts3Op in res["Value"]:
associatedFTS3Jobs.extend(fts3Op.ftsJobs)
if associatedFTS3Jobs:
# Display the direct url and the status
gLogger.always(
"\n\nFTS3 jobs associated: \n%s"
% "\n".join(
"%s/fts3/ftsmon/#/job/%s (%s)"
% (
job.ftsServer.replace(":8446", ":8449"), # Submission port is 8446, web port is 8449
job.ftsGUID,
job.status,
)
for job in associatedFTS3Jobs
)
)
return
# AttributeError can be thrown because the deserialization will not have
# happened correctly on the new fts3 (CC7 typically), and the error is not
# properly propagated
except AttributeError as err:
gLogger.debug("Could not instantiate FtsClient because of Exception", repr(err))
def printRequest(request, status=None, full=False, verbose=True, terse=False):
global output
if full:
output = ""
prettyPrint(json.loads(request.toJSON()["Value"]))
gLogger.always(output)
else:
if not status:
status = request.Status
gLogger.always(
"Request name='%s' ID=%s Status='%s'%s%s%s"
% (
request.RequestName,
request.RequestID if hasattr(request, "RequestID") else "(not set yet)",
request.Status,
" ('%s' in DB)" % status if status != request.Status else "",
(" Error='%s'" % request.Error) if request.Error and request.Error.strip() else "",
(" Job=%s" % request.JobID) if request.JobID else "",
)
)
gLogger.always(
"Created %s, Updated %s%s"
% (
request.CreationTime,
request.LastUpdate,
(", NotBefore %s" % request.NotBefore) if request.NotBefore else "",
)
)
if request.OwnerDN:
gLogger.always("Owner: '%s', Group: %s" % (request.OwnerDN, request.OwnerGroup))
for indexOperation in enumerate(request):
op = indexOperation[1]
if not terse or op.Status == "Failed":
printOperation(indexOperation, verbose, onlyFailed=terse)
if not terse:
printFTSJobs(request)
def printOperation(indexOperation, verbose=True, onlyFailed=False):
global output
i, op = indexOperation
prStr = ""
if op.SourceSE:
prStr += "SourceSE: %s" % op.SourceSE
if op.TargetSE:
prStr += (" - " if prStr else "") + "TargetSE: %s" % op.TargetSE
if prStr:
prStr += " - "
prStr += "Created %s, Updated %s" % (op.CreationTime, op.LastUpdate)
if op.Type == "ForwardDISET" and op.Arguments:
from DIRAC.Core.Utilities import DEncode
decode, _length = DEncode.decode(op.Arguments)
if verbose:
output = ""
prettyPrint(decode, offset=10)
prStr += "\n Arguments:\n" + output.strip("\n")
else:
prStr += "\n Service: %s" % decode[0][0]
gLogger.always(
" [%s] Operation Type='%s' ID=%s Order=%s Status='%s'%s%s"
% (
i,
op.Type,
op.OperationID if hasattr(op, "OperationID") else "(not set yet)",
op.Order,
op.Status,
(" Error='%s'" % op.Error) if op.Error and op.Error.strip() else "",
(" Catalog=%s" % op.Catalog) if op.Catalog else "",
)
)
if prStr:
gLogger.always(" %s" % prStr)
for indexFile in enumerate(op):
if not onlyFailed or indexFile[1].Status == "Failed":
printFile(indexFile)
def printFile(indexFile):
ind, fi = indexFile
gLogger.always(
" [%02d] ID=%s LFN='%s' Status='%s'%s%s%s"
% (
ind + 1,
fi.FileID if hasattr(fi, "FileID") else "(not set yet)",
fi.LFN,
fi.Status,
(" Checksum='%s'" % fi.Checksum) if fi.Checksum or (fi.Error and "checksum" in fi.Error.lower()) else "",
(" Error='%s'" % fi.Error) if fi.Error and fi.Error.strip() else "",
(" Attempts=%d" % fi.Attempt) if fi.Attempt > 1 else "",
)
)
def recoverableRequest(request):
excludedErrors = (
"File does not exist",
"No such file or directory",
"sourceSURL equals to targetSURL",
"Max attempts limit reached",
"Max attempts reached",
)
operationErrorsOK = ("is banned for", "Failed to perform exists from any catalog")
for op in request:
if op.Status == "Failed" and (
not op.Error or not [errStr for errStr in operationErrorsOK if errStr in op.Error]
):
for fi in op:
if fi.Status == "Failed":
if [errStr for errStr in excludedErrors if errStr in fi.Error]:
return False
return True
return True
|
DIRACGrid/DIRAC
|
src/DIRAC/RequestManagementSystem/Client/ReqClient.py
|
Python
|
gpl-3.0
| 28,083 | 0.002635 |
#!/usr/bin/python3
"""
Given a function rand7 which generates a uniform random integer in the range 1
to 7, write a function rand10 which generates a uniform random integer in the
range 1 to 10.
Do NOT use system's Math.random().
"""
# The rand7() API is already defined for you.
def rand7():
return 0
class Solution:
def rand10(self):
"""
generate 7 twice, (rv1, rv2), 49 combination
assign 40 combinations for the 1 to 10 respectively
7-ary system
:rtype: int
"""
while True:
rv1 = rand7()
rv2 = rand7()
s = (rv1 - 1) * 7 + (rv2 - 1) # make it start from 0
if s < 40: # s \in [0, 40)
return s % 10 + 1 # since I make it start from 0
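# Illustrative sanity check (not part of the original solution): rebinding rand7
# to a real uniform generator (an assumption, for demo purposes only) lets us
# verify empirically that rand10() is uniform; each outcome in 1..10 corresponds
# to exactly 4 of the 40 accepted (rv1, rv2) combinations.
if __name__ == "__main__":
    import random
    rand7 = lambda: random.randint(1, 7)  # stand-in for the judge-provided rand7
    counts = [0] * 10
    sol = Solution()
    for _ in range(70000):
        counts[sol.rand10() - 1] += 1
    print(counts)  # each bucket should be close to 7000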
|
algorhythms/LeetCode
|
470 Implement Rand10() Using Rand7().py
|
Python
|
mit
| 773 | 0 |
# author David Sanchez david.sanchez@lapp.in2p3.fr
# ------ Imports --------------- #
import numpy
from Plot.PlotLibrary import *
from Catalog.ReadFermiCatalog import *
from environ import FERMI_CATALOG_DIR
# ------------------------------ #
#look for this 2FGL source
source = "2FGL J1015.1+4925"
#source = "1FHL J2158.8-3013"
#source = "3FGL J2158.8-3013"
Cat = FermiCatalogReader(source,FERMI_CATALOG_DIR,"e2dnde","TeV")
#print some information
print "2FGL association ",Cat.Association('3FGL')
print "3FGL Name ",Cat.Association('2FHL','3FGL_name')
print "3FGL Var Index ",Cat.GetVarIndex("3FGL")
#create a spectrum for a given catalog and compute the model+butterfly
Cat.MakeSpectrum("3FGL",1e-4,0.3)
enerbut,but,enerphi,phi = Cat.Plot("3FGL")
Cat.MakeSpectrum("2FGL",1e-4,0.3)
enerbut2FGL,but2FGL,enerphi2FGL,phi2FGL = Cat.Plot("2FGL")
Cat.MakeSpectrum("2FHL",5e-2,2)
enerbut2FHL,but2FHL,enerphi2FHL,phi2FHL = Cat.Plot("2FHL")
# read DATA Point
em,ep,flux,dflux = Cat.GetDataPoints('3FGL') #energy in TeV since the user ask for that in the call of Cat
ener = numpy.sqrt(em*ep)
dem = ener-em
dep = ep-ener
c=Cat.ReadPL('3FGL')[3]
dnde = (-c+1)*flux*numpy.power(ener*1e6,-c+2)/(numpy.power((ep*1e6),-c+1)-numpy.power((em*1e6),-c+1))*1.6e-6
ddnde = dnde*dflux/flux
#plot
import matplotlib.pyplot as plt
plt.loglog()
plt.plot(enerbut, but, 'b-',label = "3FGL")
plt.plot(enerphi,phi, 'b-')
plt.plot(enerbut2FGL,but2FGL,'g-',label = "2FGL")
plt.plot(enerphi2FGL,phi2FGL,'g-')
plt.plot(enerbut2FHL,but2FHL,'r-',label = "2FHL")
plt.plot(enerphi2FHL,phi2FHL,'r-')
plt.errorbar(ener, dnde, xerr= [dem,dep], yerr = ddnde,fmt='o')
plt.legend(loc = 3)
plt.ylabel('E2dN/dE(erg.cm-2.s-1)')
plt.xlabel('energy (TeV)')
plt.show()
|
qpiel/python_estimation_source
|
Example/ExReadFermiCatalog.py
|
Python
|
gpl-3.0
| 1,737 | 0.039724 |
#!/usr/bin/env python
#! -O
#
# python equivalent for grouplisten, works same way
#
#EIBD client library
#Copyright (C) 2006 Tony Przygienda, Z2 GmbH
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#In addition to the permissions in the GNU General Public License,
#you may link the compiled version of this file into combinations
#with other programs, and distribute those combinations without any
#restriction coming from the use of this file. (The General Public
#License restrictions do apply in other respects; for example, they
#cover modification of the file, and distribution when not linked into
#a combine executable.)
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
import os
import getopt
import eibclient.eibclient
from eibclient.common import *
if __name__ == "__main__":
if len(sys.argv) != 3:
print "usage: %s url <group address>" % sys.argv[0];
sys.exit(1);
try:
con = eibclient.eibclient.EIBSocketURL (sys.argv[1])
except (Exception), e:
print e
for i in sys.argv[2:]:
dest = readgaddr (i)
if (eibclient.eibclient.EIBOpenT_Group (con, dest, 0) == -1):
print "Connect failed"
sys.exit(1);
if con == None:
print "Open failed";
sys.exit(1);
while 1:
(result, buf, src) = eibclient.eibclient.EIBGetAPDU_Src (con)
if len(buf) < 2:
print "Read failed"
sys.exit(1)
if (ord(buf[0]) & 0x3 or (ord(buf[1]) & 0xc0) == 0xc0):
            print "Unknown APDU from %s" % individual2string(src)
ps = ""
if (ord(buf[1]) & 0xC0) == 0:
ps = ps + "Read"
elif (ord(buf[1]) & 0xC0) == 0x40:
ps = ps + "Response"
elif (ord(buf[1]) & 0xC0) == 0x80:
ps = ps + "Write"
else:
ps = ps + "???"
ps = ps + " from "
ps = ps + individual2string (src);
if (ord(buf[1]) & 0xC0):
ps = ps + ": "
if result == 2:
ps = ps + ( "%02X" % (ord(buf[1]) & 0x3F) )
else:
printHex (len - 2, buf + 2);
print ps;
    eibclient.eibclient.EIBClose(con)
|
Makki1/old-svn
|
avr/sketchbook/GiraRM_Debug/freebus/freebus_ets/software/freebus-ets/eibnet/grouplisten.py
|
Python
|
gpl-3.0
| 2,687 | 0.040194 |
#!/usr/bin/python2.4
#
# Copyright 2009 Empeeric LTD. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils import simplejson
import urllib,urllib2
import urlparse
import string
BITLY_BASE_URL = "http://api.bit.ly/"
BITLY_API_VERSION = "2.0.1"
VERBS_PARAM = {
'shorten':'longUrl',
'expand':'shortUrl',
'info':'shortUrl',
'stats':'shortUrl',
'errors':'',
}
class BitlyError(Exception):
'''Base class for bitly errors'''
@property
def message(self):
'''Returns the first argument used to construct this error.'''
return self.args[0]
class Api():
""" API class for bit.ly """
def __init__(self, login, apikey):
self.login = login
self.apikey = apikey
self._urllib = urllib2
def shorten(self,longURL):
"""
Takes either:
A long URL string and returns shortened URL string
Or a list of long URL strings and returnes a list of shortened URL strings.
"""
if not isinstance(longURL, list):
longURL = [longURL]
for index,url in enumerate(longURL):
if not url.startswith("http"):
longURL[index] = "http://" + url
request = self._getURL("shorten",longURL)
result = self._fetchUrl(request)
json = simplejson.loads(result)
self._CheckForError(json)
res = []
for item in json['results'].values():
if item['shortKeywordUrl'] == "":
res.append(item['shortUrl'])
else:
res.append(item['shortKeywordUrl'])
if len(res) == 1:
return res[0]
else:
return res
def expand(self,shortURL):
""" Given a bit.ly url or hash, return long source url """
request = self._getURL("expand",shortURL)
result = self._fetchUrl(request)
json = simplejson.loads(result)
self._CheckForError(json)
return json['results'][string.split(shortURL, '/')[-1]]['longUrl']
def info(self,shortURL):
"""
Given a bit.ly url or hash,
return information about that page,
such as the long source url
"""
request = self._getURL("info",shortURL)
result = self._fetchUrl(request)
json = simplejson.loads(result)
self._CheckForError(json)
return json['results'][string.split(shortURL, '/')[-1]]
def stats(self,shortURL):
""" Given a bit.ly url or hash, return traffic and referrer data. """
request = self._getURL("stats",shortURL)
result = self._fetchUrl(request)
json = simplejson.loads(result)
self._CheckForError(json)
return Stats.NewFromJsonDict(json['results'])
def errors(self):
""" Get a list of bit.ly API error codes. """
request = self._getURL("errors","")
result = self._fetchUrl(request)
json = simplejson.loads(result)
self._CheckForError(json)
return json['results']
def setUrllib(self, urllib):
'''Override the default urllib implementation.
Args:
urllib: an instance that supports the same API as the urllib2 module
'''
self._urllib = urllib
def _getURL(self,verb,paramVal):
if not isinstance(paramVal, list):
paramVal = [paramVal]
params = [
('version',BITLY_API_VERSION),
('format','json'),
('login',self.login),
('apiKey',self.apikey),
]
verbParam = VERBS_PARAM[verb]
if verbParam:
for val in paramVal:
params.append(( verbParam,val ))
encoded_params = urllib.urlencode(params)
return "%s%s?%s" % (BITLY_BASE_URL,verb,encoded_params)
def _fetchUrl(self,url):
'''Fetch a URL
Args:
url: The URL to retrieve
Returns:
A string containing the body of the response.
'''
# Open and return the URL
url_data = self._urllib.urlopen(url).read()
return url_data
def _CheckForError(self, data):
"""Raises a BitlyError if bitly returns an error message.
Args:
data: A python dict created from the bitly json response
Raises:
BitlyError wrapping the bitly error message if one exists.
"""
# bitly errors are relatively unlikely, so it is faster
# to check first, rather than try and catch the exception
if 'ERROR' in data or data['statusCode'] == 'ERROR':
raise BitlyError, data['errorMessage']
for key in data['results']:
if type(data['results']) is dict and type(data['results'][key]) is dict:
if 'statusCode' in data['results'][key] and data['results'][key]['statusCode'] == 'ERROR':
raise BitlyError, data['results'][key]['errorMessage']
class Stats(object):
'''A class representing the Statistics returned by the bitly api.
The Stats structure exposes the following properties:
    stats.user_clicks # read only
    stats.total_clicks # read only
'''
def __init__(self,user_clicks=None,total_clicks=None):
self.user_clicks = user_clicks
self.total_clicks = total_clicks
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data: A JSON dict, as converted from the JSON in the bitly API
Returns:
A bitly.Stats instance
'''
return Stats(user_clicks=data.get('userClicks', None),
total_clicks=data.get('clicks', None))
if __name__ == '__main__':
testURL1="www.yahoo.com"
testURL2="www.cnn.com"
a=Api(login="pythonbitly",apikey="R_06871db6b7fd31a4242709acaf1b6648")
short=a.shorten(testURL1)
print "Short URL = %s" % short
urlList=[testURL1,testURL2]
shortList=a.shorten(urlList)
print "Short URL list = %s" % shortList
long=a.expand(short)
print "Expanded URL = %s" % long
info=a.info(short)
print "Info: %s" % info
stats=a.stats(short)
print "User clicks %s, total clicks: %s" % (stats.user_clicks,stats.total_clicks)
errors=a.errors()
print "Errors: %s" % errors
|
poeks/twitterbelle
|
lib/bitly.py
|
Python
|
apache-2.0
| 7,225 | 0.01301 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUEgressAuditACLEntryTemplatesFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUEgressAuditACLTemplate(NURESTObject):
""" Represents a EgressAuditACLTemplate in the VSD
Notes:
An egress audit policy is a set of rules defining how network traffic is monitored and mirrored from a domain for Audit purposes
"""
__rest_name__ = "egressauditacltemplate"
__resource_name__ = "egressauditacltemplates"
## Constants
CONST_POLICY_STATE_DRAFT = "DRAFT"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_PRIORITY_TYPE_TOP_AUDIT = "TOP_AUDIT"
CONST_POLICY_STATE_LIVE = "LIVE"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a EgressAuditACLTemplate instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> egressauditacltemplate = NUEgressAuditACLTemplate(id=u'xxxx-xxx-xxx-xxx', name=u'EgressAuditACLTemplate')
>>> egressauditacltemplate = NUEgressAuditACLTemplate(data=my_dict)
"""
super(NUEgressAuditACLTemplate, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._active = None
self._default_allow_ip = None
self._default_allow_non_ip = None
self._default_install_acl_implicit_rules = None
self._description = None
self._embedded_metadata = None
self._entity_scope = None
self._policy_state = None
self._creation_date = None
self._priority = None
self._priority_type = None
self._associated_live_entity_id = None
self._associated_virtual_firewall_policy_id = None
self._auto_generate_priority = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="active", remote_name="active", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_allow_ip", remote_name="defaultAllowIP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_allow_non_ip", remote_name="defaultAllowNonIP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_install_acl_implicit_rules", remote_name="defaultInstallACLImplicitRules", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="policy_state", remote_name="policyState", attribute_type=str, is_required=False, is_unique=False, choices=[u'DRAFT', u'LIVE'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="priority", remote_name="priority", attribute_type=int, is_required=False, is_unique=True)
self.expose_attribute(local_name="priority_type", remote_name="priorityType", attribute_type=str, is_required=False, is_unique=True, choices=[u'TOP_AUDIT'])
self.expose_attribute(local_name="associated_live_entity_id", remote_name="associatedLiveEntityID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_virtual_firewall_policy_id", remote_name="associatedVirtualFirewallPolicyID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="auto_generate_priority", remote_name="autoGeneratePriority", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.egress_audit_acl_entry_templates = NUEgressAuditACLEntryTemplatesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
The name of the entity
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
The name of the entity
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def active(self):
""" Get active value.
Notes:
If enabled, it means that this ACL or QOS entry is active
"""
return self._active
@active.setter
def active(self, value):
""" Set active value.
Notes:
If enabled, it means that this ACL or QOS entry is active
"""
self._active = value
@property
def default_allow_ip(self):
""" Get default_allow_ip value.
Notes:
If enabled a default ACL of Allow All is added as the last entry in the list of ACL entries
This attribute is named `defaultAllowIP` in VSD API.
"""
return self._default_allow_ip
@default_allow_ip.setter
def default_allow_ip(self, value):
""" Set default_allow_ip value.
Notes:
If enabled a default ACL of Allow All is added as the last entry in the list of ACL entries
This attribute is named `defaultAllowIP` in VSD API.
"""
self._default_allow_ip = value
@property
def default_allow_non_ip(self):
""" Get default_allow_non_ip value.
Notes:
If enabled, non ip traffic will be dropped
This attribute is named `defaultAllowNonIP` in VSD API.
"""
return self._default_allow_non_ip
@default_allow_non_ip.setter
def default_allow_non_ip(self, value):
""" Set default_allow_non_ip value.
Notes:
If enabled, non ip traffic will be dropped
This attribute is named `defaultAllowNonIP` in VSD API.
"""
self._default_allow_non_ip = value
@property
def default_install_acl_implicit_rules(self):
""" Get default_install_acl_implicit_rules value.
Notes:
If enabled, implicit rule will allow intra domain traffic by default
This attribute is named `defaultInstallACLImplicitRules` in VSD API.
"""
return self._default_install_acl_implicit_rules
@default_install_acl_implicit_rules.setter
def default_install_acl_implicit_rules(self, value):
""" Set default_install_acl_implicit_rules value.
Notes:
If enabled, implicit rule will allow intra domain traffic by default
This attribute is named `defaultInstallACLImplicitRules` in VSD API.
"""
self._default_install_acl_implicit_rules = value
@property
def description(self):
""" Get description value.
Notes:
A description of the entity
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A description of the entity
"""
self._description = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def policy_state(self):
""" Get policy_state value.
Notes:
None
This attribute is named `policyState` in VSD API.
"""
return self._policy_state
@policy_state.setter
def policy_state(self, value):
""" Set policy_state value.
Notes:
None
This attribute is named `policyState` in VSD API.
"""
self._policy_state = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def priority(self):
""" Get priority value.
Notes:
The priority of the ACL entry that determines the order of entries
"""
return self._priority
@priority.setter
def priority(self, value):
""" Set priority value.
Notes:
The priority of the ACL entry that determines the order of entries
"""
self._priority = value
@property
def priority_type(self):
""" Get priority_type value.
Notes:
                Possible values: TOP_AUDIT. This will be the topmost entry of the egress ACL stack
This attribute is named `priorityType` in VSD API.
"""
return self._priority_type
@priority_type.setter
def priority_type(self, value):
""" Set priority_type value.
Notes:
                Possible values: TOP_AUDIT. This will be the topmost entry of the egress ACL stack
This attribute is named `priorityType` in VSD API.
"""
self._priority_type = value
@property
def associated_live_entity_id(self):
""" Get associated_live_entity_id value.
Notes:
In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null.
This attribute is named `associatedLiveEntityID` in VSD API.
"""
return self._associated_live_entity_id
@associated_live_entity_id.setter
def associated_live_entity_id(self, value):
""" Set associated_live_entity_id value.
Notes:
In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null.
This attribute is named `associatedLiveEntityID` in VSD API.
"""
self._associated_live_entity_id = value
@property
def associated_virtual_firewall_policy_id(self):
""" Get associated_virtual_firewall_policy_id value.
Notes:
The ID of the Virtual Firewall Policy, if this was created as part of the Virtual Firewall Policy creation
This attribute is named `associatedVirtualFirewallPolicyID` in VSD API.
"""
return self._associated_virtual_firewall_policy_id
@associated_virtual_firewall_policy_id.setter
def associated_virtual_firewall_policy_id(self, value):
""" Set associated_virtual_firewall_policy_id value.
Notes:
The ID of the Virtual Firewall Policy, if this was created as part of the Virtual Firewall Policy creation
This attribute is named `associatedVirtualFirewallPolicyID` in VSD API.
"""
self._associated_virtual_firewall_policy_id = value
@property
def auto_generate_priority(self):
""" Get auto_generate_priority value.
Notes:
                This option only affects how the priorities of this template's child ACL entries are generated when no priority is specified. If 'false', the priority is generated by incrementing the current highest ACL entry priority by 100. If 'true', a random priority is generated, which is advised when creating many entries concurrently without specifying priorities; the new child ACL entry then gets a random, non-predictable priority, so only enable this when allow rules are being created. If any particular ACL entry order is required, keep this value 'false' and assign your own priorities, which ensures a clear set of priorities and a predictable order in which traffic is validated against the ACL entries.
This attribute is named `autoGeneratePriority` in VSD API.
"""
return self._auto_generate_priority
@auto_generate_priority.setter
def auto_generate_priority(self, value):
""" Set auto_generate_priority value.
Notes:
                This option only affects how the priorities of this template's child ACL entries are generated when no priority is specified. If 'false', the priority is generated by incrementing the current highest ACL entry priority by 100. If 'true', a random priority is generated, which is advised when creating many entries concurrently without specifying priorities; the new child ACL entry then gets a random, non-predictable priority, so only enable this when allow rules are being created. If any particular ACL entry order is required, keep this value 'false' and assign your own priorities, which ensures a clear set of priorities and a predictable order in which traffic is validated against the ACL entries.
This attribute is named `autoGeneratePriority` in VSD API.
"""
self._auto_generate_priority = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
|
nuagenetworks/vspk-python
|
vspk/v6/nuegressauditacltemplate.py
|
Python
|
bsd-3-clause
| 21,660 | 0.008726 |
# redis-import-set
import sys
from csv import reader
from itertools import groupby
import redis
if __name__ == '__main__':
r = redis.Redis()
pipeline_redis = r.pipeline()
count = 0
try:
keyname = sys.argv[1]
except IndexError:
raise Exception("You must specify the name for the Set")
for k, _ in groupby(reader(sys.stdin, delimiter='\t'),
lambda x:x[0]):
pipeline_redis.sadd(keyname, k)
count += 1
        if not count % 10000:
            pipeline_redis.execute()
    # flush any commands left over after the final partial batch
    pipeline_redis.execute()
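# Hedged usage sketch (the file and key names below are hypothetical): pipe a
# tab-separated file on stdin and pass the destination set name as argv[1].
#
#   cat user_ids.tsv | python redis-import-set_groupby.py user_ids
#
# Every distinct value in the first column is SADD-ed to the "user_ids" set,
# with the pipeline flushed every 10000 commands and once more at the end.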
|
unbracketed/RedLine
|
redline/examples/redis-import-set_groupby.py
|
Python
|
mit
| 554 | 0.018051 |
from textwrap import dedent
import inspect
from collections import OrderedDict
from clusterjob import JobScript
import pytest
import logging
try:
from ConfigParser import Error as ConfigParserError
except ImportError:
from configparser import Error as ConfigParserError
# built-in fixtures: tmpdir
# pytest-capturelog fixtures: caplog
def get_methods(obj):
"""Get list of methods of object or class"""
return sorted([k for (k, v) in inspect.getmembers(obj, inspect.isroutine)])
# isroutine works in Python 2 and Python 3, while ismethod does not work in
# Python 3 if obj is a class (since the methods are not *bound*)
def get_attributes(obj, hidden=False):
"""Get list of attributes of object"""
methods = get_methods(obj)
attribs = sorted([k for k in obj.__dict__ if k not in methods])
if hidden:
return attribs
else:
return [attr for attr in attribs if not attr.startswith('_')]
def default_class_attr_val(attr):
"""Return the default value for the given class attribute"""
defaults = JobScript._attributes.copy()
defaults.update(JobScript._protected_attributes)
try:
return defaults[attr]
except KeyError:
if attr == 'resources':
return OrderedDict()
else:
return None
def check_attributes(obj, expected):
for key in expected:
assert getattr(obj, key) == expected[key]
def check_resources(obj, expected):
for key in expected:
assert obj.resources[key] == expected[key]
def example_inidata():
inidata = dedent(r'''
[Attributes]
remote = login.cluster.edu
backend = pbs
shell = /bin/sh
cache_folder = cache
prologue =
ssh {remote} 'mkdir -p {rootdir}/{workdir}'
rsync -av {workdir}/ {remote}:{rootdir}/{workdir}
epilogue = rsync -av {remote}:{rootdir}/{workdir}/ {workdir}
rootdir = ~/jobs/
workdir = run001
max_sleep_interval = 60
# the following is a new attribute
text = Hello World
[Resources]
queue = exec
nodes = 1
threads = 12
mem = 10000
''')
expected_attribs = {
'remote': 'login.cluster.edu',
'backend': 'pbs',
'shell': '/bin/sh',
'prologue' : "ssh {remote} 'mkdir -p {rootdir}/{workdir}'\n"
"rsync -av {workdir}/ {remote}:{rootdir}/{workdir}",
'epilogue': "rsync -av {remote}:{rootdir}/{workdir}/ {workdir}",
'rootdir': '~/jobs',
'workdir': 'run001',
'max_sleep_interval': 60,
'text': "Hello World"
}
expected_resources = {
'queue': 'exec',
'nodes': 1,
'threads': 12,
'mem': 10000,
}
return inidata, expected_attribs, expected_resources
def test_read_inifile(tmpdir):
p = tmpdir.join("default.ini")
ini_filename = str(p)
attribs = {}
resources = {}
def attr_setter(k,v):
attribs[k] = v
def rsrc_setter(k,v):
resources[k] = v
inidata = ''
p.write(inidata)
with pytest.raises(ConfigParserError) as exc_info:
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert "must contain at least one of the sections" in str(exc_info.value)
inidata = dedent(r'''
max_sleep_interval = 60
''')
p.write(inidata)
with pytest.raises(ConfigParserError) as exc_info:
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert "File contains no section headers" in str(exc_info.value)
inidata = dedent(r'''
[Attributes]
max_sleep_interval = 60
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['max_sleep_interval'] == 60
inidata = dedent(r'''
[Resources]
threads = 2
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['max_sleep_interval'] == 60
assert resources['threads'] == 2
inidata = dedent(r'''
[Attributes]
shell = /bin/bash
[Resources]
nodes = 1
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['max_sleep_interval'] == 60
assert attribs['shell'] == '/bin/bash'
assert resources['threads'] == 2
assert resources['nodes'] == 1
# both section headers and keys are case sensitive
inidata = dedent(r'''
[Attributes]
Max_Sleep_Interval = 120
Shell = /bin/bash
[Resources]
Nodes = 1
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['max_sleep_interval'] == 60
assert attribs['Max_Sleep_Interval'] == '120' # no conversion to int!
inidata = dedent(r'''
[Attributes]
shell = /bin/bash
[Resources]
nodes = 1
[Schedulers]
cluster = login.cluster.com
''')
p.write(inidata)
with pytest.raises(ConfigParserError) as exc_info:
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert "Invalid section 'Schedulers'" in str(exc_info.value)
inidata = dedent(r'''
[Attributes]
resources = {1:2}
''')
p.write(inidata)
with pytest.raises(ConfigParserError) as exc_info:
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert "not allowed" in str(exc_info.value)
# quotes are not stripped out!
inidata = dedent(r'''
[Attributes]
text = "This is a text"
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['text'] == '"This is a text"'
def test_read_defaults(caplog, tmpdir):
JobScript.read_defaults() # reset
caplog.setLevel(logging.DEBUG, logger='clusterjob')
jobscript = JobScript(body="echo 'Hello'", jobname="test")
assert get_attributes(jobscript) == ['aux_scripts', 'body', 'resources']
assert get_attributes(jobscript.__class__) == ['backend', 'backends',
'cache_folder', 'cache_prefix', 'epilogue', 'filename',
'max_sleep_interval', 'prologue', 'remote', 'resources', 'rootdir',
'scp', 'shell', 'ssh', 'workdir']
for attr in get_attributes(jobscript.__class__):
if attr not in ['resources', 'backends']:
assert getattr(jobscript, attr) == default_class_attr_val(attr)
inidata, expected_attribs, expected_resources = example_inidata()
p = tmpdir.join("default.ini")
p.write(inidata)
ini_filename = str(p)
# Setting class defaults before instantiation sets both the attributes and
# the resources
JobScript.read_defaults(ini_filename)
jobscript = JobScript(body="echo '{text}'", jobname="test")
assert get_attributes(jobscript) == ['aux_scripts', 'body', 'resources']
check_attributes(jobscript, expected_attribs)
check_resources(jobscript, expected_resources)
assert str(jobscript) == dedent(r'''
#!/bin/sh
#PBS -l nodes=1:ppn=12
#PBS -q exec
#PBS -l mem=10000m
#PBS -N test
echo 'Hello World'
''').strip()
# calling read_defaults without filename argument resets the class, and
# thus also changes the attributes of an existing instance
JobScript.read_defaults()
check_resources(jobscript, expected_resources)
for attr in get_attributes(jobscript.__class__):
if attr not in ['resources', 'backends']:
assert getattr(jobscript, attr) == default_class_attr_val(attr)
with pytest.raises(KeyError) as exc_info:
str(jobscript)
assert "no matching attribute or resource entry" in str(exc_info.value)
jobscript.text = 'Hello World' # instance attribute
assert str(jobscript) == dedent(r'''
#!/bin/bash
#SBATCH --partition=exec
#SBATCH --nodes=1
#SBATCH --cpus-per-task=12
#SBATCH --mem=10000
#SBATCH --job-name=test
echo 'Hello World'
''').strip()
# Setting class defaults after instantiation sets the attributes, but not
# the resources
jobscript = JobScript(body="echo '{text}'", jobname="test")
JobScript.read_defaults(ini_filename)
assert str(jobscript) == dedent(r'''
#!/bin/sh
#PBS -N test
echo 'Hello World'
''').strip()
def test_read_settings(caplog, tmpdir):
JobScript.read_defaults() # reset
caplog.setLevel(logging.DEBUG, logger='clusterjob')
jobscript = JobScript(body="echo '{text}'", jobname="test")
assert get_attributes(jobscript) == ['aux_scripts', 'body', 'resources']
jobscript2 = JobScript(body="echo 'Hello'", jobname="test2")
inidata, expected_attribs, expected_resources = example_inidata()
p = tmpdir.join("job.ini")
p.write(inidata)
ini_filename = str(p)
with pytest.raises(AttributeError) as excinfo:
jobscript.read_settings(ini_filename)
assert "'cache_folder' can only be set as a class attribute" \
in str(excinfo.value)
inidata = inidata.replace("cache_folder = cache\n", "")
p.write(inidata)
jobscript.read_settings(ini_filename)
assert get_attributes(jobscript) == ['aux_scripts', 'backend', 'body',
'epilogue', 'max_sleep_interval', 'prologue', 'remote',
'resources', 'rootdir', 'shell', 'text', 'workdir']
# class attributes remain unaffected
for attr in get_attributes(JobScript):
if attr not in ['resources', 'backends']:
assert getattr(JobScript, attr) == default_class_attr_val(attr)
assert str(jobscript) == dedent(r'''
#!/bin/sh
#PBS -l nodes=1:ppn=12
#PBS -N test
#PBS -q exec
#PBS -l mem=10000m
echo 'Hello World'
''').strip()
# the second jobscript is unaffected
assert str(jobscript2) == dedent(r'''
#!/bin/bash
#SBATCH --job-name=test2
echo 'Hello'
''').strip()
def test_read_invalid_attribute(caplog, tmpdir):
JobScript.read_defaults() # reset
caplog.setLevel(logging.DEBUG, logger='clusterjob')
jobscript = JobScript(body="echo '{text}'", jobname="test")
inidata = dedent(r'''
[Attributes]
_remote = login.cluster.edu
''')
p = tmpdir.join("job.ini")
p.write(inidata)
ini_filename = str(p)
with pytest.raises(ConfigParserError) as exc_info:
jobscript.read_settings(ini_filename)
assert "Key '_remote' is invalid" in str(exc_info.value)
inidata = dedent(r'''
[Attributes]
key with spaces = bla
''')
p = tmpdir.join("job.ini")
p.write(inidata)
ini_filename = str(p)
with pytest.raises(ConfigParserError) as exc_info:
jobscript.read_settings(ini_filename)
assert "Key 'key with spaces' is invalid" in str(exc_info.value)
|
goerz/clusterjob
|
tests/test_inifile.py
|
Python
|
mit
| 10,660 | 0.001689 |
#!/usr/bin/python
# ZetCode PyGTK tutorial
#
# This example shows how to use
# the Alignment widget
#
# author: jan bodnar
# website: zetcode.com
# last edited: February 2009
import gtk
import gobject
class PyApp(gtk.Window):
def __init__(self):
super(PyApp, self).__init__()
self.set_title("Alignment")
self.set_size_request(260, 150)
self.set_position(gtk.WIN_POS_CENTER)
vbox = gtk.VBox(False, 5)
hbox = gtk.HBox(True, 3)
valign = gtk.Alignment(0, 1, 0, 0)
vbox.pack_start(valign)
ok = gtk.Button("OK")
ok.set_size_request(70, 30)
close = gtk.Button("Close")
hbox.add(ok)
hbox.add(close)
halign = gtk.Alignment(1, 0, 0, 0)
halign.add(hbox)
vbox.pack_start(halign, False, False, 3)
self.add(vbox)
self.connect("destroy", gtk.main_quit)
self.show_all()
PyApp()
gtk.main()
|
HPPTECH/hpp_IOSTressTest
|
Refer/Alignment.py
|
Python
|
mit
| 995 | 0.009045 |
__all__ = ["machsuite", "shoc", "datatypes", "params"]
|
andrewfu0325/gem5-aladdin
|
sweeps/benchmarks/__init__.py
|
Python
|
bsd-3-clause
| 55 | 0 |
# lint-amnesty, pylint: disable=missing-module-docstring
import logging
from functools import partial
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponseBadRequest
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.views.decorators.clickjacking import xframe_options_exempt
from opaque_keys.edx.keys import UsageKey
from web_fragments.fragment import Fragment
from xblock.django.request import django_to_webob_request, webob_to_django_response
from xblock.exceptions import NoSuchHandlerError
from xblock.runtime import KvsFieldData
from cms.djangoapps.xblock_config.models import StudioConfig
from cms.lib.xblock.field_data import CmsFieldData
from common.djangoapps import static_replace
from common.djangoapps.edxmako.shortcuts import render_to_string
from common.djangoapps.xblock_django.user_service import DjangoXBlockUserService
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from openedx.core.lib.license import wrap_with_license
from openedx.core.lib.xblock_utils import (
replace_static_urls,
request_token,
wrap_fragment,
wrap_xblock,
wrap_xblock_aside,
xblock_local_resource_url
)
from xmodule.contentstore.django import contentstore
from xmodule.error_module import ErrorBlock
from xmodule.exceptions import NotFoundError, ProcessingError
from xmodule.modulestore.django import ModuleI18nService, modulestore
from xmodule.partitions.partitions_service import PartitionService
from xmodule.services import SettingsService
from xmodule.studio_editable import has_author_view
from xmodule.util.sandboxing import can_execute_unsafe_code, get_python_lib_zip
from xmodule.util.xmodule_django import add_webpack_to_fragment
from xmodule.x_module import AUTHOR_VIEW, PREVIEW_VIEWS, STUDENT_VIEW, ModuleSystem, XModule, XModuleDescriptor
from ..utils import get_visibility_partition_info
from .access import get_user_role
from .helpers import render_from_lms
from .session_kv_store import SessionKeyValueStore
__all__ = ['preview_handler']
log = logging.getLogger(__name__)
@login_required
@xframe_options_exempt
def preview_handler(request, usage_key_string, handler, suffix=''):
"""
Dispatch an AJAX action to an xblock
usage_key_string: The usage_key_string-id of the block to dispatch to, passed through `quote_slashes`
handler: The handler to execute
suffix: The remainder of the url to be passed to the handler
"""
usage_key = UsageKey.from_string(usage_key_string)
descriptor = modulestore().get_item(usage_key)
instance = _load_preview_module(request, descriptor)
# Let the module handle the AJAX
req = django_to_webob_request(request)
try:
resp = instance.handle(handler, req, suffix)
except NoSuchHandlerError:
log.exception("XBlock %s attempted to access missing handler %r", instance, handler)
raise Http404 # lint-amnesty, pylint: disable=raise-missing-from
except NotFoundError:
log.exception("Module indicating to user that request doesn't exist")
raise Http404 # lint-amnesty, pylint: disable=raise-missing-from
except ProcessingError:
log.warning("Module raised an error while processing AJAX request",
exc_info=True)
return HttpResponseBadRequest()
except Exception:
log.exception("error processing ajax call")
raise
return webob_to_django_response(resp)
class PreviewModuleSystem(ModuleSystem): # pylint: disable=abstract-method
"""
An XModule ModuleSystem for use in Studio previews
"""
# xmodules can check for this attribute during rendering to determine if
# they are being rendered for preview (i.e. in Studio)
is_author_mode = True
def handler_url(self, block, handler_name, suffix='', query='', thirdparty=False):
return reverse('preview_handler', kwargs={
'usage_key_string': str(block.scope_ids.usage_id),
'handler': handler_name,
'suffix': suffix,
}) + '?' + query
def local_resource_url(self, block, uri):
return xblock_local_resource_url(block, uri)
def applicable_aside_types(self, block):
"""
Remove acid_aside and honor the config record
"""
if not StudioConfig.asides_enabled(block.scope_ids.block_type):
return []
# TODO: aside_type != 'acid_aside' check should be removed once AcidBlock is only installed during tests
# (see https://openedx.atlassian.net/browse/TE-811)
return [
aside_type
for aside_type in super().applicable_aside_types(block)
if aside_type != 'acid_aside'
]
def render_child_placeholder(self, block, view_name, context):
"""
Renders a placeholder XBlock.
"""
return self.wrap_xblock(block, view_name, Fragment(), context)
def layout_asides(self, block, context, frag, view_name, aside_frag_fns):
position_for_asides = '<!-- footer for xblock_aside -->'
result = Fragment()
result.add_fragment_resources(frag)
for aside, aside_fn in aside_frag_fns:
aside_frag = aside_fn(block, context)
if aside_frag.content != '':
aside_frag_wrapped = self.wrap_aside(block, aside, view_name, aside_frag, context)
aside.save()
result.add_fragment_resources(aside_frag_wrapped)
replacement = position_for_asides + aside_frag_wrapped.content
frag.content = frag.content.replace(position_for_asides, replacement)
result.add_content(frag.content)
return result
def _preview_module_system(request, descriptor, field_data):
"""
Returns a ModuleSystem for the specified descriptor that is specialized for
rendering module previews.
request: The active django request
descriptor: An XModuleDescriptor
"""
course_id = descriptor.location.course_key
display_name_only = (descriptor.category == 'static_tab')
wrappers = [
# This wrapper wraps the module in the template specified above
partial(
wrap_xblock,
'PreviewRuntime',
display_name_only=display_name_only,
usage_id_serializer=str,
request_token=request_token(request)
),
# This wrapper replaces urls in the output that start with /static
# with the correct course-specific url for the static content
partial(replace_static_urls, None, course_id=course_id),
_studio_wrap_xblock,
]
wrappers_asides = [
partial(
wrap_xblock_aside,
'PreviewRuntime',
usage_id_serializer=str,
request_token=request_token(request)
)
]
if settings.FEATURES.get("LICENSING", False):
# stick the license wrapper in front
wrappers.insert(0, wrap_with_license)
return PreviewModuleSystem(
static_url=settings.STATIC_URL,
# TODO (cpennington): Do we want to track how instructors are using the preview problems?
track_function=lambda event_type, event: None,
filestore=descriptor.runtime.resources_fs,
get_module=partial(_load_preview_module, request),
render_template=render_from_lms,
debug=True,
replace_urls=partial(static_replace.replace_static_urls, data_directory=None, course_id=course_id),
user=request.user,
can_execute_unsafe_code=(lambda: can_execute_unsafe_code(course_id)),
get_python_lib_zip=(lambda: get_python_lib_zip(contentstore, course_id)),
mixins=settings.XBLOCK_MIXINS,
course_id=course_id,
anonymous_student_id='student',
# Set up functions to modify the fragment produced by student_view
wrappers=wrappers,
wrappers_asides=wrappers_asides,
error_descriptor_class=ErrorBlock,
get_user_role=lambda: get_user_role(request.user, course_id),
# Get the raw DescriptorSystem, not the CombinedSystem
descriptor_runtime=descriptor._runtime, # pylint: disable=protected-access
services={
"field-data": field_data,
"i18n": ModuleI18nService,
"settings": SettingsService(),
"user": DjangoXBlockUserService(request.user),
"partitions": StudioPartitionService(course_id=course_id)
},
)
class StudioPartitionService(PartitionService):
"""
A runtime mixin to allow the display and editing of component visibility based on user partitions.
"""
def get_user_group_id_for_partition(self, user, user_partition_id):
"""
Override this method to return None, as the split_test_module calls this
to determine which group a user should see, but is robust to getting a return
value of None meaning that all groups should be shown.
"""
return None
def _load_preview_module(request, descriptor):
"""
Return a preview XModule instantiated from the supplied descriptor. Will use mutable fields
if XModule supports an author_view. Otherwise, will use immutable fields and student_view.
request: The active django request
descriptor: An XModuleDescriptor
"""
student_data = KvsFieldData(SessionKeyValueStore(request))
if has_author_view(descriptor):
wrapper = partial(CmsFieldData, student_data=student_data)
else:
wrapper = partial(LmsFieldData, student_data=student_data)
# wrap the _field_data upfront to pass to _preview_module_system
wrapped_field_data = wrapper(descriptor._field_data) # pylint: disable=protected-access
preview_runtime = _preview_module_system(request, descriptor, wrapped_field_data)
descriptor.bind_for_student(
preview_runtime,
request.user.id,
[wrapper]
)
return descriptor
def _is_xblock_reorderable(xblock, context):
"""
Returns true if the specified xblock is in the set of reorderable xblocks
otherwise returns false.
"""
try:
return xblock.location in context['reorderable_items']
except KeyError:
return False
# pylint: disable=unused-argument
def _studio_wrap_xblock(xblock, view, frag, context, display_name_only=False):
"""
Wraps the results of rendering an XBlock view in a div which adds a header and Studio action buttons.
"""
# Only add the Studio wrapper when on the container page. The "Pages" page will remain as is for now.
if not context.get('is_pages_view', None) and view in PREVIEW_VIEWS:
root_xblock = context.get('root_xblock')
is_root = root_xblock and xblock.location == root_xblock.location
is_reorderable = _is_xblock_reorderable(xblock, context)
selected_groups_label = get_visibility_partition_info(xblock)['selected_groups_label']
if selected_groups_label:
selected_groups_label = _('Access restricted to: {list_of_groups}').format(list_of_groups=selected_groups_label) # lint-amnesty, pylint: disable=line-too-long
course = modulestore().get_course(xblock.location.course_key)
template_context = {
'xblock_context': context,
'xblock': xblock,
'show_preview': context.get('show_preview', True),
'content': frag.content,
'is_root': is_root,
'is_reorderable': is_reorderable,
'can_edit': context.get('can_edit', True),
'can_edit_visibility': context.get('can_edit_visibility', xblock.scope_ids.usage_id.context_key.is_course),
'selected_groups_label': selected_groups_label,
'can_add': context.get('can_add', True),
'can_move': context.get('can_move', xblock.scope_ids.usage_id.context_key.is_course),
'language': getattr(course, 'language', None)
}
if isinstance(xblock, (XModule, XModuleDescriptor)):
# Add the webpackified asset tags
class_name = getattr(xblock.__class__, 'unmixed_class', xblock.__class__).__name__
add_webpack_to_fragment(frag, class_name)
add_webpack_to_fragment(frag, "js/factories/xblock_validation")
html = render_to_string('studio_xblock_wrapper.html', template_context)
frag = wrap_fragment(frag, html)
return frag
def get_preview_fragment(request, descriptor, context):
"""
Returns the HTML returned by the XModule's student_view or author_view (if available),
specified by the descriptor and idx.
"""
module = _load_preview_module(request, descriptor)
preview_view = AUTHOR_VIEW if has_author_view(module) else STUDENT_VIEW
try:
fragment = module.render(preview_view, context)
except Exception as exc: # pylint: disable=broad-except
log.warning("Unable to render %s for %r", preview_view, module, exc_info=True)
fragment = Fragment(render_to_string('html_error.html', {'message': str(exc)}))
return fragment
|
eduNEXT/edunext-platform
|
cms/djangoapps/contentstore/views/preview.py
|
Python
|
agpl-3.0
| 13,137 | 0.00236 |
from django.db import models
from lxml import html
import requests
from ..core.models import UUIDModel
from ..teams.models import FleaOwner
STAT_VARS = [ # Should be ordered accordingly
'stat_fgpct100', 'stat_ftpct100', 'stat_3pt', 'stat_reb',
'stat_stl', 'stat_blk', 'stat_ast', 'stat_to', 'stat_pts',
]
class RedditLeague(UUIDModel):
name = models.CharField(max_length=128)
url = models.URLField(unique=True)
last_updated = models.DateTimeField(auto_now=True)
class RedditLeagueDivision(UUIDModel):
name = models.CharField(max_length=128)
url = models.URLField(blank=True, null=True)
league = models.ForeignKey('RedditLeague', related_name='divisions')
last_updated = models.DateTimeField(auto_now=True)
class FleaLeague(UUIDModel):
name = models.CharField(max_length=128)
url = models.URLField(unique=True, null=False, blank=False)
division = models.ForeignKey('RedditLeagueDivision', related_name='leagues')
last_updated = models.DateTimeField(auto_now=True)
def fetch_league(self):
flea = requests.get(self.url)
assert flea.ok
tree = html.fromstring(flea.content) # MOVE THIS for memoization to work
self.name = tree.xpath('//*[@id="top-bar"]/ul/li[2]/text()')[0]
# Fetch teams
teams = tree.xpath('//*[@id="table_0"]/tr')
for team in teams:
team_name = team.xpath('td/div[contains(@class, "league-name")]/a/text()')[0]
team_url = team.xpath('td/div[contains(@class, "league-name")]/a/@href')[0]
if team.xpath('td/a/text()') == ['Take Over']:
# Team is not owned
team_owner_name = ''
team_owner_url = team.xpath('td/a/@href')[0]
team_owner_active = False
else:
team_owner_name = team.xpath('td/span/a[contains(@class, "user-name")]/text()')[0]
team_owner_url = team.xpath('td/span/a[contains(@class, "user-name")]/@href')[0]
team_owner_active = team.xpath(
'td/span/a[contains(@class, "user-name")]/@class'
)[0].find('inactive') == -1
(team_owner, _) = FleaOwner.objects.update_or_create( # update because active could change
url=team_owner_url,
defaults={
'name': team_owner_name,
'active': team_owner_active,
},
)
stat_from_ff = team.xpath('td[contains(@class, "right")]/span/text()')
# Transform (Remove commas and decimals (only for percentages))
for idx, val in enumerate(STAT_VARS):
stat = stat_from_ff[idx].replace(',', '')
if val in ['stat_fgpct100', 'stat_ftpct100']:
stat_from_ff[idx] = int(float(stat) * 100)
else:
stat_from_ff[idx] = int(stat)
stats = dict(zip(
STAT_VARS,
stat_from_ff,
))
self.teams.update_or_create(
url=team_url,
defaults={
'name': team_name,
'owner': team_owner,
**stats,
},
)
self.save()
|
lightning18/rcj-leaderboards
|
leaderboards/leagues/models.py
|
Python
|
mit
| 3,288 | 0.002433 |
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
def main():
pathReadme = srcPath + "/creator/README"
if not neededFilePresent(pathReadme):
return
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
invokeMenuItem("File", "Open File or Project...")
selectFromFileDialog(pathReadme)
invokeMenuItem("Tools", "Git", "Actions on Commits...")
pathEdit = waitForObject(":Select a Git Commit.workingDirectoryEdit_QLineEdit")
revEdit = waitForObject(":Select a Git Commit.changeNumberEdit_Utils::CompletingLineEdit")
test.compare(str(pathEdit.displayText), os.path.join(srcPath, "creator").replace("\\", "/"))
test.compare(str(revEdit.displayText), "HEAD")
replaceEditorContent(revEdit, "05c35356abc31549c5db6eba31fb608c0365c2a0") # Initial import
detailsEdit = waitForObject(":Select a Git Commit.detailsText_QPlainTextEdit")
test.verify(detailsEdit.readOnly, "Details view is read only?")
waitFor("str(detailsEdit.plainText) != 'Fetching commit data...'")
commitDetails = str(detailsEdit.plainText)
test.verify("commit 05c35356abc31549c5db6eba31fb608c0365c2a0\n" \
"Author: con <qtc-commiter@nokia.com>" in commitDetails,
"Information header in details view?")
test.verify("Initial import" in commitDetails, "Commit message in details view?")
test.verify("src/plugins/debugger/gdbengine.cpp | 4035 ++++++++++++++++++++"
in commitDetails, "Text file in details view?")
test.verify("src/plugins/find/images/expand.png | Bin 0 -> 931 bytes"
in commitDetails, "Binary file in details view?")
test.verify(" files changed, 229938 insertions(+)" in commitDetails,
"Summary in details view?")
clickButton(waitForObject(":Select a Git Commit.Show_QPushButton"))
changedEdit = waitForObject(":Qt Creator_DiffEditor::SideDiffEditorWidget")
waitFor("len(str(changedEdit.plainText)) > 0 and "
"str(changedEdit.plainText) != 'Waiting for data...'", 20000)
diffPlainText = str(changedEdit.plainText)
test.verify("# This file is used to ignore files which are generated" in diffPlainText,
"Comment from .gitignore in diff?")
test.verify("SharedTools::QtSingleApplication app((QLatin1String(appNameC)), argc, argv);"
in diffPlainText, "main function in diff?")
invokeMenuItem("File", "Exit")
|
martyone/sailfish-qtcreator
|
tests/system/suite_tools/tst_git_first_commit/test.py
|
Python
|
lgpl-2.1
| 4,000 | 0.00875 |
from sys import byteorder
from array import array
from struct import pack
from multiprocessing import Process
from threading import Thread
import unreal_engine as ue
import pyaudio
import wave
THRESHOLD = 500
CHUNK_SIZE = 1024
FORMAT = pyaudio.paInt16
RATE = 44100
def is_silent(snd_data):
"Returns 'True' if below the 'silent' threshold"
return max(snd_data) < THRESHOLD
def normalize(snd_data):
"Average the volume out"
MAXIMUM = 16384
times = float(MAXIMUM)/max(abs(i) for i in snd_data)
r = array('h')
for i in snd_data:
r.append(int(i*times))
return r
def trim(snd_data):
"Trim the blank spots at the start and end"
def _trim(snd_data):
snd_started = False
r = array('h')
for i in snd_data:
if not snd_started and abs(i)>THRESHOLD:
snd_started = True
r.append(i)
elif snd_started:
r.append(i)
return r
# Trim to the left
snd_data = _trim(snd_data)
# Trim to the right
snd_data.reverse()
snd_data = _trim(snd_data)
snd_data.reverse()
return snd_data
def add_silence(snd_data, seconds):
"Add silence to the start and end of 'snd_data' of length 'seconds' (float)"
r = array('h', [0 for i in range(int(seconds*RATE))])
r.extend(snd_data)
r.extend([0 for i in range(int(seconds*RATE))])
return r
def record():
"""
Record a word or words from the microphone and
return the data as an array of signed shorts.
Normalizes the audio, trims silence from the
start and end, and pads with 0.5 seconds of
blank sound to make sure VLC et al can play
it without getting chopped off.
"""
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=1, rate=RATE,
input=True, output=True,
frames_per_buffer=CHUNK_SIZE)
num_silent = 0
snd_started = False
r = array('h')
while 1:
        # little endian, signed short
        snd_data = array('h', stream.read(CHUNK_SIZE))
if byteorder == 'big':
snd_data.byteswap()
r.extend(snd_data)
silent = is_silent(snd_data)
if silent and snd_started:
num_silent += 1
elif not silent and not snd_started:
snd_started = True
else:
num_silent = 0
if snd_started and num_silent > 30:
break
sample_width = p.get_sample_size(FORMAT)
stream.stop_stream()
stream.close()
p.terminate()
r = normalize(r)
r = trim(r)
r = add_silence(r, 0.5)
return sample_width, r
def record_to_file(path):
"Records from the microphone and outputs the resulting data to 'path'"
sample_width, data = record()
data = pack('<' + ('h'*len(data)), *data)
wf = wave.open(path, 'wb')
wf.setnchannels(1)
wf.setsampwidth(sample_width)
wf.setframerate(RATE)
wf.writeframes(data)
wf.close()
class Hero:
# this is called on game start
def begin_play(self):
ue.print_string("please speak a word into the microphone")
p = Thread(target=record_to_file, args=('demo.wav',))
p.start()
p.join()
ue.print_string("done - result written to demo.wav")
# this is called at every 'tick'
def tick(self, delta_time):
# get current location
location = self.uobject.get_actor_location()
# increase Z honouring delta_time
location.z += 100 * delta_time
# set new location
self.uobject.set_actor_location(location)
|
jbecke/VR-Vendor
|
AmazonCompetition/Content/Scripts/record.py
|
Python
|
mit
| 3,740 | 0.004813 |
# assign inputs
_skymtx, _analysisGrids, _analysisType_, _vmtxPar_, _dmtxPar_, reuseVmtx_, reuseDmtx_ = IN
analysisRecipe = None
#import honeybee
#reload(honeybee.radiance.recipe.daylightcoeff.gridbased)
#reload(honeybee.radiance.recipe.threephase.gridbased)
#reload(honeybee.radiance.recipe.fivephase.gridbased)
try:
from honeybee.radiance.recipe.fivephase.gridbased import FivePhaseGridBased
except ImportError as e:
raise ImportError('\nFailed to import honeybee:\n\t{}'.format(e))
if _skymtx and _analysisGrids:
reuseVmtx_ = bool(reuseVmtx_)
reuseDmtx_ = bool(reuseDmtx_)
assert _analysisType_ == 0, \
        ValueError('5Phase recipe currently only supports illuminance simulation.')
analysisRecipe = FivePhaseGridBased(
_skymtx, _analysisGrids, _analysisType_, _vmtxPar_, _dmtxPar_,
reuseVmtx_, reuseDmtx_)
# assign outputs to OUT
OUT = (analysisRecipe,)
|
ladybug-analysis-tools/honeybee-plus
|
plugin/src/fivephasegbrecipe_node.py
|
Python
|
gpl-3.0
| 906 | 0.007726 |
#
# nestml_error_listener.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from antlr4 import DiagnosticErrorListener, Utils as AntlrUtil
from pynestml.utils.logger import Logger, LoggingLevel
from pynestml.utils.messages import Messages
from pynestml.meta_model.ast_source_location import ASTSourceLocation
import os
class NestMLErrorListener(DiagnosticErrorListener):
def __init__(self, report_ambiguity = False):
super(NestMLErrorListener, self).__init__()
self.report_ambiguity = report_ambiguity
def syntaxError(self, recognizer, offending_symbol, line, column, msg, e):
if offending_symbol is not None:
code, message = Messages.get_syntax_error_in_model("%s at: %s" % (msg, offending_symbol.text))
else:
code, message = Messages.get_syntax_error_in_model("%s" % msg)
try:
_, file_name = os.path.split(recognizer._input.fileName)
except AttributeError:
file_name = None
Logger.log_message(code=code, message=message, error_position=ASTSourceLocation(line, column, line, column),
log_level=LoggingLevel.ERROR, neuron=file_name)
def reportAmbiguity(self, recognizer, dfa, start_index,
stop_index, exact, ambig_alts, configs):
if self.report_ambiguity:
msg = u"reportAmbiguity d="
msg += self.getDecisionDescription(recognizer, dfa)
msg += u": ambigAlts="
msg += AntlrUtil.str_set(self.getConflictingAlts(ambig_alts, configs))
msg += u", input='"
msg += recognizer.getTokenStream().getText((start_index, stop_index))
msg += u"'"
code, message = Messages.get_syntax_warning_in_model(msg)
Logger.log_message(code=code, message=message, error_position=ASTSourceLocation(start_index, stop_index,
start_index, stop_index),
log_level=LoggingLevel.ERROR)
def reportAttemptingFullContext(self, recognizer, dfa, start_index,
stop_index, conflicting_alts, configs):
if self.report_ambiguity:
msg = u"reportAttemptingFullContext d="
msg += self.getDecisionDescription(recognizer, dfa)
msg += u", input='"
msg += recognizer.getTokenStream().getText((start_index, stop_index))
msg += u"'"
code, message = Messages.get_syntax_warning_in_model(msg)
Logger.log_message(code=code, message=message, error_position=ASTSourceLocation(start_index, stop_index,
start_index, stop_index),
log_level=LoggingLevel.ERROR)
def reportContextSensitivity(self, recognizer, dfa, start_index,
stop_index, prediction, configs):
if self.report_ambiguity:
msg = u"reportContextSensitivity d="
msg += self.getDecisionDescription(recognizer, dfa)
msg += u", input='"
msg += recognizer.getTokenStream().getText((start_index, stop_index))
msg += u"'"
code, message = Messages.get_syntax_warning_in_model(msg)
Logger.log_message(code=code, message=message, error_position=ASTSourceLocation(start_index, stop_index,
start_index, stop_index),
log_level=LoggingLevel.ERROR)
|
kperun/nestml
|
pynestml/frontend/nestml_error_listener.py
|
Python
|
gpl-2.0
| 4,293 | 0.003261 |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""List and compare most used OpenStack cloud resources."""
import argparse
import json
import subprocess
import sys
from rally.common.plugin import discover
from rally import consts
from rally import osclients
class ResourceManager(object):
REQUIRED_SERVICE = None
REPR_KEYS = ("id", "name", "tenant_id", "zone", "zoneName", "pool")
def __init__(self, clients):
self.clients = clients
def is_available(self):
if self.REQUIRED_SERVICE:
return self.REQUIRED_SERVICE in self.clients.services().values()
return True
@property
def client(self):
return getattr(self.clients, self.__class__.__name__.lower())()
def get_resources(self):
all_resources = []
cls = self.__class__.__name__.lower()
for prop in dir(self):
if not prop.startswith("list_"):
continue
f = getattr(self, prop)
resources = f() or []
resource_name = prop[5:][:-1]
for res in resources:
res_repr = []
for key in self.REPR_KEYS + (resource_name,):
if isinstance(res, dict):
value = res.get(key)
else:
value = getattr(res, key, None)
if value:
res_repr.append("%s:%s" % (key, value))
if not res_repr:
raise ValueError("Failed to represent resource %r" % res)
all_resources.append(
"%s %s %s" % (cls, resource_name, " ".join(res_repr)))
return all_resources
class Keystone(ResourceManager):
def list_users(self):
return self.client.users.list()
def list_tenants(self):
return self.client.tenants.list()
def list_roles(self):
return self.client.roles.list()
class Nova(ResourceManager):
def list_flavors(self):
return self.client.flavors.list()
def list_floating_ip_pools(self):
return self.client.floating_ip_pools.list()
def list_floating_ips(self):
return self.client.floating_ips.list()
def list_images(self):
return self.client.images.list()
def list_keypairs(self):
return self.client.keypairs.list()
def list_networks(self):
return self.client.networks.list()
def list_security_groups(self):
return self.client.security_groups.list(
search_opts={"all_tenants": True})
def list_servers(self):
return self.client.servers.list(
search_opts={"all_tenants": True})
def list_services(self):
return self.client.services.list()
def list_availability_zones(self):
return self.client.availability_zones.list()
class Neutron(ResourceManager):
REQUIRED_SERVICE = consts.Service.NEUTRON
def has_extension(self, name):
extensions = self.client.list_extensions().get("extensions", [])
return any(ext.get("alias") == name for ext in extensions)
def list_networks(self):
return self.client.list_networks()["networks"]
def list_subnets(self):
return self.client.list_subnets()["subnets"]
def list_routers(self):
return self.client.list_routers()["routers"]
def list_ports(self):
return self.client.list_ports()["ports"]
def list_floatingips(self):
return self.client.list_floatingips()["floatingips"]
def list_security_groups(self):
return self.client.list_security_groups()["security_groups"]
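    # The LBaaS listings below return None when the "lbaas" extension is not
    # enabled; get_resources() treats a None result as an empty list.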
def list_health_monitors(self):
if self.has_extension("lbaas"):
return self.client.list_health_monitors()["health_monitors"]
def list_pools(self):
if self.has_extension("lbaas"):
return self.client.list_pools()["pools"]
def list_vips(self):
if self.has_extension("lbaas"):
return self.client.list_vips()["vips"]
class Glance(ResourceManager):
def list_images(self):
return self.client.images.list()
class Heat(ResourceManager):
REQUIRED_SERVICE = consts.Service.HEAT
def list_resource_types(self):
return self.client.resource_types.list()
def list_stacks(self):
return self.client.stacks.list()
class Cinder(ResourceManager):
def list_availability_zones(self):
return self.client.availability_zones.list()
def list_backups(self):
return self.client.backups.list()
def list_volume_snapshots(self):
return self.client.volume_snapshots.list()
def list_volume_types(self):
return self.client.volume_types.list()
def list_volumes(self):
return self.client.volumes.list(
search_opts={"all_tenants": True})
class CloudResources(object):
"""List and compare cloud resources.
resources = CloudResources(auth_url=..., ...)
saved_list = resources.list()
# Do something with the cloud ...
changes = resources.compare(saved_list)
has_changed = any(changes)
removed, added = changes
"""
def __init__(self, **kwargs):
endpoint = osclients.objects.Endpoint(**kwargs)
self.clients = osclients.Clients(endpoint)
def _deduplicate(self, lst):
"""Change list duplicates to make all items unique.
>>> resources._deduplicate(["a", "b", "c", "b", "b"])
        ['a', 'b', 'c', 'b (duplicate 1)', 'b (duplicate 2)']
"""
deduplicated_list = []
for value in lst:
if value in deduplicated_list:
ctr = 0
try_value = value
while try_value in deduplicated_list:
ctr += 1
try_value = "%s (duplicate %i)" % (value, ctr)
value = try_value
deduplicated_list.append(value)
return deduplicated_list
def list(self):
managers_classes = discover.itersubclasses(ResourceManager)
resources = []
for cls in managers_classes:
manager = cls(self.clients)
if manager.is_available():
resources.extend(manager.get_resources())
return sorted(self._deduplicate(resources))
def compare(self, with_list):
saved_resources = set(with_list)
current_resources = set(self.list())
removed = saved_resources - current_resources
added = current_resources - saved_resources
return sorted(list(removed)), sorted(list(added))
def main():
parser = argparse.ArgumentParser(
description=("Save list of OpenStack cloud resources or compare "
"with previously saved list."))
parser.add_argument("--credentials",
type=argparse.FileType("r"),
metavar="<path/to/credentials.json>",
help="cloud credentials in JSON format")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--dump-list",
type=argparse.FileType("w"),
metavar="<path/to/output/list.json>",
help="dump resources to given file in JSON format")
group.add_argument("--compare-with-list",
type=argparse.FileType("r"),
metavar="<path/to/existent/list.json>",
help=("compare current resources with a list from "
"given JSON file"))
args = parser.parse_args()
if args.credentials:
config = json.load(args.credentials)
else:
config = json.loads(subprocess.check_output(["rally", "deployment",
"config"]))
config.update(config.pop("admin"))
del config["type"]
resources = CloudResources(**config)
if args.dump_list:
resources_list = resources.list()
json.dump(resources_list, args.dump_list, indent=2)
elif args.compare_with_list:
given_list = json.load(args.compare_with_list)
changes = resources.compare(with_list=given_list)
removed, added = changes
sys.stdout.write(
json.dumps({"removed": removed, "added": added}, indent=2))
if any(changes):
            return 0  # returning 1 here would fail the gate job
return 0
if __name__ == "__main__":
sys.exit(main())
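# Example invocation (paths are illustrative); credentials fall back to the
# active Rally deployment config when --credentials is not given:
#
#   python osresources.py --dump-list /tmp/resources_before.json
#   ... run workloads ...
#   python osresources.py --compare-with-list /tmp/resources_before.json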
| group-policy/rally | tests/ci/osresources.py | Python | apache-2.0 | 8,981 | 0 |