text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) |
---|---|---|---|---|---|---|
import logging
import io
from pathlib import Path
from rdflib import URIRef, RDF
from dipper.graph.RDFGraph import RDFGraph
LOG = logging.getLogger(__name__)
class TestUtils:
@staticmethod
def test_graph_equality(turtlish, graph):
"""
:param turtlish: file path or string of triples in turtle
format without prefix header
:param graph: Graph object to test against
:return: Boolean, True if graphs contain same
set of triples
"""
turtle_graph = RDFGraph()
turtle_graph.bind_all_namespaces()
prefixes = "\n".join(
["@prefix {}: <{}> .".format(
n[0], n[1]) for n in turtle_graph.namespace_manager.namespaces()]
)
headless_ttl = ''
try:
if Path(turtlish).exists():
headless_ttl = Path(turtlish).read_text()
else:
raise OSError
except OSError:
if isinstance(turtlish, str):
headless_ttl = turtlish
else:
raise ValueError("turtlish must be filepath or string")
turtle_string = prefixes + headless_ttl
mock_file = io.StringIO(turtle_string)
turtle_graph.parse(mock_file, format="turtle")
TestUtils.remove_ontology_axioms(graph)
turtle_triples = set(list(turtle_graph))
ref_triples = set(list(graph))
equality = turtle_triples == ref_triples
if not equality:
LOG.warning(
"Triples do not match\n"
"\tLeft hand difference: %s\n"
"\tRight hand difference: %s",
sorted(turtle_triples - ref_triples),
sorted(ref_triples - turtle_triples)
)
return equality
@staticmethod
def remove_ontology_axioms(graph):
"""
Given an rdflib graph, remove any triples
connected to an ontology node:
{} a owl:Ontology
:param graph: RDFGraph
:return: None
"""
ontology_iri = URIRef("http://www.w3.org/2002/07/owl#Ontology")
for subject in graph.subjects(RDF.type, ontology_iri):
for predicate, obj in graph.predicate_objects(subject):
graph.remove((subject, predicate, obj))
graph.remove((subject, RDF.type, ontology_iri))
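# Editor's note: a minimal usage sketch, not part of the original module. It
# assumes bind_all_namespaces() binds the 'owl' prefix; the example IRI is
# hypothetical.
def _example_graph_equality_check():
    graph = RDFGraph()
    graph.add((URIRef("http://example.org/thing"), RDF.type,
               URIRef("http://www.w3.org/2002/07/owl#Class")))
    # Headerless turtle; test_graph_equality() prepends the prefix block itself.
    return TestUtils.test_graph_equality(
        '<http://example.org/thing> a owl:Class .', graph)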
| TomConlin/dipper | dipper/utils/TestUtils.py | Python | bsd-3-clause | 2,394 | 0.000418 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
def read_file(path):
try:
with open(path, 'r') as f:
return f.read()
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: unable to read required file %s' % (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
return None
def main():
ORIGINAL_FILE = 'requirements.txt'
VENDORED_COPY = 'test/lib/ansible_test/_data/requirements/ansible.txt'
original_requirements = read_file(ORIGINAL_FILE)
vendored_requirements = read_file(VENDORED_COPY)
if original_requirements is not None and vendored_requirements is not None:
if original_requirements != vendored_requirements:
print('%s:%d:%d: must be identical to %s' % (VENDORED_COPY, 0, 0, ORIGINAL_FILE))
if __name__ == '__main__':
main()
| nitzmahone/ansible | test/sanity/code-smell/ansible-requirements.py | Python | gpl-3.0 | 898 | 0.002227 |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting unit and lesson editing."""
__author__ = 'John Orr (jorr@google.com)'
import cgi
import logging
import urllib
from controllers import sites
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import roles
from models import transforms
from modules.oeditor import oeditor
from tools import verify
import filer
import messages
DRAFT_TEXT = 'Private'
PUBLISHED_TEXT = 'Public'
# The editor has severe limitations for editing nested lists of objects. First,
# it does not allow one to move a lesson from one unit to another. We need a way
# of doing that. Second, JSON schema specification does not seem to support a
# type-safe array, which has objects of different types. We also want that
# badly :). All in all - using generic schema-based object editor for editing
# nested arrayable polymorphic attributes is a pain...
def create_status_annotation():
return oeditor.create_bool_select_annotation(
['properties', 'is_draft'], 'Status', DRAFT_TEXT,
PUBLISHED_TEXT, class_name='split-from-main-group')
class CourseOutlineRights(object):
"""Manages view/edit rights for course outline."""
@classmethod
def can_view(cls, handler):
return cls.can_edit(handler)
@classmethod
def can_edit(cls, handler):
return roles.Roles.is_course_admin(handler.app_context)
@classmethod
def can_delete(cls, handler):
return cls.can_edit(handler)
@classmethod
def can_add(cls, handler):
return cls.can_edit(handler)
class UnitLessonEditor(ApplicationHandler):
"""An editor for the unit and lesson titles."""
def get_import_course(self):
"""Shows setup form for course import."""
template_values = {}
template_values['page_title'] = self.format_title('Import Course')
annotations = ImportCourseRESTHandler.SCHEMA_ANNOTATIONS_DICT()
if not annotations:
template_values['main_content'] = 'No courses to import from.'
self.render_page(template_values)
return
exit_url = self.canonicalize_url('/dashboard')
rest_url = self.canonicalize_url(ImportCourseRESTHandler.URI)
form_html = oeditor.ObjectEditor.get_html_for(
self,
ImportCourseRESTHandler.SCHEMA_JSON,
annotations,
None, rest_url, exit_url,
auto_return=True,
save_button_caption='Import',
required_modules=ImportCourseRESTHandler.REQUIRED_MODULES)
template_values = {}
template_values['page_title'] = self.format_title('Import Course')
template_values['page_description'] = messages.IMPORT_COURSE_DESCRIPTION
template_values['main_content'] = form_html
self.render_page(template_values)
def get_edit_unit_lesson(self):
"""Shows editor for the list of unit and lesson titles."""
key = self.request.get('key')
exit_url = self.canonicalize_url('/dashboard')
rest_url = self.canonicalize_url(UnitLessonTitleRESTHandler.URI)
form_html = oeditor.ObjectEditor.get_html_for(
self,
UnitLessonTitleRESTHandler.SCHEMA_JSON,
UnitLessonTitleRESTHandler.SCHEMA_ANNOTATIONS_DICT,
key, rest_url, exit_url,
required_modules=UnitLessonTitleRESTHandler.REQUIRED_MODULES)
template_values = {}
template_values['page_title'] = self.format_title('Edit Course Outline')
template_values[
'page_description'] = messages.COURSE_OUTLINE_EDITOR_DESCRIPTION
template_values['main_content'] = form_html
self.render_page(template_values)
def post_add_lesson(self):
"""Adds new lesson to a first unit of the course."""
course = courses.Course(self)
first_unit = None
for unit in course.get_units():
if unit.type == verify.UNIT_TYPE_UNIT:
first_unit = unit
break
if first_unit:
lesson = course.add_lesson(first_unit)
course.save()
# TODO(psimakov): complete 'edit_lesson' view
self.redirect(self.get_action_url(
'edit_lesson', key=lesson.lesson_id,
extra_args={'is_newly_created': 1}))
else:
self.redirect('/dashboard')
def post_add_unit(self):
"""Adds new unit to a course."""
course = courses.Course(self)
unit = course.add_unit()
course.save()
self.redirect(self.get_action_url(
'edit_unit', key=unit.unit_id, extra_args={'is_newly_created': 1}))
def post_add_link(self):
"""Adds new link to a course."""
course = courses.Course(self)
link = course.add_link()
course.save()
self.redirect(self.get_action_url(
'edit_link', key=link.unit_id, extra_args={'is_newly_created': 1}))
def post_add_assessment(self):
"""Adds new assessment to a course."""
course = courses.Course(self)
assessment = course.add_assessment()
course.save()
self.redirect(self.get_action_url(
'edit_assessment', key=assessment.unit_id,
extra_args={'is_newly_created': 1}))
def _render_edit_form_for(
self, rest_handler_cls, title, annotations_dict=None,
delete_xsrf_token='delete-unit', page_description=None):
"""Renders an editor form for a given REST handler class."""
if not annotations_dict:
annotations_dict = rest_handler_cls.SCHEMA_ANNOTATIONS_DICT
key = self.request.get('key')
extra_args = {}
if self.request.get('is_newly_created'):
extra_args['is_newly_created'] = 1
exit_url = self.canonicalize_url('/dashboard')
rest_url = self.canonicalize_url(rest_handler_cls.URI)
delete_url = '%s?%s' % (
self.canonicalize_url(rest_handler_cls.URI),
urllib.urlencode({
'key': key,
'xsrf_token': cgi.escape(
self.create_xsrf_token(delete_xsrf_token))
}))
form_html = oeditor.ObjectEditor.get_html_for(
self,
rest_handler_cls.SCHEMA_JSON,
annotations_dict,
key, rest_url, exit_url,
extra_args=extra_args,
delete_url=delete_url, delete_method='delete',
read_only=not filer.is_editable_fs(self.app_context),
required_modules=rest_handler_cls.REQUIRED_MODULES)
template_values = {}
template_values['page_title'] = self.format_title('Edit %s' % title)
if page_description:
template_values['page_description'] = page_description
template_values['main_content'] = form_html
self.render_page(template_values)
def get_edit_unit(self):
"""Shows unit editor."""
self._render_edit_form_for(
UnitRESTHandler, 'Unit',
page_description=messages.UNIT_EDITOR_DESCRIPTION)
def get_edit_link(self):
"""Shows link editor."""
self._render_edit_form_for(
LinkRESTHandler, 'Link',
page_description=messages.LINK_EDITOR_DESCRIPTION)
def get_edit_assessment(self):
"""Shows assessment editor."""
self._render_edit_form_for(
AssessmentRESTHandler, 'Assessment',
page_description=messages.ASSESSMENT_EDITOR_DESCRIPTION)
def get_edit_lesson(self):
"""Shows the lesson/activity editor."""
self._render_edit_form_for(
LessonRESTHandler, 'Lessons and Activities',
annotations_dict=LessonRESTHandler.get_schema_annotations_dict(
courses.Course(self).get_units()),
delete_xsrf_token='delete-lesson')
class CommonUnitRESTHandler(BaseRESTHandler):
"""A common super class for all unit REST handlers."""
def unit_to_dict(self, unused_unit):
"""Converts a unit to a dictionary representation."""
raise Exception('Not implemented')
def apply_updates(
self, unused_unit, unused_updated_unit_dict, unused_errors):
"""Applies changes to a unit; modifies unit input argument."""
raise Exception('Not implemented')
def get(self):
"""A GET REST method shared by all unit types."""
key = self.request.get('key')
if not CourseOutlineRights.can_view(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
unit = courses.Course(self).find_unit_by_id(key)
if not unit:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
message = ['Success.']
if self.request.get('is_newly_created'):
unit_type = verify.UNIT_TYPE_NAMES[unit.type].lower()
message.append(
'New %s has been created and saved.' % unit_type)
transforms.send_json_response(
self, 200, '\n'.join(message),
payload_dict=self.unit_to_dict(unit),
xsrf_token=XsrfTokenManager.create_xsrf_token('put-unit'))
def put(self):
"""A PUT REST method shared by all unit types."""
request = transforms.loads(self.request.get('request'))
key = request.get('key')
if not self.assert_xsrf_token_or_fail(
request, 'put-unit', {'key': key}):
return
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
unit = courses.Course(self).find_unit_by_id(key)
if not unit:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
payload = request.get('payload')
updated_unit_dict = transforms.json_to_dict(
transforms.loads(payload), self.SCHEMA_DICT)
errors = []
self.apply_updates(unit, updated_unit_dict, errors)
if not errors:
course = courses.Course(self)
assert course.update_unit(unit)
course.save()
transforms.send_json_response(self, 200, 'Saved.')
else:
transforms.send_json_response(self, 412, '\n'.join(errors))
def delete(self):
"""Handles REST DELETE verb with JSON payload."""
key = self.request.get('key')
if not self.assert_xsrf_token_or_fail(
self.request, 'delete-unit', {'key': key}):
return
if not CourseOutlineRights.can_delete(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
course = courses.Course(self)
unit = course.find_unit_by_id(key)
if not unit:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
course.delete_unit(unit)
course.save()
transforms.send_json_response(self, 200, 'Deleted.')
class UnitRESTHandler(CommonUnitRESTHandler):
"""Provides REST API to unit."""
URI = '/rest/course/unit'
SCHEMA_JSON = """
{
"id": "Unit Entity",
"type": "object",
"description": "Unit",
"properties": {
"key" : {"type": "string"},
"type": {"type": "string"},
"title": {"optional": true, "type": "string"},
"is_draft": {"type": "boolean"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
SCHEMA_ANNOTATIONS_DICT = [
(['title'], 'Unit'),
(['properties', 'key', '_inputex'], {
'label': 'ID', '_type': 'uneditable'}),
(['properties', 'type', '_inputex'], {
'label': 'Type', '_type': 'uneditable'}),
(['properties', 'title', '_inputex'], {'label': 'Title'}),
create_status_annotation()]
REQUIRED_MODULES = [
'inputex-string', 'inputex-select', 'inputex-uneditable']
def unit_to_dict(self, unit):
assert unit.type == 'U'
return {
'key': unit.unit_id,
'type': verify.UNIT_TYPE_NAMES[unit.type],
'title': unit.title,
'is_draft': not unit.now_available}
def apply_updates(self, unit, updated_unit_dict, unused_errors):
unit.title = updated_unit_dict.get('title')
unit.now_available = not updated_unit_dict.get('is_draft')
class LinkRESTHandler(CommonUnitRESTHandler):
"""Provides REST API to link."""
URI = '/rest/course/link'
SCHEMA_JSON = """
{
"id": "Link Entity",
"type": "object",
"description": "Link",
"properties": {
"key" : {"type": "string"},
"type": {"type": "string"},
"title": {"optional": true, "type": "string"},
"url": {"optional": true, "type": "string"},
"is_draft": {"type": "boolean"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
SCHEMA_ANNOTATIONS_DICT = [
(['title'], 'Link'),
(['properties', 'key', '_inputex'], {
'label': 'ID', '_type': 'uneditable'}),
(['properties', 'type', '_inputex'], {
'label': 'Type', '_type': 'uneditable'}),
(['properties', 'title', '_inputex'], {'label': 'Title'}),
(['properties', 'url', '_inputex'], {
'label': 'URL',
'description': messages.LINK_EDITOR_URL_DESCRIPTION}),
create_status_annotation()]
REQUIRED_MODULES = [
'inputex-string', 'inputex-select', 'inputex-uneditable']
def unit_to_dict(self, unit):
assert unit.type == 'O'
return {
'key': unit.unit_id,
'type': verify.UNIT_TYPE_NAMES[unit.type],
'title': unit.title,
'url': unit.href,
'is_draft': not unit.now_available}
def apply_updates(self, unit, updated_unit_dict, unused_errors):
unit.title = updated_unit_dict.get('title')
unit.href = updated_unit_dict.get('url')
unit.now_available = not updated_unit_dict.get('is_draft')
class ImportCourseRESTHandler(CommonUnitRESTHandler):
"""Provides REST API to course import."""
URI = '/rest/course/import'
SCHEMA_JSON = """
{
"id": "Import Course Entity",
"type": "object",
"description": "Import Course",
"properties": {
"course" : {"type": "string"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
REQUIRED_MODULES = [
'inputex-string', 'inputex-select', 'inputex-uneditable']
@classmethod
def SCHEMA_ANNOTATIONS_DICT(cls): # pylint: disable-msg=g-bad-name
"""Schema annotations are dynamic and include a list of courses."""
# Make a list of courses user has the rights to.
course_list = []
for acourse in sites.get_all_courses():
if not roles.Roles.is_course_admin(acourse):
continue
if acourse == sites.get_course_for_current_request():
continue
course_list.append({
'value': acourse.raw,
'label': acourse.get_title()})
if not course_list:
return None
# Format annotations.
return [
(['title'], 'Import Course'),
(
['properties', 'course', '_inputex'],
{
'label': 'Available Courses',
'_type': 'select',
'choices': course_list})]
def get(self):
"""Handles REST GET verb and returns an object as JSON payload."""
if not CourseOutlineRights.can_view(self):
transforms.send_json_response(self, 401, 'Access denied.', {})
return
transforms.send_json_response(
self, 200, 'Success.',
payload_dict={'course': None},
xsrf_token=XsrfTokenManager.create_xsrf_token(
'unit-lesson-reorder'))
def put(self):
"""Handles REST PUT verb with JSON payload."""
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(self, 401, 'Access denied.', {})
return
request = transforms.loads(self.request.get('request'))
payload = request.get('payload')
course_raw = transforms.json_to_dict(
transforms.loads(payload), self.SCHEMA_DICT)['course']
source = None
for acourse in sites.get_all_courses():
if acourse.raw == course_raw:
source = acourse
break
if not source:
transforms.send_json_response(
self, 404, 'Object not found.', {'raw': course_raw})
return
course = courses.Course(self)
errors = []
try:
course.import_from(source, errors)
except Exception as e: # pylint: disable-msg=broad-except
logging.exception(e)
errors.append('Import failed: %s' % e)
if errors:
transforms.send_json_response(self, 412, '\n'.join(errors))
return
course.save()
transforms.send_json_response(self, 200, 'Imported.')
class AssessmentRESTHandler(CommonUnitRESTHandler):
"""Provides REST API to assessment."""
URI = '/rest/course/assessment'
SCHEMA_JSON = """
{
"id": "Assessment Entity",
"type": "object",
"description": "Assessment",
"properties": {
"key" : {"type": "string"},
"type": {"type": "string"},
"title": {"optional": true, "type": "string"},
"weight": {"optional": true, "type": "string"},
"content": {"optional": true, "type": "text"},
"is_draft": {"type": "boolean"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
SCHEMA_ANNOTATIONS_DICT = [
(['title'], 'Assessment'),
(['properties', 'key', '_inputex'], {
'label': 'ID', '_type': 'uneditable'}),
(['properties', 'type', '_inputex'], {
'label': 'Type', '_type': 'uneditable'}),
(['properties', 'title', '_inputex'], {'label': 'Title'}),
(['properties', 'weight', '_inputex'], {'label': 'Weight'}),
(['properties', 'content', '_inputex'], {'label': 'Content'}),
create_status_annotation()]
REQUIRED_MODULES = [
'inputex-select', 'inputex-string', 'inputex-textarea',
'inputex-uneditable']
def _get_assessment_path(self, unit):
return self.app_context.fs.impl.physical_to_logical(
courses.Course(self).get_assessment_filename(unit.unit_id))
def unit_to_dict(self, unit):
"""Assemble a dict with the unit data fields."""
assert unit.type == 'A'
path = self._get_assessment_path(unit)
fs = self.app_context.fs
if fs.isfile(path):
content = fs.get(path)
else:
content = ''
return {
'key': unit.unit_id,
'type': verify.UNIT_TYPE_NAMES[unit.type],
'title': unit.title,
'weight': unit.weight if hasattr(unit, 'weight') else 0,
'content': content,
'is_draft': not unit.now_available}
def apply_updates(self, unit, updated_unit_dict, errors):
"""Store the updated assignment."""
unit.title = updated_unit_dict.get('title')
try:
unit.weight = int(updated_unit_dict.get('weight'))
if unit.weight < 0:
errors.append('The weight must be a non-negative integer.')
except ValueError:
errors.append('The weight must be an integer.')
unit.now_available = not updated_unit_dict.get('is_draft')
courses.Course(
None, app_context=self.app_context).set_assessment_content(
unit, updated_unit_dict.get('content'), errors=errors)
class UnitLessonTitleRESTHandler(BaseRESTHandler):
"""Provides REST API to unit and lesson titles."""
URI = '/rest/course/outline'
SCHEMA_JSON = """
{
"type": "object",
"description": "Course Outline",
"properties": {
"outline": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "string"},
"title": {"type": "string"},
"lessons": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "string"},
"title": {"type": "string"}
}
}
}
}
}
}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
SCHEMA_ANNOTATIONS_DICT = [
(['title'], 'Course Outline'),
(['properties', 'outline', '_inputex'], {
'sortable': 'true',
'label': ''}),
([
'properties', 'outline', 'items',
'properties', 'title', '_inputex'], {
'_type': 'uneditable',
'label': ''}),
(['properties', 'outline', 'items', 'properties', 'id', '_inputex'], {
'_type': 'hidden'}),
(['properties', 'outline', 'items', 'properties', 'lessons',
'_inputex'], {
'sortable': 'true',
'label': '',
'listAddLabel': 'Add a new lesson',
'listRemoveLabel': 'Delete'}),
(['properties', 'outline', 'items', 'properties', 'lessons', 'items',
'properties', 'title', '_inputex'], {
'_type': 'uneditable',
'label': ''}),
(['properties', 'outline', 'items', 'properties', 'lessons', 'items',
'properties', 'id', '_inputex'], {
'_type': 'hidden'})
]
REQUIRED_MODULES = [
'inputex-hidden', 'inputex-list', 'inputex-string',
'inputex-uneditable']
def get(self):
"""Handles REST GET verb and returns an object as JSON payload."""
if not CourseOutlineRights.can_view(self):
transforms.send_json_response(self, 401, 'Access denied.', {})
return
course = courses.Course(self)
outline_data = []
for unit in course.get_units():
lesson_data = []
for lesson in course.get_lessons(unit.unit_id):
lesson_data.append({
'title': lesson.title,
'id': lesson.lesson_id})
unit_title = unit.title
if verify.UNIT_TYPE_UNIT == unit.type:
unit_title = 'Unit %s - %s' % (unit.index, unit.title)
outline_data.append({
'title': unit_title,
'id': unit.unit_id,
'lessons': lesson_data})
transforms.send_json_response(
self, 200, 'Success.',
payload_dict={'outline': outline_data},
xsrf_token=XsrfTokenManager.create_xsrf_token(
'unit-lesson-reorder'))
def put(self):
"""Handles REST PUT verb with JSON payload."""
request = transforms.loads(self.request.get('request'))
if not self.assert_xsrf_token_or_fail(
request, 'unit-lesson-reorder', {'key': None}):
return
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(self, 401, 'Access denied.', {})
return
payload = request.get('payload')
payload_dict = transforms.json_to_dict(
transforms.loads(payload), self.SCHEMA_DICT)
course = courses.Course(self)
course.reorder_units(payload_dict['outline'])
course.save()
transforms.send_json_response(self, 200, 'Saved.')
class LessonRESTHandler(BaseRESTHandler):
"""Provides REST API to handle lessons and activities."""
URI = '/rest/course/lesson'
SCHEMA_JSON = """
{
"id": "Lesson Entity",
"type": "object",
"description": "Lesson",
"properties": {
"key" : {"type": "string"},
"title" : {"type": "string"},
"unit_id": {"type": "string"},
"video" : {"type": "string", "optional": true},
"objectives" : {
"type": "string", "format": "html", "optional": true},
"notes" : {"type": "string", "optional": true},
"activity_title" : {"type": "string", "optional": true},
"activity": {"type": "string", "format": "text", "optional": true},
"is_draft": {"type": "boolean"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
REQUIRED_MODULES = [
'inputex-string', 'inputex-rte', 'inputex-select', 'inputex-textarea',
'inputex-uneditable']
@classmethod
def get_schema_annotations_dict(cls, units):
unit_list = []
for unit in units:
if unit.type == 'U':
unit_list.append({
'label': 'Unit %s - %s' % (unit.index, unit.title),
'value': unit.unit_id})
return [
(['title'], 'Lesson'),
(['properties', 'key', '_inputex'], {
'label': 'ID', '_type': 'uneditable'}),
(['properties', 'title', '_inputex'], {'label': 'Title'}),
(['properties', 'unit_id', '_inputex'], {
'label': 'Parent Unit', '_type': 'select',
'choices': unit_list}),
(['properties', 'objectives', '_inputex'], {
'label': 'Objectives',
'editorType': 'simple',
'description': messages.LESSON_OBJECTIVES_DESCRIPTION}),
(['properties', 'video', '_inputex'], {
'label': 'Video ID',
'description': messages.LESSON_VIDEO_ID_DESCRIPTION}),
(['properties', 'notes', '_inputex'], {
'label': 'Notes',
'description': messages.LESSON_NOTES_DESCRIPTION}),
(['properties', 'activity_title', '_inputex'], {
'label': 'Activity Title',
'description': messages.LESSON_ACTIVITY_TITLE_DESCRIPTION}),
(['properties', 'activity', '_inputex'], {
'label': 'Activity',
'description': messages.LESSON_ACTIVITY_DESCRIPTION}),
create_status_annotation()]
def get(self):
"""Handles GET REST verb and returns lesson object as JSON payload."""
if not CourseOutlineRights.can_view(self):
transforms.send_json_response(self, 401, 'Access denied.', {})
return
key = self.request.get('key')
course = courses.Course(self)
lesson = course.find_lesson_by_id(None, key)
assert lesson
fs = self.app_context.fs
path = fs.impl.physical_to_logical(course.get_activity_filename(
lesson.unit_id, lesson.lesson_id))
if lesson.has_activity and fs.isfile(path):
activity = fs.get(path)
else:
activity = ''
payload_dict = {
'key': key,
'title': lesson.title,
'unit_id': lesson.unit_id,
'objectives': lesson.objectives,
'video': lesson.video,
'notes': lesson.notes,
'activity_title': lesson.activity_title,
'activity': activity,
'is_draft': not lesson.now_available
}
message = ['Success.']
if self.request.get('is_newly_created'):
message.append('New lesson has been created and saved.')
transforms.send_json_response(
self, 200, '\n'.join(message),
payload_dict=payload_dict,
xsrf_token=XsrfTokenManager.create_xsrf_token('lesson-edit'))
def put(self):
"""Handles PUT REST verb to save lesson and associated activity."""
request = transforms.loads(self.request.get('request'))
key = request.get('key')
if not self.assert_xsrf_token_or_fail(
request, 'lesson-edit', {'key': key}):
return
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
course = courses.Course(self)
lesson = course.find_lesson_by_id(None, key)
if not lesson:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
payload = request.get('payload')
updates_dict = transforms.json_to_dict(
transforms.loads(payload), self.SCHEMA_DICT)
lesson.title = updates_dict['title']
lesson.unit_id = updates_dict['unit_id']
lesson.objectives = updates_dict['objectives']
lesson.video = updates_dict['video']
lesson.notes = updates_dict['notes']
lesson.activity_title = updates_dict['activity_title']
lesson.now_available = not updates_dict['is_draft']
activity = updates_dict.get('activity', '').strip()
errors = []
if activity:
lesson.has_activity = True
course.set_activity_content(lesson, activity, errors=errors)
else:
lesson.has_activity = False
fs = self.app_context.fs
path = fs.impl.physical_to_logical(course.get_activity_filename(
lesson.unit_id, lesson.lesson_id))
if fs.isfile(path):
fs.delete(path)
if not errors:
assert course.update_lesson(lesson)
course.save()
transforms.send_json_response(self, 200, 'Saved.')
else:
transforms.send_json_response(self, 412, '\n'.join(errors))
def delete(self):
"""Handles REST DELETE verb with JSON payload."""
key = self.request.get('key')
if not self.assert_xsrf_token_or_fail(
self.request, 'delete-lesson', {'key': key}):
return
if not CourseOutlineRights.can_delete(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
course = courses.Course(self)
lesson = course.find_lesson_by_id(None, key)
if not lesson:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
assert course.delete_lesson(lesson)
course.save()
transforms.send_json_response(self, 200, 'Deleted.')
| henrymp/coursebuilder | modules/dashboard/unit_lesson_editor.py | Python | apache-2.0 | 31,883 | 0.000157 |
"""
Support for Eneco Slimmer stekkers (Smart Plugs).
This provides controls for the z-wave smart plugs Toon can control.
"""
import logging
from homeassistant.components.switch import SwitchDevice
import custom_components.toon as toon_main
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Set up the discovered Smart Plugs."""
_toon_main = hass.data[toon_main.TOON_HANDLE]
switch_items = []
for plug in _toon_main.toon.smartplugs:
switch_items.append(EnecoSmartPlug(hass, plug))
add_devices_callback(switch_items)
class EnecoSmartPlug(SwitchDevice):
"""Representation of a Smart Plug."""
def __init__(self, hass, plug):
"""Initialize the Smart Plug."""
self.smartplug = plug
self.toon_data_store = hass.data[toon_main.TOON_HANDLE]
@property
def should_poll(self):
"""Polling is needed; update() refreshes data from the Toon data store."""
return True
@property
def unique_id(self):
"""Return the ID of this switch."""
return self.smartplug.device_uuid
@property
def name(self):
"""Return the name of the switch if any."""
return self.smartplug.name
@property
def current_power_w(self):
"""Current power usage in W."""
return self.toon_data_store.get_data('current_power', self.name)
@property
def today_energy_kwh(self):
"""Today total energy usage in kWh."""
return self.toon_data_store.get_data('today_energy', self.name)
@property
def is_on(self):
"""Return true if switch is on. Standby is on."""
return self.toon_data_store.get_data('current_state', self.name)
@property
def available(self):
"""True if switch is available."""
return self.smartplug.can_toggle
def turn_on(self, **kwargs):
"""Turn the switch on."""
return self.smartplug.turn_on()
def turn_off(self, **kwargs):
"""Turn the switch off."""
return self.smartplug.turn_off()
def update(self):
"""Update state."""
self.toon_data_store.update()
| krocat/ToonHA | toon/switch.py | Python | apache-2.0 | 2,141 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script re-constructs the Qt Creator installation to include Qt libraries, plugins, QtQuick.
Windows:
|-- bin
| |-- qt.conf -> Prefix=..
| |-- qt dlls
| |-- csdataquick executables
| |-- csdataquick dlls
| |-- qt-creator executables
| |-- qt-creator dlls
|
|-- lib
| |-- qtcreator
| |-- plugins
|
|-- plugins
| |-- qt plugins
| |-- csdataquick plugins
|
|-- qml
|-- CSDataQuick
|-- QtQuick
Linux:
|-- bin
| |-- qt.conf -> Prefix=..
| |-- csdataquick executables
| |-- qtcreator
|
|-- lib
| |-- qt shared libraries
| |-- csdataquick shared libraries
| |-- qtcreator
| |-- qtcreator shared libraries
| |-- plugins
|
|-- libexec
| |-- qtcreator
| |-- qt.conf -> Prefix=../..
| |-- qml2puppet
|
|-- plugins
| |-- csdataquick plugins
| |-- qt plugins
|
|-- qml
|-- CSDataQuick
|-- QtQuick
macOS:
|-- bin
| |-- csdataquick app bundles
| | |-- Contents
| | |-- Resources
| | |-- qt.conf -> Prefix=../../../..
| |-- Qt Creator.app
| |-- Contents
| |-- Resources
| |-- qt.conf -> Prefix=../../../..
| |-- qmldesigner
| |-- qt.conf -> Prefix=../../../../..
|
|-- lib
| |-- qt frameworks
| |-- csdataquick shared libraries
|
|-- plugins
| |-- qt plugins
| |-- csdataquick plugins
|
|-- qml
|-- CSDataQuick
|-- QtQuick
"""
import argparse
import glob
import os
import platform
import re
import sys
import shutil
import subprocess
if sys.hexversion < 0x03000000:
if sys.hexversion < 0x02070000:
subprocess.getoutput = lambda cmd: subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
else:
subprocess.getoutput = lambda cmd: subprocess.check_output(cmd, shell=True)
parser = argparse.ArgumentParser(description='Fixup Qt and Qt Creator for packaging')
parser.add_argument('--target', required=True, help='target path')
parser.add_argument('--qtcreator', help='qt creator path')
parser.add_argument('--qmake', required=True, help='qmake file path')
args = parser.parse_args(sys.argv[1:])
qtcreator_path = args.qtcreator
target_path = args.target
qmake = args.qmake
bin_dir = os.path.join(target_path, 'bin')
lib_dir = os.path.join(target_path, 'lib')
libexec_dir = os.path.join(target_path, 'libexec')
plugins_dir = os.path.join(target_path, 'plugins')
qml_dir = os.path.join(target_path, 'qml')
def smartCopy(src, dst, follow_symlinks=True, ignore=None):
"""
Same as the shell cp command. If *src* is a file, it is copied into *dst* if dst is an existing directory,
or as file *dst*. If *src* is a directory, it is copied recursively into *dst* if dst is an existing
directory, or as directory *dst*.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
if not follow_symlinks and os.path.islink(src):
os.symlink(os.readlink(src), dst)
else:
if os.path.isdir(src):
shutil.copytree(src, dst, symlinks=not follow_symlinks, ignore=ignore)
else:
shutil.copyfile(src, dst)
shutil.copystat(src, dst)
return dst
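# Editor's note: a hedged usage sketch of smartCopy, not called by the
# deployment steps below; the paths are hypothetical.
def _smart_copy_example():
    # A file copied into an existing directory keeps its basename, like `cp`.
    smartCopy('/tmp/libFoo.so.1', '/opt/pkg/lib')
    # A directory is copied recursively; symlinks are preserved when
    # follow_symlinks is False.
    smartCopy('/tmp/plugins', '/opt/pkg/plugins_copy', follow_symlinks=False)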
def deployQtLibraries():
libs = ['Core', 'Gui', 'Widgets', 'Concurrent', 'Network', 'PrintSupport', 'Script',
'Qml', 'Quick', 'QuickWidgets', 'QuickControls2', 'QuickTemplates2', 'QuickParticles',
'Xml', 'Svg', 'Sql', 'Help']
qtlibs_dir = subprocess.getoutput('%s -query QT_INSTALL_LIBS' % qmake).strip()
dst_dir = lib_dir
lib_pattern = 'libQt5%s.so*'
ignore_pattern = None
if platform.system() == 'Darwin':
lib_pattern = 'Qt%s.framework'
ignore_pattern = shutil.ignore_patterns('Headers', '*_debug', '*.prl')
elif platform.system() == 'Windows':
qtlibs_dir = subprocess.getoutput('%s -query QT_INSTALL_BINS' % qmake).strip()
dst_dir = bin_dir
lib_pattern = 'Qt5%s.dll'
elif platform.system() == 'Linux':
libs += ['XcbQpa', 'DBus']
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for lib in libs:
for file in glob.glob(os.path.join(qtlibs_dir, lib_pattern%lib)):
smartCopy(file, dst_dir, follow_symlinks=False, ignore=ignore_pattern)
if platform.system() == 'Windows':
for lib in ['libEGL.dll', 'libGLESv2.dll']:
smartCopy(os.path.join(qtlibs_dir, lib), dst_dir)
def deployQtPlugins():
plugins = ['bearer', 'designer', 'iconengines', 'imageformats',
'platforms', 'sqldrivers']
qtplugins_dir = subprocess.getoutput('%s -query QT_INSTALL_PLUGINS' % qmake).strip()
if not os.path.exists(plugins_dir):
os.makedirs(plugins_dir)
if platform.system() == 'Linux':
plugins += ['xcbglintegrations']
for plugin in plugins:
if not os.path.exists(os.path.join(qtplugins_dir, plugin)):
print('plugin "%s" does not exist' % plugin)
continue
shutil.copytree(os.path.join(qtplugins_dir, plugin),
os.path.join(plugins_dir, plugin),
symlinks=True,
ignore=shutil.ignore_patterns('*_debug.dylib', '*.dylib.dSYM', '*.pdb'))
# remove debug version on windows
if platform.system() == 'Windows':
# After sorting the debug version "<pluginname>d.dll" will be
# immediately after the release version "<pluginname>.dll".
# It is then quick to remove every 2nd file from this list.
dlls = sorted(os.listdir(os.path.join(plugins_dir, plugin)))[1::2]
for dll in dlls:
os.remove(os.path.join(plugins_dir, plugin, dll))
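# Editor's note: a small illustration of the sort trick above, with
# hypothetical plugin names:
#   sorted(['qjpeg.dll', 'qjpegd.dll', 'qsvg.dll', 'qsvgd.dll'])[1::2]
#   -> ['qjpegd.dll', 'qsvgd.dll']   (only the debug copies are removed)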
def deployQtQuick():
qtqml_dir = subprocess.getoutput('%s -query QT_INSTALL_QML' % qmake).strip()
if not os.path.exists(qml_dir):
os.makedirs(qml_dir)
for qml in ['Qt', 'QtQml', 'QtGraphicalEffects', 'QtQuick', 'QtQuick.2']:
if not os.path.exists(os.path.join(qtqml_dir, qml)):
print('qml module "%s" does not exist' % qml)
continue
shutil.copytree(os.path.join(qtqml_dir, qml),
os.path.join(qml_dir, qml),
symlinks=True,
ignore=shutil.ignore_patterns('*_debug.dylib', '*.dylib.dSYM', '*plugind.dll','*.pdb'))
def deployQt():
# Copy Qt libraries
deployQtLibraries()
# Copy Qt plugins
deployQtPlugins()
# Copy QtQuick modules
deployQtQuick()
def restruct_macos():
bundle_name = os.path.basename(qtcreator_path)
if not bundle_name.endswith('.app'):
print('Not a valid app bundle')
return
# Copy the app bundle to bin
if not os.path.exists(bin_dir):
os.makedirs(bin_dir)
shutil.copytree(qtcreator_path, os.path.join(bin_dir, bundle_name), symlinks=True)
# Fix rpath
for root, dirs, files in os.walk(os.path.join(bin_dir, bundle_name)):
for file in files:
fname = os.path.join(root, file)
if os.path.islink(fname):
continue
if file == 'qml2puppet' or os.path.basename(root) == 'MacOS' or os.path.splitext(file)[1] == '.dylib':
cmd = 'install_name_tool -add_rpath "@loader_path/%s" "%s"' % (os.path.relpath(lib_dir, root), fname)
subprocess.call(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
# Fix qt.conf
open(os.path.join(bin_dir, bundle_name, 'Contents', 'Resources', 'qt.conf'), 'w').write('[Paths]\nPrefix = ../../..\n')
open(os.path.join(bin_dir, bundle_name, 'Contents', 'Resources', 'qmldesigner', 'qt.conf'), 'w').write('[Paths]\nPrefix = ../../../../..\n')
def restruct_windows():
# Copy the entire directory
for d in ['bin', 'lib', 'share']:
shutil.copytree(os.path.join(qtcreator_path, d), os.path.join(target_path, d))
# Fix qt.conf
open(os.path.join(bin_dir, 'qt.conf'), 'w').write('[Paths]\nPrefix = ..\n')
def restruct_linux():
# Copy the entire directory
for d in ['bin', 'lib', 'libexec', 'share']:
shutil.copytree(os.path.join(qtcreator_path, d),
os.path.join(target_path, d),
symlinks=True,
ignore=shutil.ignore_patterns('Makefile*'))
# Fix rpath of qtcreator and executables under libexec
cmd = "chrpath -r '$ORIGIN/../lib/qtcreator:$ORIGIN/../lib:' " + os.path.join(bin_dir, 'qtcreator')
subprocess.call(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
for f in os.listdir(os.path.join(libexec_dir, 'qtcreator')):
cmd = "chrpath -r '$ORIGIN/../../lib/qtcreator:$ORIGIN/../../lib:' " + os.path.join(libexec_dir, 'qtcreator', f)
subprocess.call(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
# Fix qt.conf
open(os.path.join(bin_dir, 'qt.conf'), 'w').write('[Paths]\nPrefix = ..\n')
open(os.path.join(libexec_dir, 'qtcreator', 'qt.conf'), 'w').write('[Paths]\nPrefix = ../..\n')
if qtcreator_path and os.path.exists(qtcreator_path):
if platform.system() == 'Darwin':
restruct_macos()
elif platform.system() == 'Windows':
restruct_windows()
elif platform.system() == 'Linux':
restruct_linux()
# Fixup Qt
deployQt()
| xiaoqiangwang/CSDataQuick | tools/fixup_qtcreator.py | Python | gpl-2.0 | 9,584 | 0.003861 |
from data_vault import VaultEnvironment
class KeeperApiHelper:
_expected_commands = []
_vault_env = VaultEnvironment()
@staticmethod
def communicate_expect(actions):
# type: (list) -> None
KeeperApiHelper._expected_commands.clear()
KeeperApiHelper._expected_commands.extend(actions)
@staticmethod
def is_expect_empty():
# type: () -> bool
return len(KeeperApiHelper._expected_commands) == 0
@staticmethod
def communicate_command(_, request):
# type: (any, dict) -> dict
rs = {
'result': 'success',
'result_code': '',
'message': ''
}
action = KeeperApiHelper._expected_commands.pop(0)
if callable(action):
props = action(request)
if type(props) == dict:
rs.update(props)
return rs
if type(action) == str:
if action == request['command']:
return rs
raise Exception()
| Keeper-Security/Commander | unit-tests/helper.py | Python | mit | 1,017 | 0 |
import os
import shutil
class BasicOperations_TestClass:
TEST_ROOT = '__test_root__'
def setUp(self):
self.regenerate_root()
print(self.TEST_ROOT)
assert os.path.isdir(self.TEST_ROOT)
def tearDown(self):
return True
def test_test(self):
assert self.bar == 1
def regenerate_root(self):
if os.path.isdir(self.TEST_ROOT):
shutil.rmtree(self.TEST_ROOT)
os.makedirs(self.TEST_ROOT)
| aleksclark/replfs | nosetests/basic_operations_tests.py | Python | bsd-3-clause | 469 | 0.004264 |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
name = "R"
identifier = "org.vistrails.vistrails.rpy"
version = "0.1.2"
old_identifiers = ["edu.utah.sci.vistrails.rpy"]
| Nikea/VisTrails | vistrails/packages/rpy/__init__.py | Python | bsd-3-clause | 2,002 | 0.021978 |
# Import everything needed to edit video clips
from moviepy.editor import *
# Load myHolidays.mp4 and select the subclip 00:00:50 - 00:00:60
clip = VideoFileClip("myHolidays.mp4").subclip(50,60)
# Reduce the audio volume (volume x 0.8)
clip = clip.volumex(0.8)
# Generate a text clip. You can customize the font, color, etc.
txt_clip = TextClip("My Holidays 2013",fontsize=70,color='white')
# Say that you want it to appear 10s at the center of the screen
txt_clip = txt_clip.set_pos('center').set_duration(10)
# Overlay the text clip on the first video clip
video = CompositeVideoClip([clip, txt_clip])
# Write the result to a file (many options available !)
video.write_videofile("myHolidays_edited.webm")
| ttm/socialLegacy | tests/legacy/testAnim2.py | Python | mit | 714 | 0.004202 |
#!/usr/bin/env python3
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP 9 soft forks.
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 blocks and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
"""
from io import BytesIO
import shutil
import time
import itertools
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, network_thread_start
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
class BIP9SoftForksTest(ComparisonTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1', '-dip3params=9000:9000']]
self.setup_clean_chain = True
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
network_thread_start()
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
return info['bip9_softforks'][key]
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = self.mocktime + 1
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert_equal(tmpl['version'], 0x20000000)
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 1-A
# check stats after max number of "signalling not" blocks such that LOCKED_IN still possible this period
test_blocks = self.generate_blocks(36, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(10, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 46)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
# Test 1-B
# check stats after one additional "signalling not" block -- LOCKED_IN no longer possible this period
test_blocks = self.generate_blocks(1, 4, test_blocks) # 0x00000004 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 47)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], False)
# Test 1-C
# finish period with "ready" blocks, but soft fork will still fail to advance to LOCKED_IN
test_blocks = self.generate_blocks(97, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 2
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(57, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
# check counting stats and "possible" flag before last block of this period achieves LOCKED_IN...
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 143)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 107)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# ...continue with Test 3
test_blocks = self.generate_blocks(1, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 4
# 143 more blocks, not signalling (waiting period-1)
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 5
# Check that the new rule is enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
assert_equal(self.get_bip9_status(bipName)['since'], 720)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert(not (tmpl['version'] & (1 << bitno)))
# Test 6
# Check that the new sequence lock rules are enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
# Restart all
self.test.clear_all_connections()
self.stop_nodes()
self.nodes = []
shutil.rmtree(self.options.tmpdir + "/node0")
self.setup_chain()
self.setup_network()
self.test.add_all_connections(self.nodes)
network_thread_start()
self.test.test_nodes[0].wait_for_verack()
def get_tests(self):
for test in itertools.chain(
self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
):
yield test
def donothing(self, tx):
return
def csv_invalidate(self, tx):
"""Modify the signature in vin 0 of the tx to fail CSV
Prepends -1 CSV DROP in the scriptSig itself.
"""
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def sequence_lock_invalidate(self, tx):
"""Modify the nSequence to make it fail once the sequence lock rule is
activated (high timespan).
"""
tx.vin[0].nSequence = 0x00FFFFFF
tx.nLockTime = 0
def mtp_invalidate(self, tx):
"""Modify the nLockTime to make it fail once the MTP rule is activated."""
# Disable Sequence lock, Activate nLockTime
tx.vin[0].nSequence = 0x90FFFFFF
tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main()
| nmarley/dash | test/functional/bip9-softforks.py | Python | mit | 12,924 | 0.004643 |
#!/usr/bin/python
import glob, os
from cjh.cli import Cli, ListPrompt
from cjh.lists import ItemList
class Fileman(object):
@classmethod
def pwd(cls, getstr=False):
"""
Emulate 'pwd' command
"""
string = os.getcwd()
if getstr:
return string
else: print(string)
@classmethod
def mc(cls):
list_prompt = ListPrompt(['..'] + cls.ls(opts=['B'], get_list=True))
if len(list_prompt) > Cli.height():
Cli.less(str(list_prompt))
response = Cli.make_page(header=cls.pwd(getstr=True), func=list_prompt.input)
if response == 1:
os.chdir(list_prompt[response - 1])
cls.mc()
elif list_prompt[response - 1].endswith('/'):
os.chdir(list_prompt[response - 1][:-1])
cls.mc()
else: return list_prompt[response - 1]
@staticmethod
def ls(*args, **kwargs):
"""
Emulate 'ls' command
"""
if len(args) == 0:
cwd = os.getcwd()
file_list = os.listdir(cwd)
else:
file_list = []
for arg in args:
file_list += glob.glob(arg)
if 'opts' in kwargs and 'B' in kwargs['opts']:
file_list = [
file_ for file_ in file_list if not file_.endswith('~')
]
file_list.sort(key=str.lower)
dir_list = []
if 'opts' in kwargs and 'F' in kwargs['opts']:
# Build a new list rather than deleting entries while enumerating, which
# would skip the item that follows each removed directory.
remaining = []
for file_ in file_list:
if os.path.isdir(file_):
dir_list.append(file_ + '/')
elif os.access(file_, os.X_OK):
remaining.append(file_ + '*')
else:
remaining.append(file_)
file_list = remaining
if 'get_list' not in kwargs or kwargs['get_list'] is not True:
string = ''
for dir_ in dir_list:
string += (dir_ + '\n')
for file_ in file_list:
string += (file_ + '\n')
if len(dir_list) + len(file_list) + 1 > Cli.height():
Cli.less(string)
else: Cli.write(string.strip())
else:
return dir_list + file_list
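# Editor's note: a hedged usage sketch, not part of the original module. The
# option letters mirror GNU ls: 'B' hides backup files ending in '~', 'F'
# appends '/' to directories and '*' to executables.
def _ls_example():
    entries = Fileman.ls(opts=['B', 'F'], get_list=True)
    for entry in entries:
        print(entry)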
| hammerhorn/hammerhorn-jive | igo/cjh/files.py | Python | gpl-2.0 | 2,267 | 0.003088 |
"""
Basically I'm trying to cover 4 options, one of each:
(Previously featured state?, Use previous positions?)
---------------------------------------------------------
(no, no) = get_initial_featuring
(yes, no) = get_particle_featuring
(yes, yes) = translate_featuring
(no, yes) = feature_from_pos_rad
These do:
(use globals to start, start from nothing)
(use positions to start, start from trackpy)
"""
from future import standard_library
standard_library.install_aliases()
from builtins import range
import os
try:
import tkinter as tk
import tkinter.filedialog as tkfd
except ImportError:
import Tkinter as tk
import tkFileDialog as tkfd
import numpy as np
import peri
from peri import initializers, util, models, states, logger
from peri.comp import ilms
import peri.opt.optimize as opt
import peri.opt.addsubtract as addsub
RLOG = logger.log.getChild('runner')
def locate_spheres(image, feature_rad, dofilter=False, order=(3 ,3, 3),
trim_edge=True, **kwargs):
"""
Get an initial featuring of sphere positions in an image.
Parameters
-----------
image : :class:`peri.util.Image` object
Image object which defines the image file as well as the region.
feature_rad : float
Radius of objects to find, in pixels. This is a featuring radius
and not a real radius, so a better value is frequently smaller
than the real radius (half the actual radius is good). If ``use_tp``
is True, then the twice ``feature_rad`` is passed as trackpy's
``diameter`` keyword.
dofilter : boolean, optional
Whether to remove the background before featuring. Doing so can
often greatly increase the success of initial featuring and
decrease later optimization time. Filtering functions by fitting
the image to a low-order polynomial and featuring the residuals.
In doing so, this will change the mean intensity of the featured
image and hence the good value of ``minmass`` will change when
``dofilter`` is True. Default is False.
order : 3-element tuple, optional
If `dofilter`, the 2+1D Leg Poly approximation to the background
illumination field. Default is (3,3,3).
Other Parameters
----------------
invert : boolean, optional
Whether to invert the image for featuring. Set to True if the
image is dark particles on a bright background. Default is True
minmass : Float or None, optional
The minimum mass/masscut of a particle. Default is None, which
calculates internally.
use_tp : Bool, optional
Whether or not to use trackpy. Default is False, since trackpy
cuts out particles at the edge.
Returns
--------
positions : np.ndarray [N,3]
Positions of the particles in order (z,y,x) in image pixel units.
Notes
-----
Optionally filters the image by fitting the image I(x,y,z) to a
polynomial, then subtracts this fitted intensity variation and uses
centroid methods to find the particles.
"""
# We just want a smoothed field model of the image so that the residuals
# are simply the particles without other complications
m = models.SmoothFieldModel()
I = ilms.LegendrePoly2P1D(order=order, constval=image.get_image().mean())
s = states.ImageState(image, [I], pad=0, mdl=m)
if dofilter:
opt.do_levmarq(s, s.params)
pos = addsub.feature_guess(s, feature_rad, trim_edge=trim_edge, **kwargs)[0]
return pos
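# Minimal usage sketch (an assumption, not from the original source): feature a
# raw confocal image before building a full state. The file name
# 'small_confocal.tif' is hypothetical.
#   im = util.RawImage('small_confocal.tif')
#   pos = locate_spheres(im, feature_rad=5, dofilter=True, invert=True)
#   # pos is an [N, 3] array of (z, y, x) positions in pixel units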
def get_initial_featuring(statemaker, feature_rad, actual_rad=None,
im_name=None, tile=None, invert=True, desc='', use_full_path=False,
featuring_params={}, statemaker_kwargs={}, **kwargs):
"""
Completely optimizes a state from an image of roughly monodisperse
particles.
The user can interactively select the image. The state is periodically
saved during optimization, with different filename for different stages
of the optimization.
Parameters
----------
statemaker : Function
A statemaker function. Given arguments `im` (a
:class:`~peri.util.Image`), `pos` (numpy.ndarray), `rad` (ndarray),
and any additional `statemaker_kwargs`, must return a
:class:`~peri.states.ImageState`. There is an example function in
scripts/statemaker_example.py
feature_rad : Int, odd
The particle radius for featuring, as passed to locate_spheres.
actual_rad : Float, optional
The actual radius of the particles. Default is feature_rad
im_name : string, optional
The file name of the image to load. If not set, it is selected
interactively through Tk.
tile : :class:`peri.util.Tile`, optional
The tile of the raw image to be analyzed. Default is None, the
entire image.
invert : Bool, optional
Whether to invert the image for featuring, as passed to trackpy.
Default is True.
desc : String, optional
A description to be inserted in saved state. The save name will
be, e.g., '0.tif-peri-' + desc + 'initial-burn.pkl'. Default is ''
use_full_path : Bool, optional
Set to True to use the full path name for the image. Default
is False.
featuring_params : Dict, optional
kwargs-like dict of any additional keyword arguments to pass to
``get_initial_featuring``, such as ``'use_tp'`` or ``'minmass'``.
Default is ``{}``.
statemaker_kwargs : Dict, optional
kwargs-like dict of any additional keyword arguments to pass to
the statemaker function. Default is ``{}``.
Other Parameters
----------------
max_mem : Numeric
The maximum additional memory to use for the optimizers, as
passed to optimize.burn. Default is 1e9.
min_rad : Float, optional
The minimum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius smaller than this are identified
as fake and removed. Default is 0.5 * actual_rad.
max_rad : Float, optional
The maximum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius larger than this are identified
as fake and removed. Default is 1.5 * actual_rad, however you
may find better results if you make this more stringent.
rz_order : int, optional
If nonzero, the order of an additional augmented rscl(z)
parameter for optimization. Default is 0; i.e. no rscl(z)
optimization.
zscale : Float, optional
The zscale of the image. Default is 1.0
Returns
-------
s : :class:`peri.states.ImageState`
The optimized state.
See Also
--------
feature_from_pos_rad : Using a previous state's globals and
user-provided positions and radii as an initial guess,
completely optimizes a state.
    get_particles_featuring : Using a previous state's globals and
positions as an initial guess, completely optimizes a state.
translate_featuring : Use a previous state's globals and
        centroid methods for an initial particle guess, completely
optimizes a state.
Notes
-----
Proceeds by centroid-featuring the image for an initial guess of
particle positions, then optimizing the globals + positions until
    termination as called in optimize_from_initial.
    The ``Other Parameters`` are passed to optimize_from_initial.
"""
if actual_rad is None:
actual_rad = feature_rad
_, im_name = _pick_state_im_name('', im_name, use_full_path=use_full_path)
im = util.RawImage(im_name, tile=tile)
pos = locate_spheres(im, feature_rad, invert=invert, **featuring_params)
if np.size(pos) == 0:
msg = 'No particles found. Try using a smaller `feature_rad`.'
raise ValueError(msg)
rad = np.ones(pos.shape[0], dtype='float') * actual_rad
s = statemaker(im, pos, rad, **statemaker_kwargs)
RLOG.info('State Created.')
if desc is not None:
states.save(s, desc=desc+'initial')
optimize_from_initial(s, invert=invert, desc=desc, **kwargs)
return s
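# Hypothetical end-to-end call (a sketch; ``make_state`` stands in for the
# user-supplied statemaker described above, e.g. scripts/statemaker_example.py,
# and the image name is made up):
#   s = get_initial_featuring(make_state, feature_rad=5, actual_rad=5.5,
#                             im_name='confocal.tif', invert=True, desc='run0-')
#   # s is the optimized peri.states.ImageState, saved periodically along the way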
def feature_from_pos_rad(statemaker, pos, rad, im_name=None, tile=None,
desc='', use_full_path=False, statemaker_kwargs={}, **kwargs):
"""
Gets a completely-optimized state from an image and an initial guess of
particle positions and radii.
The state is periodically saved during optimization, with different
filename for different stages of the optimization. The user can select
the image.
Parameters
----------
statemaker : Function
A statemaker function. Given arguments `im` (a
:class:`~peri.util.Image`), `pos` (numpy.ndarray), `rad` (ndarray),
and any additional `statemaker_kwargs`, must return a
:class:`~peri.states.ImageState`. There is an example function in
scripts/statemaker_example.py
pos : [N,3] element numpy.ndarray.
The initial guess for the N particle positions.
rad : N element numpy.ndarray.
The initial guess for the N particle radii.
im_name : string or None, optional
The filename of the image to feature. Default is None, in which
the user selects the image.
tile : :class:`peri.util.Tile`, optional
A tile of the sub-region of the image to feature. Default is
None, i.e. entire image.
desc : String, optional
A description to be inserted in saved state. The save name will
be, e.g., '0.tif-peri-' + desc + 'initial-burn.pkl'. Default is ''
use_full_path : Bool, optional
Set to True to use the full path name for the image. Default
is False.
statemaker_kwargs : Dict, optional
kwargs-like dict of any additional keyword arguments to pass to
the statemaker function. Default is ``{}``.
Other Parameters
----------------
max_mem : Numeric
The maximum additional memory to use for the optimizers, as
passed to optimize.burn. Default is 1e9.
min_rad : Float, optional
The minimum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius smaller than this are identified
as fake and removed. Default is 0.5 * actual_rad.
max_rad : Float, optional
The maximum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius larger than this are identified
as fake and removed. Default is 1.5 * actual_rad, however you
may find better results if you make this more stringent.
invert : {'guess', True, False}
Whether to invert the image for featuring, as passed to
addsubtract.add_subtract. Default is to guess from the
current state's particle positions.
rz_order : int, optional
If nonzero, the order of an additional augmented rscl(z)
parameter for optimization. Default is 0; i.e. no rscl(z)
optimization.
zscale : Float, optional
The zscale of the image. Default is 1.0
Returns
-------
s : :class:`peri.states.ImageState`
The optimized state.
See Also
--------
get_initial_featuring : Features an image from scratch, using
centroid methods as initial particle locations.
    get_particles_featuring : Using a previous state's globals and
positions as an initial guess, completely optimizes a state.
translate_featuring : Use a previous state's globals and
        centroid methods for an initial particle guess, completely
optimizes a state.
Notes
-----
    The ``Other Parameters`` are passed to optimize_from_initial.
    Proceeds by building a state from the user-provided positions and
    radii, then optimizing the globals + positions until termination
    as called in optimize_from_initial.
"""
if np.size(pos) == 0:
raise ValueError('`pos` is an empty array.')
elif np.shape(pos)[1] != 3:
raise ValueError('`pos` must be an [N,3] element numpy.ndarray.')
_, im_name = _pick_state_im_name('', im_name, use_full_path=use_full_path)
im = util.RawImage(im_name, tile=tile)
s = statemaker(im, pos, rad, **statemaker_kwargs)
RLOG.info('State Created.')
if desc is not None:
states.save(s, desc=desc+'initial')
optimize_from_initial(s, desc=desc, **kwargs)
return s
def optimize_from_initial(s, max_mem=1e9, invert='guess', desc='', rz_order=3,
min_rad=None, max_rad=None):
"""
Optimizes a state from an initial set of positions and radii, without
any known microscope parameters.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to optimize. It is modified internally and returned.
max_mem : Numeric, optional
The maximum memory for the optimizer to use. Default is 1e9 (bytes)
invert : Bool or `'guess'`, optional
Set to True if the image is dark particles on a bright
background, False otherwise. Used for add-subtract. The
default is to guess from the state's current particles.
desc : String, optional
An additional description to infix for periodic saving along the
way. Default is the null string ``''``.
rz_order : int, optional
``rz_order`` as passed to opt.burn. Default is 3
    min_rad : Float or None, optional
        The minimum radius to identify a particle as bad, as passed to
        add-subtract. Default is None, which picks half the median radius.
        If your sample is not monodisperse, you should pick a different
        value.
    max_rad : Float or None, optional
        The maximum radius to identify a particle as bad, as passed to
        add-subtract. Default is None, which picks 1.5x the median radius.
        If your sample is not monodisperse, you should pick a different
        value.
Returns
-------
s : :class:`peri.states.ImageState`
The optimized state, which is the same as the input ``s`` but
modified in-place.
"""
RLOG.info('Initial burn:')
if desc is not None:
desc_burn = desc + 'initial-burn'
desc_polish = desc + 'addsub-polish'
else:
desc_burn, desc_polish = [None] * 2
opt.burn(s, mode='burn', n_loop=3, fractol=0.1, desc=desc_burn,
max_mem=max_mem, include_rad=False, dowarn=False)
opt.burn(s, mode='burn', n_loop=3, fractol=0.1, desc=desc_burn,
max_mem=max_mem, include_rad=True, dowarn=False)
RLOG.info('Start add-subtract')
rad = s.obj_get_radii()
if min_rad is None:
min_rad = 0.5 * np.median(rad)
if max_rad is None:
max_rad = 1.5 * np.median(rad)
addsub.add_subtract(s, tries=30, min_rad=min_rad, max_rad=max_rad,
invert=invert)
if desc is not None:
states.save(s, desc=desc + 'initial-addsub')
RLOG.info('Final polish:')
d = opt.burn(s, mode='polish', n_loop=8, fractol=3e-4, desc=desc_polish,
max_mem=max_mem, rz_order=rz_order, dowarn=False)
if not d['converged']:
RLOG.warn('Optimization did not converge; consider re-running')
return s
def translate_featuring(state_name=None, im_name=None, use_full_path=False,
**kwargs):
"""
Translates one optimized state into another image where the particles
have moved by a small amount (~1 particle radius).
Returns a completely-optimized state. The user can interactively
    select the initial state and the second raw image. The state is
periodically saved during optimization, with different filename for
different stages of the optimization.
Parameters
----------
state_name : String or None, optional
The name of the initially-optimized state. Default is None,
which prompts the user to select the name interactively
through a Tk window.
im_name : String or None, optional
The name of the new image to optimize. Default is None,
which prompts the user to select the name interactively
through a Tk window.
use_full_path : Bool, optional
Set to True to use the full path of the state instead of
partial path names (e.g. /full/path/name/state.pkl vs
state.pkl). Default is False
Other Parameters
----------------
max_mem : Numeric
The maximum additional memory to use for the optimizers, as
passed to optimize.burn. Default is 1e9.
desc : String, optional
A description to be inserted in saved state. The save name will
be, e.g., '0.tif-peri-' + desc + 'initial-burn.pkl'. Default is ''
min_rad : Float, optional
The minimum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius smaller than this are identified
as fake and removed. Default is 0.5 * actual_rad.
max_rad : Float, optional
The maximum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius larger than this are identified
as fake and removed. Default is 1.5 * actual_rad, however you
may find better results if you make this more stringent.
invert : {True, False, 'guess'}
Whether to invert the image for featuring, as passed to
addsubtract.add_subtract. Default is to guess from the
state's current particles.
rz_order : int, optional
If nonzero, the order of an additional augmented rscl(z)
parameter for optimization. Default is 0; i.e. no rscl(z)
optimization.
do_polish : Bool, optional
Set to False to only optimize the particles and add-subtract.
Default is True, which then runs a polish afterwards.
Returns
-------
s : :class:`peri.states.ImageState`
The optimized state.
See Also
--------
get_initial_featuring : Features an image from scratch, using
centroid methods as initial particle locations.
feature_from_pos_rad : Using a previous state's globals and
user-provided positions and radii as an initial guess,
completely optimizes a state.
    get_particles_featuring : Using a previous state's globals and
positions as an initial guess, completely optimizes a state.
Notes
-----
The ``Other Parameters`` are passed to _translate_particles.
Proceeds by:
1. Optimize particle positions only.
2. Optimize particle positions and radii only.
3. Add-subtract missing and bad particles.
4. If polish, optimize the illumination, background, and particles.
5. If polish, optimize everything.
"""
state_name, im_name = _pick_state_im_name(
state_name, im_name, use_full_path=use_full_path)
s = states.load(state_name)
im = util.RawImage(im_name, tile=s.image.tile)
s.set_image(im)
_translate_particles(s, **kwargs)
return s
def get_particles_featuring(feature_rad, state_name=None, im_name=None,
use_full_path=False, actual_rad=None, invert=True, featuring_params={},
**kwargs):
"""
Combines centroid featuring with the globals from a previous state.
Runs trackpy.locate on an image, sets the globals from a previous state,
    and calls _translate_particles.
Parameters
----------
feature_rad : Int, odd
The particle radius for featuring, as passed to locate_spheres.
state_name : String or None, optional
The name of the initially-optimized state. Default is None,
which prompts the user to select the name interactively
through a Tk window.
im_name : String or None, optional
The name of the new image to optimize. Default is None,
which prompts the user to select the name interactively
through a Tk window.
use_full_path : Bool, optional
Set to True to use the full path of the state instead of
partial path names (e.g. /full/path/name/state.pkl vs
state.pkl). Default is False
actual_rad : Float or None, optional
The initial guess for the particle radii. Default is the median
of the previous state.
invert : Bool
Whether to invert the image for featuring, as passed to
addsubtract.add_subtract and locate_spheres. Set to False
if the image is bright particles on a dark background.
Default is True (dark particles on bright background).
featuring_params : Dict, optional
kwargs-like dict of any additional keyword arguments to pass to
``get_initial_featuring``, such as ``'use_tp'`` or ``'minmass'``.
Default is ``{}``.
Other Parameters
----------------
max_mem : Numeric
The maximum additional memory to use for the optimizers, as
passed to optimize.burn. Default is 1e9.
desc : String, optional
A description to be inserted in saved state. The save name will
be, e.g., '0.tif-peri-' + desc + 'initial-burn.pkl'. Default is ''
min_rad : Float, optional
The minimum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius smaller than this are identified
as fake and removed. Default is 0.5 * actual_rad.
max_rad : Float, optional
The maximum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius larger than this are identified
as fake and removed. Default is 1.5 * actual_rad, however you
may find better results if you make this more stringent.
rz_order : int, optional
If nonzero, the order of an additional augmented rscl(z)
parameter for optimization. Default is 0; i.e. no rscl(z)
optimization.
do_polish : Bool, optional
Set to False to only optimize the particles and add-subtract.
Default is True, which then runs a polish afterwards.
Returns
-------
s : :class:`peri.states.ImageState`
The optimized state.
See Also
--------
get_initial_featuring : Features an image from scratch, using
centroid methods as initial particle locations.
feature_from_pos_rad : Using a previous state's globals and
user-provided positions and radii as an initial guess,
completely optimizes a state.
translate_featuring : Use a previous state's globals and
        centroid methods for an initial particle guess, completely
optimizes a state.
Notes
-----
The ``Other Parameters`` are passed to _translate_particles.
Proceeds by:
1. Find a guess of the particle positions through centroid methods.
2. Optimize particle positions only.
3. Optimize particle positions and radii only.
4. Add-subtract missing and bad particles.
5. If polish, optimize the illumination, background, and particles.
6. If polish, optimize everything.
"""
state_name, im_name = _pick_state_im_name(
state_name, im_name, use_full_path=use_full_path)
s = states.load(state_name)
if actual_rad is None:
actual_rad = np.median(s.obj_get_radii())
im = util.RawImage(im_name, tile=s.image.tile)
pos = locate_spheres(im, feature_rad, invert=invert, **featuring_params)
_ = s.obj_remove_particle(np.arange(s.obj_get_radii().size))
s.obj_add_particle(pos, np.ones(pos.shape[0])*actual_rad)
s.set_image(im)
_translate_particles(s, invert=invert, **kwargs)
return s
def _pick_state_im_name(state_name, im_name, use_full_path=False):
"""
If state_name or im_name is None, picks them interactively through Tk,
and then sets with or without the full path.
Parameters
----------
state_name : {string, None}
The name of the state. If None, selected through Tk.
im_name : {string, None}
The name of the image. If None, selected through Tk.
use_full_path : Bool, optional
Set to True to return the names as full paths rather than
relative paths. Default is False (relative path).
"""
initial_dir = os.getcwd()
if (state_name is None) or (im_name is None):
wid = tk.Tk()
wid.withdraw()
if state_name is None:
state_name = tkfd.askopenfilename(
initialdir=initial_dir, title='Select pre-featured state')
os.chdir(os.path.dirname(state_name))
if im_name is None:
im_name = tkfd.askopenfilename(
initialdir=initial_dir, title='Select new image')
if (not use_full_path) and (os.path.dirname(im_name) != ''):
im_path = os.path.dirname(im_name)
os.chdir(im_path)
im_name = os.path.basename(im_name)
else:
os.chdir(initial_dir)
return state_name, im_name
def _translate_particles(s, max_mem=1e9, desc='', min_rad='calc',
max_rad='calc', invert='guess', rz_order=0, do_polish=True):
"""
Workhorse for translating particles. See get_particles_featuring for docs.
"""
if desc is not None:
desc_trans = desc + 'translate-particles'
desc_burn = desc + 'addsub_burn'
desc_polish = desc + 'addsub_polish'
else:
desc_trans, desc_burn, desc_polish = [None]*3
RLOG.info('Translate Particles:')
opt.burn(s, mode='do-particles', n_loop=4, fractol=0.1, desc=desc_trans,
max_mem=max_mem, include_rad=False, dowarn=False)
opt.burn(s, mode='do-particles', n_loop=4, fractol=0.05, desc=desc_trans,
max_mem=max_mem, include_rad=True, dowarn=False)
RLOG.info('Start add-subtract')
addsub.add_subtract(s, tries=30, min_rad=min_rad, max_rad=max_rad,
invert=invert)
if desc is not None:
states.save(s, desc=desc + 'translate-addsub')
if do_polish:
RLOG.info('Final Burn:')
opt.burn(s, mode='burn', n_loop=3, fractol=3e-4, desc=desc_burn,
                 max_mem=max_mem, rz_order=rz_order, dowarn=False)
RLOG.info('Final Polish:')
d = opt.burn(s, mode='polish', n_loop=4, fractol=3e-4, desc=desc_polish,
max_mem=max_mem, rz_order=rz_order, dowarn=False)
if not d['converged']:
RLOG.warn('Optimization did not converge; consider re-running')
def link_zscale(st):
"""Links the state ``st`` psf zscale with the global zscale"""
# FIXME should be made more generic to other parameters and categories
psf = st.get('psf')
psf.param_dict['zscale'] = psf.param_dict['psf-zscale']
psf.params[psf.params.index('psf-zscale')] = 'zscale'
psf.global_zscale = True
psf.param_dict.pop('psf-zscale')
st.trigger_parameter_change()
st.reset()
def finish_state(st, desc='finish-state', invert='guess'):
"""
Final optimization for the best-possible state.
Runs a local add-subtract to capture any difficult-to-feature particles,
then does another set of optimization designed to get to the best
possible fit.
Parameters
----------
st : :class:`peri.states.ImageState`
The state to finish
desc : String, optional
Description to intermittently save the state as, as passed to
state.save. Default is `'finish-state'`.
invert : {'guess', True, False}
Whether to invert the image for featuring, as passed to
addsubtract.add_subtract. Default is to guess from the
state's current particles.
See Also
--------
`peri.opt.addsubtract.add_subtract_locally`
`peri.opt.optimize.finish`
"""
for minmass in [None, 0]:
for _ in range(3):
npart, poses = addsub.add_subtract_locally(st, region_depth=7,
minmass=minmass, invert=invert)
if npart == 0:
break
opt.finish(st, n_loop=1, separate_psf=True, desc=desc, dowarn=False)
opt.burn(st, mode='polish', desc=desc, n_loop=2, dowarn=False)
d = opt.finish(st, desc=desc, n_loop=4, dowarn=False)
if not d['converged']:
RLOG.warn('Optimization did not converge; consider re-running')
|
peri-source/peri
|
peri/runner.py
|
Python
|
mit
| 29,237 | 0.001163 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""DataGroupXanes: work with XANES data sets
============================================
- DataGroup
- DataGroup1D
- DataGroupXanes
"""
from .datagroup import MODNAME
from .datagroup1D import DataGroup1D
class DataGroupXanes(DataGroup1D):
"""DataGroup for XANES scans"""
def __init__(self, kwsd=None, _larch=None):
super(DataGroupXanes, self).__init__(kwsd=kwsd, _larch=_larch)
### LARCH ###
def datagroup_xan(kwsd=None, _larch=None):
"""utility to perform wrapped operations on a list of XANES data
groups"""
return DataGroupXanes(kwsd=kwsd, _larch=_larch)
def registerLarchPlugin():
return (MODNAME, {'datagroup_xan' : datagroup_xan})
if __name__ == '__main__':
pass
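# Usage sketch (an assumption about the Larch session, not from the original):
# once registerLarchPlugin() has run, ``datagroup_xan()`` is exposed under the
# plugin's module namespace and returns an empty DataGroupXanes to be filled
# with XANES scans, e.g. ``dg = datagroup_xan(_larch=session)`` where
# ``session`` is a hypothetical Larch interpreter instance.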
|
maurov/xraysloth
|
sloth/collects/datagroup_xan.py
|
Python
|
bsd-3-clause
| 767 | 0.009126 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Assign Addresses to interfaces """
from datetime import datetime
import re
from sqlalchemy import (Column, Integer, DateTime, ForeignKey, Sequence,
UniqueConstraint, Index)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relation, backref, deferred, validates
from sqlalchemy.sql import and_
from aquilon.exceptions_ import InternalError
from aquilon.aqdb.column_types import IP, AqStr, EmptyStr
from aquilon.aqdb.model import Base, Interface, ARecord, Network
from aquilon.aqdb.model.a_record import dns_fqdn_mapper
_TN = 'address_assignment'
_ABV = 'addr_assign'
class AddressAssignment(Base):
"""
Assignment of IP addresses to network interfaces.
It's kept as an association map to model the linkage, since we need to
have maximum ability to provide potentially complex configuration
scenarios, such as advertising certain VIP addresses from some, but not
all of the network interfaces on a machine (to be used for backup
servers, cluster filesystem servers, NetApp filers, etc.). While in
most cases we can assume VIPs are broadcast out all interfaces on the
box we still need to have the underlying model as the more complex
many to many relationship implemented here.
"""
__tablename__ = _TN
_label_check = re.compile('^[a-z0-9]{0,16}$')
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
assignment_type = Column(AqStr(32), nullable=False)
interface_id = Column(ForeignKey(Interface.id, ondelete='CASCADE'),
nullable=False)
label = Column(EmptyStr(16), nullable=False)
ip = Column(IP, nullable=False)
network_id = Column(ForeignKey(Network.id), nullable=False)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
interface = relation(Interface, innerjoin=True,
backref=backref('assignments', order_by=[label],
cascade='all, delete-orphan'))
# Setting viewonly is very important here as we do not want the removal of
# an AddressAssignment record to change the linked DNS record(s)
# Can't use backref or back_populates due to the different mappers
dns_records = relation(dns_fqdn_mapper,
primaryjoin=and_(network_id == dns_fqdn_mapper.c.network_id,
ip == dns_fqdn_mapper.c.ip),
foreign_keys=[dns_fqdn_mapper.c.ip,
dns_fqdn_mapper.c.network_id],
viewonly=True)
fqdns = association_proxy('dns_records', 'fqdn')
network = relation(Network, innerjoin=True,
backref=backref('assignments', passive_deletes=True,
order_by=[ip]))
__table_args__ = (UniqueConstraint(interface_id, ip),
UniqueConstraint(interface_id, label),
Index("%s_network_ip_idx" % _ABV, network_id, ip))
__mapper_args__ = {'polymorphic_on': assignment_type,
'polymorphic_identity': 'standard'}
@property
def logical_name(self):
"""
Compute an OS-agnostic name for this interface/address combo.
        BIG FAT WARNING: do _NOT_ assume that this name really exists on the
        host!
        There are external systems like DSDB that cannot handle having multiple
        addresses on the same interface. Because of that, this function generates
        a unique name for every interface/address tuple.
"""
# Use the Linux naming convention because people are familiar with that
# and it is easy to parse if needed
name = self.interface.name
if self.label:
name += ":%s" % self.label
return name
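    # Illustration (hedged, not from the original): an assignment on interface
    # 'eth0' with label 'vip1' yields logical_name 'eth0:vip1'; with an empty
    # label it is simply 'eth0'.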
@property
def is_shared(self):
return False
def __init__(self, label=None, network=None, **kwargs):
if not label:
label = ""
elif not self._label_check.match(label): # pragma: no cover
raise ValueError("Illegal address label '%s'." % label)
# Right now network_id is nullable due to how refresh_network works, so
# verify the network here
if not network: # pragma: no cover
raise InternalError("AddressAssignment needs a network")
super(AddressAssignment, self).__init__(label=label, network=network,
**kwargs)
def __repr__(self):
return "<Address %s on %s/%s>" % (self.ip,
self.interface.hardware_entity.label,
self.logical_name)
# Assigned to external classes here to avoid circular dependencies.
Interface.addresses = association_proxy('assignments', 'ip')
# Can't use backref or back_populates due to the different mappers
# This relation gives us the two other sides of the triangle mentioned above
# Do NOT consider the DNS environment here - whether the IP is used or not does
# not depend on its visibility in DNS
ARecord.assignments = relation(
AddressAssignment,
primaryjoin=and_(AddressAssignment.network_id == ARecord.network_id,
AddressAssignment.ip == ARecord.ip),
foreign_keys=[AddressAssignment.ip, AddressAssignment.network_id],
viewonly=True)
class SharedAddressAssignment(AddressAssignment):
priority = Column(Integer)
# As priority is an additional col we cannot make it non-null
@validates('priority')
def _validate_priority(self, key, value): # pylint: disable=W0613
if not value:
raise ValueError("Shared addresses require a priority")
return value
@property
def is_shared(self):
return True
__mapper_args__ = {'polymorphic_identity': 'shared'}
|
quattor/aquilon
|
lib/aquilon/aqdb/model/address_assignment.py
|
Python
|
apache-2.0
| 6,715 | 0.000596 |
from __future__ import print_function
import time
import argparse
import grpc
from jaeger_client import Config
from grpc_opentracing import open_tracing_client_interceptor
from grpc_opentracing.grpcext import intercept_channel
import command_line_pb2
def run():
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_payloads',
action='store_true',
help='log request/response objects to open-tracing spans')
args = parser.parse_args()
config = Config(
config={
'sampler': {
'type': 'const',
'param': 1,
},
'logging': True,
},
service_name='trivial-client')
tracer = config.initialize_tracer()
tracer_interceptor = open_tracing_client_interceptor(
tracer, log_payloads=args.log_payloads)
channel = grpc.insecure_channel('localhost:50051')
channel = intercept_channel(channel, tracer_interceptor)
stub = command_line_pb2.CommandLineStub(channel)
response = stub.Echo(command_line_pb2.CommandRequest(text='Hello, hello'))
print(response.text)
time.sleep(2)
tracer.close()
time.sleep(2)
if __name__ == '__main__':
run()
|
johnbelamaric/themis
|
vendor/github.com/grpc-ecosystem/grpc-opentracing/python/examples/trivial/trivial_client.py
|
Python
|
apache-2.0
| 1,218 | 0 |
from collections import Counter
c = Counter(input())
print(min(c['t'], c['r'], c['e']//2))
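# Worked example (a sketch, not part of the original submission): the input
# "treetree" has t=2, r=2, e=4, so min(2, 2, 4 // 2) == 2 copies of "tree".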
|
knuu/competitive-programming
|
yukicoder/yuki279.py
|
Python
|
mit
| 91 | 0 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to install ARM root image for cross building of ARM chrome on linux.
This script can be run manually but is more often run as part of gclient
hooks. When run from hooks this script should be a no-op on non-linux
platforms.
The sysroot image could be constructed from scratch based on the current
state of precise/arm, but for consistency we currently use a pre-built root
image which was originally designed for building trusted NaCl code. The image
will normally need to be rebuilt every time chrome's build dependencies are
changed.
Steps to rebuild the arm sysroot image:
- cd $SRC/native_client
- ./tools/trusted_cross_toolchains/trusted-toolchain-creator.armel.precise.sh \
UpdatePackageLists
- ./tools/trusted_cross_toolchains/trusted-toolchain-creator.armel.precise.sh \
BuildJail $SRC/out/arm-sysroot.tar.gz
- gsutil cp -a public-read $SRC/out/arm-sysroot.tar.gz \
nativeclient-archive2/toolchain/$NACL_REV/sysroot-arm-trusted.tgz
"""
import os
import shutil
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
URL_PREFIX = 'https://storage.googleapis.com'
URL_PATH = 'nativeclient-archive2/toolchain'
REVISION = 13035
TARBALL = 'sysroot-arm-trusted.tgz'
def main(args):
if '--linux-only' in args:
# This argument is passed when run from the gclient hooks.
# In this case we return early on non-linux platforms
# or if GYP_DEFINES doesn't include target_arch=arm
if not sys.platform.startswith('linux'):
return 0
if "target_arch=arm" not in os.environ.get('GYP_DEFINES', ''):
return 0
src_root = os.path.dirname(os.path.dirname(SCRIPT_DIR))
sysroot = os.path.join(src_root, 'arm-sysroot')
url = "%s/%s/%s/%s" % (URL_PREFIX, URL_PATH, REVISION, TARBALL)
stamp = os.path.join(sysroot, ".stamp")
if os.path.exists(stamp):
with open(stamp) as s:
if s.read() == url:
print "ARM root image already up-to-date: %s" % sysroot
return 0
print "Installing ARM root image: %s" % sysroot
if os.path.isdir(sysroot):
shutil.rmtree(sysroot)
os.mkdir(sysroot)
tarball = os.path.join(sysroot, TARBALL)
curl = ['curl', '--fail', '-L', url, '-o', tarball]
if os.isatty(sys.stdout.fileno()):
curl.append('--progress')
else:
curl.append('--silent')
subprocess.check_call(curl)
subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot])
os.remove(tarball)
with open(stamp, 'w') as s:
s.write(url)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
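# Manual invocation sketch (an assumption; gclient normally runs this script as
# a hook, passing --linux-only so non-Linux or non-ARM checkouts return early):
#   GYP_DEFINES="target_arch=arm" python build/linux/install-arm-sysroot.py --linux-only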
|
AndroidOpenDevelopment/android_external_chromium_org
|
build/linux/install-arm-sysroot.py
|
Python
|
bsd-3-clause
| 2,718 | 0.008462 |
from .context import CorpusContext
from .audio import AudioContext
from .importable import ImportContext
from .lexical import LexicalContext
from .pause import PauseContext
from .utterance import UtteranceContext
from .structured import StructuredContext
from .syllabic import SyllabicContext
from .spoken import SpokenContext
|
samihuc/PolyglotDB
|
polyglotdb/corpus/__init__.py
|
Python
|
mit
| 328 | 0 |
from SCons.Script import *
def exists(env):
return (env["PLATFORM"]=="win32")
def ConvertNewlines(target,source,env):
for t,s in zip(target,source):
f_in=open(str(s),"rb")
f_out=open(str(t),"wb")
f_out.write(f_in.read().replace("\n","\r\n"))
f_out.close()
f_in.close()
return None
def ConvertNewlinesB(target,source,env):
for t,s in zip(target,source):
f_in=open(str(s),"rb")
f_out=open(str(t),"wb")
f_out.write("\xef\xbb\xbf")
f_out.write(f_in.read().replace("\n","\r\n"))
f_out.close()
f_in.close()
return None
def generate(env):
env["BUILDERS"]["ConvertNewlines"]=Builder(action=ConvertNewlines,suffix=".txt")
env["BUILDERS"]["ConvertNewlinesB"]=Builder(action=ConvertNewlinesB,suffix=".txt")
|
iakov/RHVoice
|
site_scons/site_tools/newlines.py
|
Python
|
gpl-3.0
| 816 | 0.035539 |
import requests
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
@csrf_exempt
@require_http_methods(["POST"])
def post_service_request(request):
payload = request.POST.copy()
outgoing = payload.dict()
if outgoing.get("internal_feedback", False):
if "internal_feedback" in outgoing:
del outgoing["internal_feedback"]
api_key = settings.OPEN311["INTERNAL_FEEDBACK_API_KEY"]
else:
api_key = settings.OPEN311["API_KEY"]
outgoing["api_key"] = api_key
url = settings.OPEN311["URL_BASE"]
session = requests.Session()
# Modify parameters for request in case of City of Turku
if "smbackend_turku" in settings.INSTALLED_APPS:
outgoing.pop("service_request_type")
outgoing.pop("can_be_published")
outgoing["address_string"] = "null"
outgoing["service_code"] = settings.OPEN311["SERVICE_CODE"]
r = session.post(url, data=outgoing)
if r.status_code != 200:
return HttpResponseBadRequest()
return HttpResponse(r.content, content_type="application/json")
|
City-of-Helsinki/smbackend
|
services/views.py
|
Python
|
agpl-3.0
| 1,229 | 0 |
from uuid import uuid4
from django.test import TestCase
from casexml.apps.case.cleanup import claim_case, get_first_claim
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.util import post_case_blocks
from corehq.apps.case_search.models import CLAIM_CASE_TYPE
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.ota.utils import get_restore_user
from corehq.apps.users.models import CommCareUser
from corehq.form_processor.exceptions import CaseNotFound
from corehq.form_processor.models import CommCareCase
DOMAIN = 'test_domain'
USERNAME = 'lina.stern@ras.ru'
PASSWORD = 'hemato-encephalic'
# https://en.wikipedia.org/wiki/Lina_Stern
def index_to_dict(instance):
keys = ('identifier', 'referenced_type', 'referenced_id', 'relationship')
return {k: str(getattr(instance, k)) for k in keys}
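# Shape of the dict produced above (a sketch mirroring the claim-index
# assertion in CaseClaimTests.assert_claim below):
#   {'identifier': 'host', 'referenced_type': 'person',
#    'referenced_id': '<host case id>', 'relationship': 'extension'}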
class CaseClaimTests(TestCase):
def setUp(self):
super(CaseClaimTests, self).setUp()
self.domain = create_domain(DOMAIN)
self.user = CommCareUser.create(DOMAIN, USERNAME, PASSWORD, None, None)
self.restore_user = get_restore_user(DOMAIN, self.user, None)
self.host_case_id = uuid4().hex
self.host_case_name = 'Dmitri Bashkirov'
self.host_case_type = 'person'
self.create_case()
def tearDown(self):
self.user.delete(self.domain.name, deleted_by=None)
self.domain.delete()
super(CaseClaimTests, self).tearDown()
def create_case(self):
case_block = CaseBlock.deprecated_init(
create=True,
case_id=self.host_case_id,
case_name=self.host_case_name,
case_type=self.host_case_type,
owner_id='in_soviet_russia_the_case_owns_you',
).as_xml()
post_case_blocks([case_block], {'domain': DOMAIN})
def assert_claim(self, claim=None, claim_id=None):
if claim is None:
claim_ids = CommCareCase.objects.get_case_ids_in_domain(DOMAIN, CLAIM_CASE_TYPE)
self.assertEqual(len(claim_ids), 1)
claim = CommCareCase.objects.get_case(claim_ids[0], DOMAIN)
if claim_id:
self.assertEqual(claim.case_id, claim_id)
self.assertEqual(claim.name, self.host_case_name)
self.assertEqual(claim.owner_id, self.user.user_id)
self.assertEqual([index_to_dict(i) for i in claim.indices], [{
'identifier': 'host',
'referenced_type': 'person',
'referenced_id': self.host_case_id,
'relationship': 'extension',
}])
def test_claim_case(self):
"""
claim_case should create an extension case
"""
claim_id = claim_case(DOMAIN, self.restore_user, self.host_case_id,
host_type=self.host_case_type, host_name=self.host_case_name)
self.assert_claim(claim_id=claim_id)
def test_claim_case_id_only(self):
"""
claim_case should look up host case details if only ID is passed
"""
claim_id = claim_case(DOMAIN, self.restore_user, self.host_case_id)
self.assert_claim(claim_id=claim_id)
def test_first_claim_one(self):
"""
get_first_claim should return one claim
"""
claim_id = claim_case(DOMAIN, self.restore_user, self.host_case_id,
host_type=self.host_case_type, host_name=self.host_case_name)
claim = get_first_claim(DOMAIN, self.user.user_id, self.host_case_id)
self.assert_claim(claim, claim_id)
def test_first_claim_none(self):
"""
get_first_claim should return None if not found
"""
claim = get_first_claim(DOMAIN, self.user.user_id, self.host_case_id)
self.assertIsNone(claim)
def test_closed_claim(self):
"""
get_first_claim should return None if claim case is closed
"""
claim_id = claim_case(DOMAIN, self.restore_user, self.host_case_id,
host_type=self.host_case_type, host_name=self.host_case_name)
self._close_case(claim_id)
first_claim = get_first_claim(DOMAIN, self.user.user_id, self.host_case_id)
self.assertIsNone(first_claim)
def test_claim_case_other_domain(self):
malicious_domain = 'malicious_domain'
domain_obj = create_domain(malicious_domain)
self.addCleanup(domain_obj.delete)
claim_id = claim_case(malicious_domain, self.restore_user, self.host_case_id,
host_type=self.host_case_type, host_name=self.host_case_name)
with self.assertRaises(CaseNotFound):
CommCareCase.objects.get_case(claim_id, malicious_domain)
def _close_case(self, case_id):
case_block = CaseBlock.deprecated_init(
create=False,
case_id=case_id,
close=True
).as_xml()
post_case_blocks([case_block], {'domain': DOMAIN})
|
dimagi/commcare-hq
|
corehq/apps/ota/tests/test_claim.py
|
Python
|
bsd-3-clause
| 4,929 | 0.00142 |
# -*- coding: utf-8 -*-
# petgfunctions.py - This python "helper" script holds a lot of functions
# Copyright (c) 2012-2014 Harry van der Wolf. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public Licence as published
# by the Free Software Foundation, either version 2 of the Licence, or
# version 3 of the Licence, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public Licence for more details.
# This file is part of pyexiftoolgui.
# pyexiftoolgui is a pySide script program that reads and writes
# gps tags from/to files. It can use a "reference" image to write the
# gps tags to multiple files that are taken at the same
# location.
# pyexiftoolgui is a graphical frontend for the open source
# command line tool exiftool by Phil Harvey, but it's not
# a complete exiftool gui: not at all.
import os, sys, platform, shlex, subprocess, time, re, string, datetime, math
import PySide
from PySide.QtCore import *
from PySide.QtGui import *
import programinfo
import programstrings
import petgfilehandling
from ui_create_args import Ui_Dialog_create_args
from ui_export_metadata import Ui_Dialog_export_metadata
from ui_remove_metadata import Ui_Dialog_remove_metadata
from ui_modifydatetime import Ui_DateTimeDialog
from ui_syncdatetime import Ui_SyncDateTimeTagsDialog
#------------------------------------------------------------------------
# All kind of functions
###################################################################################################################
# Start of Startup checks
###################################################################################################################
def remove_workspace( self ):
# Remove our temporary workspace
# try:
# fls = os.remove(self.tmpworkdir + "/*")
# except:
# print("No files in " + self.tmpworkdir + " or no folder at all")
# try:
# fldr = os.rmdir(self.tmpworkdir)
# except:
# print("Couldn't remove folder")
print(self.tmpworkdir)
if self.OSplatform == "Windows":
self.tmpworkdir = self.tmpworkdir.replace("/", "\\")
command_line = "rmdir /S /Q " + self.tmpworkdir
else:
command_line = "rm -rf " + self.tmpworkdir
p = os.system(command_line)
#args = shlex.split(command_line)
#print args
#p = subprocess.call(args, shell=True)
if p == 0:
print(("Removed " + self.tmpworkdir + " and it contents."))
else:
print(("Error removing " + self.tmpworkdir + " and it contents."))
def is_executable(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def check_for_program(program):
exists = False
for path in os.environ["PATH"].split(os.pathsep):
#program = program.replace("\"", "")
path_plus_program = os.path.join(path, program)
#print("path_plus_program " + str(path_plus_program))
if is_executable(path_plus_program):
#print "program " + program + " found"
exists = True
return exists
# End of function check_for_program and is_executable (mini sub for check_for_program)
def exiftool_version_level_text(self):
if float(self.exiftoolversion) < 9.07:
self.statusbar.showMessage("I will disable the GPano options as exiftool >=9.07 is required. You have " + str(self.exiftoolversion))
exiftoolleveltext = "Your exiftool version is " + str(self.exiftoolversion) + " . You need >=9.07 to write to images.\n"
exiftoolleveltext += "Exiftool and therefore pyExifToolGUI can read the tags. See the View Data tab."
self.lbl_exiftool_leveltext.setText(exiftoolleveltext)
elif float(self.exiftoolversion) < 9.09:
#else:
exiftoolleveltext = "Your exiftool version is " + str(self.exiftoolversion) + " . Tags marked with * are obligatory. "
exiftoolleveltext += "\"Pose Heading Degrees\" is necessary to make it also function in Google Maps.\n Tags marked with *** are only writable with exiftool >= 9.09"
self.lbl_exiftool_leveltext.setText(exiftoolleveltext)
self.statusbar.showMessage("Your exiftoolversion is " + str(self.exiftoolversion))
else:
exiftoolleveltext = "Your exiftool version is " + str(self.exiftoolversion) + " . Tags marked with * are obligatory. "
exiftoolleveltext += "\"Pose Heading Degrees\" is necessary to make it also function in Google Maps. Tags marked with *** are only writable with exiftool >= 9.09"
self.lbl_exiftool_leveltext.setText(exiftoolleveltext)
self.statusbar.showMessage("Your exiftoolversion is " + str(self.exiftoolversion))
#print "exiftoolversion : " + self.exiftoolversion
def find_on_path(tool):
""" Find the first occurrence of a tool on the path."""
paths = os.environ["PATH"].split(os.pathsep)
for path in paths:
path = os.path.join(path, tool)
if os.path.exists(path):
return path
def tool_check( self ):
# We need this startup check as long as we don't have a package
# that deals with dependencies
if self.alternate_exiftool == True:
self.exiftoolprog = self.exiftooloption.text()
else:
self.exiftoolprog = "exiftool"
if (self.OSplatform in ("Windows", "win32")):
self.exiftoolprog = find_on_path("exiftool.exe")
elif self.OSplatform == "Darwin":
self.exiftoolprog = find_on_path("exiftool")
#else:
# self.exiftoolprog = find_on_path("exiftool")
# Check for exiftool, based on the setting or no setting above
if (self.OSplatform in ("Windows", "win32")):
if ("exiftool.exe" in self.exiftoolprog) or ("Exiftool.exe" in self.exiftoolprog) or not self.exiftoolprog:
#self.exiftool_dir = os.path.join(self.realfile_dir, "exiftool", "exiftool.exe")
#self.exiftoolprog = self.exiftool_dir + "\exiftool.exe"
if not os.path.isfile(self.exiftoolprog):
configure_message = "exiftool is missing or incorrectly configured in Preferences!\n"
configure_message += "This tool is an absolute must have!\nPlease set the correct location or install exiftool first.\n\n"
configure_message += "If your exiftool is named \"exiftool(-k).exe\", rename it to \"exiftool.exe\""
ret = QMessageBox.critical(self, "exiftool is missing or incorrectly configured", configure_message)
result = self.select_exiftool()
#print str(result)
if result == "":
ret = QMessageBox.critical(self, "Canceled exiftool selection", "You canceled the exiftool selection.\nThe program will quit!\nFirst install exiftool or restart this program and select the correct exiftool.\nI will now (try to) open the exiftool website.")
try:
webbrowser.open("http://www.sno.phy.queensu.ca/~phil/exiftool/")
finally:
sys.exit()
else:
self.exiftoolprog = result
#Check exiftool version
args = '"' + self.exiftoolprog + '" -ver'
self.exiftoolversion = subprocess.check_output(args, shell=True)
# now check for the supported languages
args = '"' + self.exiftoolprog + '" -lang'
self.exiftoollanguages = subprocess.check_output(args, shell=True)
else:
if not check_for_program(self.exiftoolprog):
configure_message = "exiftool is missing or incorrectly configured in Preferences!\n"
configure_message += "This tool is an absolute must have!\nPlease set the correct location or install exiftool first."
ret = QMessageBox.critical(self, "exiftool is missing or incorrectly configured", configure_message)
result = self.select_exiftool()
#print str(result)
if result == "":
ret = QMessageBox.critical(self, "Canceled exiftool selection", "You canceled the exiftool selection.\nThe program will quit!\nFirst install exiftool or restart this program and select the correct exiftool.\nI will now (try to) open the exiftool website.")
try:
webbrowser.open("http://www.sno.phy.queensu.ca/~phil/exiftool/")
finally:
sys.exit()
else:
self.exiftoolprog = result
#Check exiftool version
command_line = '"' + self.exiftoolprog + '" -ver'
args = shlex.split(command_line)
self.exiftoolversion = subprocess.check_output(args)
# now check for the supported languages
command_line = '"' + self.exiftoolprog + '" -lang'
args = shlex.split(command_line)
self.exiftoollanguages = subprocess.check_output(args)
# remove last character which is the final ending \n (where \ is only the escape character)
self.exiftoolversion = self.exiftoolversion[:-1]
exiftool_version_level_text(self)
# End of function tool_check
def exitool_languages(self):
# First remove first line
self.exiftoollanguages = self.exiftoollanguages.splitlines(True)[1:]
dropdownlanguages = []
self.longlanguages = []
self.longlanguages.append(" ")
for language in self.exiftoollanguages:
try:
shortlang, longlang = re.split(' - ',language,1)
shortlang = shortlang.strip()
dropdownlanguages.append(shortlang)
longlang = longlang.strip()
self.longlanguages.append(longlang.decode('utf-8'))
#print("shortlang: " + shortlang + "; longlang: " + longlang)
except:
print("last character doesn't work. Only here in case that happens.")
self.comboBox_languages.addItems(dropdownlanguages)
###################################################################################################################
# End of Startup checks
###################################################################################################################
#------------------------------------------------------------------------
# General help messagebox
def help_mbox(self,helptitle, helptext):
self.helpmbox = QMessageBox()
self.helpmbox.setWindowTitle(helptitle)
self.helpmbox.setText(helptext)
ret = self.helpmbox.exec_()
#------------------------------------------------------------------------
# language combobox changed
def comboBox_languageschanged(self):
# if the language in the box gets changed, display the long text.
self.label_longlanguage.setText("Display language for tags and info: " + self.longlanguages[self.comboBox_languages.currentIndex()])
#------------------------------------------------------------------------
# image functions
def images_dialog(self, qApp):
loadedimages = QFileDialog(self)
qApp.processEvents()
loadedimages.setFileMode(QFileDialog.ExistingFiles)
if self.LineEdit_def_startupfolder.text() == "":
if self.OSplatform == "Darwin":
loadedimages.setDirectory(os.path.expanduser('~/Pictures'))
elif self.OSplatform == "Linux":
loadedimages.setDirectory(os.path.expanduser('~/Pictures'))
elif self.OSplatform == "Windows":
loadedimages.setDirectory(os.path.expanduser('~/My Pictures'))
else:
# User has obviously specified a startup folder
loadedimages.setDirectory(self.LineEdit_def_startupfolder.text())
qApp.processEvents()
self.statusbar.showMessage("Loading images")
qApp.processEvents()
# loadedimages.setNameFilter("image files (*.jpg *.tif *.tiff *.png)\nAll Files (*.*)")
loadedimages.setNameFilter("image files (" + programstrings.SUPPORTEDIMAGES + ")\nsupported formats (" + programstrings.SUPPORTEDFORMATS + ")\nAll Files (*.*)")
loadedimages.setViewMode(QFileDialog.Detail)
if loadedimages.exec_():
fileNames = loadedimages.selectedFiles()
qApp.processEvents()
else:
# user canceled
self.statusbar.showMessage("you canceled loading the images.")
fileNames = ""
return (fileNames)
def imagegridsizes(self, numImages):
colswidth = 100
cols = self.MaintableWidget.width()/float(colswidth+8.0)
return cols, colswidth
def loadimages(self ,fileNames, qApp):
print("Loaded images = " + str(fileNames))
print("Loaded %d images " % len(fileNames))
if len(fileNames) < 1:
# user canceled loading images
if self.DebugMsg:
print("user canceled loading images")
else:
cols, colwidth = imagegridsizes(self, len(fileNames))
print(imagegridsizes(self, len(fileNames)))
self.fileNames = fileNames
imagestring = ""
rowcounter = 0
total_images = len(fileNames)
self.progressbar.setRange(0, total_images)
self.progressbar.setValue(0)
self.progressbar.show()
qApp.processEvents()
self.MaintableWidget.clearContents()
if self.images_view.currentText() == "by cells":
self.MaintableWidget.setSelectionBehavior(QAbstractItemView.SelectItems)
self.MaintableWidget.setRowCount(math.ceil(len(fileNames)/cols))
self.MaintableWidget.setColumnCount(cols)
cols = int(cols)
else:
self.MaintableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)
self.MaintableWidget.setRowCount(len(fileNames))
self.MaintableWidget.setColumnCount(2)
self.MaintableWidget.setColumnWidth(0,100)
self.MaintableWidget.setColumnWidth(1,225)
for loadedimage in fileNames:
if self.DebugMsg:
print(rowcounter)
print(loadedimage + "\n")
folder,imagefile = os.path.split(loadedimage)
#self.MaintableWidget.insertRow(rowcounter)
if self.images_view.currentText() == "by cells":
pass
else:
qtablefilename = QTableWidgetItem(imagefile)
self.MaintableWidget.setItem(rowcounter, 1, qtablefilename)
if self.pref_thumbnail_preview.isChecked():
# Now create the thumbnail to be displayed
thumbnail = QLabel(self)
thumbnail.setMargin(8)
image = QImage(loadedimage)
thumbnail.setPixmap(QPixmap.fromImage(image))
thumbnail.setScaledContents(True)
thumbnail.setToolTip(imagefile)
# Fill the table
if self.images_view.currentText() == "by cells":
self.MaintableWidget.setColumnWidth(int(rowcounter%cols),colwidth)
self.MaintableWidget.setRowHeight(int(rowcounter/cols),(colwidth*0.75))
self.MaintableWidget.setCellWidget(int(rowcounter/cols), int(rowcounter%cols), thumbnail)
else:
self.MaintableWidget.setRowHeight(rowcounter,75)
self.MaintableWidget.setCellWidget(rowcounter, 0, thumbnail)
else:
# Fill the table when thumbs are disabled
dis_thumb_string = QTableWidgetItem("disabled")
if self.images_view.currentText() == "by cells":
self.MaintableWidget.setItem(int(rowcounter/cols), int(rowcounter%cols), dis_thumb_string)
else:
self.MaintableWidget.setItem(rowcounter, 0, dis_thumb_string)
rowcounter += 1
self.progressbar.setValue(rowcounter)
self.statusbar.showMessage("Creating thumbnail of: " + os.path.basename(loadedimage))
qApp.processEvents()
imagestring += loadedimage + " "
self.image_folder = folder
self.MaintableWidget.setToolTip('image(s) folder: ' + folder)
if self.allDebugMsg:
QMessageBox.about(self, "file names", "images found \n %s" % imagestring)
# After loading the photos we will enable buttons and events
self.activate_buttons_events()
def imageinfo(self, qApp):
self.statusbar.showMessage("")
if self.images_view.currentText() == "by cells":
selected_row = self.MaintableWidget.currentRow()
selected_col = self.MaintableWidget.currentColumn()
selected_image = "\"" + self.fileNames[int((self.MaintableWidget.columnCount()*selected_row)+selected_col)] + "\""
else:
selected_row = self.MaintableWidget.currentRow()
selected_image = "\"" + self.fileNames[selected_row] + "\""
if self.radioButton_all.isChecked():
exiftool_params = ""
arguments = " -a "
header = "all tags"
elif self.radioButton_exif.isChecked():
exiftool_params = "-exif:all"
header = "EXIF tags"
elif self.radioButton_xmp.isChecked():
exiftool_params = "-xmp:all"
header = "XMP tags"
elif self.radioButton_iptc.isChecked():
exiftool_params = "-iptc:all"
header = "IPTC tags"
elif self.radioButton_iccprofile.isChecked():
exiftool_params = "-icc_profile:all"
header = "ICC profile tags"
elif self.radioButton_gps.isChecked():
exiftool_params = "-gps:all -xmp:GPSLatitude -xmp:GPSLongitude -xmp:Location -xmp:Country -xmp:State -xmp:City"
arguments = " -a -gps:all -xmp:GPSLatitude -xmp:GPSLongitude -xmp:Location -xmp:Country -xmp:State -xmp:City"
header = "GPS tags"
elif self.radioButton_gpano.isChecked():
exiftool_params = " -xmp:CroppedAreaImageHeightPixels -xmp:CroppedAreaImageWidthPixels -xmp:CroppedAreaLeftPixels -xmp:CroppedAreaTopPixels -xmp:FullPanoHeightPixels -xmp:FullPanoWidthPixels -xmp:ProjectionType -xmp:UsePanoramaViewer -xmp:PoseHeadingDegrees -xmp:InitialViewHeadingDegrees -xmp:InitialViewPitchDegrees -xmp:InitialViewRollDegrees -xmp:StitchingSoftware -xmp:InitialHorizontalFOVDegrees"
arguments = " -xmp:CroppedAreaImageHeightPixels -xmp:CroppedAreaImageWidthPixels -xmp:CroppedAreaLeftPixels -xmp:CroppedAreaTopPixels -xmp:FullPanoHeightPixels -xmp:FullPanoWidthPixels -xmp:ProjectionType -xmp:UsePanoramaViewer -xmp:PoseHeadingDegrees -xmp:InitialViewHeadingDegrees -xmp:InitialViewPitchDegrees -xmp:InitialViewRollDegrees -xmp:StitchingSoftware -xmp:InitialHorizontalFOVDegrees"
header = "GPano tags"
elif self.radioButton_makernotes.isChecked():
exiftool_params = "-makernotes:all"
header = "makernotes tags"
# Check if we need to display it in a specific language
if (self.comboBox_languages.currentText() == " ") or (self.comboBox_languages.currentText() == ""):
ETlang = ""
else:
ETlang = " -lang " + self.comboBox_languages.currentText() + " "
if self.OSplatform == "Windows":
selected_image = selected_image.replace("/", "\\")
args = "\"" + self.exiftoolprog + "\" -a " + ETlang + exiftool_params + " " + selected_image
p = subprocess.check_output(args, universal_newlines=True, shell=True)
else:
command_line = "\"" + self.exiftoolprog + "\" -a " + ETlang + exiftool_params + " " + selected_image
args = shlex.split(command_line)
p = subprocess.check_output(args, universal_newlines=True)
if len(p) == 0:
p = header + " : No data available\n"
# remove last character which is the final ending \n (where \ is only the escape character)
p = p[:-1]
p_lines = re.split('\n',p)
self.exiftableWidget.clearContents()
self.exiftableWidget.setRowCount(0)
rowcounter = 0
for line in p_lines:
try:
descriptor, description = re.split(':', line,1)
descriptor = descriptor.strip()
descriptor = descriptor.decode('utf-8')
description = description.strip()
description = description.decode('utf-8')
#print "descriptor " + descriptor + " ;description " + description
self.exiftableWidget.insertRow(rowcounter)
self.exiftableWidget.setColumnWidth(0,225)
self.exiftableWidget.setColumnWidth(1,425)
self.exiftableWidget.setItem(rowcounter, 0, QTableWidgetItem(descriptor))
self.exiftableWidget.setItem(rowcounter, 1, QTableWidgetItem(description))
rowcounter += 1
qApp.processEvents()
            except ValueError:
                # the last, empty line has no ':' to split on; skip it
                pass
def copy_defaults(self, qApp, category):
if category == "exif":
self.exif_Artist.setText(self.def_creator.text())
self.exif_Copyright.setText(self.def_copyright.text())
elif category == "xmp":
self.xmp_creator.setText(self.def_creator.text())
self.xmp_rights.setText(self.def_copyright.text())
elif category == "iptc":
self.iptc_creator.setText(self.def_creator.text())
self.iptc_rights.setText(self.def_copyright.text())
#------------------------------------------------------------------------
# Edit -> Gps tab and actions
def convertLatLong(self, direction):
# only "int" at the latest moment or calculations go wrong
if direction == 'dms2d':
# first latitude
# Note that "South" latitudes and "West" longitudes convert to negative decimal numbers
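            # Worked example (illustrative): 52 deg 22' 30" -> 52 + (22 + 30/60)/60 = 52.375,
            # and with the South/West radio button checked the result becomes -52.375.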
if int(self.calc_lat_sec.text()) in range(0, 60):
latd = float(self.calc_lat_sec.text()) / float(60)
else:
                ret = QMessageBox.critical(self, "seconds error", "seconds must fall in the range 0 to <60")
                return
if int(self.calc_lat_min.text()) in range(0, 60):
latd = (int(self.calc_lat_min.text()) + latd) / float(60)
else:
ret = QMessageBox.critical(self, "minutes error", "minutes must fall in the range 0 to <60")
# check whether lat degrees falls within 0 and 89
if int(self.calc_lat_deg.text()) in range(0, 90):
latd = latd + int(self.calc_lat_deg.text())
else:
ret = QMessageBox.critical(self, "degrees error", "Latitude degrees must fall in the range 0 to 89")
if self.radioButton_calc_gpsS.isChecked(): # South
# this means a negative decimal latitude
latd = -(latd)
self.calc_latitude.setText(str(round(latd,6)))
# now longitude
if int(self.calc_lon_sec.text()) in range(0, 60):
lond = float(self.calc_lon_sec.text()) / float(60)
else:
                ret = QMessageBox.critical(self, "seconds error", "seconds must fall in the range 0 to <60")
                return
if int(self.calc_lon_min.text()) in range(0, 60):
lond = (int(self.calc_lon_min.text()) + lond) / float(60)
else:
ret = QMessageBox.critical(self, "minutes error", "minutes must fall in the range 0 to <60")
# check whether lon degrees falls within 0 and 179
            if int(self.calc_lon_deg.text()) in range(0, 180):
lond = lond + int(self.calc_lon_deg.text())
else:
ret = QMessageBox.critical(self, "degrees error", "Longitude degrees must fall in the range 0 to 179")
if self.radioButton_calc_gpsW.isChecked(): # West
lond = -(lond)
            # Update value in decimal longitude field
self.calc_longitude.setText(str(round(lond,6)))
else: # direction is d2dms
# First latitude
latitude = self.calc_latitude.text()
# First check on "," in string
if "," in latitude:
latitude = latitude.replace(',','.')
self.calc_latitude.setText(latitude)
            # Now check whether we have a "." in our string. If not we have an integer and re is not necessary
if "." in latitude:
latint, latremain = re.split('\.', latitude)
else:
latint = latitude
            if int(latint) in range(-89, 90):
if (int(latint)) < 0:
latint = -(int(latint))
latitude = -(float(latitude))
self.radioButton_calc_gpsS.setChecked(1)
else:
self.radioButton_calc_gpsN.setChecked(1)
deg = str(latint)
self.calc_lat_deg.setText(deg)
min = (float(latitude) - int(deg)) * 60
self.calc_lat_min.setText(str(int(min)))
sec = int(round(((float(min) - int(min)) *60), 0))
self.calc_lat_sec.setText(str(sec))
else:
ret = QMessageBox.critical(self, "degrees error", "Latitude decimal must fall in the range -90 < degr < 90")
# Now longitude
longitude = self.calc_longitude.text()
# First check on "," in string
if "," in longitude:
longitude = longitude.replace(',','.')
self.calc_longitude.setText(longitude)
            # Now check whether we have a "." in our string. If not we have an integer and re is not necessary
if "." in longitude:
lonint, lonremain = re.split('\.',(self.calc_longitude.text()))
else:
lonint = longitude
            if int(lonint) in range(-179, 180):
if (int(lonint)) < 0:
lonint = -(int(lonint))
longitude = -(float(longitude))
self.radioButton_calc_gpsW.setChecked(1)
else:
self.radioButton_calc_gpsE.setChecked(1)
#longitude = float(lonint) + (float(lonremain)/(10**multiplier))
deg = str(lonint)
self.calc_lon_deg.setText(deg)
min = (float(longitude) - int(deg)) * 60
self.calc_lon_min.setText(str(int(min)))
sec = int(round(((float(min) - int(min)) *60), 0))
self.calc_lon_sec.setText(str(sec))
else:
ret = QMessageBox.critical(self, "degrees error", "Longitude decimal must fall in the range -180 < degr < 180")
def clear_gps_fields(self):
self.calc_lat_deg.setText("")
self.calc_lat_min.setText("")
self.calc_lat_sec.setText("")
self.calc_latitude.setText("")
self.radioButton_calc_gpsN.setChecked(1)
self.calc_lon_deg.setText("")
self.calc_lon_min.setText("")
self.calc_lon_sec.setText("")
self.calc_longitude.setText("")
self.gps_lat_decimal.setText("")
self.gps_lon_decimal.setText("")
self.radioButton_calc_gpsE.setChecked(1)
self.gps_altitude.setText("")
self.chk_AboveSeaLevel.setChecked(1)
self.gps_lat_deg.setText("")
self.gps_lat_min.setText("")
self.gps_lat_sec.setText("")
self.gps_lon_deg.setText("")
self.gps_lon_min.setText("")
self.gps_lon_sec.setText("")
self.radioButton_gpsN.setChecked(1)
self.radioButton_gpsE.setChecked(1)
self.xmp_location.setText("")
self.xmp_country.setText("")
self.xmp_state.setText("")
self.xmp_city.setText("")
self.chk_xmp_location.setChecked(1)
self.chk_xmp_country.setChecked(1)
self.chk_xmp_state.setChecked(1)
self.chk_xmp_city.setChecked(1)
self.gps_timestamp.setText("")
self.gps_datestamp.setText("")
self.gps_versionid.setText("")
self.gps_mapdatum.setText("")
self.chk_gps_timestamp.setChecked(1)
self.chk_gps_datestamp.setChecked(1)
def copy_calc_to_gpsinput(self):
self.gps_lat_decimal.setText(self.calc_latitude.text())
self.gps_lon_decimal.setText(self.calc_longitude.text())
self.gps_lat_deg.setText(self.calc_lat_deg.text())
self.gps_lat_min.setText(self.calc_lat_min.text())
self.gps_lat_sec.setText(self.calc_lat_sec.text())
self.gps_lon_deg.setText(self.calc_lon_deg.text())
self.gps_lon_min.setText(self.calc_lon_min.text())
self.gps_lon_sec.setText(self.calc_lon_sec.text())
if self.radioButton_calc_gpsN.isChecked():
self.radioButton_gpsN.setChecked(1)
else:
self.radioButton_gpsS.setChecked(1)
if self.radioButton_calc_gpsE.isChecked():
self.radioButton_gpsE.setChecked(1)
else:
self.radioButton_gpsW.setChecked(1)
def d2dms(self, value, sort):
# This is a simplified one-way copy of the convertLatLong function above for the input read-only fields
        # Both could be integrated, which would be more efficient, but this is easier to maintain (and I'm lazy)
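        # Worked example (illustrative): -52.375 -> abs() = 52.375, deg = 52,
        # min = 0.375 * 60 = 22.5 -> 22, sec = round(0.5 * 60) = 30; the dropped sign is
        # carried by the N/S and E/W radio buttons set elsewhere.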
value = abs(float(value))
deg = int(value)
min = (float(value) - int(deg)) * 60
sec = int(round(((float(min) - int(min)) *60), 0))
# only "int" at the latest moment or calculations go wrong
if sort == "lat":
self.gps_lat_deg.setText(str(deg))
self.gps_lat_min.setText(str(int(min)))
self.gps_lat_sec.setText(str(sec))
else:
self.gps_lon_deg.setText(str(deg))
self.gps_lon_min.setText(str(int(min)))
self.gps_lon_sec.setText(str(sec))
def copygpsfromselected(self, qApp):
# First clean input fields
clear_gps_fields(self)
exiftool_params = ' -e -n -a -gps:all -xmp:Location -xmp:Country -xmp:State -xmp:City -xmp:GPSLatitude -xmp:GPSLongitude '
data = True
p = read_image_info(self, exiftool_params)
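    # With -n exiftool returns plain numeric values; an illustrative (made-up) output fragment:
    #   GPS Latitude Ref                : N
    #   GPS Latitude                    : 52.375000
    #   GPS Longitude Ref               : E
    #   GPS Longitude                   : 4.900000
    # exif lat/lon come back unsigned (the sign sits in the Ref tags), xmp lat/lon come back signed.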
if len(p) == 0:
data = False
        message = ("<p>You are trying to copy the gps/location info from your source image, but your source image "
                   "doesn't contain data or doesn't seem to contain data (or you didn't select an image).</p>"
                   "<p>In case your camera has a GPS system, but only uses its internal \"maker\" options "
                   "to store the gps data, I can't retrieve the data as it is stored differently "
                   "for every brand of camera.</p>"
                   "<p>If this is the case for your camera, your only option is to copy & paste the information out "
                   "of the table rows from the \"General\" tab.</p>")
ret = QMessageBox.warning(self, "Error copying gps info from source image", message)
else:
# remove last character which is the final ending \n (where \ is only the escape character)
p = p[:-1]
p_lines = re.split('\n', p)
        rowcounter = 0
        # Initialise the counters/holders once, before the loop; resetting them for
        # every line would mean a second (xmp) lat/lon could never be detected.
        gpslat = 0
        gpslon = 0
        latref = 0
        lonref = 0
        gpslatvalue = None
        gpslonvalue = None
        for line in p_lines:
            # try:
            descriptor, description = re.split(':', line, 1)
            descriptor = descriptor.strip()
            description = description.strip()
if descriptor == "GPS Version ID":
self.gps_versionid.setText(description)
if descriptor == "GPS Latitude Ref":
latref = 1
latrefvalue = description
if description == "N":
self.radioButton_gpsN.setChecked(1)
else:
self.radioButton_gpsS.setChecked(1)
            if descriptor == "GPS Latitude":
                gpslat += 1
                if gpslat == 2:
                    print("we have a xmp latitude")
                # keep the exif value and let a second (xmp) value simply overwrite it
                gpslatvalue = description
                self.gps_lat_decimal.setText(str(round(float(description), 6)))
if descriptor == "GPS Longitude Ref":
lonref = 1
lonrefvalue = description
if description == "E":
self.radioButton_gpsE.setChecked(1)
else:
self.radioButton_gpsW.setChecked(1)
            if descriptor == "GPS Longitude":
                gpslon += 1
                if gpslon == 2:
                    print("we have an xmp longitude")
                # keep the exif value and let a second (xmp) value simply overwrite it
                gpslonvalue = description
                self.gps_lon_decimal.setText(str(round(float(description), 6)))
if descriptor == "GPS Altitude Ref":
if description == "0":
self.chk_AboveSeaLevel.setChecked(1)
else:
self.chk_AboveSeaLevel.setChecked(0)
if descriptor == "GPS Altitude":
self.gps_altitude.setText(str(round(float(description), 1)))
if descriptor == "Location":
self.xmp_location.setText(description)
if descriptor == "Country":
self.xmp_country.setText(description)
if descriptor == "State":
self.xmp_state.setText(description)
if descriptor == "City":
self.xmp_city.setText(description)
if descriptor == "GPS Time Stamp":
self.gps_timestamp.setText(description)
if descriptor == "GPS Date Stamp":
self.gps_datestamp.setText(description)
if descriptor == "GPS Map Datum":
self.gps_mapdatum.setText(description)
# print "rowcounter " + str(rowcounter) + " descriptor " + descriptor + " ;description " + description
rowcounter += 1
# qApp.processEvents()
# except:
# print "always the last line that doesn't work"
        # We bluntly walk through all tags as we don't know whether they are complete.
        # Now we need to check for neg/pos latitudes and longitudes by REF values as
        # we do not know whether we have exif decimal values (always positive)
        # or xmp decimal values which can be negative or positive.
        # That's not so elegant but much simpler than building internal checks.
if latref == 1:
value = self.gps_lat_decimal.text()
if latrefvalue == "N":
self.gps_lat_decimal.setText(str(abs(float(value))))
            else: # S = negative
if value.count('-') == 0: # doesn't contain a - but should contain it.
self.gps_lat_decimal.setText('-' + value)
if lonref == 1:
value = self.gps_lon_decimal.text()
            if lonrefvalue == "E":
self.gps_lon_decimal.setText(str(abs(float(value))))
else: # W = negative
if value.count('-') == 0: # doesn't contain a - but should contain it.
self.gps_lon_decimal.setText('-' + value)
        # Convert the decimal lat/lon to degrees/minutes/seconds, if we actually found values
        if data and gpslatvalue is not None and gpslonvalue is not None:
            d2dms(self, gpslatvalue, "lat")
            d2dms(self, gpslonvalue, "lon")
def savegpsdata(self, qApp):
exiftool_params=""
# Exif and xmp gps data
if self.chk_lat_lon_alt.isChecked():
exiftool_params = ' -exif:GPSLatitude="' + self.gps_lat_decimal.text() + '" '
value = float(self.gps_lat_decimal.text())
if value > 0:
exiftool_params += ' -exif:GPSLatitudeREF="N" '
else:
exiftool_params += ' -exif:GPSLatitudeREF="S" '
exiftool_params += ' -xmp:GPSLatitude="' + self.gps_lat_decimal.text() + '" '
exiftool_params += ' -exif:GPSLongitude="' + self.gps_lon_decimal.text() + '" '
value = float(self.gps_lon_decimal.text())
if value > 0:
exiftool_params += ' -exif:GPSLongitudeREF="E" '
else:
exiftool_params += ' -exif:GPSLongitudeREF="W" '
exiftool_params += ' -xmp:GPSLongitude="' + self.gps_lon_decimal.text() + '" '
exiftool_params += ' -exif:GPSAltitude="' + self.gps_altitude.text() + '" '
exiftool_params += ' -xmp:GPSAltitude="' + self.gps_altitude.text() + '" '
if self.chk_AboveSeaLevel.isChecked():
exiftool_params += ' -exif:GPSAltitudeRef="above" ' # Above sea level
else:
exiftool_params += ' -exif:GPSAltitudeRef="below" ' # Below sea level
# Location data for XMP and IPTC
if self.chk_xmp_location.isChecked():
exiftool_params += '-xmp:Location="' + self.xmp_location.text() + '" '
exiftool_params += '-iptc:Sub-location="' + self.xmp_location.text() + '" '
if self.chk_xmp_country.isChecked():
exiftool_params += '-xmp:Country="' + self.xmp_country.text() + '" '
exiftool_params += '-iptc:Country-PrimaryLocationName="' + self.xmp_country.text() + '" '
if self.chk_xmp_state.isChecked():
exiftool_params += '-xmp:State="' + self.xmp_state.text() + '" '
exiftool_params += '-iptc:Province-State="' + self.xmp_state.text() + '" '
if self.chk_xmp_city.isChecked():
exiftool_params += '-xmp:City="' + self.xmp_city.text() + '" '
exiftool_params += '-iptc:City="' + self.xmp_city.text() + '" '
        # GPS date/time and map datum
        if self.chk_gps_timestamp.isChecked():
            exiftool_params += '-exif:GPSTimeStamp="' + self.gps_timestamp.text() + '" '
        if self.chk_gps_datestamp.isChecked():
            exiftool_params += '-exif:GPSDateStamp="' + self.gps_datestamp.text() + '" '
if self.gps_mapdatum.text() == "":
exiftool_params += '-exif:GPSMapDatum="WGS-84" '
else:
exiftool_params += '-exif:GPSMapDatum="' + self.gps_mapdatum.text() + '" '
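        # The assembled parameter string ends up looking like (illustrative, made-up values):
        #   -exif:GPSLatitude="52.375" -exif:GPSLatitudeREF="N" -xmp:GPSLatitude="52.375" ... -exif:GPSMapDatum="WGS-84"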
print(exiftool_params)
# Now write the data to the photo(s)
if self.chk_gps_backuporiginals.isChecked():
write_image_info(self, exiftool_params, qApp, True)
else:
write_image_info(self, exiftool_params, qApp, False)
#------------------------------------------------------------------------
# Edit -> Exif tab and actions
def clear_exif_fields(self):
self.exif_Make.setText("")
self.exif_Model.setText("")
self.exif_ModifyDate.setText("")
self.exif_DateTimeOriginal.setText("")
self.exif_CreateDate.setText("")
self.exif_Artist.setText("")
self.exif_Copyright.setText("")
self.exif_UserComment.setText("")
self.exif_ImageDescription.clear()
self.chk_exif_Make.setChecked(1)
self.chk_exif_Model.setChecked(1)
self.chk_exif_ModifyDate.setChecked(1)
self.chk_exif_DateTimeOriginal.setChecked(1)
self.chk_exif_CreateDate.setChecked(1)
self.chk_exif_Artist.setChecked(1)
self.chk_exif_Copyright.setChecked(1)
self.chk_exif_UserComment.setChecked(1)
self.chk_exif_ImageDescription.setChecked(1)
def copyexiffromselected(self,qApp):
# First clean input fields
clear_exif_fields(self)
exiftool_params = ' -e -n -exif:Make -exif:Model -exif:ModifyDate -exif:DateTimeOriginal -exif:CreateDate -exif:Artist -exif:Copyright -exif:UserComment -exif:ImageDescription '
p = read_image_info(self, exiftool_params)
if len(p) == 0:
data = False
message = ("<p>You are trying to copy exif info from your source image, but your source image "
"doesn't contain the specified exif data or doesn't seem to contain any exif data (or you didn't select an image).</p>")
ret = QMessageBox.warning(self, "Error copying exif info from source image", message)
else:
# remove last character which is the final ending \n (where \ is only the escape character)
p = p[:-1]
p_lines = re.split('\n',p)
rowcounter = 0
for line in p_lines:
#try:
descriptor, description = re.split(':', line,1)
descriptor = descriptor.strip()
description = description.strip()
if descriptor == "Make":
self.exif_Make.setText(description)
if descriptor == "Camera Model Name":
self.exif_Model.setText(description)
if descriptor == "Modify Date":
self.exif_ModifyDate.setText(description)
if descriptor == "Date/Time Original":
self.exif_DateTimeOriginal.setText(description)
if descriptor == "Create Date":
self.exif_CreateDate.setText(description)
if descriptor == "Artist":
self.exif_Artist.setText(description)
if descriptor == "Copyright":
self.exif_Copyright.setText(description)
if descriptor == "User Comment":
self.exif_UserComment.setText(description)
if descriptor == "Image Description":
self.exif_ImageDescription.insertPlainText(description)
#print "rowcounter " + str(rowcounter) + " descriptor " + descriptor + " ;description " + description
rowcounter += 1
def saveexifdata(self, qApp):
exiftool_params = ""
if self.chk_exif_Make.isChecked():
exiftool_params = ' -exif:Make="' + self.exif_Make.text() + '" '
if self.chk_exif_Model.isChecked():
exiftool_params += '-exif:Model="' + self.exif_Model.text() + '" '
if self.chk_exif_ModifyDate.isChecked():
exiftool_params += '-exif:ModifyDate="' + self.exif_ModifyDate.text() + '" '
if self.chk_exif_DateTimeOriginal.isChecked():
exiftool_params += '-exif:DateTimeOriginal="' + self.exif_DateTimeOriginal.text() + '" '
if self.chk_exif_CreateDate.isChecked():
exiftool_params += '-exif:CreateDate="' + self.exif_CreateDate.text() + '" '
if self.chk_exif_Artist.isChecked():
exiftool_params += '-exif:Artist="' + self.exif_Artist.text() + '" '
if self.chk_exif_Copyright.isChecked():
exiftool_params += '-exif:Copyright="' + self.exif_Copyright.text() + '" '
if self.chk_exif_UserComment.isChecked():
exiftool_params += '-exif:UserComment="' + self.exif_UserComment.text() + '" '
if self.chk_exif_ImageDescription.isChecked():
ImgDescr = self.exif_ImageDescription.toPlainText()
exiftool_params += '-exif:ImageDescription="' + ImgDescr + '" '
if self.chk_exif_backuporiginals.isChecked():
write_image_info(self, exiftool_params, qApp, True)
else:
write_image_info(self, exiftool_params, qApp, False)
#------------------------------------------------------------------------
# Edit -> xmp tab and actions
def clear_xmp_fields(self):
self.xmp_creator.setText("")
self.xmp_rights.setText("")
self.xmp_label.setText("")
self.xmp_subject.setText("")
self.xmp_title.setText("")
self.xmp_rating1.setChecked(1)
self.xmp_description.clear()
self.xmp_person.setText("")
self.chk_xmp_creator.setChecked(1)
self.chk_xmp_rights.setChecked(1)
self.chk_xmp_label.setChecked(1)
self.chk_xmp_subject.setChecked(1)
self.chk_xmp_title.setChecked(1)
self.chk_xmp_rating.setChecked(1)
self.chk_xmp_description.setChecked(1)
self.chk_xmp_person.setChecked(1)
def copyxmpfromselected(self,qApp):
# First clean input fields
clear_xmp_fields(self)
xmptool_params = ' -e -n -xmp:Creator -xmp:Rights -xmp:Label -xmp:Subject -xmp:Title -xmp:Rating -xmp:Description -xmp:Person -xmp:PersonInImage '
p = read_image_info(self, xmptool_params)
if len(p) == 0:
data = False
message = ("<p>You are trying to copy xmp info from your source image, but your source image "
"doesn't contain the specified xmp data or doesn't seem to contain any xmp data (or you didn't select an image).</p>")
ret = QMessageBox.warning(self, "Error copying xmp info from source image", message)
else:
# remove last character which is the final ending \n (where \ is only the escape character)
p = p[:-1]
p_lines = re.split('\n',p)
rowcounter = 0
for line in p_lines:
#try:
descriptor, description = re.split(':', line,1)
descriptor = descriptor.strip()
description = description.strip()
if descriptor == "Creator":
self.xmp_creator.setText(description)
if descriptor == "Rights":
self.xmp_rights.setText(description)
if descriptor == "Label":
self.xmp_label.setText(description)
if descriptor == "Subject":
self.xmp_subject.setText(description)
if descriptor == "Title":
self.xmp_title.setText(description)
if descriptor == "Rating":
if description == "1":
self.xmp_rating1.setChecked(1)
                elif description == "2":
                    self.xmp_rating2.setChecked(1)
                elif description == "3":
                    self.xmp_rating3.setChecked(1)
                elif description == "4":
                    self.xmp_rating4.setChecked(1)
                elif description == "5":
                    self.xmp_rating5.setChecked(1)
if descriptor == "Description":
self.xmp_description.insertPlainText(description)
if descriptor == "Person":
self.xmp_person.setText(description)
if descriptor == "Person In Image":
self.xmp_person.setText(description)
#print "rowcounter " + str(rowcounter) + " descriptor " + descriptor + " ;description " + description
rowcounter += 1
def savexmpdata(self, qApp):
xmptool_params = ""
if self.chk_xmp_creator.isChecked():
xmptool_params = ' -xmp:Creator="' + self.xmp_creator.text() + '" '
if self.chk_xmp_rights.isChecked():
xmptool_params += '-xmp:Rights="' + self.xmp_rights.text() + '" '
if self.chk_xmp_label.isChecked():
xmptool_params += '-xmp:Label="' + self.xmp_label.text() + '" '
if self.chk_xmp_subject.isChecked():
xmptool_params += '-xmp:Subject="' + self.xmp_subject.text() + '" '
if self.chk_xmp_title.isChecked():
xmptool_params += '-xmp:Title="' + self.xmp_title.text() + '" '
if self.chk_xmp_rating.isChecked():
if self.xmp_rating1.isChecked():
rating = "1"
elif self.xmp_rating2.isChecked():
rating = "2"
elif self.xmp_rating3.isChecked():
rating = "3"
elif self.xmp_rating4.isChecked():
rating = "4"
else:
rating = "5"
xmptool_params += '-xmp:Rating="' + rating + '" '
if self.chk_xmp_description.isChecked():
Descr = self.xmp_description.toPlainText()
xmptool_params += '-xmp:Description="' + Descr + '" '
if self.chk_xmp_person.isChecked():
xmptool_params += '-xmp:Person="' + self.xmp_person.text() + '" '
xmptool_params += '-xmp:PersonInImage="' + self.xmp_person.text() + '" '
if self.chk_xmp_backuporiginals.isChecked():
write_image_info(self, xmptool_params, qApp, True)
else:
write_image_info(self, xmptool_params, qApp, False)
#------------------------------------------------------------------------
# Edit -> GPano tab and actions
def clear_gpano_fields(self):
self.xmp_StitchingSoftware.setText("")
self.xmp_CroppedAreaImageHeightPixels.setText("")
self.xmp_CroppedAreaImageWidthPixels.setText("")
self.xmp_CroppedAreaLeftPixels.setText("")
self.xmp_CroppedAreaTopPixels.setText("")
self.xmp_FullPanoHeightPixels.setText("")
self.xmp_FullPanoWidthPixels.setText("")
self.xmp_ProjectionType.setCurrentIndex(0)
self.xmp_UsePanoramaViewer.setChecked(1)
self.xmp_PoseHeadingDegrees.setText("")
self.xmp_InitialViewHeadingDegrees.setText("")
self.xmp_InitialViewPitchDegrees.setText("")
self.xmp_InitialViewRollDegrees.setText("")
self.xmp_InitialHorizontalFOVDegrees.setText("")
self.chk_xmp_StitchingSoftware.setChecked(1)
self.chk_xmp_CroppedAreaImageHeightPixels.setChecked(1)
self.chk_xmp_CroppedAreaImageWidthPixels.setChecked(1)
self.chk_xmp_CroppedAreaLeftPixels.setChecked(1)
self.chk_xmp_CroppedAreaTopPixels.setChecked(1)
self.chk_xmp_FullPanoHeightPixels.setChecked(1)
self.chk_xmp_FullPanoWidthPixels.setChecked(1)
self.chk_xmp_ProjectionType.setChecked(1)
self.chk_xmp_UsePanoramaViewer.setChecked(1)
self.chk_xmp_PoseHeadingDegrees.setChecked(1)
self.chk_xmp_InitialViewHeadingDegrees.setChecked(1)
self.chk_xmp_InitialViewPitchDegrees.setChecked(1)
self.chk_xmp_InitialViewRollDegrees.setChecked(1)
self.chk_xmp_InitialHorizontalFOVDegrees.setChecked(1)
def copygpanofromselected(self,qApp):
# First clean input fields
    clear_gpano_fields(self)
exiftool_params = ' -e -n -xmp:CroppedAreaImageHeightPixels -xmp:CroppedAreaImageWidthPixels -xmp:CroppedAreaLeftPixels -xmp:CroppedAreaTopPixels -xmp:FullPanoHeightPixels -xmp:FullPanoWidthPixels -xmp:ProjectionType -xmp:UsePanoramaViewer -xmp:PoseHeadingDegrees -xmp:InitialViewHeadingDegrees -xmp:InitialViewRollDegrees -xmp:InitialViewPitchDegrees -xmp:StitchingSoftware -xmp:InitialHorizontalFOVDegrees '
p = read_image_info(self, exiftool_params)
if len(p) == 0:
data = False
message = ("<p>You are trying to copy GPano (Google Photosphere) info from your source image, but your source image "
"doesn't contain the specified GPano data or doesn't seem to contain any GPano data (or you didn't select an image).</p>")
ret = QMessageBox.warning(self, "Error copying GPano info from source image", message)
else:
# remove last character which is the final ending \n (where \ is only the escape character)
p = p[:-1]
p_lines = re.split('\n',p)
rowcounter = 0
for line in p_lines:
#try:
descriptor, description = re.split(':', line,1)
descriptor = descriptor.strip()
description = description.strip()
if descriptor == "Cropped Area Image Height Pixels":
self.xmp_CroppedAreaImageHeightPixels.setText(description)
if descriptor == "Cropped Area Image Width Pixels":
self.xmp_CroppedAreaImageWidthPixels.setText(description)
if descriptor == "Cropped Area Left Pixels":
self.xmp_CroppedAreaLeftPixels.setText(description)
if descriptor == "Cropped Area Top Pixels":
self.xmp_CroppedAreaTopPixels.setText(description)
if descriptor == "Full Pano Height Pixels":
self.xmp_FullPanoHeightPixels.setText(description)
if descriptor == "Full Pano Width Pixels":
self.xmp_FullPanoWidthPixels.setText(description)
if descriptor == "Projection Type":
if description == "equirectangular":
self.xmp_ProjectionType.setCurrentIndex(0)
                elif description == "cylindrical":
self.xmp_ProjectionType.setCurrentIndex(1)
elif description == "rectilinear":
self.xmp_ProjectionType.setCurrentIndex(2)
if descriptor == "Use Panorama Viewer":
if description == "True":
self.xmp_UsePanoramaViewer.setChecked(1)
else:
self.xmp_UsePanoramaViewer.setChecked(0)
if descriptor == "Pose Heading Degrees":
self.xmp_PoseHeadingDegrees.setText(description)
if descriptor == "Initial View Heading Degrees":
self.xmp_InitialViewHeadingDegrees.setText(description)
if descriptor == "Initial View Pitch Degrees":
self.xmp_InitialViewPitchDegrees.setText(description)
if descriptor == "Initial View Roll Degrees":
self.xmp_InitialViewRollDegrees.setText(description)
if descriptor == "Stitching Software":
self.xmp_StitchingSoftware.setText(description)
if descriptor == "Initial Horizontal FOV Degrees":
self.xmp_InitialHorizontalFOVDegrees.setText(description)
#print "rowcounter " + str(rowcounter) + " descriptor " + descriptor + " ;description " + description
rowcounter += 1
def savegpanodata(self, qApp):
exiftool_params = ""
if self.chk_xmp_CroppedAreaImageHeightPixels.isChecked():
exiftool_params = ' -xmp:CroppedAreaImageHeightPixels="' + self.xmp_CroppedAreaImageHeightPixels.text() + '" '
if self.chk_xmp_CroppedAreaImageWidthPixels.isChecked():
exiftool_params += '-xmp:CroppedAreaImageWidthPixels="' + self.xmp_CroppedAreaImageWidthPixels.text() + '" '
if self.chk_xmp_CroppedAreaLeftPixels.isChecked():
exiftool_params += '-xmp:CroppedAreaLeftPixels="' + self.xmp_CroppedAreaLeftPixels.text() + '" '
if self.chk_xmp_CroppedAreaTopPixels.isChecked():
exiftool_params += '-xmp:CroppedAreaTopPixels="' + self.xmp_CroppedAreaTopPixels.text() + '" '
if self.chk_xmp_FullPanoHeightPixels.isChecked():
exiftool_params += '-xmp:FullPanoHeightPixels="' + self.xmp_FullPanoHeightPixels.text() + '" '
if self.chk_xmp_FullPanoWidthPixels.isChecked():
exiftool_params += '-xmp:FullPanoWidthPixels="' + self.xmp_FullPanoWidthPixels.text() + '" '
if self.chk_xmp_ProjectionType.isChecked():
#print "projectiontype " + str(self.xmp_ProjectionType.currentIndex())
#print "projectiontype " + str(self.xmp_ProjectionType.currentText())
if self.xmp_ProjectionType.currentIndex() == 0:
exiftool_params += '-xmp:ProjectionType="equirectangular" '
elif self.xmp_ProjectionType.currentIndex() == 1:
exiftool_params += '-xmp:ProjectionType="cylindrical" '
elif self.xmp_ProjectionType.currentIndex() == 2:
                exiftool_params += '-xmp:ProjectionType="rectilinear" '
if self.chk_xmp_UsePanoramaViewer.isChecked():
if self.xmp_UsePanoramaViewer.isChecked():
exiftool_params += '-xmp:UsePanoramaViewer=1 '
else:
exiftool_params += '-xmp:UsePanoramaViewer=0 '
if self.chk_xmp_PoseHeadingDegrees.isChecked():
exiftool_params += '-xmp:PoseHeadingDegrees="' + self.xmp_PoseHeadingDegrees.text() + '" '
if self.chk_xmp_InitialViewHeadingDegrees.isChecked():
exiftool_params += '-xmp:InitialViewHeadingDegrees="' + self.xmp_InitialViewHeadingDegrees.text() + '" '
if self.chk_xmp_InitialViewPitchDegrees.isChecked():
exiftool_params += '-xmp:InitialViewPitchDegrees="' + self.xmp_InitialViewPitchDegrees.text() + '" '
if self.chk_xmp_InitialViewRollDegrees.isChecked():
exiftool_params += '-xmp:InitialViewRollDegrees="' + self.xmp_InitialViewRollDegrees.text() + '" '
if self.chk_xmp_StitchingSoftware.isChecked():
exiftool_params += '-xmp:StitchingSoftware="' + self.xmp_StitchingSoftware.text() + '" '
if self.chk_xmp_InitialHorizontalFOVDegrees.isChecked():
exiftool_params += '-xmp:InitialHorizontalFOVDegrees="' + self.xmp_InitialHorizontalFOVDegrees.text() + '" '
if self.chk_gpano_backuporiginals.isChecked():
write_image_info(self, exiftool_params, qApp, True)
else:
write_image_info(self, exiftool_params, qApp, False)
#------------------------------------------------------------------------
# Edit -> geotagging tab and actions
def geotag_source_folder(self, qApp):
self.statusbar.showMessage("")
select_folder = QFileDialog(self)
select_folder.setFileMode(QFileDialog.Directory)
qApp.processEvents()
if platform.system() == "Darwin":
select_folder.setDirectory(os.path.expanduser('~/Pictures'))
elif platform.system() == "Linux":
select_folder.setDirectory(os.path.expanduser('~/Pictures'))
elif platform.system() == "Windows":
select_folder.setDirectory(os.path.expanduser('~/My Pictures'))
select_folder.setViewMode(QFileDialog.Detail)
qApp.processEvents()
geotag_source_folder = ""
if select_folder.exec_():
geotag_source_folder = select_folder.selectedFiles()[0]
self.geotag_source_folder = geotag_source_folder
self.LineEdit_geotag_source_folder.setText(geotag_source_folder)
#print(str(self.geotag_source_folder))
# button to write can be enabled
self.btn_write_geotaginfo.setEnabled(True)
else:
# user canceled
self.statusbar.showMessage("you canceled selecting a folder for geotagging.")
geotag_source_folder = ""
def geotag_gps_file(self, qApp):
self.statusbar.showMessage("")
select_file = QFileDialog(self,"Open gpx track log file")
select_file.setFileMode(QFileDialog.ExistingFiles)
qApp.processEvents()
if platform.system() == "Darwin":
select_file.setDirectory(os.path.expanduser('~/Pictures'))
elif platform.system() == "Linux":
select_file.setDirectory(os.path.expanduser('~/Pictures'))
elif platform.system() == "Windows":
select_file.setDirectory(os.path.expanduser('~/My Pictures'))
qApp.processEvents()
select_file.setViewMode(QFileDialog.Detail)
#select_file.setNameFilter("gpx track log files (*.gpx *.GPX *.log *.LOG)\nAll files (*.*)")
geotag_gps_file = ""
if select_file.exec_():
print("select file exec")
geotag_gps_file = select_file.selectedFiles()[0]
self.geotag_gps_file = geotag_gps_file
print("file should be selected")
self.LineEdit_geotag_log_file.setText(geotag_gps_file)
#print(str(self.geotag_gps_file))
else:
# user canceled
self.statusbar.showMessage("you canceled selecting the GPS track log file.")
geotag_gps_file = ""
#------------------------------------------------------------------------
# Edit -> Lens tab and actions
def check_self_defined_lenses(self):
if self.chk_predefined_lenses.isChecked():
self.predefined_lenses.setEnabled(True)
self.btn_save_lens.setEnabled(False)
self.btn_update_lens.setEnabled(True)
self.btn_delete_lens.setEnabled(True)
else:
self.predefined_lenses.setEnabled(False)
self.btn_save_lens.setEnabled(True)
self.btn_update_lens.setEnabled(False)
self.btn_delete_lens.setEnabled(False)
def clear_lens_fields(self):
self.lens_make.setText("")
self.lens_model.setText("")
self.lens_serialnumber.setText("")
self.lens_focallength.setText("")
self.lens_focallengthin35mmformat.setText("")
self.lens_maxaperturevalue.setText("")
self.lens_fnumber.setText("")
self.lens_meteringmode.setCurrentIndex(0)
self.chk_lens_make.setChecked(1)
self.chk_lens_model.setChecked(1)
self.chk_lens_serialnumber.setChecked(1)
self.chk_lens_focallength.setChecked(1)
self.chk_lens_focallengthin35mmformat.setChecked(1)
self.chk_lens_maxaperturevalue.setChecked(1)
self.chk_lens_fnumber.setChecked(1)
self.chk_lens_meteringmode.setChecked(1)
def copylensfromselected(self,qApp):
# First clean input fields
clear_lens_fields(self)
lenstool_params = ' -s -n -exif:lensmake -exif:lensmodel -exif:lensserialnumber -exif:focallength -exif:focallengthIn35mmformat -exif:fnumber -exif:maxaperturevalue -exif:meteringmode '
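    # -s makes exiftool print the short tag names (e.g. "LensMake" instead of "Lens Make"),
    # which is why the descriptor comparisons below differ from the other copy* functions.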
p = read_image_info(self, lenstool_params)
print (" lensparameters read " + str(p))
if len(p) == 0:
# data = False
message = ("<p>You are trying to copy lens info from your source image, but your source image "
"doesn't contain the specified lens data or doesn't seem to contain any lens data (or you didn't select an image).</p>")
ret = QMessageBox.warning(self, "Error copying lens info from source image", message)
else:
# remove last character which is the final ending \n (where \ is only the escape character)
p = p[:-1]
p_lines = re.split('\n',p)
rowcounter = 0
for line in p_lines:
#try:
descriptor, description = re.split(':', line,1)
descriptor = descriptor.strip()
description = description.strip()
# gpslat = 0
# gpslon = 0
if descriptor == "LensMake":
self.lens_make.setText(description)
if descriptor == "LensModel":
self.lens_model.setText(description)
if descriptor == "LensSerialNumber":
self.lens_serialnumber.setText(description)
if descriptor == "FocalLength":
self.lens_focallength.setText(description)
if descriptor == "FocalLengthIn35mmFormat":
self.lens_focallengthin35mmformat.setText(description)
if descriptor == "MaxApertureValue":
self.lens_maxaperturevalue.setText(description)
if descriptor == "FNumber":
self.lens_fnumber.setText(description)
if descriptor == "MeteringMode":
self.lens_meteringmode.setCurrentIndex(int(description))
#print "rowcounter " + str(rowcounter) + " descriptor " + descriptor + " ;description " + description
rowcounter += 1
def savelensdata(self, qApp):
# This function saves the lens data into the image
lenstool_params = ""
if self.chk_lens_make.isChecked():
lenstool_params = ' -exif:lensmake="' + self.lens_make.text() + '" -xmp:lensmake="' + self.lens_make.text() + '" '
if self.chk_lens_model.isChecked():
lenstool_params += '-exif:lensmodel="' + self.lens_model.text() + '" -xmp:lensmodel="' + self.lens_model.text() + '" '
if self.chk_lens_serialnumber.isChecked():
lenstool_params += '-exif:lensserialnumber="' + self.lens_serialnumber.text() + '" -xmp:lensserialnumber="' + self.lens_serialnumber.text() + '" '
if self.chk_lens_focallength.isChecked():
lenstool_params += '-exif:focallength="' + self.lens_focallength.text() + '" -xmp:focallength="' + self.lens_focallength.text() + '" '
if self.chk_lens_focallengthin35mmformat.isChecked():
lenstool_params += '-exif:focallengthin35mmformat="' + self.lens_focallengthin35mmformat.text() + '" -xmp:focallengthin35mmformat="' + self.lens_focallengthin35mmformat.text() + '" '
if self.chk_lens_maxaperturevalue.isChecked():
lenstool_params += '-exif:maxaperturevalue="' + self.lens_maxaperturevalue.text() + '" -xmp:maxaperturevalue="' + self.lens_maxaperturevalue.text() + '" '
if self.chk_lens_fnumber.isChecked():
lenstool_params += '-exif:fnumber="' + self.lens_fnumber.text() + '" -xmp:fnumber="' + self.lens_fnumber.text() + '" '
if self.chk_lens_meteringmode.isChecked():
if self.lens_meteringmode.currentIndex() == 0:
meteringmode = "Unknown"
elif self.lens_meteringmode.currentIndex() == 1:
meteringmode = "Average"
elif self.lens_meteringmode.currentIndex() == 2:
meteringmode = "Center-weighted average"
elif self.lens_meteringmode.currentIndex() == 3:
meteringmode = "Spot"
elif self.lens_meteringmode.currentIndex() == 4:
meteringmode = "Multi-spot"
elif self.lens_meteringmode.currentIndex() == 5:
meteringmode = "Multi-segment"
elif self.lens_meteringmode.currentIndex() == 6:
meteringmode = "Partial"
            else:
                # exif value 255 means "Other"; an else also guarantees meteringmode is always defined
                meteringmode = "Other"
# lenstool_params += '-exif:meteringmode=' + str(self.lens_meteringmode.currentIndex()) + ' -xmp:meteringmode=' + str(self.lens_meteringmode.currentIndex()) + ' '
lenstool_params += '-exif:meteringmode="' + meteringmode + '" -xmp:meteringmode="' + meteringmode + '" '
print("lenstool_params " + lenstool_params)
if self.chk_lens_backuporiginals.isChecked():
write_image_info(self, lenstool_params, qApp, True)
else:
write_image_info(self, lenstool_params, qApp, False)
def definedlenschanged(self, qApp):
tempstr = lambda val: '' if val is None else val
clear_lens_fields(self)
for lens in self.lensdbroot:
if lens.attrib["name"] == self.predefined_lenses.currentText():
self.lens_make.setText(str(lens.find('make').text))
self.lens_model.setText(str(lens.find('model').text))
self.lens_serialnumber.setText(str(tempstr(lens.find('serialnumber').text)))
self.lens_focallength.setText(str(tempstr(lens.find('focallength').text)))
self.lens_focallengthin35mmformat.setText(str(tempstr(lens.find('focallengthin35mmformat').text)))
self.lens_fnumber.setText(str(tempstr(lens.find('fnumber').text)))
self.lens_maxaperturevalue.setText(str(tempstr(lens.find('maxaperturevalue').text)))
#print(str(self.lensdb))
def updatelens(self, qApp):
print('update lens data for this lens inside the lens database')
tempstr = lambda val: '' if val is None else val
self.lens_current_index = self.predefined_lenses.currentIndex()
for lens in self.lensdbroot:
if lens.attrib["name"] == self.predefined_lenses.currentText():
for tags in lens.iter('make'):
tags.text = self.lens_make.text()
for tags in lens.iter('model'):
tags.text = self.lens_model.text()
for tags in lens.iter('serialnumber'):
tags.text = self.lens_serialnumber.text()
for tags in lens.iter('focallength'):
tags.text = self.lens_focallength.text()
for tags in lens.iter('focallengthin35mmformat'):
tags.text = self.lens_focallengthin35mmformat.text()
for tags in lens.iter('maxaperturevalue'):
tags.text = self.lens_maxaperturevalue.text()
for tags in lens.iter('fnumber'):
tags.text = self.lens_fnumber.text()
'''if self.lens_meteringmode.currentIndex() == 0:
meteringmode = "Unknown"
elif self.lens_meteringmode.currentIndex() == 1:
meteringmode = "Average"
elif self.lens_meteringmode.currentIndex() == 2:
meteringmode = "Center-weighted average"
elif self.lens_meteringmode.currentIndex() == 3:
meteringmode = "Spot"
elif self.lens_meteringmode.currentIndex() == 4:
meteringmode = "Multi-spot"
elif self.lens_meteringmode.currentIndex() == 5:
meteringmode = "Multi-segment"
elif self.lens_meteringmode.currentIndex() == 6:
meteringmode = "Partial"
elif self.lens_meteringmode.currentIndex() == 255:
meteringmode = "Other"
for tags in lens.iter('meteringmodel'):
tags.text = meteringmode'''
petgfilehandling.write_lensdb_xml(self, qApp)
petgfilehandling.read_defined_lenses(self, qApp)
#------------------------------------------------------------------------
# Edit -> Iptc tab and actions
def clear_iptc_fields(self):
self.iptc_keywords.setText("")
self.chk_iptc_keywords.setChecked(1)
def copyiptcfromselected(self,qApp):
# First clean input fields
clear_iptc_fields(self)
exiftool_params = ' -e -n -iptc:Keywords '
p = read_image_info(self, exiftool_params)
if len(p) == 0:
data = False
message = ("<p>You are trying to copy iptc info from your source image, but your source image "
"doesn't contain the specified iptc data or doesn't seem to contain any iptc data (or you didn't select an image).</p>")
ret = QMessageBox.warning(self, "Error copying iptc info from source image", message)
else:
# remove last character which is the final ending \n (where \ is only the escape character)
p = p[:-1]
p_lines = re.split('\n',p)
rowcounter = 0
for line in p_lines:
#try:
descriptor, description = re.split(':', line,1)
descriptor = descriptor.strip()
description = description.strip()
if descriptor == "Keywords":
self.iptc_keywords.setText(description)
#print "rowcounter " + str(rowcounter) + " descriptor " + descriptor + " ;description " + description
rowcounter += 1
def saveiptcdata(self, qApp):
exiftool_params = ""
if self.chk_iptc_keywords.isChecked():
exiftool_params = ' -iptc:Keywords="' + self.iptc_keywords.text() + '" '
write_image_info(self, exiftool_params, qApp, self.chk_iptc_backuporiginals.isChecked())
#---
def date_to_datetimeoriginal(self, qApp):
exiftool_params = " -FileModifyDate<DateTimeOriginal "
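    # exiftool's "-DSTTAG<SRCTAG" copy syntax is used here: the filesystem modification date
    # of each image is rewritten from its exif DateTimeOriginal value.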
    message = "If you have modified your images in a \"sloppy\" image editor, copied them around or performed some other action(s), the file "
    message += "date/time of your images might have changed to the date you did the action/modification on the image, "
    message += "whereas the real file date (= creation date) of your images is most certainly (much) older.\n"
message += "This function will take the original date/time when the photo was taken from the exif:DateTimeOriginal "
message += "and use that (again) as file date/time.\n\n"
message += "Do you want to continue?"
reply = QMessageBox.question(self, "Set file date/time to DateTimeOriginal?", message, QMessageBox.Yes | QMessageBox.No)
if reply == QMessageBox.Yes:
write_image_info(self, exiftool_params, qApp, False)
#------------------------------------------------------------------------
# Other dialogs and windows and their related functions
def info_window(self):
if self.OSplatform == "Windows":
if os.path.isfile(os.path.join(self.parent_dir, 'COPYING')):
# started from python
license_file = os.path.join(self.parent_dir, 'COPYING')
elif os.path.isfile(os.path.join(self.realfile_dir, 'COPYING')):
# Started from the executable
license_file = os.path.join(self.realfile_dir, 'COPYING')
else:
                QMessageBox.critical(self, "Can't find the license file", "Please check www.gnu.org/license")
                return
elif self.OSplatform == "Darwin":
if os.path.isfile(os.path.join(self.parent_dir, 'COPYING')):
# started from python
license_file = os.path.join(self.parent_dir, 'COPYING')
elif os.path.isfile(os.path.join(self.realfile_dir, "pyexiftoolgui.app","Contents","Resources","COPYING")):
# Started from the executable
license_file = os.path.join(self.realfile_dir,"pyexiftoolgui.app","Contents","Resources",'COPYING')
else:
                QMessageBox.critical(self, "Can't find the license file", "Please check www.gnu.org/license")
                return
else:
license_file = os.path.join(self.parent_dir, 'COPYING')
self.info_window_dialog = QDialog()
self.info_window_dialog.resize(500, 640)
self.info_window_text = QTextEdit(self.info_window_dialog)
self.info_window_text.setGeometry(QRect(3, 11, 491, 591))
self.info_window_text.setObjectName("info_window_text")
self.buttonBox = QDialogButtonBox(self.info_window_dialog)
self.buttonBox.setGeometry(QRect(300, 610, 176, 27))
self.buttonBox.setStandardButtons(QDialogButtonBox.Close)
self.buttonBox.setObjectName("buttonBox")
self.info_window_dialog.setWindowTitle(programinfo.NAME + " " + programinfo.VERSION + " license")
self.info_window_text.setText(open(license_file).read())
QObject.connect(self.buttonBox, SIGNAL("clicked(QAbstractButton*)"), self.info_window_dialog.close)
QMetaObject.connectSlotsByName(self.info_window_dialog)
self.info_window_dialog.exec_()
#---
class dialog_synchronizedatetime(QDialog, Ui_SyncDateTimeTagsDialog):
# This loads the py file created by pyside-uic from the ui.
# the Quiloader segfaults on windows after ending the function
def __init__(self, parent=None):
super(dialog_synchronizedatetime, self).__init__(parent)
self.setupUi(self)
def synchronizedatetime(self, qApp):
self.synchronizedatetime_dialog = dialog_synchronizedatetime()
#---
def qddt_shift_clicked(self):
if self.modifydatetime_dialog.chk_qddt_shift.isChecked():
self.modifydatetime_dialog.qddt_modifydate.setEnabled(False)
self.modifydatetime_dialog.qddt_datetimeoriginal.setEnabled(False)
self.modifydatetime_dialog.qddt_createdate.setEnabled(False)
else:
self.modifydatetime_dialog.qddt_modifydate.setEnabled(True)
self.modifydatetime_dialog.qddt_datetimeoriginal.setEnabled(True)
self.modifydatetime_dialog.qddt_createdate.setEnabled(True)
def qddt_use_reference_image_data(self):
if self.modifydatetime_dialog.chk_qddt_use_referencedata.isChecked():
exiftool_params = " -exif:ModifyDate -exif:DateTimeOriginal -exif:CreateDate "
if self.OSplatform == "Windows":
self.referenceimage = self.referenceimage.replace("/", "\\")
args = '"' + self.exiftoolprog + '" -a ' + exiftool_params + ' ' + self.referenceimage
p = subprocess.check_output(args, universal_newlines=True, shell=True)
else:
command_line = '"' + self.exiftoolprog + '" -a ' + exiftool_params + ' ' + self.referenceimage
args = shlex.split(command_line)
p = subprocess.check_output(args, universal_newlines=True)
p = p[:-1]
p_lines = re.split('\n',p)
for line in p_lines:
try:
descriptor, description = re.split(':', line,1)
descriptor = descriptor.strip()
description = description.strip()
if descriptor == "Modify Date":
modifydate = description
self.modifydatetime_dialog.qddt_modifydate.setText(modifydate)
if descriptor == "Date/Time Original":
datetimeoriginal = description
self.modifydatetime_dialog.qddt_datetimeoriginal.setText(datetimeoriginal)
if descriptor == "Create Date":
createdate = description
self.modifydatetime_dialog.qddt_createdate.setText(createdate)
except:
print("always the last line that doesn't work")
else:
now = datetime.datetime.now()
strnow = now.strftime("%Y:%m:%d %H:%M:%S")
self.modifydatetime_dialog.qddt_modifydate.setText(strnow)
self.modifydatetime_dialog.qddt_datetimeoriginal.setText(strnow)
self.modifydatetime_dialog.qddt_createdate.setText(strnow)
class dialog_modifydatetime(QDialog, Ui_DateTimeDialog):
# This loads the py file created by pyside-uic from the ui.
# the Quiloader segfaults on windows after ending the function
def __init__(self, parent=None):
super(dialog_modifydatetime, self).__init__(parent)
self.setupUi(self)
def modifydatetime(self, qApp):
self.modifydatetime_dialog = dialog_modifydatetime()
now = datetime.datetime.now()
strnow = now.strftime("%Y:%m:%d %H:%M:%S")
self.modifydatetime_dialog.qddt_modifydate.setText(strnow)
self.modifydatetime_dialog.qddt_datetimeoriginal.setText(strnow)
self.modifydatetime_dialog.qddt_createdate.setText(strnow)
self.modifydatetime_dialog.qddt_shiftdatetime.setText("0000:00:00 00:00:00")
# Set proper event
self.modifydatetime_dialog.chk_qddt_shift.clicked.connect(self.moddialog_shift_clicked)
self.modifydatetime_dialog.chk_qddt_use_referencedata.clicked.connect(self.moddialog_use_reference_image_data)
if self.modifydatetime_dialog.exec_() == QDialog.Accepted:
print("You selected Save")
if self.modifydatetime_dialog.chk_qddt_shift.isChecked():
# we will do a date/time shift
if self.modifydatetime_dialog.qddt_shiftdatetime.text() == "0000:00:00 00:00:00":
QMessageBox.information(self,"No shift value set", "You selected the shift function but you left the value at \"0000:00:00 00:00:00\".\nI can't do anything. ")
# exit function
return
else:
print(self.modifydatetime_dialog.qddt_shiftdatetime.text())
# We will first build the parameter string and then check for forward or backward timeshift and simply use
# a string replace on the already created exiftool_parameters string
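                # e.g. (illustrative) '-exif:DateTimeOriginal-="0000:00:00 01:30:00"' shifts the
                # timestamp 90 minutes back; replacing "-=" with "+=" below shifts it forward instead.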
exiftool_params = ""
if self.modifydatetime_dialog.chk_qddt_datetimeoriginal.isChecked():
exiftool_params += " \"-exif:DateTimeOriginal-=" + self.modifydatetime_dialog.qddt_shiftdatetime.text() + "\" "
if self.modifydatetime_dialog.chk_qddt_updatexmp.isChecked():
exiftool_params += " \"-xmp:DateTimeOriginal-=" + self.modifydatetime_dialog.qddt_shiftdatetime.text() + "\" "
if self.modifydatetime_dialog.chk_qddt_modifydate.isChecked():
exiftool_params += " \"-exif:ModifyDate-=" + self.modifydatetime_dialog.qddt_shiftdatetime.text() + "\" "
if self.modifydatetime_dialog.chk_qddt_updatexmp.isChecked():
exiftool_params += " \"-xmp:ModifyDate-=" + self.modifydatetime_dialog.qddt_shiftdatetime.text() + "\" "
if self.modifydatetime_dialog.chk_qddt_createdate.isChecked():
exiftool_params += " \"-exif:CreateDate-=" + self.modifydatetime_dialog.qddt_shiftdatetime.text() + "\" "
if self.modifydatetime_dialog.chk_qddt_updatexmp.isChecked():
exiftool_params += " \"-xmp:DateTimeDigitized-=" + self.modifydatetime_dialog.qddt_shiftdatetime.text() + "\" "
if self.modifydatetime_dialog.chk_qddt_forward.isChecked():
print("we are going to shift date and time forward")
exiftool_params = exiftool_params.replace("-=", "+=")
write_image_info(self, exiftool_params, qApp, False)
else:
                # Update the selected date time fields, so no date/time shift
                exiftool_params = ""
                if self.modifydatetime_dialog.chk_qddt_modifydate.isChecked():
                    print("-exif:ModifyDate " + self.modifydatetime_dialog.qddt_modifydate.text())
                    exiftool_params += '-exif:ModifyDate="' + self.modifydatetime_dialog.qddt_modifydate.text() + '" '
if self.modifydatetime_dialog.chk_qddt_updatexmp.isChecked():
exiftool_params += '-xmp:ModifyDate="' + self.modifydatetime_dialog.qddt_modifydate.text() + '" '
if self.modifydatetime_dialog.chk_qddt_datetimeoriginal.isChecked():
print(self.modifydatetime_dialog.qddt_datetimeoriginal.text())
exiftool_params += '-exif:DateTimeOriginal="' + self.modifydatetime_dialog.qddt_datetimeoriginal.text() + '" '
if self.modifydatetime_dialog.chk_qddt_updatexmp.isChecked():
exiftool_params += '-xmp:DateTimeOriginal="' + self.modifydatetime_dialog.qddt_datetimeoriginal.text() + '" '
if self.modifydatetime_dialog.chk_qddt_createdate.isChecked():
print(self.modifydatetime_dialog.qddt_createdate.text())
exiftool_params += '-exif:CreateDate="' + self.modifydatetime_dialog.qddt_createdate.text() + '" '
if self.modifydatetime_dialog.chk_qddt_updatexmp.isChecked():
exiftool_params += '-xmp:DateTimeDigitized="' + self.modifydatetime_dialog.qddt_createdate.text() + '" '
print(exiftool_params)
write_image_info(self, exiftool_params, qApp, False)
else:
print("you cancelled")
self.statusbar.showMessage("you canceled the \"Modification of date/time\" action")
#---
def check_create_args_boxes(self):
if self.create_args_dialog.qdca_chk_args_all_metadata.isChecked():
self.create_args_dialog.qdca_chk_args_exif_data.setChecked(1)
self.create_args_dialog.qdca_chk_args_xmp_data.setChecked(1)
self.create_args_dialog.qdca_chk_args_gps_data.setChecked(1)
self.create_args_dialog.qdca_chk_args_iptc_data.setChecked(1)
self.create_args_dialog.qdca_chk_args_iccprofile_data.setChecked(1)
else:
self.create_args_dialog.qdca_chk_args_exif_data.setChecked(0)
self.create_args_dialog.qdca_chk_args_xmp_data.setChecked(0)
self.create_args_dialog.qdca_chk_args_gps_data.setChecked(0)
self.create_args_dialog.qdca_chk_args_iptc_data.setChecked(0)
self.create_args_dialog.qdca_chk_args_iccprofile_data.setChecked(0)
class dialog_create_args(QDialog, Ui_Dialog_create_args):
# This loads the py file created by pyside-uic from the ui.
# the Quiloader segfaults on windows after ending the function
def __init__(self, parent=None):
super(dialog_create_args, self).__init__(parent)
self.setupUi(self)
print("create arguments file(s) from selected image(s)")
def create_args(self, qApp):
self.create_args_dialog = dialog_create_args()
# Set proper event
self.create_args_dialog.qdca_chk_args_all_metadata.clicked.connect(self.check_create_args_boxes)
if self.create_args_dialog.exec_() == QDialog.Accepted:
message = "You selected:\n\n"
empty_selection = 0
if self.create_args_dialog.qdca_chk_args_all_metadata.isChecked():
print("Add all metadata to args file(s)")
message += "- Add all metadata\n"
et_param = " -a -all "
else:
empty_selection = 1
et_param = ""
if self.create_args_dialog.qdca_chk_args_exif_data.isChecked():
print("Add exif data to args file(s)")
message += "- Add exif data\n"
et_param += " -a -exif:all "
empty_selection = 0
if self.create_args_dialog.qdca_chk_args_xmp_data.isChecked():
print("Add xmp data to args file(s)")
message += "- Add xmp data\n"
et_param += " -a -xmp:all "
empty_selection = 0
if self.create_args_dialog.qdca_chk_args_gps_data.isChecked():
print("Add gps data to args file(s)")
message += "- Add gps data\n"
et_param += " -a -gps:all "
empty_selection = 0
if self.create_args_dialog.qdca_chk_args_iptc_data.isChecked():
print("Add iptc data to args file(s)")
message += "- Add iptc data\n"
et_param += " -a -iptc:all "
empty_selection = 0
if self.create_args_dialog.qdca_chk_args_iccprofile_data.isChecked():
print("Add icc profile data to args file(s)")
message += "- Add icc profile data\n"
et_param += " -a -icc_profile:all "
empty_selection = 0
if empty_selection == 1:
                QMessageBox.information(self, "Nothing selected", "You selected nothing. Cancel would have been the correct option.\nNothing will be done.")
else:
message += "\nAre you sure you want to add the above metadata from the selected image(s) to your args file(s)?"
ret = QMessageBox.question(self, "Add metadata from image(s) to args file(s)", message, buttons=QMessageBox.Ok|QMessageBox.Cancel)
if ret == QMessageBox.Ok:
print("User wants to continue")
et_param += " -args --filename --directory -w args "
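                # -args prints the tags in "-TAG=VALUE" argument format, --filename/--directory
                # exclude those two tags, and "-w args" writes one <image>.args file per image.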
print(et_param)
write_image_info(self, et_param, qApp, False)
else:
self.statusbar.showMessage("you canceled the \"Export metadata to args file(s)\" action")
else:
print("you cancelled")
self.statusbar.showMessage("you canceled the \"Export metadata to args file(s)\" action")
#---
def check_export_metadata_boxes(self):
# This one checks whether "export all" is checked
if self.export_metadata_dialog.qdem_chk_export_all_metadata.isChecked():
self.export_metadata_dialog.qdem_chk_export_exif_data.setChecked(1)
self.export_metadata_dialog.qdem_chk_export_xmp_data.setChecked(1)
self.export_metadata_dialog.qdem_chk_export_gps_data.setChecked(1)
self.export_metadata_dialog.qdem_chk_export_iptc_data.setChecked(1)
self.export_metadata_dialog.qdem_chk_export_iccprofile_data.setChecked(1)
else:
self.export_metadata_dialog.qdem_chk_export_exif_data.setChecked(0)
self.export_metadata_dialog.qdem_chk_export_xmp_data.setChecked(0)
self.export_metadata_dialog.qdem_chk_export_gps_data.setChecked(0)
self.export_metadata_dialog.qdem_chk_export_iptc_data.setChecked(0)
self.export_metadata_dialog.qdem_chk_export_iccprofile_data.setChecked(0)
def check_xmpexport_metadata_boxes(self):
# This one checks whether the xmp export file is checked
#print "in the check_xmpexport_metadata_boxes"
if self.export_metadata_dialog.qdem_xmp_radiobutton.isChecked():
self.export_metadata_dialog.qdem_chk_export_all_metadata.setChecked(0)
self.export_metadata_dialog.qdem_chk_export_exif_data.setChecked(0)
self.export_metadata_dialog.qdem_chk_export_xmp_data.setChecked(1)
self.export_metadata_dialog.qdem_chk_export_gps_data.setChecked(0)
self.export_metadata_dialog.qdem_chk_export_iptc_data.setChecked(0)
self.export_metadata_dialog.qdem_chk_export_iccprofile_data.setChecked(0)
self.export_metadata_dialog.qdem_chk_export_all_metadata.setEnabled(False)
self.export_metadata_dialog.qdem_chk_export_exif_data.setEnabled(False)
self.export_metadata_dialog.qdem_chk_export_gps_data.setEnabled(False)
self.export_metadata_dialog.qdem_chk_export_iptc_data.setEnabled(False)
self.export_metadata_dialog.qdem_chk_export_iccprofile_data.setEnabled(False)
else:
self.export_metadata_dialog.qdem_chk_export_all_metadata.setEnabled(True)
self.export_metadata_dialog.qdem_chk_export_exif_data.setEnabled(True)
self.export_metadata_dialog.qdem_chk_export_xmp_data.setEnabled(True)
self.export_metadata_dialog.qdem_chk_export_gps_data.setEnabled(True)
self.export_metadata_dialog.qdem_chk_export_iptc_data.setEnabled(True)
self.export_metadata_dialog.qdem_chk_export_iccprofile_data.setEnabled(True)
class dialog_export_metadata(QDialog, Ui_Dialog_export_metadata):
# This loads the py file created by pyside-uic from the ui.
# the Quiloader segfaults on windows after ending the function
def __init__(self, parent=None):
super(dialog_export_metadata, self).__init__(parent)
self.setupUi(self)
        print("export metadata from selected image(s)")
def export_metadata(self, qApp):
self.export_metadata_dialog = dialog_export_metadata()
# Set proper events
self.export_metadata_dialog.qdem_chk_export_all_metadata.clicked.connect(self.check_export_metadata_boxes)
self.export_metadata_dialog.qdem_txt_radiobutton.clicked.connect(self.check_xmpexport_metadata_boxes)
self.export_metadata_dialog.qdem_tab_radiobutton.clicked.connect(self.check_xmpexport_metadata_boxes)
self.export_metadata_dialog.qdem_xml_radiobutton.clicked.connect(self.check_xmpexport_metadata_boxes)
self.export_metadata_dialog.qdem_html_radiobutton.clicked.connect(self.check_xmpexport_metadata_boxes)
self.export_metadata_dialog.qdem_xmp_radiobutton.clicked.connect(self.check_xmpexport_metadata_boxes)
if self.export_metadata_dialog.exec_() == QDialog.Accepted:
message = "You selected:\n\n"
empty_selection = 0
if self.export_metadata_dialog.qdem_chk_export_all_metadata.isChecked():
print("export all metadata")
message += "- export all metadata\n"
et_param = " -a -all "
else:
empty_selection = 1
et_param = ""
if self.export_metadata_dialog.qdem_chk_export_exif_data.isChecked():
print("export exif data")
message += "- export exif data\n"
et_param += " -a -exif:all "
empty_selection = 0
if self.export_metadata_dialog.qdem_chk_export_xmp_data.isChecked():
print("export xmp data")
message += "- export xmp data\n"
et_param += " -a -xmp:all "
empty_selection = 0
if self.export_metadata_dialog.qdem_chk_export_gps_data.isChecked():
print("export gps data")
message += "- export gps data\n"
et_param += " -a -gps:all "
empty_selection = 0
if self.export_metadata_dialog.qdem_chk_export_iptc_data.isChecked():
print("export iptc data")
message += "- export iptc data\n"
et_param += " -a -iptc:all "
empty_selection = 0
if self.export_metadata_dialog.qdem_chk_export_iccprofile_data.isChecked():
print("export icc profile data")
message += "- export icc profile data\n"
et_param += " -a -icc_profile:all "
empty_selection = 0
if empty_selection == 1:
QMessageBox.information(self,"Nothing selected", "You selected nothing. Cancel would have been the correct option.\nNothing will we done.")
else:
message += "\nAre you sure you want to export the above metadata from the selected image(s)?"
ret = QMessageBox.question(self, "export metadata from image(s)", message, buttons=QMessageBox.Ok|QMessageBox.Cancel)
if ret == QMessageBox.Ok:
print("User wants to continue")
print(et_param)
if self.export_metadata_dialog.qdem_txt_radiobutton.isChecked():
et_param += " -w! txt "
elif self.export_metadata_dialog.qdem_tab_radiobutton.isChecked():
et_param += " -t -w! txt "
elif self.export_metadata_dialog.qdem_xml_radiobutton.isChecked():
et_param += " -X -w! xml "
elif self.export_metadata_dialog.qdem_html_radiobutton.isChecked():
et_param += " -h -w! html "
elif self.export_metadata_dialog.qdem_xmp_radiobutton.isChecked():
et_param = " xmpexport "
elif self.export_metadata_dialog.qdem_csv_radiobutton.isChecked():
et_param += " -csv "
write_image_info(self, et_param, qApp, False)
else:
self.statusbar.showMessage("you canceled the \"Export of metadata\" action")
else:
print("you cancelled")
self.statusbar.showMessage("you canceled the \"Export of metadata\" action")
#---
def check_remove_metadata_boxes(self):
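    # Checking/unchecking "remove all metadata" also checks/unchecks all individual metadata checkboxes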
if self.rem_metadata_dialog.chk_rem_all_metadata.isChecked():
self.rem_metadata_dialog.chk_rem_exif_data.setChecked(1)
self.rem_metadata_dialog.chk_rem_xmp_data.setChecked(1)
self.rem_metadata_dialog.chk_rem_gps_data.setChecked(1)
self.rem_metadata_dialog.chk_rem_iptc_data.setChecked(1)
self.rem_metadata_dialog.chk_rem_iccprofile_data.setChecked(1)
else:
self.rem_metadata_dialog.chk_rem_exif_data.setChecked(0)
self.rem_metadata_dialog.chk_rem_xmp_data.setChecked(0)
self.rem_metadata_dialog.chk_rem_gps_data.setChecked(0)
self.rem_metadata_dialog.chk_rem_iptc_data.setChecked(0)
self.rem_metadata_dialog.chk_rem_iccprofile_data.setChecked(0)
class dialog_remove_metadata(QDialog, Ui_Dialog_remove_metadata):
    # This loads the py file created by pyside-uic from the ui.
    # the QUiLoader segfaults on Windows after ending the function
def __init__(self, parent=None):
super(dialog_remove_metadata, self).__init__(parent)
self.setupUi(self)
def remove_metadata(self, qApp):
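    # Show the "remove metadata" dialog, ask for confirmation and call write_image_info() with the
    # matching "-group:all=" exiftool parameters, optionally keeping backups of the originals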
self.rem_metadata_dialog = dialog_remove_metadata()
# Set proper event
self.rem_metadata_dialog.chk_rem_all_metadata.clicked.connect(self.check_remove_metadata_boxes)
if self.rem_metadata_dialog.exec_() == QDialog.Accepted:
message = "You selected:\n\n"
empty_selection = 0
if self.rem_metadata_dialog.chk_rem_all_metadata.isChecked():
print("Remove all metadata")
message += "- Remove all metadata\n"
et_param = " -all= "
else:
empty_selection = 1
et_param = ""
if self.rem_metadata_dialog.chk_rem_exif_data.isChecked():
print("Remove exif data")
message += "- Remove exif data\n"
et_param += " -exif:all= "
empty_selection = 0
if self.rem_metadata_dialog.chk_rem_xmp_data.isChecked():
print("Remove xmp data")
message += "- Remove xmp data\n"
et_param += " -xmp:all= "
empty_selection = 0
if self.rem_metadata_dialog.chk_rem_gps_data.isChecked():
print("Remove gps data")
message += "- Remove gps data\n"
et_param += " -gps:all= "
empty_selection = 0
if self.rem_metadata_dialog.chk_rem_iptc_data.isChecked():
print("Remove iptc data")
message += "- Remove iptc data\n"
et_param += " -iptc:all= "
empty_selection = 0
if self.rem_metadata_dialog.chk_rem_iccprofile_data.isChecked():
print("Remove icc profile data")
message += "- Remove icc profile data\n"
et_param += " -icc_profile:all= "
empty_selection = 0
if empty_selection == 1:
QMessageBox.information(self,"Nothing selected", "You selected nothing. Cancel would have been the correct option.\nNothing will we done.")
else:
message += "\nAre you sure you want to remove the above metadata from the selected image(s)?"
ret = QMessageBox.question(self, "Remove metadata from image(s)", message, buttons=QMessageBox.Ok|QMessageBox.Cancel)
if ret == QMessageBox.Ok:
print("User wants to continue")
print(et_param)
if self.rem_metadata_dialog.chk_rem_backuporiginals.isChecked():
print("make backup of originals")
write_image_info(self, et_param, qApp, True)
else:
write_image_info(self, et_param, qApp, False)
else:
self.statusbar.showMessage("you canceled the \"Removal of metadata\" action")
else:
print("you cancelled")
self.statusbar.showMessage("you canceled the \"Removal of metadata\" action")
#------------------------------------------------------------------------
# This is the part where the geotag functions will be executed
def write_geotag_info(self,qApp):
# First check if we have something to work on
result = check_geotag_folder_before_run_geotag_photos(self)
if result == "nothing_to_work_with":
# error message already displayed, exit function
return
else:
# work_on gets the geotag folder or the main images screen selection
work_on = result
# Now check whether we have a GPS track log file
if self.LineEdit_geotag_log_file.text() == "":
# user did not specify a GPS track log file
QMessageBox.information(self,"No GPS track log file", "You did not select a GPS track log file\n. Cancelling this action")
return "nothing_to_work_with"
else:
# At this stage we have images and a track log file
run_geotag_photos(self, work_on, qApp)
#---
def check_geotag_folder_before_run_geotag_photos(self):
print("self.LineEdit_geotag_source_folder #" + self.LineEdit_geotag_source_folder.text() + "#")
if self.LineEdit_geotag_source_folder.text() == "":
# user did not select a source folder, now check in the except whether he/she selected images in the main screen
try:
#if len(self.fileNames) == 0:
selected_rows = self.MaintableWidget.selectedIndexes()
if len(selected_rows) == 0:
QMessageBox.information(self,"Nothing to work with","You did not specify a source folder and neither did you load/select any photos in the main screen.")
return "nothing_to_work_with"
else:
# just exit this function with the option "main_screen_selection"
print("main_screen_selection")
return "main_screen_selection"
except:
QMessageBox.information(self,"Nothing to work with","You did not specify a source folder and neither did you load/select any photos in the main screen.")
return "nothing_to_work_with"
else:
        # just exit this function with the option geotag_source_folder (this is not the path)
print("geotag_source_folder")
return "geotag_source_folder"
#---
def run_geotag_photos(self, work_on, qApp):
# Now do the real work
# Check whether user specified a geosync time
if self.LineEdit_geotagging_geosynctime.text() == "":
exiftoolparams = " -P -overwrite_original_in_place -geotag '" + self.LineEdit_geotag_log_file.text() + "'"
xmpparams = " -P -overwrite_original_in_place -xmp:geotag='" + self.LineEdit_geotag_log_file.text() + "'"
else:
# A geosync time has been specified. just make sure to remove extra quotes or double quotes
gstime = self.LineEdit_geotagging_geosynctime.text()
gstime = gstime.replace("'", "")
gstime = gstime.replace('"', '')
exiftoolparams = " -P -overwrite_original_in_place -geotag '" + self.LineEdit_geotag_log_file.text() + "' -geosync=" + gstime + " "
xmpparams = " -P -overwrite_original_in_place -xmp:geotag='" + self.LineEdit_geotag_log_file.text() + "' -geosync=" + gstime + " "
# final check
if work_on == "nothing_to_work_with":
        # This should already have been dealt with earlier, but in case I did something stupid we simply exit this function
return
elif work_on == "main_screen_selection":
# we use the images that were selected from the main screen
print("we use the images that were selected from the main screen")
selected_rows = self.MaintableWidget.selectedIndexes()
#exiftoolparams = "'-FileName<" + self.prefix + "_" + self.suffix + ".%le' " + self.prefixformat + " " + self.suffixformat + "-." + self.combobox_digits.currenttext() + "nc" + self.sourcefolder + "/*"
rowcounter = 0
total_rows = len(selected_rows)
self.progressbar.setRange(0, total_rows)
self.progressbar.setValue(0)
self.progressbar.show()
rows = []
qApp.processEvents()
for selected_row in selected_rows:
selected_row = str(selected_row)
selected_row = selected_row.replace("<PySide.QtCore.QModelIndex(",'')
selected_row, tail = re.split(',0x0',selected_row)
#print str(selected_row)
row, column = re.split(',',selected_row)
if row not in rows:
rows.append(row)
selected_image = "\"" + self.fileNames[int(row)] + "\""
print('exiftool ' + exiftoolparams + ' ' + selected_image)
rowcounter += 1
self.progressbar.setValue(rowcounter)
parameters = ' ' + exiftoolparams + ' ' + selected_image
xmpparameters = ' ' + xmpparams + ' ' + selected_image
self.statusbar.showMessage("Trying to geotag " + os.path.basename(selected_image))
qApp.processEvents()
if self.OSplatform in ("Windows", "win32"):
parameters = parameters.replace("/", "\\")
parameters = parameters.replace("'", "\"")
xmpparameters = xmpparameters.replace("/", "\\")
xmpparameters = xmpparameters.replace("'", "\"")
args = '"' + self.exiftoolprog + '" ' + parameters
xmpargs = '"' + self.exiftoolprog + '" ' + xmpparameters
print(args)
print(xmpargs)
p = subprocess.call(args, shell=True)
p = subprocess.call(xmpargs, shell=True)
else:
#parameters = parameters.replace("'", "\"")
command_line = '"' + self.exiftoolprog + '" ' + exiftoolparams + ' ' + selected_image
xmp_command_line = '"' + self.exiftoolprog + '" ' + xmpparams + ' ' + selected_image
args = shlex.split(command_line)
xmpargs = shlex.split(xmp_command_line)
print("command_line " + command_line)
print("xmp command_line " + xmp_command_line)
#p = subprocess.call(command_line)
p = subprocess.call(args)
p = subprocess.call(xmpargs)
self.statusbar.showMessage("Finished geotagging images where timestamps fit.")
qApp.processEvents()
self.progressbar.hide()
self.statusbar.showMessage("")
elif work_on == "geotag_source_folder":
# work on all images in the source folder and do it in this function self
#print "work on all images in the source folder"
#print self.rename_photos_dialog.LineEdit_rename_source_folder.text()
self.statusbar.showMessage("Trying to geotag all images in: " + self.LineEdit_geotag_source_folder.text())
print("Trying to geotag all images in: " + self.LineEdit_geotag_source_folder.text())
parameters = exiftoolparams + ' "' + self.LineEdit_geotag_source_folder.text() + '"'
xmpparameters = xmpparams + ' "' + self.LineEdit_geotag_source_folder.text() + '"'
if self.OSplatform in ("Windows", "win32"):
parameters = parameters.replace("/", "\\")
parameters = parameters.replace("'", "\"")
xmpparameters = xmpparameters.replace("/", "\\")
xmpparameters = xmpparameters.replace("'", "\"")
args = '"' + self.exiftoolprog + '" ' + parameters
xmpargs = '"' + self.exiftoolprog + '" ' + xmpparameters
print("args " + args)
print("xmpargs " + xmpargs)
p = subprocess.call(args, shell=True)
p = subprocess.call(xmpargs, shell=True)
else:
pathofimages = self.LineEdit_geotag_source_folder.text().replace(" ", "\\ ")
command_line = '"' + self.exiftoolprog + '" ' + exiftoolparams + ' "' + pathofimages + '"'
xmpcommand_line = '"' + self.exiftoolprog + '" ' + xmpparams + ' "' + pathofimages + '"'
print("command_line " + command_line)
print("xmpcommandline " + xmpcommand_line)
p = subprocess.call(command_line, shell=True)
p = subprocess.call(xmpcommand_line, shell=True)
self.statusbar.showMessage("Finished geotagging all images in: " + self.LineEdit_geotag_source_folder.text() + " where timestamps fit.")
#------------------------------------------------------------------------
# This is the part where your own exiftool parameters will be executed
def yourcommands_go(self, qApp):
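    # Run the exiftool parameters typed by the user on every selected image and append each image's output to the output text box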
output_text = ""
exiftoolparams = " " + self.yourcommands_input.text() + " "
mysoftware = programinfo.NAME + " " + programinfo.VERSION
''''if self.OSplatform in ("Windows", "win32"):
exiftoolparams = " -ProcessingSoftware=\"" + mysoftware + "\" " + exiftoolparams
else:
exiftoolparams = " -ProcessingSoftware='" + mysoftware + "' " + exiftoolparams
'''
selected_rows = self.MaintableWidget.selectedIndexes()
if len(selected_rows) == 0:
self.the_no_photos_messagebox()
else:
print('number of rows ' + str(len(selected_rows)))
rowcounter = 0
total_rows = len(selected_rows)
self.progressbar.setRange(0, total_rows)
self.progressbar.setValue(0)
self.progressbar.show()
rows = []
for selected_row in selected_rows:
selected_row = str(selected_row)
selected_row = selected_row.replace("<PySide.QtCore.QModelIndex(",'')
selected_row, tail = re.split(',0x0',selected_row)
#print str(selected_row)
row, column = re.split(',',selected_row)
if row not in rows:
rows.append(row)
selected_image = "\"" + self.fileNames[int(row)] + "\""
print('exiftool ' + exiftoolparams + ' ' + selected_image)
rowcounter += 1
self.progressbar.setValue(rowcounter)
if self.OSplatform in ("Windows", "win32"):
# First write the info
selected_image = selected_image.replace("/", "\\")
args = '"' + self.exiftoolprog + '" ' + exiftoolparams + selected_image
try:
p = subprocess.check_output(args, universal_newlines=True, shell=True)
except:
p = "Your parameter(s) is/are wrong and could not be executed at all by exiftool.\nTherefore you don't get output."
else:
# First write the info
command_line = '"' + self.exiftoolprog + '" ' + exiftoolparams + selected_image
print(command_line)
args = shlex.split(command_line)
try:
p = subprocess.check_output(args, universal_newlines=True)
except:
p = "Your parameter(s) is/ware wrong and could not be executed at all by exiftool.\nTherefore you don't get output."
if p == "":
p = "Your parameters did not return output.\nEither there is no output or you did something wrong."
p = p[:-1]
#p_lines = re.split('\n',p)
self.statusbar.showMessage("Executing your parameter(s) on: " + selected_image)
self.yourcommands_output.insertPlainText("==== " + selected_image + " ====\n")
self.yourcommands_output.insertPlainText(str(p))
self.yourcommands_output.insertPlainText("\n\n\n")
self.progressbar.hide()
self.statusbar.showMessage("Finished executing your parameter(s)")
#------------------------------------------------------------------------
# Real exiftool read/write functions
def read_image_info(self, exiftool_params):
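    # Run exiftool with the given read parameters on the currently selected image and return its text output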
self.statusbar.showMessage("")
if self.images_view.currentText() == "by cells":
selected_image = "\"" + self.fileNames[int((self.MaintableWidget.columnCount()*self.MaintableWidget.currentRow())+self.MaintableWidget.currentColumn())] + "\""
else:
selected_image = "\"" + self.fileNames[self.MaintableWidget.currentRow()] + "\""
if self.OSplatform in ("Windows", "win32"):
selected_image = selected_image.replace("/", "\\")
args = '"' + self.exiftoolprog + '" ' + exiftool_params + selected_image
p = subprocess.check_output(args, universal_newlines=True, shell=True)
else:
command_line = '"' + self.exiftoolprog + '" ' + exiftool_params + selected_image
args = shlex.split(command_line)
p = subprocess.check_output(args, universal_newlines=True)
return p
def write_image_info(self, exiftoolparams, qApp, backup_originals):
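    # Central exiftool write/export routine: the parameter string decides between exporting metadata
    # (txt/tab/xml/html/args/xmp), collecting one csv for all images, copying metadata to xmp or writing
    # tags; exiftool is run once per selected image, the csv export once afterwards for all collected images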
mysoftware = programinfo.NAME + " " + programinfo.VERSION
xmpexportparam = ""
# silly if/elif/else statement. improve later
if exiftoolparams =="":
# nothing to do
self.statusbar.showMessage("no changes")
else:
if " -w! " in exiftoolparams:
# exporting metadata
print("exporting metadata")
#exiftoolparams += " -overwrite_original_in_place "
elif " -csv " in exiftoolparams:
            # Export metadata from selected image(s) to a csv file
            print("Exporting metadata from selected image(s) to csv file")
images_to_csv = exiftoolparams + ' '
elif " -args " in exiftoolparams:
            # Create args file(s) from selected image(s)
            print("Create args file(s) from selected image(s)")
elif " xmpexport " in exiftoolparams:
            # Create xmp file(s) from selected image(s) only for xmp data
            print("Create xmp file(s) from selected image(s) only for xmp data")
            # create extra variable otherwise exiftoolparams overwrites original xmpexport string, bit clumsy but it works
xmpexportparam = exiftoolparams
elif " -FileModifyDate<DateTimeOriginal " in exiftoolparams:
print("Only change file date/time to DateTimeOriginal")
else:
# writing metadata info to photos
if backup_originals == True:
if self.OSplatform in ("Windows", "win32"):
exiftoolparams = " -P -ProcessingSoftware=\"" + mysoftware + "\" " + exiftoolparams
else:
exiftoolparams = " -P -ProcessingSoftware='" + mysoftware + "' " + exiftoolparams
else:
if self.OSplatform in ("Windows", "win32"):
exiftoolparams = " -P -overwrite_original_in_place -ProcessingSoftware=\"" + mysoftware + "\" " + exiftoolparams
else:
exiftoolparams = " -P -overwrite_original_in_place -ProcessingSoftware='" + mysoftware + "' " + exiftoolparams
selected_rows = self.MaintableWidget.selectedIndexes()
print('number of rows ' + str(len(selected_rows)))
rowcounter = 0
total_rows = len(selected_rows)
self.progressbar.setRange(0, total_rows)
self.progressbar.setValue(0)
self.progressbar.show()
rows = []
for selected_row in selected_rows:
#selected_row = str(selected_row)
#selected_row = selected_row.replace("<PySide.QtCore.QModelIndex(",'')
#selected_row, tail = re.split(',0x0',selected_row)
#print str(selected_row)
#row, column = re.split(',',selected_row)
row, column = selected_row.row(), selected_row.column()
if str(str(row)+","+str(column)) not in rows:
rows.append(str(row)+","+str(column))
if self.images_view.currentText() == "by cells":
selected_image = "\"" + self.fileNames[int((self.MaintableWidget.columnCount()*row)+column)] + "\""
else:
selected_image = "\"" + self.fileNames[int(row)] + "\""
print('exiftool ' + exiftoolparams + ' ' + selected_image)
#print 'exiftool "-FileModifyDate<DateTimeOriginal" ' + selected_image
rowcounter += 1
self.progressbar.setValue(rowcounter)
if " -csv " in exiftoolparams:
# First collect images. Do not write yet
# if self.OSplatform in ("Windows", "win32"):
# images_to_csv += " " + selected_image + " "
# else:
images_to_csv += ' ' + selected_image + ' '
#print images_to_csv
else:
# All other actions are performed per image.
if " -w " in exiftoolparams:
self.statusbar.showMessage("Exporting information from: " + os.path.basename(selected_image) + " to chosen export format")
elif " -args " in exiftoolparams:
self.statusbar.showMessage("Create args file from: " + os.path.basename(selected_image))
elif "copymetadatatoxmp" in exiftoolparams:
self.statusbar.showMessage("Create all metadata internally inside " + os.path.basename(selected_image) + " to xmp format")
if self.OSplatform in ("Windows", "win32"):
exiftoolparams = " -TagsFromFile " + selected_image.replace("/", "\\") + " \"-all>xmp:all\" "
else:
exiftoolparams = " -TagsFromFile " + selected_image + " '-all>xmp:all' "
else:
#check whether we do an xmp to xmp file export
if xmpexportparam == "":
# no it's not an xmp to xmp file export, this means all other actions
self.statusbar.showMessage("Writing information to: " + os.path.basename(selected_image))
else:
# less frequent so put the xmp export to xmp here
self.statusbar.showMessage("Create xmp file from: " + os.path.basename(selected_image))
base = os.path.basename(selected_image)
basexmp = os.path.splitext(base)[0] + ".xmp"
#print "basexmp " + basexmp
if os.path.isfile(os.path.join(self.image_folder, basexmp)):
# remove xmp file first as exiftool doesn't overwrite
fls = os.remove(os.path.join(self.image_folder, basexmp))
exiftoolparams = " -o \"" + os.path.join(self.image_folder, basexmp) + "\" -xmp "
qApp.processEvents()
if self.OSplatform in ("Windows", "win32"):
# First write the info
selected_image = selected_image.replace("/", "\\")
args = '"' + self.exiftoolprog + '" ' + exiftoolparams + selected_image
p = subprocess.call(args, shell=True)
else:
# First write the info
command_line = '"' + self.exiftoolprog + '" ' + exiftoolparams + selected_image
print(command_line)
args = shlex.split(command_line)
p = subprocess.call(args)
self.progressbar.hide()
# csv option: After having collected the images
if " -csv " in exiftoolparams:
# Use self.image_folder from loading the images
if self.OSplatform in ("Windows", "win32"):
parameters = " " + images_to_csv + " > \"" + os.path.join(self.image_folder, "output.csv") + "\""
#parameters = " " + images_to_csv + " > output.csv"
parameters = parameters.replace("/", "\\")
args = '"' + self.exiftoolprog + '" ' + parameters
print(args)
p = subprocess.call(args, shell=True)
else:
command_line = '"' + self.exiftoolprog + '" ' + images_to_csv + ' > \'' + os.path.join(self.image_folder, 'output.csv') + '\''
#args = shlex.split(command_line)
print(command_line)
#p = subprocess.call(args,shell=True)
p = subprocess.call(command_line,shell=True)
# end of csv option
if " -w " in exiftoolparams:
self.statusbar.showMessage("Done exporting the metadata for the selected image(s)")
elif " -args " in exiftoolparams:
self.statusbar.showMessage("Done creating the args file(s) for the selected image(s)")
elif " -csv " in exiftoolparams:
self.statusbar.showMessage("Done creating the csv file for the selected image(s)")
else:
self.statusbar.showMessage("Done writing the info to the selected image(s)")
|
hvdwolf/pyExifToolGUI
|
scripts/petgfunctions.py
|
Python
|
gpl-3.0
| 115,680 | 0.00593 |
text = 'this is a sample file\nnew line'
savefile = open('newtext', 'w')
savefile.write(text)
savefile.close()
|
Faraaz54/python_training_problems
|
basic_python/write_file.py
|
Python
|
mit
| 123 | 0.00813 |
# Moviemazer XBMC Addon
# written by Tristan Fischer (sphere)
#
# If you have suggestions or problems: write me.
#
# Mail: sphere@dersphere.de
#
# Special Thanks to the website www.moviemaze.de
# Import Python stuff
import urllib
import urllib2
import re
import os
import sys
import time
from shutil import copyfile
# Import XBMC Stuff
import xbmc
import xbmcplugin
import xbmcgui
import xbmcaddon
# Creating some default variables and objects
Addon = xbmcaddon.Addon('plugin.video.moviemazer')
MAIN_URL = 'http://www.moviemaze.de'
ADDON_ID = Addon.getAddonInfo('id')
CACHE_DIR = 'special://profile/addon_data/%s/cache/' % ADDON_ID
IMAGE_DIR = 'special://home/addons/%s/resources/images/' % ADDON_ID
GetSetting = Addon.getSetting
SetSetting = Addon.setSetting
Language = Addon.getLocalizedString
Handle = int(sys.argv[1])
ProgressDialog = xbmcgui.DialogProgress()
# Functions for getting a list of dicts containing movie headers like ID and title
def get_top_ten_movies():
returnmovies = []
fullurl = '%s/media/trailer/' % MAIN_URL
link = get_cached_url(fullurl, 'mainpage.cache', GetSetting('cache_movies_list'))
matchtopten = re.compile('<tr><td valign="top" align="right"><b>([0-9]+)</b></td><td width=100% style="text-align:left;"><a href="/media/trailer/([0-9]+),(?:[0-9]+?,)?([^",]+?)">([^<]+)</a> <span class="small_grey">\(([^<]+)\)</span></td></tr>').findall(link)
for rank, movieid, urlend, title, trailerkind in matchtopten:
movie = {'movieid': movieid,
'title': title,
'urlend': urlend,
'rank': '%s. ' % rank,
'date': ''}
returnmovies.append(movie)
return returnmovies
def get_recent_movies():
returnmovies = []
fullurl = '%s/media/trailer/' % MAIN_URL
link = get_cached_url(fullurl, 'mainpage.cache', GetSetting('cache_movies_list'))
matchtrecentupdates = re.compile('<td(?: valign="top" style="text-align:left;"><b style="white-space: nowrap;">([^<]*)</b)?></td><td width=100% style="text-align:left;"><a href="/media/trailer/([0-9]+),(?:[0-9]+?,)?([^",]+?)">([^<]+)</a> <span class="small_grey">\(([^<]+)\)</span></td></tr>').findall(link)
for date, movieid, urlend, title, trailerkind in matchtrecentupdates:
if date != '':
lastdate = date
else:
date = lastdate
datearray = date.split(' ')
months_de_short = ['', 'Jan', 'Feb', 'M\xe4r', 'Apr', 'Mai', 'Juni', 'Juli', 'Aug', 'Sep', 'Okt', 'Nov', 'Dez']
try:
date = ' (%s%02d.%s)' % (datearray[0], months_de_short.index(datearray[1]), '2011') # Fixme: dirty hack :(
except:
date = ''
movie = {'movieid': movieid,
'title': title,
'urlend': urlend,
'rank': '',
'date': date}
returnmovies.append(movie)
return returnmovies
def get_current_movies():
returnmovies = []
fullurl = '%s/media/trailer/' % MAIN_URL
link = get_cached_url(fullurl, 'mainpage.cache', GetSetting('cache_movies_list'))
matchtacttrailers = re.compile('<tr><td(?: valign="top"><b>[A-Z0-9]</b)?></td><td style="text-align:left;"><a href="/media/trailer/([0-9]+),(?:[0-9]+?,)?([^",]+?)">([^<]+)</a></td></tr>').findall(link)
for movieid, urlend, title in matchtacttrailers:
movie = {'movieid': movieid,
'title': title,
'urlend': urlend,
'rank': '',
'date': ''}
returnmovies.append(movie)
return returnmovies
# Function to get a dict of detailed movie information like coverURL, plot and genres
def get_movie_infos(movieid, urlend='movie.html'):
returnmovie = {'movieid': movieid,
'title': '',
'otitle': '',
'coverurl': '',
'plot': '',
'genres': '',
'date': ''}
fullurl = '%s/media/trailer/%s,15,%s' % (MAIN_URL,
movieid,
urlend)
cachefile = 'id%s.cache' % movieid
link = get_cached_url(fullurl, cachefile, GetSetting('cache_movie_info'))
titlematch = re.compile('<h1>(.+?)</h1>.*<h2>\((.+?)\)</h2>', re.DOTALL).findall(link)
for title, otitle in titlematch:
returnmovie.update({'title': title, 'otitle': otitle})
covermatch = re.compile('src="([^"]+?)" width="150"').findall(link)
for coverurl in covermatch:
if coverurl != '/filme/grafiken/kein_poster.jpg':
returnmovie.update({'coverurl': MAIN_URL + coverurl})
plotmatch = re.compile('WERDEN! -->(.+?)</span>').findall(link)
for plot in plotmatch:
plot = re.sub('<[^<]*?/?>', '', plot)
returnmovie.update({'plot': plot})
releasedatematch = re.compile('Dt. Start:</b> ([0-9]+.+?)<img').findall(link)
for releasedateugly in releasedatematch:
datearray = releasedateugly.split(' ')
months_de_long = ['', 'Januar', 'Februar', 'M\xe4rz', 'April', 'Mai', 'Juni', 'Juli', 'August', 'September', 'Oktober', 'November', 'Dezember']
date = ' (%s%02d.%s)' % (datearray[0], months_de_long.index(datearray[1]), '2011') # Fixme: dirty hack :(
returnmovie.update({'date': date})
genresmatch = re.compile('<b style="font-weight:bold;">Genre:</b> (.+?)<br />', re.DOTALL).findall(link)
for allgenres in genresmatch:
returnmovie.update({'genres': allgenres})
return returnmovie
# Function to get a list of dicts which contains trailer- URL, resolution, releasedate
def get_movie_trailers(movieid, urlend='movie.html'):
returntrailers = []
fullurl = '%s/media/trailer/%s,15,%s' % (MAIN_URL,
movieid,
urlend)
cachefile = 'id%s.cache' % movieid
link = get_cached_url(fullurl, cachefile, GetSetting('cache_movie_info'))
matchtrailerblock = re.compile('<table border=0 cellpadding=0 cellspacing=0 align=center width=100%><tr><td class="standard">.+?<b style="font-weight:bold;">(.+?)</b><br />\(([0-9:]+) Minuten\)(.+?</td></tr></table><br /></td></tr></table><br />)', re.DOTALL).findall(link)
for trailername, duration, trailerblock in matchtrailerblock:
matchlanguageblock = re.compile('alt="Sprache: (..)">(.+?)>([^<]+)</td></tr></table></td>', re.DOTALL).findall(trailerblock)
for language, languageblock, date in matchlanguageblock:
datearray = date.split(' ')
months_de_short = ['', 'Jan', 'Feb', 'M\xe4rz', 'Apr', 'Mai', 'Juni', 'Juli', 'Aug', 'Sep', 'Okt', 'Nov', 'Dez']
try:
date = datearray[0] + str(months_de_short.index(datearray[1])).zfill(2) + '.2011' # fixme: this could be made better, no idea how :)
except:
date = ''
matchtrailer = re.compile('generateDownloadLink\("([^"]+_([0-9]+)\.(?:mov|mp4)\?down=1)"\)').findall(languageblock)
for trailerurl, resolution in matchtrailer:
trailer = {'trailername': trailername,
'duration': duration,
'language': language,
'resolution': resolution,
'date': date,
'trailerurl': MAIN_URL + trailerurl}
returntrailers.append(trailer)
return returntrailers
# Functions to get the informations for xbmc
def show_categories():
add_dir(Language(30003), 3, os.path.join(IMAGE_DIR, 'database.png')) # Current
add_dir(Language(30001), 1, os.path.join(IMAGE_DIR, 'ranking.png')) # TopTen
add_dir(Language(30002), 2, os.path.join(IMAGE_DIR, 'schedule.png')) # Recent
end_dir()
def show_top_ten_movies():
toptenmovies = get_top_ten_movies()
show_movies(toptenmovies)
end_dir()
def show_recent_movies():
recentmovies = get_recent_movies()
show_movies(recentmovies)
end_dir()
def show_current_movies():
currentmovies = get_current_movies()
show_movies(currentmovies)
end_dir()
# Functions to show the informations in xbmc
def show_movies(movies):
counter = 0
ProgressDialog = xbmcgui.DialogProgress()
ProgressDialog.create(Language(30020), '%s %s' % (str(len(movies)), Language(30021)))
ProgressDialog.update(0)
for movie in movies:
movieinfo = get_movie_infos(movieid=movie['movieid'], urlend=movie['urlend'])
title = '%s%s%s' % (movie['rank'], movieinfo['title'], movie['date'])
add_movie(title=title,
movieid=movieinfo['movieid'],
coverurl=movieinfo['coverurl'],
plot=movieinfo['plot'],
otitle=movieinfo['otitle'],
genres=movieinfo['genres'],
releasedate=movieinfo['date'],
playcount=get_playcount(movie['movieid']))
counter += 1
ProgressDialog.update(100 * counter / len(movies),
'%s %s' % (str(len(movies)), Language(30021)), # x movies have to be cached
'%s: %s' % (Language(30022), movieinfo['title'].decode('utf-8', 'ignore'))) # Loading : y
if ProgressDialog.iscanceled():
break
ProgressDialog.close()
# Functions to add single Folders to the xbmc screen and tell xbmc that all is there
def add_dir(dirname, cat, iconimage):
u = '%s?cat=%s' % (sys.argv[0], str(cat))
liz = xbmcgui.ListItem(dirname,
iconImage='DefaultVideo.png',
thumbnailImage=iconimage)
liz.setInfo(type='Video',
infoLabels={'Title': dirname})
ok = xbmcplugin.addDirectoryItem(handle=Handle,
url=u,
listitem=liz,
isFolder=True)
def add_movie(title, movieid, coverurl='', plot='', otitle='', genres='', releasedate='', playcount=0):
u = '%s?cat=%s&movieid=%s' % (sys.argv[0], str(cat), movieid)
liz = xbmcgui.ListItem(title,
iconImage='DefaultVideo.png',
thumbnailImage=coverurl)
liz.setInfo(type='Video',
infoLabels={'Title': title,
'Tagline': '%s: %s' % (Language(30030), releasedate),
'Plot': plot,
'Studio': otitle, # fixme: there is no label for "original title"
'Genre': genres})
liz.setProperty('releasedate', releasedate)
if int(playcount) > 0:
liz.setInfo(type='Video', infoLabels={'overlay': 7})
contextmenu = [(Language(30231), 'XBMC.RunPlugin(%s&mode=guess)' % u),
(Language(30232), 'XBMC.RunPlugin(%s&mode=ask)' % u),
(Language(30233), 'XBMC.Action(Info)'),
(Language(1045), 'XBMC.RunPlugin(%s&GetSettings=open)' % u)]
liz.addContextMenuItems(contextmenu, True)
xbmcplugin.addDirectoryItem(handle=Handle,
url=u,
listitem=liz,
isFolder=False)
def end_dir():
xbmcplugin.addSortMethod(Handle, xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(Handle, xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addSortMethod(Handle, xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.endOfDirectory(Handle, cacheToDisc=True)
# Functions to choose a trailer - ask user or guess with the preferred GetSettings
def ask_for_trailer(movietrailers):
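    # Let the user pick one of the available trailers from a selection dialog and return its URL together with title, caption and cover art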
movieinfo = get_movie_infos(movieid)
backlabel = '--> %s <--' % Language(30011) # Back, there is no 'cancel' in Dialog.select :(
trailercaptionlist = [backlabel]
trailerurllist = ['']
for trailer in movietrailers:
trailercaption = '%s - %s - %s (%s)' % (trailer['trailername'],
trailer['language'],
trailer['resolution'],
trailer['date'])
trailercaptionlist.append(trailercaption)
trailerurllist.append(trailer['trailerurl'])
Dialog = xbmcgui.Dialog()
if len(trailercaptionlist) > 1:
chosentrailer = Dialog.select(Language(30010), trailercaptionlist)
if chosentrailer != 0:
trailer = {'trailerurl': trailerurllist[chosentrailer],
'title': movieinfo['title'],
'studio': trailercaptionlist[chosentrailer],
'coverurl': movieinfo['coverurl']}
return trailer
else:
Dialog.ok(movieinfo['title'], Language(30012)) # No Trailer found :(
def guess_pref_trailer(movietrailers):
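    # Pick the trailer that best matches the preferred language and resolution settings,
    # widening the resolution search around the preferred value until a match is found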
prefres = int(GetSetting('trailer_xres'))
allres = ['1920', '1280', '1024', '848', '720', '640', '512', '480', '320']
prefmovietrailers = []
diff = 0
if len(filter_dic(movietrailers, 'language', GetSetting('trailer_lang'))) > 0:
movietrailers = filter_dic(movietrailers, 'language', GetSetting('trailer_lang'))
while len(prefmovietrailers) == 0:
searchres = prefres + diff
if not searchres >= len(allres):
prefmovietrailers = filter_dic(movietrailers, 'resolution', allres[searchres])
if len(prefmovietrailers) == 0 and not diff == 0:
searchres = prefres - diff
if searchres >= 0:
prefmovietrailers = filter_dic(movietrailers, 'resolution', allres[searchres])
diff += 1
if diff > len(allres) + 1:
break
prefmovietrailer = prefmovietrailers[len(prefmovietrailers) - 1]
trailercaption = '%s - %s - %s (%s)' % (prefmovietrailer['trailername'],
prefmovietrailer['language'],
prefmovietrailer['resolution'],
prefmovietrailer['date'])
movieinfo = get_movie_infos(movieid)
trailer = {'trailerurl': prefmovietrailer['trailerurl'],
'title': movieinfo['title'],
'studio': trailercaption,
'coverurl': movieinfo['coverurl']}
return trailer
# Function to play a Trailer
def play_trailer(trailerurl, movieid, title='', studio='', coverurl=''):
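    # Depending on the play_mode setting the trailer is either downloaded first (to the download path
    # or the cache folder) or streamed directly; afterwards the playcount of the movie is increased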
liz = xbmcgui.ListItem(label=title,
iconImage='DefaultVideo.png',
thumbnailImage=coverurl)
liz.setInfo(type='Video',
infoLabels={'Title': title, 'Studio': studio})
if GetSetting('play_mode') == '0': # Setting is to download and then play the trailer
ProgressDialog.create(Language(30025), Language(30026) % ('0', '?'), '%s (%s)' % (title, studio))
ProgressDialog.update(0)
trailerfile = re.search('.*/([^/]+)\?down=1', trailerurl).group(1)
trailerfile = re.sub('[^\w\s.-]', '', '%s - %s' % (title, trailerfile))
downloadpath = GetSetting('download_path')
if downloadpath == '':
downloadpath = CACHE_DIR
filepath = downloadpath + trailerfile
if not os.path.isfile(filepath):
filepathtemp = filepath + '.tmp'
urllib.urlretrieve(trailerurl, filepathtemp, update_progress_hook)
copyfile(filepathtemp, filepath)
os.remove(xbmc.translatePath(filepathtemp))
trailerurl = filepath
ProgressDialog.close()
Player = xbmc.Player(xbmc.PLAYER_CORE_AUTO)
Player.play(trailerurl, liz)
set_playcount(movieid)
# Function to update the xbmc Dialog while downloading, inspired by the videomonkey addon :-)
def update_progress_hook(count, blocksize, totalsize):
percent = int(float(count * blocksize * 100) / totalsize)
kilofloat = float(1024)
totalsizemb = "%.2f" % (totalsize / kilofloat / kilofloat)
countmb = "%.2f" % (count * blocksize / kilofloat / kilofloat)
ProgressDialog.update(percent, Language(30026) % (countmb, totalsizemb))
if ProgressDialog.iscanceled():
raise KeyboardInterrupt
# Helper Functions
def get_cached_url(url, filename, timetolive=1):
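    # Fetch the URL with a browser user agent and cache the response in CACHE_DIR;
    # the cached file is reused as long as it is younger than timetolive hours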
requestheader = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.9) Gecko/20100824 Firefox/3.6.9'
cachefilefullpath = CACHE_DIR + filename
timetolive = int(timetolive) * 60 * 60 # timetolive Settings are in hours!
if not os.path.isdir(CACHE_DIR):
os.makedirs(CACHE_DIR)
try:
cachefiledate = os.path.getmtime(cachefilefullpath)
except:
cachefiledate = 0
if (time.time() - (timetolive)) > cachefiledate:
req = urllib2.Request(url)
req.add_header('User-Agent', requestheader)
sock = urllib2.urlopen(req)
link = sock.read()
encoding = sock.headers['Content-type'].split('charset=')[1]
outfile = open(cachefilefullpath, 'w')
outfile.write(link)
outfile.close()
else:
sock = open(cachefilefullpath, 'r')
link = sock.read()
sock.close()
return link
def filter_dic(dic, key, value):
return [d for d in dic if (d.get(key) == value)]
def get_params():
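    # Parse the plugin query string from sys.argv[2] (e.g. "?cat=1&movieid=42") into a dict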
param = []
paramstring = sys.argv[2]
if len(paramstring) >= 2:
params = sys.argv[2]
cleanedparams = params.replace('?', '')
if (params[len(params) - 1] == '/'):
params = params[0:len(params) - 2]
pairsofparams = cleanedparams.split('&')
param = {}
for i in range(len(pairsofparams)):
splitparams = {}
splitparams = pairsofparams[i].split('=')
if (len(splitparams)) == 2:
param[splitparams[0]] = splitparams[1]
return param
# Functions for get/set the user playcounts
def get_playcount(movieid):
playcount = GetSetting('playcount-movieid=%s' % movieid)
if not playcount:
playcount = 0
return playcount
def set_playcount(movieid):
pc = int(get_playcount(movieid))
pc += 1
SetSetting('playcount-movieid=%s' % movieid, str(pc))
# Addon Standard Stuff - here the addon starts
params = get_params()
print 'MovieMazer Addon started with "%s"' % params
try:
movieid = params['movieid']
except:
movieid = ''
try:
cat = int(params['cat'])
except:
cat = None
try:
mode = params['mode']
except:
mode = None
startwith = int(GetSetting('start_with'))
if startwith != 0:
if cat == None:
cat = startwith
add_dir(Language(30311), 0, os.path.join(IMAGE_DIR, 'trailer.png')) # Categories
if movieid != '':
trailer = None
if mode == 'guess':
trailer = guess_pref_trailer(get_movie_trailers(movieid))
elif mode == 'ask':
trailer = ask_for_trailer(get_movie_trailers(movieid))
else:
prefmode = GetSetting('pref_mode')
if prefmode == '0':
trailer = guess_pref_trailer(get_movie_trailers(movieid))
elif prefmode == '1':
trailer = ask_for_trailer(get_movie_trailers(movieid))
if trailer != None:
play_trailer(trailerurl=trailer['trailerurl'],
movieid=movieid,
title=trailer['title'],
studio=trailer['studio'],
coverurl=trailer['coverurl'])
else:
pass # could be that user was asked to chose trailer but he hit "back"
elif cat == 1:
show_top_ten_movies()
elif cat == 2:
show_recent_movies()
elif cat == 3:
show_current_movies()
else:
show_categories()
print 'MovieMazer Addon ended'
|
dersphere/plugin.video.moviemazer
|
default.py
|
Python
|
gpl-2.0
| 19,585 | 0.00337 |
#!/usr/bin/env python
import webapp2
import logging
from google.appengine.ext.webapp.mail_handlers import InboundMailHandler
from google.appengine.ext import ndb
from MailMessage import MailMessage
# the email domain of this app is @pomis-newsletterman.appspotmail.com
class EmailHandlerV1(InboundMailHandler):
def receive(self, mail_message):
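        # The service id is taken from the local part of the recipient address; the raw MIME
        # message is stored as a MailMessage entity under that service's key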
logging.info(mail_message.to_mime_message())
# store message
service_id = mail_message.to.split('@')[0]
if '<' in service_id:
service_id = service_id.split('<')[1]
mime_message = str(mail_message.to_mime_message())
service_key = ndb.Key(MailMessage, service_id)
new_id = ndb.Model.allocate_ids(size = 1, parent = service_key)[0]
mail_message_key = ndb.Key(MailMessage, new_id, parent = service_key)
persistent_mail_message = MailMessage(parent = mail_message_key, mime_message = mime_message)
persistent_mail_message.put()
app = webapp2.WSGIApplication([EmailHandlerV1.mapping()], debug=True)
|
Dudy/newsletterman
|
src/EmailHandlerV1.py
|
Python
|
apache-2.0
| 1,058 | 0.014178 |
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2009 Jonathan Matthew <jonathan@d14n.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import xml.dom.minidom as dom
import urllib.parse
import rb
from gi.repository import RB
# musicbrainz URLs
MUSICBRAINZ_RELEASE_URL = "http://musicbrainz.org/ws/2/release/%s?inc=artists"
MUSICBRAINZ_RELEASE_PREFIX = "http://musicbrainz.org/release/"
MUSICBRAINZ_RELEASE_SUFFIX = ".html"
MUSICBRAINZ_SEARCH_QUERY = "artist:\"%s\" AND release:\"%s\""
MUSICBRAINZ_SEARCH_URL = "http://musicbrainz.org/ws/2/release/?query=%s&limit=1"
# musicbrainz IDs
MUSICBRAINZ_VARIOUS_ARTISTS = "89ad4ac3-39f7-470e-963a-56509c546377"
# Amazon URL bits
AMAZON_IMAGE_URL = "http://images.amazon.com/images/P/%s.01.LZZZZZZZ.jpg"
class MusicBrainzSearch(object):
def get_release_cb (self, data, args):
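        # Parse the musicbrainz release XML: add the release artist to the storage key (unless it is
        # Various Artists) and, if an ASIN is present, store the matching Amazon cover image URL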
(key, store, callback, cbargs) = args
if data is None:
print("musicbrainz release request returned nothing")
callback(*cbargs)
return
try:
parsed = dom.parseString(data)
storekey = RB.ExtDBKey.create_storage('album', key.get_field('album'))
# check that there's an artist that isn't 'various artists'
artist_tags = parsed.getElementsByTagName('artist')
if len(artist_tags) > 0:
artist_id = artist_tags[0].attributes['id'].firstChild.data
if artist_id != MUSICBRAINZ_VARIOUS_ARTISTS:
# add the artist name (as album-artist) to the storage key
nametags = artist_tags[0].getElementsByTagName('name')
if len(nametags) > 0:
artistname = nametags[0].firstChild.data
print("got musicbrainz artist name %s" % artistname)
storekey.add_field('artist', artistname)
# look for an ASIN tag
asin_tags = parsed.getElementsByTagName('asin')
if len(asin_tags) > 0:
asin = asin_tags[0].firstChild.data
print("got ASIN %s" % asin)
image_url = AMAZON_IMAGE_URL % asin
store.store_uri(storekey, RB.ExtDBSourceType.SEARCH, image_url)
else:
print("no ASIN for this release")
callback(*cbargs)
except Exception as e:
print("exception parsing musicbrainz response: %s" % e)
callback(*cbargs)
def try_search_artist_album (self, key, store, callback, *args):
album = key.get_field("album")
artist = key.get_field("artist")
if not album or not artist:
print("artist or album information missing")
callback(*args)
return
query = MUSICBRAINZ_SEARCH_QUERY % (artist.lower(), album.lower())
url = MUSICBRAINZ_SEARCH_URL % (urllib.parse.quote(query, safe=':'),)
loader = rb.Loader()
loader.get_url(url, self.get_release_cb, (key, store, callback, args))
def search(self, key, last_time, store, callback, *args):
key = key.copy() # ugh
album_id = key.get_info("musicbrainz-albumid")
if album_id is None:
print("no musicbrainz release ID for this track")
self.try_search_artist_album(key, store, callback, args)
return
if album_id.startswith(MUSICBRAINZ_RELEASE_PREFIX):
album_id = album_id[len(MUSICBRAINZ_RELEASE_PREFIX):]
if album_id.endswith(MUSICBRAINZ_RELEASE_SUFFIX):
album_id = album_id[:-len(MUSICBRAINZ_RELEASE_SUFFIX)]
print("stripped release ID: %s" % album_id)
url = MUSICBRAINZ_RELEASE_URL % (album_id)
loader = rb.Loader()
loader.get_url(url, self.get_release_cb, (key, store, callback, args))
|
DylanMcCall/rhythmbox-songinfo-context-menu
|
plugins/artsearch/musicbrainz.py
|
Python
|
gpl-2.0
| 4,454 | 0.014594 |
#!/usr/bin/env python
from sciwonc.dataflow.DataStoreClient import DataStoreClient
import ConfigDB_SessionCompute_2
import pprint
# connector and config
client = DataStoreClient("mongodb", ConfigDB_SessionCompute_2)
config = ConfigDB_SessionCompute_2
# according to config
dataList = client.getData() # return an array of docs (like a csv reader)
output = []
ONE_HOUR_IN_SECONDS = 3600
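# Group the edits of each contributor into sessions: an edit extends the current session when it falls
# within one hour of the previous accepted edit, otherwise the finished session is stored and a new one is started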
if(dataList):
for i in dataList:
contributor_username = i[config.COLUMN]
current_user = contributor_username
start_time = None
end_time = None
duration = None
last_start_timestamp = None
count = 1
if contributor_username:
print "\n\n"
print contributor_username.encode('utf-8')
while True:
doc = i['data'].next()
if doc is None:
                    break
print doc["timestamp"]
if start_time is None:
start_time = float(doc["timestamp"])
if end_time is None:
end_time = start_time + ONE_HOUR_IN_SECONDS
else:
if float(doc["timestamp"]) <= end_time:
end_time = float(doc["timestamp"]) + ONE_HOUR_IN_SECONDS
count += 1
else:
new_doc = {}
new_doc["start time"] = start_time
new_doc["end time"] = end_time
new_doc["duration"] = (end_time - start_time)
new_doc["edition_counts"] = count
new_doc["contributor_username"] = contributor_username
output.append(new_doc)
start_time = float(doc["timestamp"])
end_time = start_time + ONE_HOUR_IN_SECONDS
count = 1
if start_time:
new_doc = {}
new_doc["start time"] = start_time
new_doc["end time"] = end_time
new_doc["duration"] = (end_time - start_time)
new_doc["edition_counts"] = count
new_doc["contributor_username"] = contributor_username
output.append(new_doc)
pprint.pprint(output)
clientOutput = DataStoreClient("mongodb", ConfigDB_SessionCompute_2)
clientOutput.saveData(output)
# import datetime
# print(
# datetime.datetime.fromtimestamp(
# int("1176585742")
# ).strftime('%Y-%m-%d %H:%M:%S')
# )
# {
# start time:
# end time:
# duration:
# user:
# }
# import time
# timestamp2 = time.mktime(d.timetuple()) # DO NOT USE IT WITH UTC DATE
# datetime.fromtimestamp(timestamp2)
# datetime.datetime(2011, 1, 1, 0, 0)
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 2/instances/11_2_wikiflow_1sh_1s_annot/sessioncompute_2/SessionCompute_2.py
|
Python
|
gpl-3.0
| 2,784 | 0.001796 |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
try:
import asyncio
except ImportError:
# Trollius >= 0.3 was renamed
import trollius as asyncio
from os import environ
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component calling the different backend procedures.
"""
@asyncio.coroutine
def onJoin(self, details):
procs = [u'com.mathservice.add2',
u'com.mathservice.mul2',
u'com.mathservice.div2']
try:
for proc in procs:
res = yield from self.call(proc, 2, 3)
print("{}: {}".format(proc, res))
except Exception as e:
print("Something went wrong: {}".format(e))
self.leave()
def onDisconnect(self):
asyncio.get_event_loop().stop()
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
u"crossbardemo",
debug=False, # optional; log even more details
)
runner.run(Component)
|
RyanHope/AutobahnPython
|
examples/asyncio/wamp/rpc/decorators/frontend.py
|
Python
|
mit
| 2,365 | 0 |
#!/usr/bin/env python
'''
Plot distribution of each feature,
conditioned on its bfeature type
'''
import argparse
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from common import *
from information import utils
from scipy.stats import itemfreq
nbins = 100
def opts():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('features', type=load_npz,
help='Training data features (npz)')
parser.add_argument('output',
help='Output file with plots (pdf)')
return parser
if __name__ == "__main__":
args = opts().parse_args()
pdf = PdfPages(args.output)
dfs = args.features['ifeatures']
cfs = args.features['ffeatures']
print "Plotting float features"
bfs = args.features['bfeatures']
u = utils.unique_rows(bfs)
indices = [np.all(bfs==ui, axis=-1) for ui in u]
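    # one boolean mask per unique bfeature row; the histograms below are stacked by these groups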
for j, f in enumerate(cfs.T):
print "...ffeature %d" % j
fig = plt.figure()
h = np.zeros(nbins)
not_nan = f[np.logical_not(np.isnan(f))]
f_min = not_nan.min()
f_max = not_nan.max()
x = np.linspace(f_min, f_max, nbins)
dx = (f_max - f_min) / nbins
for idx in indices:
h_new, bins = np.histogram(f[idx], range=(f_min, f_max), bins=nbins)
plt.bar(x, h_new, bottom=h, width=dx)
h += h_new
plt.xlim(f_min, f_max)
plt.xlabel('f')
plt.ylabel('P(f)')
plt.title('FFeature %d. # NaN = %d' % (j, np.sum(np.isnan(f))))
pdf.savefig(fig)
plt.close()
print "Plotting integer features"
for j, x in enumerate(dfs.T):
print "...dfeature %d" % j
freq = itemfreq(x)
fig = plt.figure()
xu = np.sort(np.unique(x))
h = np.zeros_like(xu)
for idx in indices:
f = itemfreq(x[idx])
h_new = np.zeros_like(h)
h_new[f[:,0]] = f[:,1]
plt.bar(xu, h_new, bottom=h)
h += h_new
plt.xlabel('f')
plt.ylabel('P(f)')
plt.title('DFeature %d' % j)
pdf.savefig(fig)
plt.close()
pdf.close()
|
timpalpant/KaggleTSTextClassification
|
scripts/plot_feature_distributions.py
|
Python
|
gpl-3.0
| 2,179 | 0.005048 |
#!/usr/bin/env python3
import os
from pathlib import Path
import numpy as np
from pysisyphus.helpers import geom_from_xyz_file
from pysisyphus.stocastic.align import matched_rmsd
THIS_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
def test_matched_rmsd():
geom1 = geom_from_xyz_file(THIS_DIR / "eins.xyz")
# Calling with the identical geometries should return RMSD of 0.
min_rmsd, (geom1_matched, geom2_matched) = matched_rmsd(geom1, geom1)
np.testing.assert_allclose(min_rmsd, 0.0, atol=1e-10)
np.testing.assert_allclose(geom1_matched.coords, geom2_matched.coords)
geom2 = geom_from_xyz_file(THIS_DIR / "zwei.xyz")
min_rmsd, _ = matched_rmsd(geom1, geom2)
np.testing.assert_allclose(min_rmsd, 0.057049, atol=1e-5)
if __name__ == "__main__":
test_matched_rmsd()
|
eljost/pysisyphus
|
tests_staging/test_matched_rmsd/test_matched_rmsd.py
|
Python
|
gpl-3.0
| 816 | 0 |
# ------------------------------------------------------------------------------
# Name: test_nansat.py
# Purpose: Test the Nansat class
#
# Author: Morten Wergeland Hansen, Asuka Yamakawa, Anton Korosov
#
# Created: 18.06.2014
# Last modified:24.08.2017 14:00
# Copyright: (c) NERSC
# Licence: This file is part of NANSAT. You can redistribute it or modify
# under the terms of GNU General Public License, v.3
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------------------------
from __future__ import unicode_literals, absolute_import
import os
import logging
import unittest
import warnings
import datetime
from mock import patch, PropertyMock, Mock, MagicMock, DEFAULT
import numpy as np
try:
if 'DISPLAY' not in os.environ:
import matplotlib; matplotlib.use('Agg')
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
MATPLOTLIB_IS_INSTALLED = False
else:
MATPLOTLIB_IS_INSTALLED = True
from nansat import Nansat, Domain, NSR
from nansat.utils import gdal
import nansat.nansat
from nansat.exceptions import NansatGDALError, WrongMapperError, NansatReadError
from nansat.tests.nansat_test_base import NansatTestBase
warnings.simplefilter("always", UserWarning)
class NansatTest(NansatTestBase):
def test_open_gcps(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
self.assertEqual(type(n), Nansat)
self.assertEqual(n.vrt.dataset.GetProjection(), '')
self.assertTrue((n.vrt.dataset.GetGCPProjection().startswith('GEOGCS["WGS 84",')))
self.assertEqual(n.vrt.dataset.RasterCount, 3)
self.assertEqual(n.filename, self.test_file_gcps)
self.assertIsInstance(n.logger, logging.Logger)
self.assertEqual(n.name, os.path.split(self.test_file_gcps)[1])
self.assertEqual(n.path, os.path.split(self.test_file_gcps)[0])
def test_that_only_mappers_with_mapper_in_the_module_name_are_imported(self):
mappers = nansat.nansat._import_mappers()
for mapper in mappers:
self.assertTrue('mapper' in mapper)
def test_get_time_coverage_start_end(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n.set_metadata('time_coverage_start', '2016-01-20')
n.set_metadata('time_coverage_end', '2016-01-21')
self.assertEqual(type(n.time_coverage_start), datetime.datetime)
self.assertEqual(type(n.time_coverage_end), datetime.datetime)
def test_from_domain_array(self):
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
n = Nansat.from_domain(d, np.random.randn(500, 500), {'name': 'band1'})
self.assertEqual(type(n), Nansat)
self.assertEqual(type(n[1]), np.ndarray)
self.assertEqual(n.get_metadata('name', 1), 'band1')
self.assertEqual(n[1].shape, (500, 500))
self.assertEqual(n.filename, '')
self.assertIsInstance(n.logger, logging.Logger)
self.assertEqual(n.name, '')
self.assertEqual(n.path, '')
def test_from_domain_nansat(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n2 = Nansat.from_domain(n1, n1[1])
self.assertEqual(type(n2), Nansat)
self.assertEqual(len(n2.bands()), 1)
self.assertEqual(type(n2[1]), np.ndarray)
def test_add_band(self):
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
arr = np.random.randn(500, 500)
n = Nansat.from_domain(d, log_level=40)
n.add_band(arr, {'name': 'band1'})
self.assertEqual(type(n), Nansat)
self.assertEqual(type(n[1]), np.ndarray)
self.assertEqual(n.get_metadata('name', 1), 'band1')
self.assertEqual(n[1].shape, (500, 500))
def test_add_band_twice(self):
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
arr = np.random.randn(500, 500)
n = Nansat.from_domain(d, log_level=40)
n.add_band(arr, {'name': 'band1'})
n.add_band(arr, {'name': 'band2'})
self.assertEqual(type(n), Nansat)
self.assertEqual(type(n[1]), np.ndarray)
self.assertEqual(type(n[2]), np.ndarray)
self.assertEqual(n.get_metadata('name', 1), 'band1')
self.assertEqual(n.get_metadata('name', 2), 'band2')
self.assertEqual(n[1].shape, (500, 500))
self.assertEqual(n[2].shape, (500, 500))
def test_add_bands(self):
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
arr = np.random.randn(500, 500)
n = Nansat.from_domain(d, log_level=40)
n.add_bands([arr, arr],
[{'name': 'band1'}, {'name': 'band2'}])
self.assertIsInstance(n, Nansat)
self.assertEqual(n.vrt.vrt.vrt, None)
self.assertIsInstance(n[1], np.ndarray)
self.assertIsInstance(n[2], np.ndarray)
self.assertEqual(n.get_metadata('name', 1), 'band1')
self.assertEqual(n.get_metadata('name', 2), 'band2')
def test_add_bands_no_parameter(self):
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
arr = np.random.randn(500, 500)
n = Nansat.from_domain(d, log_level=40)
n.add_bands([arr, arr])
self.assertEqual(type(n), Nansat)
self.assertEqual(type(n[1]), np.ndarray)
self.assertEqual(type(n[2]), np.ndarray)
def test_add_subvrts_only_to_one_nansat(self):
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
arr = np.random.randn(500, 500)
n1 = Nansat.from_domain(d, log_level=40)
n2 = Nansat.from_domain(d, log_level=40)
n1.add_band(arr, {'name': 'band1'})
self.assertEqual(type(n1.vrt.band_vrts), dict)
self.assertTrue(len(n1.vrt.band_vrts) > 0)
self.assertEqual(n2.vrt.band_vrts, {})
def test_bands(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
bands = n.bands()
self.assertEqual(type(bands), dict)
self.assertTrue(1 in bands)
self.assertTrue('name' in bands[1])
self.assertEqual(bands[1]['name'], 'L_645')
def test_has_band_if_name_matches(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
hb = n.has_band('L_645')
self.assertTrue(hb)
def test_has_band_if_standard_name_matches(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
hb = n.has_band('surface_upwelling_spectral_radiance_in_air_emerging_from_sea_water')
self.assertTrue(hb)
def test_write_fig_tif(self):
n = Nansat(self.test_file_arctic, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_write_fig_tif.tif')
n.write_figure(tmpfilename)
nn = Nansat(tmpfilename, mapper=self.default_mapper)
# Asserts that the basic georeference (corners in this case) is still
# present after opening the image
self.assertTrue(np.allclose(n.get_corners(), nn.get_corners()))
def test_resize_by_pixelsize(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n.resize(pixelsize=500, resample_alg=1)
self.assertEqual(type(n[1]), np.ndarray)
def test_resize_by_factor(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n.resize(0.5, resample_alg=1)
self.assertEqual(type(n[1]), np.ndarray)
def test_resize_by_width(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n.resize(width=100, resample_alg=1)
self.assertEqual(type(n[1]), np.ndarray)
def test_resize_by_height(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n.resize(height=500, resample_alg=1)
self.assertEqual(type(n[1]), np.ndarray)
def test_resize_resize(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n.resize(0.1)
n.resize(10)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_resize_resize.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(type(n[1]), np.ndarray)
def test_resize_complex_alg_average(self):
n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
with warnings.catch_warnings(record=True) as w:
n.resize(0.5, resample_alg=-1)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, UserWarning))
self.assertIn('The imaginary parts of complex numbers '
'are lost when resampling by averaging ', str(w[-1].message))
def test_resize_complex_alg0(self):
n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
n.resize(0.5, resample_alg=0)
self.assertTrue(np.any(n[1].imag != 0))
def test_resize_complex_alg1(self):
n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
n.resize(0.5, resample_alg=1)
self.assertTrue(np.any(n[1].imag != 0))
def test_resize_complex_alg2(self):
n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
n.resize(0.5, resample_alg=2)
self.assertTrue(np.any(n[1].imag != 0))
def test_resize_complex_alg3(self):
n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
n.resize(0.5, resample_alg=3)
self.assertTrue(np.any(n[1].imag != 0))
def test_resize_complex_alg4(self):
n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
n.resize(0.5, resample_alg=4)
self.assertTrue(np.any(n[1].imag != 0))
def test_get_GDALRasterBand(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
b = n.get_GDALRasterBand(1)
arr = b.ReadAsArray()
self.assertEqual(type(b), gdal.Band)
self.assertEqual(type(arr), np.ndarray)
def test_get_GDALRasterBand_if_band_id_is_given(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
b = n.get_GDALRasterBand(band_id=1)
arr = b.ReadAsArray()
self.assertEqual(type(b), gdal.Band)
self.assertEqual(type(arr), np.ndarray)
def test_list_bands_true(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
lb = n.list_bands(True)
self.assertEqual(lb, None)
def test_list_bands_false(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
lb = n.list_bands(False)
self.assertEqual(type(lb), str)
def test_reproject_domain(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te 27 70 30 72 -ts 500 500")
n.reproject(d)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_reproject_domain.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n.shape(), (500, 500))
self.assertEqual(type(n[1]), np.ndarray)
self.assertTrue(n.has_band('swathmask'))
def test_reproject_domain_if_dst_domain_is_given(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te 27 70 30 72 -ts 500 500")
n.reproject(dst_domain=d)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_reproject_domain.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n.shape(), (500, 500))
self.assertEqual(type(n[1]), np.ndarray)
self.assertTrue(n.has_band('swathmask'))
def test_reproject_domain_if_resample_alg_is_given(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te 27 70 30 72 -ts 500 500")
n.reproject(d, resample_alg=0)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_reproject_domain.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n.shape(), (500, 500))
self.assertEqual(type(n[1]), np.ndarray)
self.assertTrue(n.has_band('swathmask'))
@patch.object(Nansat, 'get_corners',
return_value=(np.array([0, 0, 360, 360]), np.array([90,-90, 90, -90])))
def test_reproject_domain_if_source_and_destination_domain_span_entire_lons(self, mock_Nansat):
n = Nansat(self.test_file_arctic, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te -180 180 60 90 -ts 500 500")
n.reproject(d)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_reproject_domain_span_entire_lons.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n.shape(), (500, 500))
self.assertEqual(type(n[1]), np.ndarray)
self.assertTrue(n.has_band('swathmask'))
def test_reproject_domain_if_tps_is_given(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te 27 70 30 72 -ts 500 500")
n.reproject(d, tps=False)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_domain.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n.shape(), (500, 500))
self.assertEqual(type(n[1]), np.ndarray)
self.assertTrue(n.has_band('swathmask'))
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te 27 70 30 72 -ts 500 500")
n.reproject(d, tps=True)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_domain.png')
n.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n.shape(), (500, 500))
self.assertEqual(type(n[1]), np.ndarray)
self.assertTrue(n.has_band('swathmask'))
def test_reproject_of_complex(self):
""" Should return np.nan in areas out of swath """
n = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
d = Domain(4326, '-te -92.08 26.85 -92.00 26.91 -ts 200 200')
n.reproject(d)
b = n[1]
self.assertTrue(n.has_band('swathmask'))
self.assertTrue(np.isnan(b[0, 0]))
self.assertTrue(np.isfinite(b[100, 100]))
def test_add_band_and_reproject(self):
""" Should add band and swath mask and return np.nan in areas out of swath """
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, "-te 27 70 30 72 -ts 500 500")
n.add_band(np.ones(n.shape(), np.uint8))
n.reproject(d)
b4 = n[4] # added, reprojected band
b5 = n[5] # swathmask
self.assertTrue(n.has_band('swathmask')) # the added band
self.assertTrue(n.has_band('swathmask_0000')) # the actual swathmask
self.assertTrue(b4[0, 0]==0)
self.assertTrue(b4[300, 300] == 1)
self.assertTrue(b5[0, 0]==0)
self.assertTrue(b5[300, 300] == 1)
def test_reproject_no_addmask(self):
""" Should not add swath mask and return 0 in areas out of swath """
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
d = Domain(4326, '-te -92.08 26.85 -92.00 26.91 -ts 200 200')
n.reproject(d, addmask=False)
b = n[1]
self.assertTrue(not n.has_band('swathmask'))
self.assertTrue(np.isfinite(b[0, 0]))
self.assertTrue(np.isfinite(b[100, 100]))
def test_reproject_stere(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n2 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n1.reproject(n2)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_stere.png')
n1.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n1.shape(), n2.shape())
self.assertEqual(type(n1[1]), np.ndarray)
def test_reproject_gcps(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n2 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n1.reproject(n2)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_gcps.png')
n1.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n1.shape(), n2.shape())
self.assertEqual(type(n1[1]), np.ndarray)
def test_reproject_gcps_on_repro_gcps(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n2 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n2.reproject_gcps()
n1.reproject(n2)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_gcps_on_repro_gcps.png')
n1.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n1.shape(), n2.shape())
self.assertEqual(type(n1[1]), np.ndarray)
def test_reproject_gcps_resize(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n2 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n1.reproject(n2)
n1.resize(2)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_gcps_resize.png')
n1.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n1.shape()[0], n2.shape()[0] * 2)
self.assertEqual(n1.shape()[1], n2.shape()[1] * 2)
self.assertEqual(type(n1[1]), np.ndarray)
def test_undo(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
shape1 = n1.shape()
n1.resize(10)
n1.undo()
shape2 = n1.shape()
self.assertEqual(shape1, shape2)
def test_write_figure(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure.png')
n1.write_figure(tmpfilename)
self.assertTrue(os.path.exists(tmpfilename))
def test_write_figure_band(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure_band.png')
n1.write_figure(tmpfilename, 2)
self.assertTrue(os.path.exists(tmpfilename))
def test_write_figure_clim(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure_clim.png')
n1.write_figure(tmpfilename, 3, clim='hist')
self.assertTrue(os.path.exists(tmpfilename))
def test_write_figure_legend(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure_legend.png')
n1.write_figure(tmpfilename, 3, clim='hist', legend=True, titleString="Title String")
self.assertTrue(os.path.exists(tmpfilename))
def test_write_figure_logo(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure_logo.png')
n1.write_figure(tmpfilename, 3, clim='hist',
logoFileName=self.test_file_gcps)
self.assertTrue(os.path.exists(tmpfilename))
def test_write_geotiffimage(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_geotiffimage.tif')
n1.write_geotiffimage(tmpfilename)
self.assertTrue(os.path.exists(tmpfilename))
def test_write_geotiffimage_if_band_id_is_given(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_geotiffimage.tif')
n1.write_geotiffimage(tmpfilename, band_id=1)
self.assertTrue(os.path.exists(tmpfilename))
def test_get_metadata(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
m = n1.get_metadata()
self.assertEqual(type(m), dict)
self.assertTrue('filename' in m)
def test_get_metadata_key(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
m = n1.get_metadata('filename')
self.assertEqual(type(m), str)
def test_get_metadata_wrong_key(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
with self.assertRaises(ValueError):
n1.get_metadata('some_crap')
def test_get_metadata_band_id(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
m = n1.get_metadata(band_id=1)
self.assertEqual(type(m), dict)
self.assertTrue('name' in m)
def test_set_metadata(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n1.set_metadata('newKey', 'newVal')
m = n1.get_metadata('newKey')
self.assertEqual(m, 'newVal')
def test_set_metadata_band_id(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n1.set_metadata('newKey', 'newVal', band_id=1)
m = n1.get_metadata('newKey', 1)
self.assertEqual(m, 'newVal')
def test_get_band_number(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
self.assertEqual(n1.get_band_number(1), 1)
@unittest.skipUnless(MATPLOTLIB_IS_INSTALLED, 'Matplotlib is required')
def test_get_transect(self):
plt.switch_backend('agg')
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
t = n1.get_transect([[28.31299128, 28.93691525],
[70.93709219, 70.69646524]],
[str('L_645')])
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_get_transect.png')
plt.plot(t['lat'], t['L_645'], '.-')
plt.savefig(tmpfilename)
plt.close('all')
self.assertTrue('L_645' in t.dtype.fields)
self.assertTrue('line' in t.dtype.fields)
self.assertTrue('pixel' in t.dtype.fields)
self.assertTrue('lat' in t.dtype.fields)
self.assertTrue('lon' in t.dtype.fields)
self.assertEqual(type(t['lat']), np.ndarray)
self.assertEqual(type(t['lon']), np.ndarray)
def test_get_transect_outside(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
t = n1.get_transect([[0, 28.31299128], [0, 70.93709219]], [1])
self.assertTrue('L_645' in t.dtype.fields)
self.assertTrue('line' in t.dtype.fields)
self.assertTrue('pixel' in t.dtype.fields)
self.assertTrue('lat' in t.dtype.fields)
self.assertTrue('lon' in t.dtype.fields)
self.assertEqual(type(t['lat']), np.ndarray)
self.assertEqual(type(t['lon']), np.ndarray)
def test_get_transect_wrong_points(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
self.assertRaises(ValueError, n1.get_transect, [1, 1], [1])
def test_get_transect_wrong_band(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
t = n1.get_transect([[0, 28.31299128], [0, 70.93709219]], [10])
self.assertTrue('line' in t.dtype.fields)
self.assertTrue('pixel' in t.dtype.fields)
self.assertTrue('lat' in t.dtype.fields)
self.assertTrue('lon' in t.dtype.fields)
self.assertEqual(type(t['lat']), np.ndarray)
self.assertEqual(type(t['lon']), np.ndarray)
def test_get_transect_pixlin(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
t = n1.get_transect([[10, 20],
[10, 10]],
[str('L_645')],
lonlat=False)
self.assertTrue('L_645' in t.dtype.fields)
self.assertTrue('line' in t.dtype.fields)
self.assertTrue('pixel' in t.dtype.fields)
self.assertTrue('lat' in t.dtype.fields)
self.assertTrue('lon' in t.dtype.fields)
self.assertEqual(type(t['lat']), np.ndarray)
self.assertEqual(type(t['lon']), np.ndarray)
self.assertEqual(len(t['lon']), 11)
def test_get_transect_data(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
b1 = n1[1]
t = n1.get_transect([[28.3], [70.9]], [], data=b1)
self.assertTrue('input' in t.dtype.fields)
self.assertTrue('L_645' not in t.dtype.fields)
self.assertTrue('line' in t.dtype.fields)
self.assertTrue('pixel' in t.dtype.fields)
self.assertTrue('lat' in t.dtype.fields)
self.assertTrue('lon' in t.dtype.fields)
self.assertEqual(type(t['lat']), np.ndarray)
self.assertEqual(type(t['lon']), np.ndarray)
@patch('nansat.nansat.PointBrowser')
def test_digitize_points(self, mock_PointBrowser):
""" shall create PointBrowser and call PointBrowser.get_points() """
value = 'points'
mock_PointBrowser().get_points.return_value = value
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
points = n.digitize_points(1)
self.assertTrue(mock_PointBrowser.called_once())
self.assertEqual(points, value)
def test_crop(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
ext = n1.crop(10, 20, 50, 60)
self.assertEqual(n1.shape(), (60, 50))
self.assertEqual(ext, (10, 20, 50, 60))
self.assertEqual(type(n1[1]), np.ndarray)
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
ext = n1.crop(0, 0, 200, 200)
self.assertEqual(n1.shape(), (200, 200))
self.assertEqual(ext, (0, 0, 200, 200))
self.assertEqual(type(n1[1]), np.ndarray)
def test_crop_gcpproj(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n1.reproject_gcps()
ext = n1.crop(10, 20, 50, 60)
xmed = abs(np.median(np.array([gcp.GCPX
for gcp in n1.vrt.dataset.GetGCPs()])))
gcpproj = NSR(n1.vrt.dataset.GetGCPProjection()
).ExportToProj4().split(' ')[0]
self.assertTrue(xmed > 360)
self.assertTrue(gcpproj=='+proj=stere')
def test_crop_complex(self):
n1 = Nansat(self.test_file_complex, log_level=40, mapper=self.default_mapper)
ext = n1.crop(10, 20, 50, 60)
self.assertEqual(n1.shape(), (60, 50))
self.assertEqual(ext, (10, 20, 50, 60))
self.assertEqual(type(n1[1]), np.ndarray)
def test_crop_no_gcps_arctic(self):
n1 = Nansat(self.test_file_arctic, log_level=40, mapper=self.default_mapper)
ext = n1.crop(10, 20, 50, 60)
self.assertEqual(n1.shape(), (60, 50))
self.assertEqual(ext, (10, 20, 50, 60))
self.assertEqual(type(n1[1]), np.ndarray)
def test_crop_lonlat(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
ext = n1.crop_lonlat([28, 29], [70.5, 71])
self.assertEqual(n1.shape(), (111, 110))
self.assertEqual(ext, (31, 89, 110, 111))
self.assertEqual(type(n1[1]), np.ndarray)
def test_crop_outside(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
self.assertRaises(ValueError, n1.crop_lonlat, [-10, 10], [-10, 10])
def test_watermask(self):
""" if watermask data exists: should fetch array with watermask
else: should raise an error """
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
mod44path = os.getenv('MOD44WPATH')
if mod44path is not None and os.path.exists(mod44path + '/MOD44W.vrt'):
wm = n1.watermask()[1]
self.assertEqual(type(wm), np.ndarray)
self.assertEqual(wm.shape[0], n1.shape()[0])
self.assertEqual(wm.shape[1], n1.shape()[1])
def test_watermask_fail_if_mod44path_is_wrong(self):
""" Nansat.watermask should raise an IOError"""
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
os.environ['MOD44WPATH'] = '/fakepath'
self.assertRaises(IOError, n1.watermask)
def test_watermask_fail_if_mod44path_not_exist(self):
""" Nansat.watermask should raise an IOError"""
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
del os.environ['MOD44WPATH']
self.assertRaises(IOError, n1.watermask)
def test_init_no_arguments(self):
""" No arguments should raise ValueError """
self.assertRaises(ValueError, Nansat)
def test_get_item_basic_expressions(self):
""" Testing get_item with some basic expressions """
self.mock_pti['get_wkv_variable'].return_value=dict(short_name='newband')
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
n = Nansat.from_domain(d, np.zeros((500, 500)), {'expression': 'np.ones((500, 500))'})
self.assertIsInstance(n[1], np.ndarray)
self.assertEqual(n[1].shape, (500, 500))
band1 = n[1]
self.assertTrue(np.allclose(band1, np.ones((500, 500))))
def test_get_item_inf_expressions(self):
""" inf should be replaced with nan """
self.mock_pti['get_wkv_variable'].return_value=dict(short_name='newband')
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
n = Nansat.from_domain(d, log_level=40)
arr = np.empty((500, 500))
n.add_band(arr, {'expression': 'np.array([0,1,2,3,np.inf,5,6,7])'})
self.assertIsInstance(n[1], np.ndarray)
self.assertTrue(np.isnan(n[1][4]))
def test_repr_basic(self):
""" repr should include some basic elements """
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
n = Nansat.from_domain(d, log_level=40)
arr = np.empty((500, 500))
exp = 'np.array([0,1,2,3,np.inf,5,6,7])'
n.add_band(arr, {'expression': exp})
n_repr = repr(n)
self.assertIn(exp, n_repr, 'The expressions should be in repr')
self.assertIn('SourceFilename', n_repr)
self.assertIn('/vsimem/', n_repr)
self.assertIn('500 x 500', n_repr)
self.assertIn('Projection(dataset):', n_repr)
self.assertIn('25', n_repr)
self.assertIn('72', n_repr)
self.assertIn('35', n_repr)
self.assertIn('70', n_repr)
@patch.object(Nansat, 'get_GDALRasterBand')
def test_getitem(self, mock_Nansat):
type(mock_Nansat()).GetMetadata = MagicMock(return_value={'a':1})
type(mock_Nansat()).ReadAsArray = MagicMock(return_value=None)
with self.assertRaises(NansatGDALError):
Nansat(self.test_file_stere, mapper=self.default_mapper).__getitem__(1)
@patch.object(Nansat, 'digitize_points')
def test_crop_interactive(self, mock_digitize_points):
mock_digitize_points.return_value=[np.array([[10, 20], [10, 30]])]
n = Nansat(self.test_file_arctic, log_level=40, mapper=self.default_mapper)
n.crop_interactive()
self.assertEqual(n.shape(), (20, 10))
def test_extend(self):
n = Nansat(self.test_file_arctic, log_level=40, mapper=self.default_mapper)
nshape1 = n.shape()
n.extend(left=10, right=20, top=30, bottom=40)
be = n[1]
self.assertEqual(n.shape(), (nshape1[0]+70, nshape1[1]+30))
self.assertIsInstance(be, np.ndarray)
def test_open_no_mapper(self):
n = Nansat(self.test_file_arctic)
self.assertEqual(type(n), Nansat)
self.assertEqual(n.mapper, 'netcdf_cf')
@patch.multiple(Nansat, vrt=DEFAULT, __init__ = Mock(return_value=None))
def test_get_metadata_unescape(self, vrt):
meta0 = {"key1": "" AAA " & > <", "key2": "'BBB'"}
n = Nansat()
vrt.dataset.GetMetadata.return_value = meta0
meta1 = n.get_metadata()
meta2 = n.get_metadata(unescape=False)
self.assertEqual(meta1, {'key1': '" AAA " & > <', 'key2': "'BBB'"})
self.assertEqual(meta2, meta0)
def test_reproject_pure_geolocation(self):
n0 = Nansat(self.test_file_gcps)
b0 = n0[1]
lon0, lat0 = n0.get_geolocation_grids()
d1 = Domain.from_lonlat(lon=lon0, lat=lat0)
d2 = Domain.from_lonlat(lon=lon0, lat=lat0, add_gcps=False)
d3 = Domain(NSR().wkt, '-te 27 70 31 72 -ts 500 500')
n1 = Nansat.from_domain(d1, b0)
n2 = Nansat.from_domain(d2, b0)
n1.reproject(d3)
n2.reproject(d3)
b1 = n1[1]
b2 = n2[1]
self.assertTrue(np.allclose(b1,b2))
if __name__ == "__main__":
unittest.main()
|
nansencenter/nansat
|
nansat/tests/test_nansat.py
|
Python
|
gpl-3.0
| 34,081 | 0.00355 |
# Copyright 2013 Daniel Narvaez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from setuptools import setup, Extension
classifiers = ["License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 2",
"Topic :: Software Development :: Build Tools"]
setup(name="sourcestamp",
version="0.1",
description="Compute timestamp for a source code tree",
author="Daniel Narvaez",
author_email="dwnarvaez@gmail.com",
url="http://github.com/dnarvaez/sourcestamp",
classifiers=classifiers,
ext_modules=[Extension("sourcestamp", ["src/sourcestamp.c"])])
|
dnarvaez/sourcestamp
|
setup.py
|
Python
|
apache-2.0
| 1,161 | 0 |
config = {
"interfaces": {
"google.cloud.talent.v4beta1.TenantService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000,
}
},
"methods": {
"CreateTenant": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"GetTenant": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"UpdateTenant": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"DeleteTenant": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"ListTenants": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
},
}
}
}
|
tseaver/google-cloud-python
|
talent/google/cloud/talent_v4beta1/gapic/tenant_service_client_config.py
|
Python
|
apache-2.0
| 1,782 | 0 |
x = raw_input()
print x[0].upper() + x[1:]
|
yamstudio/Codeforces
|
200/281A - Word Capitalization.py
|
Python
|
gpl-3.0
| 43 | 0 |
import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
    @alias.setter
    def alias(self, value):
        return setattr(self, attr, value)
return alias
class NamespacedAttribute(unicode):
def __new__(cls, prefix, name, namespace=None):
if name is None:
obj = unicode.__new__(cls, prefix)
elif prefix is None:
# Not really namespaced.
obj = unicode.__new__(cls, name)
else:
obj = unicode.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
class AttributeValueWithCharsetSubstitution(unicode):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return unicode.__new__(unicode, original_value)
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
class HTMLAwareEntitySubstitution(EntitySubstitution):
"""Entity substitution rules that are aware of some HTML quirks.
Specifically, the contents of <script> and <style> tags should not
undergo entity substitution.
Incoming NavigableString objects are checked to see if they're the
direct children of a <script> or <style> tag.
"""
cdata_containing_tags = set(["script", "style"])
preformatted_tags = set(["pre"])
@classmethod
def _substitute_if_appropriate(cls, ns, f):
if (isinstance(ns, NavigableString)
and ns.parent is not None
and ns.parent.name in cls.cdata_containing_tags):
# Do nothing.
return ns
# Substitute.
return f(ns)
@classmethod
def substitute_html(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_html)
@classmethod
def substitute_xml(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
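    #
    # Illustrative sketch (not part of the original module) of how the three
    # named formatters differ for a tag whose text is u'café & <b>':
    #   "minimal" -> u'café &amp; &lt;b&gt;'         (only &, <, > escaped)
    #   "html"    -> u'caf&eacute; &amp; &lt;b&gt;'  (named entities for Unicode too)
    #   None      -> u'café & <b>'                   (no substitution at all)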
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used when mapping a formatter name ("minimal") to an
appropriate function (one that performs entity-substitution on
the contents of <script> and <style> tags, or not). It's
inefficient, but it should be called very rarely.
"""
if self.parent is None:
# This is the top-level object. It should have .is_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
def setup(self, parent=None, previous_element=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = None
self.previous_sibling = None
self.next_sibling = None
if self.parent is not None and self.parent.contents:
self.previous_sibling = self.parent.contents[-1]
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self):
"Finds the last element beneath this object to be parsed."
last_child = self
while hasattr(last_child, 'contents') and last_child.contents:
last_child = last_child.contents[-1]
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, basestring)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant()
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant()
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
elif text is None and not limit and not attrs and not kwargs:
# Optimization to find all tags.
if name is True or name is None:
return [element for element in generator
if isinstance(element, Tag)]
# Optimization to find all tags with a given name.
elif isinstance(name, basestring):
return [element for element in generator
if isinstance(element, Tag) and element.name == name]
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
else:
# Build a SoupStrainer
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
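    # e.g. (illustrative sketch) the selector token 'a[href^="http"]' is matched by
    # attribselect_re into tag='a', attribute='href', operator='^', value='http'.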
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
        space-separated string.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
            # string representation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
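    # Sketch (illustrative only) of the CSS attribute-selector forms the checkers
    # above correspond to:
    #   [href="x"]  exact match       [rel~="x"]   word in space-separated list
    #   [href^="x"] starts with       [href$="x"]  ends with
    #   [href*="x"] substring         [lang|="en"] exactly "en" or "en-..."
    #   [href]      attribute is merely present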
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
class NavigableString(unicode, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __copy__(self):
return self
def __getnewargs__(self):
return (unicode(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = u'<![CDATA['
SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = u'<?'
SUFFIX = u'?>'
class Comment(PreformattedString):
PREFIX = u'<!--'
SUFFIX = u'-->'
class Declaration(PreformattedString):
PREFIX = u'<!'
SUFFIX = u'!>'
class Doctype(PreformattedString):
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
value = name or ''
if pub_id is not None:
value += ' PUBLIC "%s"' % pub_id
if system_id is not None:
value += ' "%s"' % system_id
elif system_id is not None:
value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = u'<!DOCTYPE '
SUFFIX = u'>\n'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
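    # e.g. (sketch, assuming an HTML builder with the usual empty-element list):
    # a parsed '<br/>' is an empty-element tag, while '<p></p>' is not, because
    # 'p' does not appear in that builder's list of empty-element tags.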
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
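    # Illustrative sketch: for '<b>one</b>' the property yields u'one'; for
    # '<b>one<i>two</i></b>' it yields None (more than one child); for
    # '<b><i>two</i></b>' it recurses into the single child tag and yields u'two'.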
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
def _all_strings(self, strip=False, types=(NavigableString, CData)):
"""Yield all strings of certain classes, possibly stripping them.
By default, yields only NavigableString and CData objects. So
no comments, processing instructions, etc.
"""
for descendant in self.descendants:
if (
(types is None and not isinstance(descendant, NavigableString))
or
(types is not None and type(descendant) not in types)):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator=u"", strip=False,
types=(NavigableString, CData)):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(
strip, types=types)])
getText = get_text
text = property(get_text)
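    # Illustrative sketch: get_text('|', strip=True) on '<p>Hi <b> there </b>!</p>'
    # yields u'Hi|there|!'.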
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i.contents = []
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def has_attr(self, key):
return key in self.attrs
def __hash__(self):
return str(self).__hash__()
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self.attrs[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self.attrs[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> "soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%sTag is deprecated, use .find("%s") instead.' % (
tag_name, tag_name))
return self.find(tag_name)
# We special case contents to avoid recursion.
elif not tag.startswith("__") and not tag=="contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.encode(encoding)
def __unicode__(self):
return self.decode()
def __str__(self):
return self.encode()
if PY3K:
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def _should_pretty_print(self, indent_level):
"""Should this tag be pretty-printed?"""
return (
indent_level is not None and
(self.name not in HTMLAwareEntitySubstitution.preformatted_tags
or self._is_xml))
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a Unicode representation of this tag and its contents.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
attrs = []
if self.attrs:
for key, val in sorted(self.attrs.items()):
if val is None:
decoded = key
else:
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, basestring):
val = unicode(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None):
val = val.encode(eventual_encoding)
text = self.format_string(val, formatter)
decoded = (
unicode(key) + '='
+ EntitySubstitution.quoted_attribute_value(text))
attrs.append(decoded)
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = '/'
else:
closeTag = '</%s%s>' % (prefix, self.name)
pretty_print = self._should_pretty_print(indent_level)
space = ''
indent_space = ''
if indent_level is not None:
indent_space = (' ' * (indent_level - 1))
if pretty_print:
space = indent_space
indent_contents = indent_level + 1
else:
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if indent_level is not None:
# Even if this particular tag is not pretty-printed,
# we should indent up to the start of the tag.
s.append(indent_space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if indent_level is not None and closeTag and self.next_sibling:
# Even if this particular tag is not pretty-printed,
# we're now done with the tag, and we should add a
# newline if appropriate.
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=None, formatter="minimal"):
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
pretty_print = (indent_level is not None)
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
if text and indent_level and not self.name == 'pre':
text = text.strip()
if text:
if pretty_print and not self.name == 'pre':
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print and not self.name == 'pre':
s.append("\n")
return ''.join(s)
def encode_contents(
self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a bytestring."""
contents = self.decode_contents(indent_level, encoding, formatter)
return contents.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
if not prettyPrint:
indentLevel = None
return self.encode_contents(
indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
#Generator methods
@property
def children(self):
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# CSS selector code
_selector_combinators = ['>', '+', '~']
_select_debug = False
def select(self, selector, _candidate_generator=None):
"""Perform a CSS selection operation on the current element."""
tokens = selector.split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print 'Running CSS selector "%s"' % selector
for index, token in enumerate(tokens):
if self._select_debug:
print ' Considering token "%s"' % token
recursive_candidate_generator = None
tag_name = None
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print ' Token was consumed by the previous combinator.'
continue
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
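                    # NOTE (added comment): the 'continue' below makes the
                    # ValueError that follows unreachable; bare pseudo-class
                    # tokens are simply skipped, which looks like a deliberate
                    # relaxation of upstream bs4 behaviour rather than an oversight.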
continue
raise ValueError(
"A pseudo-class must be prefixed with a tag name.")
pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is not None:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
except:
continue
raise NotImplementedError(
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
continue
raise ValueError(
'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
continue
raise NotImplementedError(
'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
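                # NOTE (added comment): unsupported tokens are skipped via
                # 'continue', leaving the ValueError below unreachable; upstream
                # bs4 raises here instead.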
continue
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
print '-' * 40
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
yield i
if self._select_debug:
print '-' * 40
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
if tag_name:
                        check = tag_name
                    else:
                        check = "[any]"
print ' Default candidate generator, tag name="%s"' % check
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
new_context = []
new_context_ids = set([])
for tag in current_context:
if self._select_debug:
print " Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except StopIteration:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
elif self._select_debug:
print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
current_context = new_context
if self._select_debug:
print "Final verdict:"
for i in current_context:
print " %s %s" % (i.name, i.attrs)
return current_context
# Old names for backwards compatibility
def childGenerator(self):
return self.children
def recursiveChildGenerator(self):
return self.descendants
def has_key(self, key):
"""This was kind of misleading because has_key() (attributes)
was different from __in__ (contents). has_key() is gone in
Python 3, anyway."""
warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
key))
return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = self._normalize_search_value(name)
if not isinstance(attrs, dict):
# Treat a non-dict value for attrs as a search for the 'class'
# attribute.
kwargs['class'] = attrs
attrs = None
if 'class_' in kwargs:
# Treat class_="foo" as a search for the 'class'
# attribute, overriding any non-dict value for attrs.
kwargs['class'] = kwargs['class_']
del kwargs['class_']
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
normalized_attrs = {}
for key, value in attrs.items():
normalized_attrs[key] = self._normalize_search_value(value)
self.attrs = normalized_attrs
self.text = self._normalize_search_value(text)
def _normalize_search_value(self, value):
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, unicode)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
# Otherwise, convert it into a Unicode string.
# The unicode(str()) thing is so this will do the same thing on Python 2
# and Python 3.
return unicode(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
found = None
markup = None
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
call_function_with_tag_data = (
isinstance(self.name, collections.Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
searchTag = search_tag
def search(self, markup):
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against):
# print u"Matching %s against %s" % (markup, match_against)
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
if (isinstance(match_against, unicode)
and ' ' in match_against):
# A bit of a special case. If they try to match "foo
# bar" on a multivalue attribute's value, only accept
# the literal value "foo bar"
#
# XXX This is going to be pretty slow because we keep
# splitting match_against. But it shouldn't come up
# too often.
return (whitespace_re.split(match_against) == markup)
else:
for item in markup:
if self._matches(item, match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, collections.Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if isinstance(match_against, unicode):
# Exact string match
return markup == match_against
if hasattr(match_against, 'match'):
# Regexp match
return match_against.search(markup)
if hasattr(match_against, '__iter__'):
# The markup must be an exact match against something
# in the iterable.
return markup in match_against
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source):
        list.__init__(self)
self.source = source
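# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): how the find_all()/select()
# machinery defined above is typically driven through BeautifulSoup.  This
# assumes a regular `bs4` package is importable; the markup and selectors are
# illustrative only.
if __name__ == "__main__":
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(
        '<div id="menu"><a class="nav" href="/a">A</a>'
        '<a class="nav" href="/b">B</a></div>')
    # find_all() funnels the attrs dict through SoupStrainer._matches()
    print soup.find_all('a', attrs={'class': 'nav'})
    # select() tokenizes the selector and walks candidate generators; the '>'
    # combinator switches to the direct-children generator described above.
    print soup.select('div#menu > a.nav')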
|
ruuk/script.web.viewer2
|
lib/webviewer/bs4/element.py
|
Python
|
gpl-2.0
| 61,200 | 0.001307 |
from __future__ import unicode_literals
import uuid
from django.forms import UUIDField, ValidationError
from django.test import SimpleTestCase
class UUIDFieldTest(SimpleTestCase):
def test_uuidfield_1(self):
field = UUIDField()
value = field.clean('550e8400e29b41d4a716446655440000')
self.assertEqual(value, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_uuidfield_2(self):
field = UUIDField(required=False)
value = field.clean('')
self.assertEqual(value, None)
def test_uuidfield_3(self):
field = UUIDField()
with self.assertRaises(ValidationError) as cm:
field.clean('550e8400')
self.assertEqual(cm.exception.messages[0], 'Enter a valid UUID.')
def test_uuidfield_4(self):
field = UUIDField()
value = field.prepare_value(uuid.UUID('550e8400e29b41d4a716446655440000'))
self.assertEqual(value, '550e8400e29b41d4a716446655440000')
|
filias/django
|
tests/forms_tests/field_tests/test_uuidfield.py
|
Python
|
bsd-3-clause
| 971 | 0.00103 |
# coding=utf-8
import asyncio
import random
import json
import hashlib
import aiohttp
import async_timeout
import sys
class BaiduTranslate:
lang_auto = 'auto'
lang_zh = 'zh'
lang_en = 'en'
timeout = 20
api_addr = 'http://fanyi-api.baidu.com/api/trans/vip/translate'
def __init__(self, loop=None):
self.appid = '20171009000086968'
self.secret = 'vZ36FjnZ91FoLJwe5NrF'
if loop is None:
            self.is_async = False  # 'async' is a reserved word as of Python 3.7
self.loop = asyncio.get_event_loop()
else:
            self.is_async = True
self.loop = loop
def translate(self, text, from_lang, to_lang):
        if self.is_async:
return self._request(text, from_lang, to_lang)
else:
return self.loop.run_until_complete(self._request(text, from_lang, to_lang))
async def _request(self, text, from_lang, to_lang):
salt = random.randint(0, 2147483647)
sign = self.appid + text + str(salt) + self.secret
sign = hashlib.md5(sign.encode('utf-8')).hexdigest()
params = {'q': text, 'from': from_lang, 'to': to_lang, 'appid': self.appid, 'salt': salt, 'sign': sign}
async with aiohttp.ClientSession(loop=self.loop) as session:
with async_timeout.timeout(self.timeout, loop=self.loop):
async with session.post(self.api_addr,
data=params) as resp:
body = await resp.read()
res = json.loads(body.decode('utf-8'))
if 'error_code' in res and res['error_code'] != '52000':
raise RuntimeError(res['error_msg'])
return res['trans_result'][0]['dst']
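# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module).  Both calls below perform a
# live HTTP request against the Baidu translate API with the appid/secret
# hard-coded above, so they only succeed with network access and valid
# credentials.
if __name__ == '__main__':
    # Synchronous use: the class drives its own event loop internally.
    translator = BaiduTranslate()
    print(translator.translate('hello world',
                               BaiduTranslate.lang_en, BaiduTranslate.lang_zh))
    # Asynchronous use: pass a loop in and run the returned coroutine yourself.
    loop = asyncio.get_event_loop()
    async_translator = BaiduTranslate(loop=loop)
    print(loop.run_until_complete(async_translator.translate(
        '你好', BaiduTranslate.lang_zh, BaiduTranslate.lang_en)))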
|
xiaoslzhang/pyyingyu
|
yingyu/yingyu_baidu.py
|
Python
|
apache-2.0
| 1,682 | 0.003567 |
from core.plugins.lib.proxies import MetricProxy, SourceProxy
from core.plugins.lib.models import PluginDataModel
from core.plugins.lib.fields import Field, ListField, DateTimeField, FloatField, IntegerField
from core.plugins.lib.scope import Scope, ZonePerm, BlockPerm
class BaseFitbitModel(PluginDataModel):
metric_proxy = MetricProxy(name="newsfeed")
source_proxy = SourceProxy(name="fitbit")
date = DateTimeField()
value = FloatField()
class StepModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="steps")
class DistanceModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="distance")
class TimeInBedModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="time_in_bed")
class MinutesAsleepModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="minutes_asleep")
class WeightModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="weight")
class SleepEfficiencyModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="sleep_efficiency")
class ActivityCaloriesModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="activity_calories")
class SleepStartTimeModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="sleep_start_time")
value = DateTimeField()
class CaloriesInModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="calories_in")
class CaloriesModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="calories")
class WaterModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="water")
MODEL_DICT = {
"activities/steps": StepModel,
"activities/distance": DistanceModel,
"sleep/timeInBed": TimeInBedModel,
"sleep/minutesAsleep": MinutesAsleepModel,
"body/weight": WeightModel,
"sleep/efficiency": SleepEfficiencyModel,
"activities/activityCalories": ActivityCaloriesModel,
"sleep/startTime": SleepStartTimeModel,
"foods/log/caloriesIn": CaloriesInModel,
"activities/calories": CaloriesModel,
"foods/log/water": WaterModel
}
|
realizeapp/realize-core
|
plugins/fitbit/models.py
|
Python
|
agpl-3.0
| 1,974 | 0.007092 |
"""MySQLdb Cursors
This module implements Cursors of various types for MySQLdb. By
default, MySQLdb uses the Cursor class.
"""
import re
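# Matches the VALUES (...) clause of an INSERT/REPLACE statement so that
# executemany() can expand many parameter rows into a single statement.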
insert_values = re.compile(r"\svalues\s*(\(((?<!\\)'.*?\).*(?<!\\)?'|.)+?\))", re.IGNORECASE)
from _mysql_exceptions import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError
class BaseCursor(object):
"""A base for Cursor classes. Useful attributes:
description
A tuple of DB API 7-tuples describing the columns in
the last executed query; see PEP-249 for details.
description_flags
Tuple of column flags for last query, one entry per column
in the result set. Values correspond to those in
MySQLdb.constants.FLAG. See MySQL documentation (C API)
for more information. Non-standard extension.
arraysize
default number of rows fetchmany() will fetch
"""
from _mysql_exceptions import MySQLError, Warning, Error, InterfaceError, \
DatabaseError, DataError, OperationalError, IntegrityError, \
InternalError, ProgrammingError, NotSupportedError
_defer_warnings = False
def __init__(self, connection):
from weakref import proxy
self.connection = proxy(connection)
self.description = None
self.description_flags = None
self.rowcount = -1
self.arraysize = 1
self._executed = None
self.lastrowid = None
self.messages = []
self.errorhandler = connection.errorhandler
self._result = None
self._warnings = 0
self._info = None
self.rownumber = None
def __del__(self):
self.close()
self.errorhandler = None
self._result = None
def close(self):
"""Close the cursor. No further queries will be possible."""
if not self.connection: return
while self.nextset(): pass
self.connection = None
def _check_executed(self):
if not self._executed:
self.errorhandler(self, ProgrammingError, "execute() first")
def _warning_check(self):
from warnings import warn
if self._warnings:
warnings = self._get_db().show_warnings()
if warnings:
# This is done in two loops in case
# Warnings are set to raise exceptions.
for w in warnings:
self.messages.append((self.Warning, w))
for w in warnings:
warn(w[-1], self.Warning, 3)
elif self._info:
self.messages.append((self.Warning, self._info))
warn(self._info, self.Warning, 3)
def nextset(self):
"""Advance to the next result set.
Returns None if there are no more result sets.
"""
if self._executed:
self.fetchall()
del self.messages[:]
db = self._get_db()
nr = db.next_result()
if nr == -1:
return None
self._do_get_result()
self._post_get_result()
self._warning_check()
return 1
def _post_get_result(self): pass
def _do_get_result(self):
db = self._get_db()
self._result = self._get_result()
self.rowcount = db.affected_rows()
self.rownumber = 0
self.description = self._result and self._result.describe() or None
self.description_flags = self._result and self._result.field_flags() or None
self.lastrowid = db.insert_id()
self._warnings = db.warning_count()
self._info = db.info()
def setinputsizes(self, *args):
"""Does nothing, required by DB API."""
def setoutputsizes(self, *args):
"""Does nothing, required by DB API."""
def _get_db(self):
if not self.connection:
self.errorhandler(self, ProgrammingError, "cursor closed")
return self.connection
def execute(self, query, args=None):
"""Execute a query.
query -- string, query to execute on server
args -- optional sequence or mapping, parameters to use with query.
Note: If args is a sequence, then %s must be used as the
parameter placeholder in the query. If a mapping is used,
%(key)s must be used as the placeholder.
Returns long integer rows affected, if any
"""
from types import ListType, TupleType
from sys import exc_info
del self.messages[:]
db = self._get_db()
charset = db.character_set_name()
if isinstance(query, unicode):
query = query.encode(charset)
if args is not None:
query = query % db.literal(args)
try:
r = self._query(query)
except TypeError, m:
if m.args[0] in ("not enough arguments for format string",
"not all arguments converted"):
self.messages.append((ProgrammingError, m.args[0]))
self.errorhandler(self, ProgrammingError, m.args[0])
else:
self.messages.append((TypeError, m))
self.errorhandler(self, TypeError, m)
except:
exc, value, tb = exc_info()
del tb
self.messages.append((exc, value))
self.errorhandler(self, exc, value)
self._executed = query
if not self._defer_warnings: self._warning_check()
return r
def executemany(self, query, args):
"""Execute a multi-row query.
query -- string, query to execute on server
args
Sequence of sequences or mappings, parameters to use with
query.
Returns long integer rows affected, if any.
This method improves performance on multiple-row INSERT and
REPLACE. Otherwise it is equivalent to looping over args with
execute().
"""
del self.messages[:]
db = self._get_db()
if not args: return
charset = db.character_set_name()
if isinstance(query, unicode): query = query.encode(charset)
m = insert_values.search(query)
if not m:
r = 0
for a in args:
r = r + self.execute(query, a)
return r
p = m.start(1)
e = m.end(1)
qv = m.group(1)
try:
q = [ qv % db.literal(a) for a in args ]
except TypeError, msg:
if msg.args[0] in ("not enough arguments for format string",
"not all arguments converted"):
self.messages.append((ProgrammingError, msg.args[0]))
self.errorhandler(self, ProgrammingError, msg.args[0])
else:
self.messages.append((TypeError, msg))
self.errorhandler(self, TypeError, msg)
except:
from sys import exc_info
exc, value, tb = exc_info()
del tb
self.errorhandler(self, exc, value)
r = self._query('\n'.join([query[:p], ',\n'.join(q), query[e:]]))
if not self._defer_warnings: self._warning_check()
return r
def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
procname -- string, name of procedure to execute on server
args -- Sequence of parameters to use with procedure
Returns the original args.
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
"""
from types import UnicodeType
db = self._get_db()
charset = db.character_set_name()
for index, arg in enumerate(args):
q = "SET @_%s_%d=%s" % (procname, index,
db.literal(arg))
if isinstance(q, unicode):
q = q.encode(charset)
self._query(q)
self.nextset()
q = "CALL %s(%s)" % (procname,
','.join(['@_%s_%d' % (procname, i)
for i in range(len(args))]))
if type(q) is UnicodeType:
q = q.encode(charset)
self._query(q)
self._executed = q
if not self._defer_warnings: self._warning_check()
return args
def _do_query(self, q):
db = self._get_db()
self._last_executed = q
db.query(q)
self._do_get_result()
return self.rowcount
def _query(self, q): return self._do_query(q)
def _fetch_row(self, size=1):
if not self._result:
return ()
return self._result.fetch_row(size, self._fetch_type)
def __iter__(self):
return iter(self.fetchone, None)
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
class CursorStoreResultMixIn(object):
"""This is a MixIn class which causes the entire result set to be
stored on the client side, i.e. it uses mysql_store_result(). If the
result set can be very large, consider adding a LIMIT clause to your
query, or using CursorUseResultMixIn instead."""
def _get_result(self): return self._get_db().store_result()
def _query(self, q):
rowcount = self._do_query(q)
self._post_get_result()
return rowcount
def _post_get_result(self):
self._rows = self._fetch_row(0)
self._result = None
def fetchone(self):
"""Fetches a single row from the cursor. None indicates that
no more rows are available."""
self._check_executed()
if self.rownumber >= len(self._rows): return None
result = self._rows[self.rownumber]
self.rownumber = self.rownumber+1
return result
def fetchmany(self, size=None):
"""Fetch up to size rows from the cursor. Result set may be smaller
than size. If size is not defined, cursor.arraysize is used."""
self._check_executed()
end = self.rownumber + (size or self.arraysize)
result = self._rows[self.rownumber:end]
self.rownumber = min(end, len(self._rows))
return result
    def fetchall(self):
        """Fetches all available rows from the cursor."""
self._check_executed()
if self.rownumber:
result = self._rows[self.rownumber:]
else:
result = self._rows
self.rownumber = len(self._rows)
return result
def scroll(self, value, mode='relative'):
"""Scroll the cursor in the result set to a new position according
to mode.
If mode is 'relative' (default), value is taken as offset to
the current position in the result set, if set to 'absolute',
value states an absolute target position."""
self._check_executed()
if mode == 'relative':
r = self.rownumber + value
elif mode == 'absolute':
r = value
else:
self.errorhandler(self, ProgrammingError,
"unknown scroll mode %s" % `mode`)
if r < 0 or r >= len(self._rows):
self.errorhandler(self, IndexError, "out of range")
self.rownumber = r
def __iter__(self):
self._check_executed()
result = self.rownumber and self._rows[self.rownumber:] or self._rows
return iter(result)
class CursorUseResultMixIn(object):
"""This is a MixIn class which causes the result set to be stored
in the server and sent row-by-row to client side, i.e. it uses
mysql_use_result(). You MUST retrieve the entire result set and
    close() the cursor before additional queries can be performed on
the connection."""
_defer_warnings = True
def _get_result(self): return self._get_db().use_result()
def fetchone(self):
"""Fetches a single row from the cursor."""
self._check_executed()
r = self._fetch_row(1)
if not r:
self._warning_check()
return None
self.rownumber = self.rownumber + 1
return r[0]
def fetchmany(self, size=None):
"""Fetch up to size rows from the cursor. Result set may be smaller
than size. If size is not defined, cursor.arraysize is used."""
self._check_executed()
r = self._fetch_row(size or self.arraysize)
self.rownumber = self.rownumber + len(r)
if not r:
self._warning_check()
return r
    def fetchall(self):
        """Fetches all available rows from the cursor."""
self._check_executed()
r = self._fetch_row(0)
self.rownumber = self.rownumber + len(r)
self._warning_check()
return r
def __iter__(self):
return self
def next(self):
row = self.fetchone()
if row is None:
raise StopIteration
return row
class CursorTupleRowsMixIn(object):
"""This is a MixIn class that causes all rows to be returned as tuples,
which is the standard form required by DB API."""
_fetch_type = 0
class CursorDictRowsMixIn(object):
"""This is a MixIn class that causes all rows to be returned as
dictionaries. This is a non-standard feature."""
_fetch_type = 1
def fetchoneDict(self):
"""Fetch a single row as a dictionary. Deprecated:
Use fetchone() instead. Will be removed in 1.3."""
from warnings import warn
warn("fetchoneDict() is non-standard and will be removed in 1.3",
DeprecationWarning, 2)
return self.fetchone()
def fetchmanyDict(self, size=None):
"""Fetch several rows as a list of dictionaries. Deprecated:
Use fetchmany() instead. Will be removed in 1.3."""
from warnings import warn
warn("fetchmanyDict() is non-standard and will be removed in 1.3",
DeprecationWarning, 2)
return self.fetchmany(size)
def fetchallDict(self):
"""Fetch all available rows as a list of dictionaries. Deprecated:
Use fetchall() instead. Will be removed in 1.3."""
from warnings import warn
warn("fetchallDict() is non-standard and will be removed in 1.3",
DeprecationWarning, 2)
return self.fetchall()
class CursorOldDictRowsMixIn(CursorDictRowsMixIn):
"""This is a MixIn class that returns rows as dictionaries with
the same key convention as the old Mysqldb (MySQLmodule). Don't
use this."""
_fetch_type = 2
class Cursor(CursorStoreResultMixIn, CursorTupleRowsMixIn,
BaseCursor):
"""This is the standard Cursor class that returns rows as tuples
and stores the result set in the client."""
class DictCursor(CursorStoreResultMixIn, CursorDictRowsMixIn,
BaseCursor):
"""This is a Cursor class that returns rows as dictionaries and
stores the result set in the client."""
class SSCursor(CursorUseResultMixIn, CursorTupleRowsMixIn,
BaseCursor):
"""This is a Cursor class that returns rows as tuples and stores
the result set in the server."""
class SSDictCursor(CursorUseResultMixIn, CursorDictRowsMixIn,
BaseCursor):
"""This is a Cursor class that returns rows as dictionaries and
stores the result set in the server."""
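# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module).  It assumes a reachable
# MySQL server and a stored procedure `add_one(IN x INT, OUT y INT)`; the
# connection parameters are placeholders.
if __name__ == "__main__":
    import MySQLdb
    conn = MySQLdb.connect(host="localhost", user="user", passwd="secret", db="test")
    cur = conn.cursor()  # default Cursor: tuple rows, result set stored client-side
    # callproc() binds each argument to a server variable @_add_one_<n> ...
    cur.callproc("add_one", (41, 0))
    # ... and OUT/INOUT values are read back with an explicit SELECT once every
    # result set produced by the procedure has been consumed via nextset().
    while cur.nextset() is not None:
        pass
    cur.execute("SELECT @_add_one_1")
    print cur.fetchone()[0]
    cur.close()
    conn.close()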
|
carlgao/lenga
|
images/lenny64-peon/usr/share/python-support/python-mysqldb/MySQLdb/cursors.py
|
Python
|
mit
| 16,782 | 0.003158 |
from django.contrib import admin
from holidays.models import (Holiday, StaticHoliday,
NthXDayHoliday, NthXDayAfterHoliday, CustomHoliday)
class HolidayAdmin(admin.ModelAdmin):
pass
class StaticHolidayAdmin(admin.ModelAdmin):
pass
class NthXDayHolidayAdmin(admin.ModelAdmin):
pass
class NthXDayAfterHolidayAdmin(admin.ModelAdmin):
pass
class CustomHolidayAdmin(admin.ModelAdmin):
pass
admin.site.register(Holiday, HolidayAdmin)
admin.site.register(StaticHoliday, StaticHolidayAdmin)
admin.site.register(NthXDayHoliday, NthXDayHolidayAdmin)
admin.site.register(NthXDayAfterHoliday, NthXDayAfterHolidayAdmin)
admin.site.register(CustomHoliday, CustomHolidayAdmin)
|
dannybrowne86/django-holidays
|
holidays/holidays/admin.py
|
Python
|
mit
| 720 | 0.009722 |
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.contrib.auth.decorators import user_passes_test
from django.utils.importlib import import_module
from djangae.core.paginator import EmptyPage, PageNotAnInteger
from djangae.core.paginator import DatastorePaginator as Paginator
from google.appengine.ext import db
from google.appengine.ext.deferred import defer
from .models import Error, Event
import calendar
def get_permission_decorator():
if getattr(settings, 'CENTAUR_PERMISSION_DECORATOR', None):
module, decorator = settings.CENTAUR_PERMISSION_DECORATOR.rsplit('.', 1)
return getattr(import_module(module), decorator)
return user_passes_test(lambda u: u.is_superuser)
permission_decorator = get_permission_decorator()
def timestamp(datetime):
    """Return a UTC timestamp; an equivalent helper is built into Python 3 but not Python 2."""
return calendar.timegm(datetime.timetuple())
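# Illustrative: a naive UTC datetime for 1970-01-02 00:00 maps to 86400.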
@permission_decorator
def index(request):
errors = Error.objects.all()
# Filter by user email
if request.GET.get('user', None):
errors_pks = [e.error.pk for e in Event.objects.filter(logged_in_user_email=request.GET.get('user'))]
errors = errors.filter(pk__in=errors_pks)
errors = errors.order_by("-last_event")
page = request.GET.get('page', 1)
paginator = Paginator(errors, 20)
try:
errors = paginator.page(page)
except PageNotAnInteger:
errors = paginator.page(1)
except EmptyPage:
errors = paginator.page(paginator.num_pages)
return render(request, "centaur/index.html", {"errors": errors})
@permission_decorator
def error(request, error_id, limit=200):
error = get_object_or_404(Error, pk=error_id)
events = error.events.all().order_by("-created")[:limit]
series = [
timestamp(event.created.replace(minute=0, second=0, microsecond=0))
for event in events
]
page = request.GET.get('page', 1)
paginator = Paginator(events, 1)
try:
events = paginator.page(page)
except PageNotAnInteger:
events = paginator.page(1)
except EmptyPage:
events = paginator.page(paginator.num_pages)
return render(request, "centaur/error.html", {
"error": error,
"events": events,
"series": series,
})
CLEANUP_QUEUE = getattr(settings, 'QUEUE_FOR_EVENT_CLEANUP', 'default')
@permission_decorator
def clear_old_events(request):
defer(_clear_old_events, _queue=CLEANUP_QUEUE)
return HttpResponse("OK. Cleaning task deferred.")
EVENT_BATCH_SIZE = 400
ERROR_UPDATE_BATCH_SIZE = 50
def _update_error_count(error_id, events_removed):
@db.transactional(xg=True)
def txn():
_error = Error.objects.get(pk=error_id)
_error.event_count -= events_removed
_error.save()
txn()
def _clear_old_events():
from google.appengine.api.datastore import Query, Delete, Get
query = Query("centaur_event", keys_only=True)
query["created <= "] = timezone.now() - timedelta(days=30)
old_event_keys = list(query.Run(limit=EVENT_BATCH_SIZE))
old_events = filter(None, Get(old_event_keys))
errors = {}
for event in old_events:
data = errors.setdefault(event['error_id'], {'count': 0, 'event_keys':[]})
data['count'] += 1
data['event_keys'].append(event.key())
to_delete = []
for error_id, data in errors.items()[:ERROR_UPDATE_BATCH_SIZE]:
# Each event might be for a different error and while we can delete hundreds of events, we
# probably don't want to defer hundreds of tasks, so we'll only delete events from a handful of distinct events.
defer(_update_error_count, error_id, data['count'], _queue=CLEANUP_QUEUE)
to_delete.extend(data['event_keys'])
Delete(to_delete)
if len(old_event_keys) == EVENT_BATCH_SIZE or len(to_delete) < len(old_events):
# In case we didn't clear everything, run again to find more old events.
defer(_clear_old_events, _queue=CLEANUP_QUEUE)
|
potatolondon/centaur
|
views.py
|
Python
|
bsd-3-clause
| 4,150 | 0.002892 |
import functools
import pfp.interp
def native(name, ret, interp=None, send_interp=False):
"""Used as a decorator to add the decorated function to the
pfp interpreter so that it can be used from within scripts.
:param str name: The name of the function as it will be exposed in template scripts.
:param pfp.fields.Field ret: The return type of the function (a class)
:param pfp.interp.PfpInterp interp: The specific interpreter to add the function to
:param bool send_interp: If the current interpreter should be passed to the function.
Examples:
The example below defines a ``Sum`` function that will return the sum of
all parameters passed to the function: ::
from pfp.fields import PYVAL
@native(name="Sum", ret=pfp.fields.Int64)
def sum_numbers(params, ctxt, scope, stream, coord):
res = 0
for param in params:
res += PYVAL(param)
return res
The code below is the code for the :any:`Int3 <pfp.native.dbg.int3>` function. Notice that it
requires that the interpreter be sent as a parameter: ::
@native(name="Int3", ret=pfp.fields.Void, send_interp=True)
def int3(params, ctxt, scope, stream, coord, interp):
if interp._no_debug:
return
if interp._int3:
interp.debugger = PfpDbg(interp)
interp.debugger.cmdloop()
"""
def native_decorator(func):
@functools.wraps(func)
def native_wrapper(*args, **kwargs):
return func(*args, **kwargs)
pfp.interp.PfpInterp.add_native(name, func, ret, interp=interp, send_interp=send_interp)
return native_wrapper
return native_decorator
def predefine(template):
pfp.interp.PfpInterp.add_predefine(template)
|
greenoaktree/pfp
|
pfp/native/__init__.py
|
Python
|
mit
| 1,639 | 0.026236 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
plot the results from the files igraph_degree_assort_study and degree_assortativity
'''
from igraph import *
import os
import numpy as np
import matplotlib.pyplot as plt
#########################
IN_DIR = '/home/sscepano/Projects7s/Twitter-workspace/ALL_SR'
img_out_plot = "7MOda_unweighted.png"
#########################
#########################
# read from a file the res
#########################
def read_in_res():
f = open('7MODeg_assort_study.weighted_edge_list', 'r')
DA = []
TH = []
for line in f:
if line.startswith('stats for'):
th = float(line.split()[-1])
TH.append(th)
if line.startswith('The network is'):
da = float(line.split()[-1])
DA.append(da)
th_last = th
f2 = open('plot_da_0.2.txt', 'r')
for line in f2:
(th, da) = line.split()
th = float(th)
if th < th_last:
continue
da = float(da)
TH.append(th)
DA.append(da)
f3 = open('DA_SR_th.tab', 'w')
for i in range(len(TH)):
f3.write(str(TH[i]) + '\t' + str(DA[i]) + '\n')
return TH, DA
def plot_DA(xaxis, da):
x = np.array(xaxis)
y = np.array(da)
plt.plot(x, y, 'c')
plt.grid(True)
plt.title('SR network')
#plt.legend(bbox_to_anchor=(0, 1), bbox_transform=plt.gcf().transFigure)
plt.ylabel('degree assortativity')
plt.xlabel('SR threshold')
plt.savefig(img_out_plot,format='png',dpi=200)
def main():
os.chdir(IN_DIR)
x, DA = read_in_res()
plot_DA(x, DA)
main()
|
sanja7s/SR_Twitter
|
src_graph/plot_degree_assortativity.py
|
Python
|
mit
| 1,447 | 0.032481 |
from dungeon.dungeon import Dungeon
def main():
testdungeon = Dungeon('level1.txt')
print(testdungeon)
if __name__ == '__main__':
main()
|
int3l/dungeons-and-pythons
|
main.py
|
Python
|
mit
| 155 | 0.019355 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from getpass import getpass
from datetime import datetime
import pprint
from docopt import docopt
import requests
from .spelling import spellchecker
from .dispatch import dispatch
from .utils import login, validate_username
from .exceptions import ConnectionErrorException
PY3 = sys.version > '3'
if PY3:
pass
else:
input = raw_input
if sys.version < '3':
from urlparse import urljoin
else:
from urllib.parse import urljoin
GITHUB_USERS = 'https://api.github.com/users/'
def parse_respect_args(args):
'''
Respect
Usage:
respect <username> [--repos=<rep>] [--followers=<foll>] [--language=<lang>]
respect <username> bio
respect <username> stars [--verbose]
respect <username> repos [--verbose] [--language=<lang>]
respect -h | --help
Options:
-h, --help Shows this help information.
-v, --verbose Prints detailed information.
-r <rep> --repos <rep> Number of repositories [default: ].
-f <foll> --followers <foll> Number of followers [default: ].
-l <lang> --language <lang> Language name [default: ].
'''
args = docopt(parse_respect_args.__doc__, argv=args)
return args
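# Illustrative example: parse_respect_args(['octocat', '--repos', '10']) returns
# a dict where args['<username>'] == 'octocat' and args['--repos'] == '10'.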
def main():
"""
Main entry point for the `respect` command.
"""
args = parse_respect_args(sys.argv[1:])
if validate_username(args['<username>']):
print("processing...")
else:
print("@"+args['<username>'], "is not a valid username.")
print("Username may only contain alphanumeric ASCII characters or "
"dashes and cannot begin with a dash.")
return
try:
r = requests.get(urljoin(GITHUB_USERS, args['<username>']))
    except ConnectionErrorException as e:
        print('Connection Error from requests. Request again, please.')
        print(e)
        return  # `r` is unbound after a connection error, so stop here
if r.status_code == 404 or r.status_code == 403:
session = login(401, args=args)
return dispatch(args, r, session)
elif r.status_code == 200:
return dispatch(args, response=r)
else:
raise UnknownStausCodeException
if __name__ == '__main__':
main()
|
oubiga/respect
|
respect/main.py
|
Python
|
bsd-3-clause
| 2,305 | 0.000434 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from .base import TrackStatsHookBase
class ModelHookManager:
"""
    This class registers and manages a set of hooks subclassed from
`TrackStatsHookBase`. The given hook is registered on all modules within
'named_modules'.
Tracking is started and stopped for all hooks via `self.start_tracking()` and
    `self.stop_tracking()`. Alternatively, this class can be used as a context manager to
automate these calls. For example,
```
with hook_manager as hooks:
... # Train here
        stats = hooks.get_statistics()
```
:param named_modules: dict mapping names to modules
:param hook_class: class subclassed from `TrackStatsHookBase`
:param hook_type: whether to register the hook as "forward" or "backward"
or "pre_forward"
:param hook_args: either a dictionary of args to pass to hook, or a function that
takes a name and module as inputs and then outputs a dictionary of
arguments to pass to the hook
"""
def __init__(
self,
named_modules,
hook_class,
hook_type="forward",
hook_args=None,
):
assert hook_type in ["forward", "backward", "pre_forward"]
assert issubclass(hook_class, TrackStatsHookBase)
# Register the hooks via class method.
tracked_vals = self.register_storage_hooks(named_modules,
hook_class=hook_class,
hook_type=hook_type,
hook_args=hook_args)
# These are the functions that called every forward or backward pass.
self.hooks = tracked_vals[0]
# These are handles to the hooks; PyTorch lets the user unregister
# hooks through these handles.
self._hook_handles = tracked_vals[1]
# These are the filtered modules that will be tracked.
self.tracked_modules = tracked_vals[2]
# Keep track of whether tracking is on.
self._tracking = False
@property
def tracking(self):
return self._tracking
def __enter__(self):
"""Start tracking when `with` is called."""
self.start_tracking()
return self
def __exit__(self, *args):
"""Stop tracking when `with` block is left."""
self.stop_tracking()
@classmethod
def register_storage_hooks(
cls,
named_modules,
hook_class,
hook_type="forward",
hook_args=None,
):
"""
Register hook on each module in 'named_modules'.
:param named_modules: dict mapping names to modules
:param hook_class: class subclassed from `TrackStatsHookBase`
:param hook_type: whether to register the hook as "forward" or "backward"
or "pre_forward"
:param hook_args: either a dictionary of args to pass to hook, or a function
that takes a name and module as inputs and then outputs a
dictionary of arguments to pass to the hook
"""
assert hook_type in ["forward", "backward", "pre_forward"]
hooks = []
handles = []
tracked_modules = dict()
# Register hooks on the modules.
for n, m in named_modules.items():
if callable(hook_args):
args = hook_args(n, m)
else:
args = hook_args or {}
hook = hook_class(name=n, **args)
if hook_type == "forward":
handle = m.register_forward_hook(hook)
elif hook_type == "pre_forward":
handle = m.register_forward_pre_hook(hook)
else:
handle = m.register_backward_hook(hook)
hooks.append(hook)
handles.append(handle)
tracked_modules[n] = m
return hooks, handles, tracked_modules
def start_tracking(self):
self._tracking = True
for hook in self.hooks:
hook.start_tracking()
def stop_tracking(self):
self._tracking = False
for hook in self.hooks:
hook.stop_tracking()
def get_statistics(self):
"""
This returns a generator with elements
`(name, module, statistic_0, ..., statistic_n)`.
"""
return (
(name, module, *hook.get_statistics())
for (name, module), hook in zip(self.tracked_modules.items(), self.hooks)
)
def remove_hooks(self):
"""
Remove all hooks from the model and stop tracking statistics.
"""
for handle in self._hook_handles:
handle.remove()
self.hooks = []
self._hook_handles = []
self.tracked_modules = dict()
|
mrcslws/nupic.research
|
src/nupic/research/frameworks/pytorch/hooks/hook_manager.py
|
Python
|
agpl-3.0
| 5,818 | 0.001375 |
# -*- encoding: utf-8 -*-
__author__ = 'pp'
__date__ = '6/25/14'
"""
georest.view.utils
~~~~~~~~~~~~~~~~~
helper/mixin things for views
"""
import sys
from functools import wraps
from flask import request
from .exceptions import InvalidRequest
from ..geo import GeoException
def get_json_content():
    """check content type and return raw text instead of json data"""
if request.mimetype != 'application/json':
raise InvalidRequest('Only "application/json" supported')
try:
data = request.data.decode('utf-8')
# data = request.get_data().decode('utf-8')
except UnicodeError:
raise InvalidRequest('Cannot decode content with utf-8')
return data
def get_if_match():
"""get if_match etag from request"""
etag = None
if request.if_match and not request.if_match.star_tag:
try:
etag, = request.if_match.as_set() # only 1 allowed
except ValueError:
raise InvalidRequest('Cannot process if_match %s' % \
request.if_match)
return etag
def catcher(f):
"""catching uncatched errors, and filling the traceback"""
@wraps(f)
def decorator(*args, **kwargs):
try:
return f(*args, **kwargs)
except GeoException as e:
if not e.traceback:
e.traceback = sys.exc_info()[2]
raise
return decorator
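# Example sketch (not part of the original module): `catcher` wraps callables
# that may raise GeoException so the original traceback is preserved for
# re-raising.  The wrapped function and its arguments are illustrative only.
@catcher
def load_feature(storage, key):
    # stands in for any call that can raise a GeoException subclass
    return storage.get_feature(key)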
|
Kotaimen/georest
|
georest/view/utils.py
|
Python
|
bsd-2-clause
| 1,423 | 0.000703 |
import sys
from sim import Sim
from node import Node
from link import Link
from transport import Transport
from tcp import TCP
from network import Network
import optparse
import os
import subprocess
class AppHandler(object):
def __init__(self,filename, directory):
self.filename = filename
self.directory = directory
if not os.path.exists(self.directory):
os.makedirs(self.directory)
self.f = open("%s/%s" % (self.directory,self.filename),'w')
def receive_data(self,data):
Sim.trace('AppHandler',"application got %d bytes" % (len(data)))
self.f.write(data)
self.f.flush()
class Main(object):
def __init__(self):
self.iterations = 1 #set from flags
self.out_directory = '../output/received'
self.in_directory = '../data'
self.parse_options()
print self.filename
# self.total = 0.0;
for i in range(0, self.iterations):
self.run()
self.diff()
# for windowSize in [1000]:#, 2000, 5000, 10000, 15000, 20000]:
# print "--Results with window size " + str(windowSize)
#self.window = windowSize
# self.run()
# print "Average over " + str(iterations) + " iterations: " + str(self.total / float(iterations))
def parse_options(self):
parser = optparse.OptionParser(usage = "%prog [options]",
version = "%prog 0.1")
parser.add_option("-f","--filename",type="str",dest="filename",
default='test.txt',
help="filename to send")
parser.add_option("-l","--loss",type="float",dest="loss",
default=0.0,
help="random loss rate")
parser.add_option("-w","--window",type="int",dest="window",
default=1000,
help="transmission window size")
parser.add_option("-i","--iterations",type="int",dest="iterations",
default=1,
help="number of iterations to run")
(options,args) = parser.parse_args()
self.filename = options.filename
self.loss = options.loss
self.window = options.window
self.iterations = options.iterations
def diff(self):
args = ['diff','-u',self.in_directory + '/' + self.filename,self.out_directory+'/'+self.filename]
result = subprocess.Popen(args,stdout = subprocess.PIPE).communicate()[0]
print
if not result:
print "File transfer correct!"
else:
print "File transfer failed. Here is the diff:"
print
print result
sys.exit()
def run(self):
# parameters
Sim.scheduler.reset()
Sim.set_debug('AppHandler')
Sim.set_debug('TCP')
# setup network
net = Network('../networks/setup.txt')
net.loss(self.loss)
# setup routes
n1 = net.get_node('n1')
n2 = net.get_node('n2')
n1.add_forwarding_entry(address=n2.get_address('n1'),link=n1.links[0])
n2.add_forwarding_entry(address=n1.get_address('n2'),link=n2.links[0])
# setup transport
t1 = Transport(n1)
t2 = Transport(n2)
# setup application
a = AppHandler(self.filename, self.out_directory)
# setup connection
c1 = TCP(t1,n1.get_address('n2'),1,n2.get_address('n1'),1,a,window=self.window)
c2 = TCP(t2,n2.get_address('n1'),1,n1.get_address('n2'),1,a,window=self.window)
# send a file
with open(self.in_directory + '/' + self.filename,'r') as f:
while True:
data = f.read(10000)
if not data:
break
Sim.scheduler.add(delay=0, event=data, handler=c1.send)
# run the simulation
Sim.scheduler.run()
# print str(self.window) + " & " + \
# str(Sim.scheduler.current_time()) + " & " + \
# str(4116160.0 / float(Sim.scheduler.current_time())) + " & " + \
# str(c2.totalQueueingDelay / float(c1.totalPacketsSent)) + " \\\\"
# print str(self.window) + "," + str(4116160.0 / float(Sim.scheduler.current_time()))
print str(self.window) + "," + str(c2.totalQueueingDelay / float(c1.totalPacketsSent))
# print "Ave Queueing Delay: " + str(c2.totalQueueingDelay / float(c1.totalPacketsSent))
# print "Throughput: " + str(4116160.0 / float(Sim.scheduler.current_time()))
# self.total += Sim.scheduler.current_time()
if __name__ == '__main__':
m = Main()
|
joe-eklund/cs460
|
bene/lab2/src/transfer.py
|
Python
|
gpl-3.0
| 4,724 | 0.013124 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import struct
import array
import string
import re
from google.pyglib.gexcept import AbstractMethod
import httplib
__all__ = ['ProtocolMessage', 'Encoder', 'Decoder',
'ProtocolBufferDecodeError',
'ProtocolBufferEncodeError',
'ProtocolBufferReturnError']
URL_RE = re.compile('^(https?)://([^/]+)(/.*)$')
class ProtocolMessage:
def __init__(self, contents=None):
raise AbstractMethod
def Clear(self):
raise AbstractMethod
def IsInitialized(self, debug_strs=None):
raise AbstractMethod
def Encode(self):
try:
return self._CEncode()
except AbstractMethod:
e = Encoder()
self.Output(e)
return e.buffer().tostring()
def SerializeToString(self):
return self.Encode()
def SerializePartialToString(self):
try:
return self._CEncodePartial()
except (AbstractMethod, AttributeError):
e = Encoder()
self.OutputPartial(e)
return e.buffer().tostring()
def _CEncode(self):
raise AbstractMethod
def _CEncodePartial(self):
raise AbstractMethod
def ParseFromString(self, s):
self.Clear()
self.MergeFromString(s)
def ParsePartialFromString(self, s):
self.Clear()
self.MergePartialFromString(s)
def MergeFromString(self, s):
self.MergePartialFromString(s)
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferDecodeError, '\n\t'.join(dbg)
def MergePartialFromString(self, s):
try:
self._CMergeFromString(s)
except AbstractMethod:
a = array.array('B')
a.fromstring(s)
d = Decoder(a, 0, len(a))
self.TryMerge(d)
def _CMergeFromString(self, s):
raise AbstractMethod
def __getstate__(self):
return self.Encode()
def __setstate__(self, contents_):
self.__init__(contents=contents_)
def sendCommand(self, server, url, response, follow_redirects=1,
secure=0, keyfile=None, certfile=None):
data = self.Encode()
if secure:
if keyfile and certfile:
conn = httplib.HTTPSConnection(server, key_file=keyfile,
cert_file=certfile)
else:
conn = httplib.HTTPSConnection(server)
else:
conn = httplib.HTTPConnection(server)
conn.putrequest("POST", url)
conn.putheader("Content-Length", "%d" %len(data))
conn.endheaders()
conn.send(data)
resp = conn.getresponse()
if follow_redirects > 0 and resp.status == 302:
m = URL_RE.match(resp.getheader('Location'))
if m:
protocol, server, url = m.groups()
return self.sendCommand(server, url, response,
follow_redirects=follow_redirects - 1,
secure=(protocol == 'https'),
keyfile=keyfile,
certfile=certfile)
if resp.status != 200:
raise ProtocolBufferReturnError(resp.status)
if response is not None:
response.ParseFromString(resp.read())
return response
def sendSecureCommand(self, server, keyfile, certfile, url, response,
follow_redirects=1):
return self.sendCommand(server, url, response,
follow_redirects=follow_redirects,
secure=1, keyfile=keyfile, certfile=certfile)
def __str__(self, prefix="", printElemNumber=0):
raise AbstractMethod
def ToASCII(self):
return self._CToASCII(ProtocolMessage._SYMBOLIC_FULL_ASCII)
def ToCompactASCII(self):
return self._CToASCII(ProtocolMessage._NUMERIC_ASCII)
def ToShortASCII(self):
return self._CToASCII(ProtocolMessage._SYMBOLIC_SHORT_ASCII)
_NUMERIC_ASCII = 0
_SYMBOLIC_SHORT_ASCII = 1
_SYMBOLIC_FULL_ASCII = 2
def _CToASCII(self, output_format):
raise AbstractMethod
def ParseASCII(self, ascii_string):
raise AbstractMethod
def ParseASCIIIgnoreUnknown(self, ascii_string):
raise AbstractMethod
def Equals(self, other):
raise AbstractMethod
def __eq__(self, other):
if other.__class__ is self.__class__:
return self.Equals(other)
return NotImplemented
def __ne__(self, other):
if other.__class__ is self.__class__:
return not self.Equals(other)
return NotImplemented
def Output(self, e):
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferEncodeError, '\n\t'.join(dbg)
self.OutputUnchecked(e)
return
def OutputUnchecked(self, e):
raise AbstractMethod
def OutputPartial(self, e):
raise AbstractMethod
def Parse(self, d):
self.Clear()
self.Merge(d)
return
def Merge(self, d):
self.TryMerge(d)
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferDecodeError, '\n\t'.join(dbg)
return
def TryMerge(self, d):
raise AbstractMethod
def CopyFrom(self, pb):
if (pb == self): return
self.Clear()
self.MergeFrom(pb)
def MergeFrom(self, pb):
raise AbstractMethod
def lengthVarInt32(self, n):
return self.lengthVarInt64(n)
def lengthVarInt64(self, n):
if n < 0:
return 10
result = 0
while 1:
result += 1
n >>= 7
if n == 0:
break
return result
def lengthString(self, n):
return self.lengthVarInt32(n) + n
def DebugFormat(self, value):
return "%s" % value
def DebugFormatInt32(self, value):
if (value <= -2000000000 or value >= 2000000000):
return self.DebugFormatFixed32(value)
return "%d" % value
def DebugFormatInt64(self, value):
if (value <= -20000000000000 or value >= 20000000000000):
return self.DebugFormatFixed64(value)
return "%d" % value
def DebugFormatString(self, value):
def escape(c):
o = ord(c)
if o == 10: return r"\n"
if o == 39: return r"\'"
if o == 34: return r'\"'
if o == 92: return r"\\"
if o >= 127 or o < 32: return "\\%03o" % o
return c
return '"' + "".join([escape(c) for c in value]) + '"'
def DebugFormatFloat(self, value):
return "%ff" % value
def DebugFormatFixed32(self, value):
if (value < 0): value += (1L<<32)
return "0x%x" % value
def DebugFormatFixed64(self, value):
if (value < 0): value += (1L<<64)
return "0x%x" % value
def DebugFormatBool(self, value):
if value:
return "true"
else:
return "false"
class Encoder:
NUMERIC = 0
DOUBLE = 1
STRING = 2
STARTGROUP = 3
ENDGROUP = 4
FLOAT = 5
MAX_TYPE = 6
def __init__(self):
self.buf = array.array('B')
return
def buffer(self):
return self.buf
def put8(self, v):
if v < 0 or v >= (1<<8): raise ProtocolBufferEncodeError, "u8 too big"
self.buf.append(v & 255)
return
def put16(self, v):
if v < 0 or v >= (1<<16): raise ProtocolBufferEncodeError, "u16 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
return
def put32(self, v):
if v < 0 or v >= (1L<<32): raise ProtocolBufferEncodeError, "u32 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
self.buf.append((v >> 16) & 255)
self.buf.append((v >> 24) & 255)
return
def put64(self, v):
if v < 0 or v >= (1L<<64): raise ProtocolBufferEncodeError, "u64 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
self.buf.append((v >> 16) & 255)
self.buf.append((v >> 24) & 255)
self.buf.append((v >> 32) & 255)
self.buf.append((v >> 40) & 255)
self.buf.append((v >> 48) & 255)
self.buf.append((v >> 56) & 255)
return
def putVarInt32(self, v):
buf_append = self.buf.append
if v & 127 == v:
buf_append(v)
return
if v >= 0x80000000 or v < -0x80000000:
raise ProtocolBufferEncodeError, "int32 too big"
if v < 0:
v += 0x10000000000000000
while True:
bits = v & 127
v >>= 7
if v:
bits |= 128
buf_append(bits)
if not v:
break
return
def putVarInt64(self, v):
buf_append = self.buf.append
if v >= 0x8000000000000000 or v < -0x8000000000000000:
raise ProtocolBufferEncodeError, "int64 too big"
if v < 0:
v += 0x10000000000000000
while True:
bits = v & 127
v >>= 7
if v:
bits |= 128
buf_append(bits)
if not v:
break
return
def putVarUint64(self, v):
buf_append = self.buf.append
if v < 0 or v >= 0x10000000000000000:
raise ProtocolBufferEncodeError, "uint64 too big"
while True:
bits = v & 127
v >>= 7
if v:
bits |= 128
buf_append(bits)
if not v:
break
return
def putFloat(self, v):
a = array.array('B')
a.fromstring(struct.pack("<f", v))
self.buf.extend(a)
return
def putDouble(self, v):
a = array.array('B')
a.fromstring(struct.pack("<d", v))
self.buf.extend(a)
return
def putBoolean(self, v):
if v:
self.buf.append(1)
else:
self.buf.append(0)
return
def putPrefixedString(self, v):
v = str(v)
self.putVarInt32(len(v))
self.buf.fromstring(v)
return
def putRawString(self, v):
self.buf.fromstring(v)
class Decoder:
def __init__(self, buf, idx, limit):
self.buf = buf
self.idx = idx
self.limit = limit
return
def avail(self):
return self.limit - self.idx
def buffer(self):
return self.buf
def pos(self):
return self.idx
def skip(self, n):
if self.idx + n > self.limit: raise ProtocolBufferDecodeError, "truncated"
self.idx += n
return
def skipData(self, tag):
t = tag & 7
if t == Encoder.NUMERIC:
self.getVarInt64()
elif t == Encoder.DOUBLE:
self.skip(8)
elif t == Encoder.STRING:
n = self.getVarInt32()
self.skip(n)
elif t == Encoder.STARTGROUP:
while 1:
t = self.getVarInt32()
if (t & 7) == Encoder.ENDGROUP:
break
else:
self.skipData(t)
if (t - Encoder.ENDGROUP) != (tag - Encoder.STARTGROUP):
raise ProtocolBufferDecodeError, "corrupted"
elif t == Encoder.ENDGROUP:
raise ProtocolBufferDecodeError, "corrupted"
elif t == Encoder.FLOAT:
self.skip(4)
else:
raise ProtocolBufferDecodeError, "corrupted"
def get8(self):
if self.idx >= self.limit: raise ProtocolBufferDecodeError, "truncated"
c = self.buf[self.idx]
self.idx += 1
return c
def get16(self):
if self.idx + 2 > self.limit: raise ProtocolBufferDecodeError, "truncated"
c = self.buf[self.idx]
d = self.buf[self.idx + 1]
self.idx += 2
return (d << 8) | c
def get32(self):
if self.idx + 4 > self.limit: raise ProtocolBufferDecodeError, "truncated"
c = self.buf[self.idx]
d = self.buf[self.idx + 1]
e = self.buf[self.idx + 2]
f = long(self.buf[self.idx + 3])
self.idx += 4
return (f << 24) | (e << 16) | (d << 8) | c
def get64(self):
if self.idx + 8 > self.limit: raise ProtocolBufferDecodeError, "truncated"
c = self.buf[self.idx]
d = self.buf[self.idx + 1]
e = self.buf[self.idx + 2]
f = long(self.buf[self.idx + 3])
g = long(self.buf[self.idx + 4])
h = long(self.buf[self.idx + 5])
i = long(self.buf[self.idx + 6])
j = long(self.buf[self.idx + 7])
self.idx += 8
return ((j << 56) | (i << 48) | (h << 40) | (g << 32) | (f << 24)
| (e << 16) | (d << 8) | c)
def getVarInt32(self):
b = self.get8()
if not (b & 128):
return b
result = long(0)
shift = 0
while 1:
result |= (long(b & 127) << shift)
shift += 7
if not (b & 128):
if result >= 0x10000000000000000L:
raise ProtocolBufferDecodeError, "corrupted"
break
if shift >= 64: raise ProtocolBufferDecodeError, "corrupted"
b = self.get8()
if result >= 0x8000000000000000L:
result -= 0x10000000000000000L
if result >= 0x80000000L or result < -0x80000000L:
raise ProtocolBufferDecodeError, "corrupted"
return result
def getVarInt64(self):
result = self.getVarUint64()
if result >= (1L << 63):
result -= (1L << 64)
return result
def getVarUint64(self):
result = long(0)
shift = 0
while 1:
if shift >= 64: raise ProtocolBufferDecodeError, "corrupted"
b = self.get8()
result |= (long(b & 127) << shift)
shift += 7
if not (b & 128):
if result >= (1L << 64): raise ProtocolBufferDecodeError, "corrupted"
return result
return result
def getFloat(self):
if self.idx + 4 > self.limit: raise ProtocolBufferDecodeError, "truncated"
a = self.buf[self.idx:self.idx+4]
self.idx += 4
return struct.unpack("<f", a)[0]
def getDouble(self):
if self.idx + 8 > self.limit: raise ProtocolBufferDecodeError, "truncated"
a = self.buf[self.idx:self.idx+8]
self.idx += 8
return struct.unpack("<d", a)[0]
def getBoolean(self):
b = self.get8()
if b != 0 and b != 1: raise ProtocolBufferDecodeError, "corrupted"
return b
def getPrefixedString(self):
length = self.getVarInt32()
if self.idx + length > self.limit:
raise ProtocolBufferDecodeError, "truncated"
r = self.buf[self.idx : self.idx + length]
self.idx += length
return r.tostring()
def getRawString(self):
r = self.buf[self.idx:self.limit]
self.idx = self.limit
return r.tostring()
class ProtocolBufferDecodeError(Exception): pass
class ProtocolBufferEncodeError(Exception): pass
class ProtocolBufferReturnError(Exception): pass
|
SRabbelier/Melange
|
thirdparty/google_appengine/google/net/proto/ProtocolBuffer.py
|
Python
|
apache-2.0
| 14,205 | 0.017247 |
"""
Copyright (C) 2017, 申瑞珉 (Ruimin Shen)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import time
import shutil
import argparse
import configparser
import operator
import itertools
import struct
import numpy as np
import pandas as pd
import tensorflow as tf
import model.yolo2.inference as inference
import utils
def transpose_weights(weights, num_anchors):
ksize1, ksize2, channels_in, _ = weights.shape
weights = weights.reshape([ksize1, ksize2, channels_in, num_anchors, -1])
coords = weights[:, :, :, :, 0:4]
iou = np.expand_dims(weights[:, :, :, :, 4], -1)
classes = weights[:, :, :, :, 5:]
return np.concatenate([iou, coords, classes], -1).reshape([ksize1, ksize2, channels_in, -1])
def transpose_biases(biases, num_anchors):
biases = biases.reshape([num_anchors, -1])
coords = biases[:, 0:4]
iou = np.expand_dims(biases[:, 4], -1)
classes = biases[:, 5:]
return np.concatenate([iou, coords, classes], -1).reshape([-1])
def transpose(sess, layer, num_anchors):
v = next(filter(lambda v: v.op.name.endswith('weights'), layer))
sess.run(v.assign(transpose_weights(sess.run(v), num_anchors)))
v = next(filter(lambda v: v.op.name.endswith('biases'), layer))
sess.run(v.assign(transpose_biases(sess.run(v), num_anchors)))
def main():
model = config.get('config', 'model')
cachedir = utils.get_cachedir(config)
with open(os.path.join(cachedir, 'names'), 'r') as f:
names = [line.strip() for line in f]
width, height = np.array(utils.get_downsampling(config)) * 13
anchors = pd.read_csv(os.path.expanduser(os.path.expandvars(config.get(model, 'anchors'))), sep='\t').values
func = getattr(inference, config.get(model, 'inference'))
with tf.Session() as sess:
image = tf.placeholder(tf.float32, [1, height, width, 3], name='image')
func(image, len(names), len(anchors))
tf.contrib.framework.get_or_create_global_step()
tf.global_variables_initializer().run()
prog = re.compile(r'[_\w\d]+\/conv(\d*)\/(weights|biases|(BatchNorm\/(gamma|beta|moving_mean|moving_variance)))$')
variables = [(prog.match(v.op.name).group(1), v) for v in tf.global_variables() if prog.match(v.op.name)]
variables = sorted([[int(k) if k else -1, [v for _, v in g]] for k, g in itertools.groupby(variables, operator.itemgetter(0))], key=operator.itemgetter(0))
assert variables[0][0] == -1
variables[0][0] = len(variables) - 1
variables.insert(len(variables), variables.pop(0))
with tf.name_scope('assign'):
with open(os.path.expanduser(os.path.expandvars(args.file)), 'rb') as f:
major, minor, revision, seen = struct.unpack('4i', f.read(16))
tf.logging.info('major=%d, minor=%d, revision=%d, seen=%d' % (major, minor, revision, seen))
for i, layer in variables:
tf.logging.info('processing layer %d' % i)
total = 0
for suffix in ['biases', 'beta', 'gamma', 'moving_mean', 'moving_variance', 'weights']:
try:
v = next(filter(lambda v: v.op.name.endswith(suffix), layer))
except StopIteration:
continue
shape = v.get_shape().as_list()
cnt = np.multiply.reduce(shape)
total += cnt
tf.logging.info('%s: %s=%d' % (v.op.name, str(shape), cnt))
p = struct.unpack('%df' % cnt, f.read(4 * cnt))
if suffix == 'weights':
ksize1, ksize2, channels_in, channels_out = shape
p = np.reshape(p, [channels_out, channels_in, ksize1, ksize2]) # Darknet format
p = np.transpose(p, [2, 3, 1, 0]) # TensorFlow format (ksize1, ksize2, channels_in, channels_out)
sess.run(v.assign(p))
tf.logging.info('%d parameters assigned' % total)
remaining = os.fstat(f.fileno()).st_size - f.tell()
transpose(sess, layer, len(anchors))
saver = tf.train.Saver()
logdir = utils.get_logdir(config)
if args.delete:
tf.logging.warn('delete logging directory: ' + logdir)
shutil.rmtree(logdir, ignore_errors=True)
os.makedirs(logdir, exist_ok=True)
model_path = os.path.join(logdir, 'model.ckpt')
tf.logging.info('save model into ' + model_path)
saver.save(sess, model_path)
if args.summary:
path = os.path.join(logdir, args.logname)
summary_writer = tf.summary.FileWriter(path)
summary_writer.add_graph(sess.graph)
tf.logging.info('tensorboard --logdir ' + logdir)
if remaining > 0:
tf.logging.warn('%d bytes remaining' % remaining)
def make_args():
parser = argparse.ArgumentParser()
parser.add_argument('file', help='Darknet .weights file')
parser.add_argument('-c', '--config', nargs='+', default=['config.ini'], help='config file')
parser.add_argument('-d', '--delete', action='store_true', help='delete logdir')
parser.add_argument('-s', '--summary', action='store_true')
parser.add_argument('--logname', default=time.strftime('%Y-%m-%d_%H-%M-%S'), help='the name of TensorBoard log')
parser.add_argument('--level', default='info', help='logging level')
return parser.parse_args()
if __name__ == '__main__':
args = make_args()
config = configparser.ConfigParser()
utils.load_config(config, args.config)
if args.level:
tf.logging.set_verbosity(args.level.upper())
main()
|
ruiminshen/yolo-tf
|
parse_darknet_yolo2.py
|
Python
|
lgpl-3.0
| 6,342 | 0.002841 |
model_search = "http://api.nytimes.com/svc/search/v2/" + \
"articlesearch.response-format?" + \
"[q=search term&" + \
"fq=filter-field:(filter-term)&additional-params=values]" + \
"&api-key=9key"
"""http://api.nytimes.com/svc/search/v2/articlesearch.json?q=terrorism+OR+terrorist
&begin_date=19900102&end_date=19900103&sort=newest&api-key=
key"""
search = "http://api.nytimes.com/svc/search/v2/" + \
"articlesearch.json?" + \
"[q=terror]" + \
"&api-key=key"
precise_search = "http://api.nytimes.com/svc/search/v2/" + \
"articlesearch.json"
terms = "?q=terrorism+OR+terrorist"
api = "&api-key=key"
print(precise_search+terms+api)
"""
Aggressive for-looping in order to overcome the ten-article limit: instead, search each key word per day, then concat the JSONs into a nice pandas dataframe, and eventually a csv.
"""
months_list = ["%.2d" % i for i in range(1,2)]
days_list = ["%.2d" % i for i in range(1,32)]
json_files = []
print(months_list)
for x in months_list:
month_s = x
month_e = x
for y in days_list:
day_s = y
day_e = str(int(y)+1).zfill(2)
year_s = "1990"
year_e = "1990"
start = year_s + month_s + day_s
end = year_e + month_e + day_e
dates = "&begin_date="+start+"&end_date="+end+"&sort=newest"
#print(start + " "+end + "\n" +dates)
r = requests.get(precise_search+terms+dates+api)
original_json = json.loads(r.text)
response_json = original_json['response']
json_file = response_json['docs']
json_files.append(json_file)
frames = []
for x in json_files:
df = pd.DataFrame.from_dict(x)
frames.append(df)
#print(frames)
result = pd.concat(frames)
result
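# The docstring above says the frames should "eventually" become a csv; a
# minimal sketch of that last step (the output filename is an assumption, not
# taken from the original script):
result.to_csv('nytimes_terrorism_1990.csv', index=False)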
|
polypmer/scrape
|
new-york-times/nytimes-scrape.py
|
Python
|
mit
| 1,833 | 0.004364 |
import os
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag()
def custom_css():
theme_path = os.path.join(
settings.MEDIA_ROOT,
"overrides.css"
)
if os.path.exists(theme_path):
return mark_safe(
'<link rel="stylesheet" type="text/css" href="{}" />'.format(
os.path.join(settings.MEDIA_URL, "overrides.css")
)
)
return ""
@register.simple_tag()
def custom_js():
theme_path = os.path.join(
settings.MEDIA_ROOT,
"overrides.js"
)
if os.path.exists(theme_path):
return mark_safe(
'<script src="{}"></script>'.format(
os.path.join(settings.MEDIA_URL, "overrides.js")
)
)
return ""
|
danielquinn/paperless
|
src/documents/templatetags/customisation.py
|
Python
|
gpl-3.0
| 865 | 0 |
import numpy as np
import scipy.stats as stats
import sys
# lib is our own library, written for this assignment
import lib.naive_bayes as nb
import lib.preprocessing as prep
import lib.validation as valid
import lib.normalization as normal
from config.constants import *
def case3(output=True):
accuracy_in_each_turn = list()
precision_in_each_turn_spam = list()
recall_in_each_turn_spam = list()
precision_in_each_turn_ham = list()
recall_in_each_turn_ham = list()
m = np.loadtxt(open("resources/normalized_data.csv","rb"),delimiter=',')
shuffled = np.random.permutation(m)
valid.validate_cross_validation(NUMBER_OF_ROUNDS,TRAIN_TEST_RATIO)
# equiprobable priors
prior_spam = 0.5
prior_ham = 0.5
for i in xrange(NUMBER_OF_ROUNDS):
# we're using cross-validation so each iteration we take a different
# slice of the data to serve as test set
train_set,test_set = prep.split_sets(shuffled,TRAIN_TEST_RATIO,i)
#parameter estimation
#but now we take ALL attributes into consideration
sample_means_word_spam = list()
sample_means_word_ham = list()
sample_variances_word_spam = list()
sample_variances_word_ham = list()
# all but the last one
for attr_index in xrange(57):
sample_means_word_spam.append(nb.take_mean_spam(train_set,attr_index,SPAM_ATTR_INDEX))
sample_means_word_ham.append(nb.take_mean_ham(train_set,attr_index,SPAM_ATTR_INDEX))
sample_variances_word_spam.append(nb.take_variance_spam(train_set,attr_index,SPAM_ATTR_INDEX))
sample_variances_word_ham.append(nb.take_variance_ham(train_set,attr_index,SPAM_ATTR_INDEX))
#sample standard deviations from sample variances
sample_std_devs_spam = map(lambda x: x ** (1/2.0), sample_variances_word_spam)
sample_std_devs_ham = map(lambda x: x ** (1/2.0), sample_variances_word_ham)
hits = 0.0
misses = 0.0
#number of instances correctly evaluated as spam
correctly_is_spam = 0.0
#total number of spam instances
is_spam = 0.0
#total number of instances evaluated as spam
guessed_spam = 0.0
#number of instances correctly evaluated as ham
correctly_is_ham = 0.0
#total number of ham instances
is_ham = 0.0
#total number of instances evaluated as ham
guessed_ham = 0.0
# now we test the hypothesis against the test set
for row in test_set:
            # i.e. the product of all the conditional probabilities of the words given the class
            # (it looks dense, but it is just the naive Bayes likelihood folded into a single reduce)
            # Case 3 uses all 57 attributes, so iterate over every attribute index directly.
            product_of_all_conditional_probs_spam = reduce(lambda acc, cur: acc * stats.norm(sample_means_word_spam[cur], sample_std_devs_spam[cur]).pdf(row[cur]), xrange(57), 1)
            # no need to divide by the normalization term, since we only want to know which posterior is larger
            posterior_spam = prior_spam * product_of_all_conditional_probs_spam
            product_of_all_conditional_probs_ham = reduce(lambda acc, cur: acc * stats.norm(sample_means_word_ham[cur], sample_std_devs_ham[cur]).pdf(row[cur]), xrange(57), 1)
            posterior_ham = prior_ham * product_of_all_conditional_probs_ham
# whichever is greater - that will be our prediction
if posterior_spam > posterior_ham:
guess = 1
else:
guess = 0
if(row[SPAM_ATTR_INDEX] == guess):
hits += 1
else:
misses += 1
# we'll use these to calculate metrics
if (row[SPAM_ATTR_INDEX] == 1 ):
is_spam += 1
if guess == 1:
guessed_spam += 1
correctly_is_spam += 1
else:
guessed_ham += 1
else:
is_ham += 1
if guess == 1:
guessed_spam += 1
else:
guessed_ham += 1
correctly_is_ham += 1
#accuracy = number of correctly evaluated instances/
# number of instances
#
#
accuracy = hits/(hits+misses)
#precision_spam = number of correctly evaluated instances as spam/
# number of spam instances
#
#
# in order to avoid divisions by zero in case nothing was found
if(is_spam == 0):
precision_spam = 0
else:
precision_spam = correctly_is_spam/is_spam
#recall_spam = number of correctly evaluated instances as spam/
        #               number of evaluated instances as spam
#
#
# in order to avoid divisions by zero in case nothing was found
if(guessed_spam == 0):
recall_spam = 0
else:
recall_spam = correctly_is_spam/guessed_spam
#precision_ham = number of correctly evaluated instances as ham/
# number of ham instances
#
#
# in order to avoid divisions by zero in case nothing was found
if(is_ham == 0):
precision_ham = 0
else:
precision_ham = correctly_is_ham/is_ham
#recall_ham = number of correctly evaluated instances as ham/
# number of evaluated instances como ham
        #               number of evaluated instances as ham
#
# in order to avoid divisions by zero in case nothing was found
if(guessed_ham == 0):
recall_ham = 0
else:
recall_ham = correctly_is_ham/guessed_ham
accuracy_in_each_turn.append(accuracy)
precision_in_each_turn_spam.append(precision_spam)
recall_in_each_turn_spam.append(recall_spam)
precision_in_each_turn_ham.append(precision_ham)
recall_in_each_turn_ham.append(recall_ham)
# calculation of means for each metric at the end
mean_accuracy = np.mean(accuracy_in_each_turn)
std_dev_accuracy = np.std(accuracy_in_each_turn)
variance_accuracy = np.var(accuracy_in_each_turn)
mean_precision_spam = np.mean(precision_in_each_turn_spam)
std_dev_precision_spam = np.std(precision_in_each_turn_spam)
variance_precision_spam = np.var(precision_in_each_turn_spam)
mean_recall_spam = np.mean(recall_in_each_turn_spam)
std_dev_recall_spam = np.std(recall_in_each_turn_spam)
variance_recall_spam = np.var(recall_in_each_turn_spam)
mean_precision_ham = np.mean(precision_in_each_turn_ham)
std_dev_precision_ham = np.std(precision_in_each_turn_ham)
variance_precision_ham = np.var(precision_in_each_turn_ham)
mean_recall_ham = np.mean(recall_in_each_turn_ham)
std_dev_recall_ham = np.std(recall_in_each_turn_ham)
variance_recall_ham = np.var(recall_in_each_turn_ham)
if output:
print "\033[1;32m"
print '============================================='
print 'CASE 3 - ALL ATTRIBUTES - USING NORMAL MODEL'
print '============================================='
print "\033[00m"
print 'MEAN ACCURACY: '+str(round(mean_accuracy,5))
print 'STD. DEV. OF ACCURACY: '+str(round(std_dev_accuracy,5))
print 'VARIANCE OF ACCURACY: '+str(round(variance_accuracy,8))
print ''
print 'MEAN PRECISION FOR SPAM: '+str(round(mean_precision_spam,5))
print 'STD. DEV. OF PRECISION FOR SPAM: '+str(round(std_dev_precision_spam,5))
print 'VARIANCE OF PRECISION FOR SPAM: '+str(round(variance_precision_spam,8))
print ''
print 'MEAN RECALL FOR SPAM: '+str(round(mean_recall_spam,5))
print 'STD. DEV. OF RECALL FOR SPAM: '+str(round(std_dev_recall_spam,5))
print 'VARIANCE OF RECALL FOR SPAM: '+str(round(variance_recall_spam,8))
print ''
print 'MEAN PRECISION FOR HAM: '+str(round(mean_precision_ham,5))
print 'STD. DEV. OF PRECISION FOR HAM: '+str(round(std_dev_precision_ham,5))
print 'VARIANCE OF PRECISION FOR HAM: '+str(round(variance_precision_ham,8))
print ''
print 'MEAN RECALL FOR HAM: '+str(round(mean_recall_ham,5))
print 'STD. DEV. OF RECALL FOR HAM: '+str(round(std_dev_recall_ham,5))
print 'VARIANCE OF RECALL FOR HAM: '+str(round(variance_recall_ham,8))
case3()
|
queirozfcom/spam-filter
|
case3_naive_normal.py
|
Python
|
mit
| 8,514 | 0.009396 |
#!/usr/bin/python
# Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rhsm_repository
short_description: Manage RHSM repositories using the subscription-manager command
description:
  - Manage (enable or disable) RHSM repositories in the Red Hat Subscription
    Management entitlement platform using the C(subscription-manager) command.
version_added: '2.5'
author: Giovanni Sciortino (@giovannisciortino)
notes:
- In order to manage RHSM repositories the system must be already registered
to RHSM manually or using the Ansible C(redhat_subscription) module.
requirements:
- subscription-manager
options:
  state:
    description:
      - Whether the repositories should be enabled (C(present)/C(enabled))
        or disabled (C(absent)/C(disabled)).
    choices: [present, enabled, absent, disabled]
    default: "enabled"
name:
description:
      - The ID of repositories to enable or disable.
- To operate on several repositories this can accept a comma separated
list or a YAML list.
required: True
'''
EXAMPLES = '''
- name: Enable a RHSM repository
rhsm_repository:
name: rhel-7-server-rpms
- name: Disable all RHSM repositories
rhsm_repository:
name: '*'
state: disabled
- name: Enable all repositories starting with rhel-6-server
rhsm_repository:
name: rhel-6-server*
state: enabled
- name: Disable all repositories except rhel-7-server-rpms
rhsm_repository:
name: "{{ item }}"
state: disabled
with_items: "{{
rhsm_repository.repositories |
map(attribute='id') |
difference(['rhel-7-server-rpms']) }}"
'''
RETURN = '''
repositories:
description:
- The list of RHSM repositories with their states.
- When this module is used to change the repositories states, this list contains the updated states after the changes.
returned: success
type: list
'''
import re
import os
from fnmatch import fnmatch
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
def run_subscription_manager(module, arguments):
    # Execute subscription-manager with arguments and handle common errors
rhsm_bin = module.get_bin_path('subscription-manager')
if not rhsm_bin:
module.fail_json(msg='The executable file subscription-manager was not found in PATH')
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env)
if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0):
module.fail_json(msg='The executable file subscription-manager must be run using root privileges')
elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
module.fail_json(msg='This system has no repositories available through subscriptions')
elif rc == 1:
module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
else:
return rc, out, err
def get_repository_list(module, list_parameter):
# Generate RHSM repository list and return a list of dict
if list_parameter == 'list_enabled':
rhsm_arguments = ['repos', '--list-enabled']
elif list_parameter == 'list_disabled':
rhsm_arguments = ['repos', '--list-disabled']
elif list_parameter == 'list':
rhsm_arguments = ['repos', '--list']
rc, out, err = run_subscription_manager(module, rhsm_arguments)
skip_lines = [
'+----------------------------------------------------------+',
' Available Repositories in /etc/yum.repos.d/redhat.repo'
]
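    # Each repository block in the command output is expected to look roughly
    # like the following (an illustration inferred from the regexes below, not
    # output captured from a live system):
    #   Repo ID:   rhel-7-server-rpms
    #   Repo Name: Red Hat Enterprise Linux 7 Server (RPMs)
    #   Repo URL:  https://cdn.redhat.com/content/dist/rhel/server/7/os
    #   Enabled:   1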
repo_id_re_str = r'Repo ID: (.*)'
repo_name_re_str = r'Repo Name: (.*)'
repo_url_re_str = r'Repo URL: (.*)'
repo_enabled_re_str = r'Enabled: (.*)'
repo_id = ''
repo_name = ''
repo_url = ''
repo_enabled = ''
repo_result = []
for line in out.split('\n'):
if line in skip_lines:
continue
repo_id_re = re.match(repo_id_re_str, line)
if repo_id_re:
repo_id = repo_id_re.group(1)
continue
repo_name_re = re.match(repo_name_re_str, line)
if repo_name_re:
repo_name = repo_name_re.group(1)
continue
repo_url_re = re.match(repo_url_re_str, line)
if repo_url_re:
repo_url = repo_url_re.group(1)
continue
repo_enabled_re = re.match(repo_enabled_re_str, line)
if repo_enabled_re:
repo_enabled = repo_enabled_re.group(1)
repo = {
"id": repo_id,
"name": repo_name,
"url": repo_url,
"enabled": True if repo_enabled == '1' else False
}
repo_result.append(repo)
return repo_result
def repository_modify(module, state, name):
name = set(name)
current_repo_list = get_repository_list(module, 'list')
updated_repo_list = deepcopy(current_repo_list)
matched_existing_repo = {}
for repoid in name:
matched_existing_repo[repoid] = []
for idx, repo in enumerate(current_repo_list):
if fnmatch(repo['id'], repoid):
matched_existing_repo[repoid].append(repo)
# Update current_repo_list to return it as result variable
updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False
changed = False
results = []
diff_before = ""
diff_after = ""
rhsm_arguments = ['repos']
for repoid in matched_existing_repo:
if len(matched_existing_repo[repoid]) == 0:
results.append("%s is not a valid repository ID" % repoid)
module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid)
for repo in matched_existing_repo[repoid]:
if state in ['disabled', 'absent']:
if repo['enabled']:
changed = True
diff_before += "Repository '%s' is enabled for this system\n" % repo['id']
diff_after += "Repository '%s' is disabled for this system\n" % repo['id']
results.append("Repository '%s' is disabled for this system" % repo['id'])
rhsm_arguments += ['--disable', repo['id']]
elif state in ['enabled', 'present']:
if not repo['enabled']:
changed = True
diff_before += "Repository '%s' is disabled for this system\n" % repo['id']
diff_after += "Repository '%s' is enabled for this system\n" % repo['id']
results.append("Repository '%s' is enabled for this system" % repo['id'])
rhsm_arguments += ['--enable', repo['id']]
diff = {'before': diff_before,
'after': diff_after,
'before_header': "RHSM repositories",
'after_header': "RHSM repositories"}
if not module.check_mode:
rc, out, err = run_subscription_manager(module, rhsm_arguments)
results = out.split('\n')
module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='list', required=True),
state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'),
),
supports_check_mode=True,
)
name = module.params['name']
state = module.params['state']
repository_modify(module, state, name)
if __name__ == '__main__':
main()
|
hryamzik/ansible
|
lib/ansible/modules/packaging/os/rhsm_repository.py
|
Python
|
gpl-3.0
| 7,912 | 0.00316 |
import unittest
from factorial import fact
class TestFactorial(unittest.TestCase):
"""
Our basic test class
"""
def test_fact(self):
"""
The actual test.
        Any method which starts with ``test_`` will be considered as a test case.
"""
res = fact(5)
self.assertEqual(res, 120)
if __name__ == '__main__':
unittest.main()
|
rtnpro/test-your-code
|
test_your_code/examples/factorial/test_factorial.py
|
Python
|
gpl-2.0
| 386 | 0.005181 |
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <martin.reisenhofer@funkring.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.addons.report_aeroo import report_aeroo
from openerp.addons.at_base import util
from openerp.osv import fields, osv
from openerp.tools.translate import _
class inovice_attachment_wizard(osv.TransientModel):
_name = "account.invoice.attachment.wizard"
_description = "Invoice Attachment Wizard"
def action_import(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0])
invoice_id = util.active_id(context, "account.invoice")
if not invoice_id:
raise osv.except_osv(_("Error!"), _("No invoice found"))
report_obj = self.pool.get("ir.actions.report.xml")
data=base64.decodestring(wizard.document)
data = report_aeroo.fixPdf(data)
if not data:
raise osv.except_osv(_("Error!"), _("PDF is corrupted and unable to fix!"))
if not report_obj.write_attachment(cr, uid, "account.invoice", invoice_id, report_name="account.report_invoice", datas=base64.encodestring(data), context=context, origin="account.invoice.attachment.wizard"):
raise osv.except_osv(_("Error!"), _("Unable to import document (check if invoice is validated)"))
return { "type" : "ir.actions.act_window_close" }
_columns = {
"document" : fields.binary("Document")
}
|
funkring/fdoo
|
addons-funkring/at_account/wizard/invoice_attachment_wizard.py
|
Python
|
agpl-3.0
| 2,242 | 0.005352 |
# partial unit test for gmpy2 threaded mpz functionality
# relies on Tim Peters' "doctest.py" test-driver
import gmpy2 as _g, doctest, sys, operator, gc, queue, threading
from functools import reduce
__test__={}
def _tf(N=2, _K=1234**5678):
"""Takes about 100ms on a first-generation Macbook Pro"""
for i in range(N): assert (_g.mpz(1234)**5678)==_K
a=_g.mpz(123)
b=_g.mpz(456)
c=_g.mpz(123456789123456789)
def factorize(x=c):
r'''
(Takes about 25ms, on c, on a first-generation Macbook Pro)
>>> factorize(a)
[3, 41]
>>> factorize(b)
[2, 2, 2, 3, 19]
>>>
'''
import gmpy2 as _g
savex=x
prime=2
x=_g.mpz(x)
factors=[]
while x>=prime:
newx,mult=x.remove(prime)
if mult:
factors.extend([int(prime)]*mult)
x=newx
prime=_g.next_prime(prime)
for factor in factors: assert _g.is_prime(factor)
from operator import mul
assert reduce(mul, factors)==savex
return factors
def elemop(N=1000):
r'''
(Takes about 40ms on a first-generation Macbook Pro)
'''
for i in range(N):
assert a+b == 579
assert a-b == -333
assert b*a == a*b == 56088
assert b%a == 87
assert divmod(a, b) == (0, 123)
assert divmod(b, a) == (3, 87)
assert -a == -123
assert pow(a, 10) == 792594609605189126649
assert pow(a, 7, b) == 99
        assert a < b  # cmp() was removed in Python 3, so compare directly
assert '7' in str(c)
assert '0' not in str(c)
assert a.sqrt() == 11
assert _g.lcm(a, b) == 18696
assert _g.fac(7) == 5040
assert _g.fib(17) == 1597
assert _g.divm(b, a, 20) == 12
assert _g.divm(4, 8, 20) == 3
assert _g.divm(4, 8, 20) == 3
assert _g.mpz(20) == 20
assert _g.mpz(8) == 8
assert _g.mpz(4) == 4
assert a.invert(100) == 87
def _test(chat=None):
if chat:
print("Unit tests for gmpy2 (threading)")
print(" on Python %s" % sys.version)
print("Testing gmpy2 {0}".format(_g.version()))
print(" Mutliple-precision library: {0}".format(_g.mp_version()))
print(" Floating-point library: {0}".format(_g.mpfr_version()))
print(" Complex library: {0}".format(_g.mpc_version()))
print(" Caching Values: (Number) {0}".format(_g.get_cache()[0]))
print(" Caching Values: (Size, limbs) {0}".format(_g.get_cache()[1]))
thismod = sys.modules.get(__name__)
doctest.testmod(thismod, report=0)
if chat: print("Repeating tests, with caching disabled")
_g.set_cache(0,128)
sav = sys.stdout
class _Dummy:
def write(self,*whatever):
pass
try:
sys.stdout = _Dummy()
doctest.testmod(thismod, report=0)
finally:
sys.stdout = sav
if chat:
print()
print("Overall results for thr:")
return doctest.master.summarize(chat)
class DoOne(threading.Thread):
def __init__(self, q):
threading.Thread.__init__(self)
self.q = q
def run(self):
while True:
task = self.q.get()
if task is None: break
task()
def _test_thr(Ntasks=5, Nthreads=1):
q = queue.Queue()
funcs = (_tf, 1), (factorize, 4), (elemop, 2)
for i in range(Ntasks):
for f, n in funcs:
for x in range(n):
q.put(f)
for i in range(Nthreads):
q.put(None)
thrs = [DoOne(q) for i in range(Nthreads)]
for t in thrs: t.start()
for t in thrs: t.join()
if __name__=='__main__':
_test(1)
|
andreamartire/gmpy
|
test3/gmpy_test_thr.py
|
Python
|
lgpl-3.0
| 3,614 | 0.010238 |
class WordDistance(object):
def __init__(self, words):
"""
initialize your data structure here.
:type words: List[str]
"""
self.word_dict = {}
for idx, w in enumerate(words):
self.word_dict[w] = self.word_dict.get(w, []) + [idx]
def shortest(self, word1, word2):
"""
        Returns the shortest distance between word1 and word2.
:type word1: str
:type word2: str
:rtype: int
"""
return min(abs(i - j) for i in self.word_dict[word1] for j in self.word_dict[word2])
# Your WordDistance object will be instantiated and called as such:
# wordDistance = WordDistance(words)
# wordDistance.shortest("word1", "word2")
# wordDistance.shortest("anotherWord1", "anotherWord2")
|
Mlieou/leetcode_python
|
leetcode/python/ex_244.py
|
Python
|
mit
| 791 | 0.002528 |
# pylint: skip-file
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
''' ansible module for gcloud iam servicetaccount'''
module = AnsibleModule(
argument_spec=dict(
# credentials
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
name=dict(default=None, type='str'),
display_name=dict(default=None, type='str'),
),
supports_check_mode=True,
)
gcloud = GcloudIAMServiceAccount(module.params['name'], module.params['display_name'])
state = module.params['state']
api_rval = gcloud.list_service_accounts()
#####
# Get
#####
if state == 'list':
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval, state="list")
module.exit_json(changed=False, results=api_rval['results'], state="list")
########
# Delete
########
if state == 'absent':
if gcloud.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = gcloud.delete_service_account()
module.exit_json(changed=True, results=api_rval, state="absent")
module.exit_json(changed=False, state="absent")
if state == 'present':
########
# Create
########
if not gcloud.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
# Create it here
api_rval = gcloud.create_service_account()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
# update
elif gcloud.needs_update():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed an update.')
api_rval = gcloud.update_service_account()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present|update")
module.exit_json(changed=False, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
|
appuio/ansible-role-openshift-zabbix-monitoring
|
vendor/openshift-tools/ansible/roles/lib_gcloud/build/ansible/gcloud_iam_sa.py
|
Python
|
apache-2.0
| 2,675 | 0.003364 |
from framework.db import models
from framework.config import config
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.dependency_management.interfaces import ResourceInterface
from framework.lib.general import cprint
import os
import logging
from framework.utils import FileOperations
class ResourceDB(BaseComponent, ResourceInterface):
COMPONENT_NAME = "resource"
def __init__(self):
self.register_in_service_locator()
self.config = self.get_component("config")
self.db_config = self.get_component("db_config")
self.target = self.get_component("target")
self.db = self.get_component("db")
self.LoadResourceDBFromFile(self.config.get_profile_path("RESOURCES_PROFILE"))
def LoadResourceDBFromFile(self, file_path): # This needs to be a list instead of a dictionary to preserve order in python < 2.7
logging.info("Loading Resources from: " + file_path + " ..")
resources = self.GetResourcesFromFile(file_path)
# Delete all old resources which are not edited by user
# because we may have updated the resource
self.db.session.query(models.Resource).filter_by(dirty=False).delete()
# resources = [(Type, Name, Resource), (Type, Name, Resource),]
for Type, Name, Resource in resources:
self.db.session.add(models.Resource(resource_type=Type, resource_name=Name, resource=Resource))
self.db.session.commit()
def GetResourcesFromFile(self, resource_file):
resources = set()
ConfigFile = FileOperations.open(resource_file, 'r').read().splitlines() # To remove stupid '\n' at the end
for line in ConfigFile:
if '#' == line[0]:
continue # Skip comment lines
try:
Type, Name, Resource = line.split('_____')
# Resource = Resource.strip()
resources.add((Type, Name, Resource))
except ValueError:
cprint("ERROR: The delimiter is incorrect in this line at Resource File: "+str(line.split('_____')))
return resources
def GetReplacementDict(self):
configuration = self.db_config.GetReplacementDict()
configuration.update(self.target.GetTargetConfig())
configuration.update(self.config.GetReplacementDict())
return configuration
def GetRawResources(self, ResourceType):
filter_query = self.db.session.query(models.Resource.resource_name, models.Resource.resource).filter_by(resource_type = ResourceType)
# Sorting is necessary for working of ExtractURLs, since it must run after main command, so order is imp
sort_query = filter_query.order_by(models.Resource.id)
raw_resources = sort_query.all()
return raw_resources
def GetResources(self, ResourceType):
replacement_dict = self.GetReplacementDict()
raw_resources = self.GetRawResources(ResourceType)
resources = []
for name, resource in raw_resources:
resources.append([name, self.config.MultipleReplace(resource, replacement_dict)])
return resources
def GetRawResourceList(self, ResourceList):
raw_resources = self.db.session.query(models.Resource.resource_name, models.Resource.resource).filter(models.Resource.resource_type.in_(ResourceList)).all()
return raw_resources
def GetResourceList(self, ResourceTypeList):
replacement_dict = self.GetReplacementDict()
raw_resources = self.GetRawResourceList(ResourceTypeList)
resources = []
for name, resource in raw_resources:
resources.append([name, self.config.MultipleReplace(resource, replacement_dict)])
return resources
|
sharad1126/owtf
|
framework/db/resource_manager.py
|
Python
|
bsd-3-clause
| 3,759 | 0.004256 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
Very simple script to replace a template with another one.
It also converts the old MediaWiki boilerplate format to the new format.
Syntax: python template.py [-remove] [xml[:filename]] oldTemplate [newTemplate]
Specify the template on the command line. The program will pick up the template
page, and look for all pages using it. It will then automatically loop over
them, and replace the template.
Command line options:
-remove Remove every occurrence of the template from every article
-subst Resolves the template by putting its text directly into the
article. This is done by changing {{...}} or {{msg:...}} into
{{subst:...}}
-assubst Replaces the first argument as old template with the second
argument as new template but substitutes it like -subst does.
Using both options -remove and -subst in the same command line has
the same effect.
-xml retrieve information from a local dump
(https://download.wikimedia.org). If this argument isn't given,
info will be loaded from the maintenance page of the live wiki.
argument can also be given as "-xml:filename.xml".
-user: Only process pages edited by a given user
-skipuser: Only process pages not edited by a given user
-timestamp: (With -user or -skipuser). Only check for a user whose edit is
            not older than the given timestamp. The timestamp must be written
            in MediaWiki timestamp format, which is "%Y%m%d%H%M%S".
            If this parameter is missing, all edits are checked, but this is
            restricted to the last 100 edits.
-summary: Lets you pick a custom edit summary. Use quotes if edit summary
contains spaces.
-always Don't bother asking to confirm any of the changes, Just Do It.
-addcat: Appends the given category to every page that is edited. This is
useful when a category is being broken out from a template
parameter or when templates are being upmerged but more information
must be preserved.
other: First argument is the old template name, second one is the new
name.
If you want to address a template which has spaces, put quotation
marks around it, or use underscores.
Examples:
If you have a template called [[Template:Cities in Washington]] and want to
change it to [[Template:Cities in Washington state]], start
python pwb.py template "Cities in Washington" "Cities in Washington state"
Move the page [[Template:Cities in Washington]] manually afterwards.
If you have a template called [[Template:test]] and want to substitute it only
on pages in the User: and User talk: namespaces, do:
python pwb.py template test -subst -namespace:2 -namespace:3
Note that -namespace: is a global Pywikibot parameter
This next example substitutes the template lived with a supplied edit summary.
It only performs substitutions in main article namespace and doesn't prompt to
start replacing. Note that -putthrottle: is a global Pywikibot parameter.
python pwb.py template -putthrottle:30 -namespace:0 lived -subst -always \
-summary:"BOT: Substituting {{lived}}, see [[WP:SUBST]]."
This next example removes the templates {{cfr}}, {{cfru}}, and {{cfr-speedy}}
from five category pages as given:
python pwb.py template cfr cfru cfr-speedy -remove -always \
-page:"Category:Mountain monuments and memorials" \
-page:"Category:Indian family names" \
-page:"Category:Tennis tournaments in Belgium" \
-page:"Category:Tennis tournaments in Germany" \
-page:"Category:Episcopal cathedrals in the United States" \
-summary:"Removing Cfd templates from category pages that survived."
This next example substitutes templates test1, test2, and space test on all
pages:
python pwb.py template test1 test2 "space test" -subst -always
"""
#
# (C) Daniel Herding, 2004
# (C) Rob W.W. Hooft, 2003-2005
# (C) xqt, 2009-2015
# (C) Pywikibot team, 2004-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import re
from warnings import warn
import pywikibot
from pywikibot import i18n, pagegenerators, xmlreader, Bot
from pywikibot.exceptions import ArgumentDeprecationWarning
from scripts.replace import ReplaceRobot as ReplaceBot
class XmlDumpTemplatePageGenerator(object):
"""
Generator which yields Pages that transclude a template.
These pages will be retrieved from a local XML dump file
(cur table), and may not still transclude the template.
"""
def __init__(self, templates, xmlfilename):
"""
Constructor.
Arguments:
* templateNames - A list of Page object representing the searched
templates
* xmlfilename - The dump's path, either absolute or relative
"""
self.templates = templates
self.xmlfilename = xmlfilename
def __iter__(self):
"""Yield page objects until the entire XML dump has been read."""
mysite = pywikibot.Site()
dump = xmlreader.XmlDump(self.xmlfilename)
# regular expression to find the original template.
# {{vfd}} does the same thing as {{Vfd}}, so both will be found.
# The old syntax, {{msg:vfd}}, will also be found.
templatePatterns = []
for template in self.templates:
templatePattern = template.title(withNamespace=False)
if mysite.namespaces[10].case == 'first-letter':
templatePattern = '[%s%s]%s' % (templatePattern[0].upper(),
templatePattern[0].lower(),
templatePattern[1:])
templatePattern = re.sub(' ', '[_ ]', templatePattern)
templatePatterns.append(templatePattern)
templateRegex = re.compile(
r'\{\{ *([mM][sS][gG]:)?(?:%s) *(?P<parameters>\|[^}]+|) *}}'
% '|'.join(templatePatterns))
for entry in dump.parse():
if templateRegex.search(entry.text):
page = pywikibot.Page(mysite, entry.title)
yield page
class TemplateRobot(ReplaceBot):
"""This bot will replace, remove or subst all occurrences of a template."""
def __init__(self, generator, templates, **kwargs):
"""
Constructor.
@param generator: the pages to work on
@type generator: iterable
@param templates: a dictionary which maps old template names to
their replacements. If remove or subst is True, it maps the
names of the templates that should be removed/resolved to None.
@type templates: dict
"""
self.availableOptions.update({
'subst': False,
'remove': False,
'summary': None,
'addedCat': None,
})
Bot.__init__(self, generator=generator, **kwargs)
self.templates = templates
# get edit summary message if it's empty
if not self.getOption('summary'):
comma = self.site.mediawiki_message('comma-separator')
params = {'list': comma.join(self.templates.keys()),
'num': len(self.templates)}
site = self.site
if self.getOption('remove'):
self.options['summary'] = i18n.twntranslate(
site, 'template-removing', params)
elif self.getOption('subst'):
self.options['summary'] = i18n.twntranslate(
site, 'template-substituting', params)
else:
self.options['summary'] = i18n.twntranslate(
site, 'template-changing', params)
# regular expression to find the original template.
# {{vfd}} does the same thing as {{Vfd}}, so both will be found.
# The old syntax, {{msg:vfd}}, will also be found.
# The group 'parameters' will either match the parameters, or an
# empty string if there are none.
replacements = []
exceptions = {}
namespace = self.site.namespaces[10]
for old, new in self.templates.items():
if namespace.case == 'first-letter':
pattern = '[' + \
re.escape(old[0].upper()) + \
re.escape(old[0].lower()) + \
']' + re.escape(old[1:])
else:
pattern = re.escape(old)
pattern = re.sub(r'_|\\ ', r'[_ ]', pattern)
templateRegex = re.compile(r'\{\{ *(' + ':|'.join(namespace) +
r':|[mM][sS][gG]:)?' + pattern +
r'(?P<parameters>\s*\|.+?|) *}}',
re.DOTALL)
if self.getOption('subst') and self.getOption('remove'):
replacements.append((templateRegex,
r'{{subst:%s\g<parameters>}}' % new))
exceptions['inside-tags'] = ['ref', 'gallery']
elif self.getOption('subst'):
replacements.append((templateRegex,
r'{{subst:%s\g<parameters>}}' % old))
exceptions['inside-tags'] = ['ref', 'gallery']
elif self.getOption('remove'):
replacements.append((templateRegex, ''))
else:
template = pywikibot.Page(self.site, new, ns=10)
if not template.exists():
pywikibot.warning(u'Template "%s" does not exist.' % new)
if not pywikibot.input_yn('Do you want to proceed anyway?',
default=False, automatic_quit=False):
continue
replacements.append((templateRegex,
r'{{%s\g<parameters>}}' % new))
super(TemplateRobot, self).__init__(
generator, replacements, exceptions,
always=self.getOption('always'),
addedCat=self.getOption('addedCat'),
summary=self.getOption('summary'))
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
templateNames = []
templates = {}
options = {}
# If xmlfilename is None, references will be loaded from the live wiki.
xmlfilename = None
user = None
skip = False
timestamp = None
# read command line parameters
local_args = pywikibot.handle_args(args)
# Avoid conflicts with pagegenerators.py parameters.
if any(arg.startswith('-category:') for arg in local_args):
warn('-category (to append a category to each edited page) has been'
' renamed to -addcat; make sure you are using the correct param.',
ArgumentDeprecationWarning)
site = pywikibot.Site()
genFactory = pagegenerators.GeneratorFactory()
for arg in local_args:
if arg == '-remove':
options['remove'] = True
elif arg == '-subst':
options['subst'] = True
elif arg == '-assubst':
options['subst'] = options['remove'] = True
elif arg == '-always':
options['always'] = True
elif arg.startswith('-xml'):
if len(arg) == 4:
xmlfilename = pywikibot.input(
u'Please enter the XML dump\'s filename: ')
else:
xmlfilename = arg[5:]
elif arg.startswith('-addcat:'):
options['addedCat'] = arg[len('-addcat:'):]
elif arg.startswith('-summary:'):
options['summary'] = arg[len('-summary:'):]
elif arg.startswith('-user:'):
user = arg[len('-user:'):]
elif arg.startswith('-skipuser:'):
user = arg[len('-skipuser:'):]
skip = True
elif arg.startswith('-timestamp:'):
timestamp = arg[len('-timestamp:'):]
else:
if not genFactory.handleArg(arg):
templateName = pywikibot.Page(site, arg, ns=10)
templateNames.append(templateName.title(withNamespace=False))
if not templateNames:
pywikibot.bot.suggest_help(missing_parameters=['templates'])
return False
if options.get('subst', False) ^ options.get('remove', False):
for templateName in templateNames:
templates[templateName] = None
else:
try:
for i in range(0, len(templateNames), 2):
templates[templateNames[i]] = templateNames[i + 1]
except IndexError:
pywikibot.output('Unless using solely -subst or -remove, '
'you must give an even number of template names.')
return
oldTemplates = []
for templateName in templates.keys():
oldTemplate = pywikibot.Page(site, templateName, ns=10)
oldTemplates.append(oldTemplate)
if xmlfilename:
gen = XmlDumpTemplatePageGenerator(oldTemplates, xmlfilename)
else:
gen = genFactory.getCombinedGenerator()
if not gen:
gens = [
pagegenerators.ReferringPageGenerator(t, onlyTemplateInclusion=True)
for t in oldTemplates
]
gen = pagegenerators.CombinedPageGenerator(gens)
gen = pagegenerators.DuplicateFilterPageGenerator(gen)
if user:
gen = pagegenerators.UserEditFilterGenerator(gen, user, timestamp, skip,
max_revision_depth=100,
show_filtered=True)
if not genFactory.gens:
# make sure that proper namespace filtering etc. is handled
gen = genFactory.getCombinedGenerator(gen)
preloadingGen = pagegenerators.PreloadingGenerator(gen)
bot = TemplateRobot(preloadingGen, templates, **options)
bot.run()
if __name__ == "__main__":
try:
main()
except Exception:
pywikibot.error("Fatal error:", exc_info=True)
|
icyflame/batman
|
scripts/template.py
|
Python
|
mit
| 14,372 | 0.000348 |
# -*- coding: utf-8 -*-
from canaimagnulinux.wizard.interfaces import IChat
from canaimagnulinux.wizard.interfaces import ISocialNetwork
from canaimagnulinux.wizard.utils import CanaimaGnuLinuxWizardMF as _
from collective.beaker.interfaces import ISession
from collective.z3cform.wizard import wizard
from plone import api
from plone.z3cform.fieldsets import group
from z3c.form import field
try:
from zope.browserpage import viewpagetemplatefile
except ImportError:
# Plone < 4.1
from zope.app.pagetemplate import viewpagetemplatefile
import logging
logger = logging.getLogger(__name__)
class ChatGroup(group.Group):
prefix = 'chats'
label = _(u'Chats Information')
fields = field.Fields(IChat)
class SocialNetworkGroup(group.Group):
prefix = 'socialnetwork'
label = _(u'Social Network Information')
fields = field.Fields(ISocialNetwork)
class SocialNetworkStep(wizard.GroupStep):
prefix = 'Social'
label = _(u'Social Network accounts')
description = _(u'Input your social networks details')
template = viewpagetemplatefile.ViewPageTemplateFile('templates/socialnetwork.pt')
fields = field.Fields()
groups = [ChatGroup, SocialNetworkGroup]
def __init__(self, context, request, wizard):
        # Use collective.beaker for session management
session = ISession(request, None)
self.sessionmanager = session
super(SocialNetworkStep, self).__init__(context, request, wizard)
def load(self, context):
member = api.user.get_current()
data = self.getContent()
# Chats group
if not data.get('irc', None):
irc = member.getProperty('irc')
if type(irc).__name__ == 'object':
irc = None
data['irc'] = irc
if not data.get('telegram', None):
telegram = member.getProperty('telegram')
if type(telegram).__name__ == 'object':
telegram = None
data['telegram'] = telegram
if not data.get('skype', None):
skype = member.getProperty('skype')
if type(skype).__name__ == 'object':
skype = None
data['skype'] = skype
# Social Network group
if not data.get('twitter', None):
twitter = member.getProperty('twitter')
if type(twitter).__name__ == 'object':
twitter = None
data['twitter'] = twitter
if not data.get('instagram', None):
instagram = member.getProperty('instagram')
if type(instagram).__name__ == 'object':
instagram = None
data['instagram'] = instagram
if not data.get('facebook', None):
facebook = member.getProperty('facebook')
if type(facebook).__name__ == 'object':
facebook = None
data['facebook'] = facebook
def apply(self, context, initial_finish=False):
data = self.getContent()
return data
def applyChanges(self, data):
member = api.user.get_current()
member.setMemberProperties(mapping={
'irc': data['irc'],
'telegram': data['telegram'],
'skype': data['skype'],
'twitter': data['twitter'],
'instagram': data['instagram'],
'facebook': data['facebook']}
)
|
CanaimaGNULinux/canaimagnulinux.wizard
|
canaimagnulinux/wizard/browser/socialnetwork.py
|
Python
|
gpl-2.0
| 3,378 | 0.000296 |
"""Pathname and path-related operations for the Macintosh."""
import os
from stat import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","islink","exists","isdir","isfile",
"walk","expanduser","expandvars","normpath","abspath",
"realpath"]
# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
def normcase(path):
return path.lower()
def isabs(s):
"""Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
return ':' in s and s[0] != ':'
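# Illustrative behaviour of isabs() (examples added here, not part of the
# original module):
#   isabs('MacHD:Folder:file') -> True   (text before the first colon is the volume)
#   isabs(':Folder:file')      -> False  (a leading colon means relative)
#   isabs('file')              -> False  (no colon at all is also relative)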
def join(s, *p):
path = s
for t in p:
if (not s) or isabs(t):
path = t
continue
if t[:1] == ':':
t = t[1:]
if ':' not in path:
path = ':' + path
if path[-1:] != ':':
path = path + ':'
path = path + t
return path
def split(s):
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
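    # For example (added illustration, not from the original docstring):
    #   split('MacHD:Folder:file') -> ('MacHD:Folder', 'file')
    #   join('MacHD:Folder', 'file') -> 'MacHD:Folder:file'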
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i + 1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
def splitext(p):
"""Split a path into root and extension.
The extension is everything starting at the last dot in the last
pathname component; the root is everything before that.
It is always true that root + ext == p."""
root, ext = '', ''
for c in p:
if c == ':':
root, ext = root + ext + c, ''
elif c == '.':
if ext:
root, ext = root + ext, c
else:
ext = c
elif ext:
ext = ext + c
else:
root = root + c
return root, ext
def splitdrive(p):
"""Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return '', p
# Short interfaces to split()
def dirname(s): return split(s)[0]
def basename(s): return split(s)[1]
def ismount(s):
if not isabs(s):
return False
components = split(s)
return len(components) == 2 and components[1] == ''
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISDIR(st[ST_MODE])
# Get size, mtime, atime of files.
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_SIZE]
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_MTIME]
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_ATIME]
def islink(s):
"""Return true if the pathname refers to a symbolic link.
    (Always false on the Mac, until we understand Aliases.)"""
return 0
def isfile(s):
"""Return true if the pathname refers to an existing regular file."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISREG(st[ST_MODE])
def exists(s):
"""Return true if the pathname refers to an existing file or directory."""
try:
st = os.stat(s)
except os.error:
return 0
return 1
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix)):
if prefix[:i+1] != item[:i+1]:
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
def expandvars(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
def expanduser(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
norm_error = 'macpath.norm_error: path cannot be normalized'
def normpath(s):
"""Normalize a pathname. Will return the same result for
equivalent paths."""
if ":" not in s:
return ":"+s
comps = s.split(":")
i = 1
while i < len(comps)-1:
if comps[i] == "" and comps[i-1] != "":
if i > 1:
del comps[i-1:i+1]
i = i - 1
else:
# best way to handle this is to raise an exception
raise norm_error, 'Cannot use :: immediately after volume name'
else:
i = i + 1
s = ":".join(comps)
# remove trailing ":" except for ":" and "Volume:"
if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
s = s[:-1]
return s
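# Illustrative sketch, not part of the original module: on the Mac '::' steps
# up one directory, so normalization drops the component that precedes it.
def _example_normpath():
    assert normpath('Volume:folder::file') == 'Volume:file'
    assert normpath('file') == ':file'     # bare names become explicit relative paths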
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
|
DarioGT/OMS-PluginXML
|
org.modelsphere.sms/lib/jython-2.2.1/Lib/macpath.py
|
Python
|
gpl-3.0
| 6,978 | 0.005589 |
from datetime import datetime
def foo(p):
"""Foo
:param datetime p: a datetime
<ref>
"""
|
asedunov/intellij-community
|
python/testData/resolve/ReferenceInDocstring.py
|
Python
|
apache-2.0
| 114 | 0.017544 |
# -*- coding: utf-8 -*-
"""
formlayout
==========
Module creating Qt form dialogs/layouts to edit various type of parameters
formlayout License Agreement (MIT License)
------------------------------------------
Copyright (c) 2009 Pierre Raybaut
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# History:
# 1.0.10: added float validator (disable "Ok" and "Apply" button when not valid)
# 1.0.7: added support for "Apply" button
# 1.0.6: code cleaning
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__version__ = '1.0.10'
__license__ = __doc__
DEBUG = False
import copy
import datetime
import warnings
import six
from matplotlib import colors as mcolors
from matplotlib.backends.qt_compat import QtGui, QtWidgets, QtCore
BLACKLIST = set(["title", "label"])
class ColorButton(QtWidgets.QPushButton):
"""
Color choosing push button
"""
colorChanged = QtCore.Signal(QtGui.QColor)
def __init__(self, parent=None):
QtWidgets.QPushButton.__init__(self, parent)
self.setFixedSize(20, 20)
self.setIconSize(QtCore.QSize(12, 12))
self.clicked.connect(self.choose_color)
self._color = QtGui.QColor()
def choose_color(self):
color = QtWidgets.QColorDialog.getColor(
self._color, self.parentWidget(), "",
QtWidgets.QColorDialog.ShowAlphaChannel)
if color.isValid():
self.set_color(color)
def get_color(self):
return self._color
@QtCore.Slot(QtGui.QColor)
def set_color(self, color):
if color != self._color:
self._color = color
self.colorChanged.emit(self._color)
pixmap = QtGui.QPixmap(self.iconSize())
pixmap.fill(color)
self.setIcon(QtGui.QIcon(pixmap))
color = QtCore.Property(QtGui.QColor, get_color, set_color)
def to_qcolor(color):
"""Create a QColor from a matplotlib color"""
qcolor = QtGui.QColor()
try:
rgba = mcolors.to_rgba(color)
except ValueError:
warnings.warn('Ignoring invalid color %r' % color)
return qcolor # return invalid QColor
qcolor.setRgbF(*rgba)
return qcolor
class ColorLayout(QtWidgets.QHBoxLayout):
"""Color-specialized QLineEdit layout"""
def __init__(self, color, parent=None):
QtWidgets.QHBoxLayout.__init__(self)
assert isinstance(color, QtGui.QColor)
self.lineedit = QtWidgets.QLineEdit(
mcolors.to_hex(color.getRgbF(), keep_alpha=True), parent)
self.lineedit.editingFinished.connect(self.update_color)
self.addWidget(self.lineedit)
self.colorbtn = ColorButton(parent)
self.colorbtn.color = color
self.colorbtn.colorChanged.connect(self.update_text)
self.addWidget(self.colorbtn)
def update_color(self):
color = self.text()
qcolor = to_qcolor(color)
self.colorbtn.color = qcolor # defaults to black if not qcolor.isValid()
def update_text(self, color):
self.lineedit.setText(mcolors.to_hex(color.getRgbF(), keep_alpha=True))
def text(self):
return self.lineedit.text()
def font_is_installed(font):
"""Check if font is installed"""
return [fam for fam in QtGui.QFontDatabase().families()
if six.text_type(fam) == font]
def tuple_to_qfont(tup):
"""
Create a QFont from tuple:
(family [string], size [int], italic [bool], bold [bool])
"""
if not (isinstance(tup, tuple) and len(tup) == 4
and font_is_installed(tup[0])
and isinstance(tup[1], int)
and isinstance(tup[2], bool)
and isinstance(tup[3], bool)):
return None
font = QtGui.QFont()
family, size, italic, bold = tup
font.setFamily(family)
font.setPointSize(size)
font.setItalic(italic)
font.setBold(bold)
return font
def qfont_to_tuple(font):
return (six.text_type(font.family()), int(font.pointSize()),
font.italic(), font.bold())
class FontLayout(QtWidgets.QGridLayout):
"""Font selection"""
def __init__(self, value, parent=None):
QtWidgets.QGridLayout.__init__(self)
font = tuple_to_qfont(value)
assert font is not None
# Font family
self.family = QtWidgets.QFontComboBox(parent)
self.family.setCurrentFont(font)
self.addWidget(self.family, 0, 0, 1, -1)
# Font size
self.size = QtWidgets.QComboBox(parent)
self.size.setEditable(True)
sizelist = list(range(6, 12)) + list(range(12, 30, 2)) + [36, 48, 72]
size = font.pointSize()
if size not in sizelist:
sizelist.append(size)
sizelist.sort()
self.size.addItems([str(s) for s in sizelist])
self.size.setCurrentIndex(sizelist.index(size))
self.addWidget(self.size, 1, 0)
# Italic or not
self.italic = QtWidgets.QCheckBox(self.tr("Italic"), parent)
self.italic.setChecked(font.italic())
self.addWidget(self.italic, 1, 1)
# Bold or not
self.bold = QtWidgets.QCheckBox(self.tr("Bold"), parent)
self.bold.setChecked(font.bold())
self.addWidget(self.bold, 1, 2)
def get_font(self):
font = self.family.currentFont()
font.setItalic(self.italic.isChecked())
font.setBold(self.bold.isChecked())
font.setPointSize(int(self.size.currentText()))
return qfont_to_tuple(font)
def is_edit_valid(edit):
text = edit.text()
state = edit.validator().validate(text, 0)[0]
return state == QtGui.QDoubleValidator.Acceptable
class FormWidget(QtWidgets.QWidget):
update_buttons = QtCore.Signal()
def __init__(self, data, comment="", parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.data = copy.deepcopy(data)
self.widgets = []
self.formlayout = QtWidgets.QFormLayout(self)
if comment:
self.formlayout.addRow(QtWidgets.QLabel(comment))
self.formlayout.addRow(QtWidgets.QLabel(" "))
if DEBUG:
print("\n"+("*"*80))
print("DATA:", self.data)
print("*"*80)
print("COMMENT:", comment)
print("*"*80)
def get_dialog(self):
"""Return FormDialog instance"""
dialog = self.parent()
while not isinstance(dialog, QtWidgets.QDialog):
dialog = dialog.parent()
return dialog
def setup(self):
# self.formlayout.setFieldGrowthPolicy(1)
for label, value in self.data:
if DEBUG:
print("value:", value)
if label is None and value is None:
# Separator: (None, None)
self.formlayout.addRow(QtWidgets.QLabel(" "), QtWidgets.QLabel(" "))
self.widgets.append(None)
continue
elif label is None:
# Comment
self.formlayout.addRow(QtWidgets.QLabel(value))
self.widgets.append(None)
continue
elif tuple_to_qfont(value) is not None:
field = FontLayout(value, self)
elif (label.lower() not in BLACKLIST
and mcolors.is_color_like(value)):
field = ColorLayout(to_qcolor(value), self)
elif isinstance(value, six.string_types):
field = QtWidgets.QLineEdit(value, self)
field.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum))
elif isinstance(value, (list, tuple)):
if isinstance(value, tuple):
value = list(value)
selindex = value.pop(0)
field = QtWidgets.QComboBox(self)
if isinstance(value[0], (list, tuple)):
keys = [key for key, _val in value]
value = [val for _key, val in value]
else:
keys = value
field.addItems(value)
if selindex in value:
selindex = value.index(selindex)
elif selindex in keys:
selindex = keys.index(selindex)
elif not isinstance(selindex, int):
warnings.warn(
"index '%s' is invalid (label: %s, value: %s)" %
(selindex, label, value))
selindex = 0
field.setCurrentIndex(selindex)
elif isinstance(value, bool):
field = QtWidgets.QCheckBox(self)
if value:
field.setCheckState(QtCore.Qt.Checked)
else:
field.setCheckState(QtCore.Qt.Unchecked)
elif isinstance(value, float):
field = QtWidgets.QLineEdit(repr(value), self)
field.setCursorPosition(0)
field.setValidator(QtGui.QDoubleValidator(field))
field.validator().setLocale(QtCore.QLocale("C"))
dialog = self.get_dialog()
dialog.register_float_field(field)
field.textChanged.connect(lambda text: dialog.update_buttons())
elif isinstance(value, int):
field = QtWidgets.QSpinBox(self)
field.setRange(-1e9, 1e9)
field.setValue(value)
elif isinstance(value, datetime.datetime):
field = QtWidgets.QDateTimeEdit(self)
field.setDateTime(value)
elif isinstance(value, datetime.date):
field = QtWidgets.QDateEdit(self)
field.setDate(value)
else:
field = QtWidgets.QLineEdit(repr(value), self)
self.formlayout.addRow(label, field)
# print(self.formlayout.fieldGrowthPolicy())
self.widgets.append(field)
def get(self):
valuelist = []
for index, (label, value) in enumerate(self.data):
field = self.widgets[index]
if label is None:
# Separator / Comment
continue
elif tuple_to_qfont(value) is not None:
value = field.get_font()
elif (isinstance(value, six.string_types)
or mcolors.is_color_like(value)):
value = six.text_type(field.text())
elif isinstance(value, (list, tuple)):
index = int(field.currentIndex())
if isinstance(value[0], (list, tuple)):
value = value[index][0]
else:
value = value[index]
elif isinstance(value, bool):
value = field.checkState() == QtCore.Qt.Checked
elif isinstance(value, float):
value = float(str(field.text()))
elif isinstance(value, int):
value = int(field.value())
elif isinstance(value, datetime.datetime):
value = field.dateTime().toPyDateTime()
elif isinstance(value, datetime.date):
value = field.date().toPyDate()
else:
value = eval(str(field.text()))
valuelist.append(value)
return valuelist
class FormComboWidget(QtWidgets.QWidget):
update_buttons = QtCore.Signal()
def __init__(self, datalist, comment="", parent=None):
QtWidgets.QWidget.__init__(self, parent)
layout = QtWidgets.QVBoxLayout()
self.setLayout(layout)
self.combobox = QtWidgets.QComboBox()
layout.addWidget(self.combobox)
self.stackwidget = QtWidgets.QStackedWidget(self)
layout.addWidget(self.stackwidget)
self.combobox.currentIndexChanged.connect(self.stackwidget.setCurrentIndex)
self.widgetlist = []
for data, title, comment in datalist:
self.combobox.addItem(title)
widget = FormWidget(data, comment=comment, parent=self)
self.stackwidget.addWidget(widget)
self.widgetlist.append(widget)
def setup(self):
for widget in self.widgetlist:
widget.setup()
def get(self):
return [widget.get() for widget in self.widgetlist]
class FormTabWidget(QtWidgets.QWidget):
update_buttons = QtCore.Signal()
def __init__(self, datalist, comment="", parent=None):
QtWidgets.QWidget.__init__(self, parent)
layout = QtWidgets.QVBoxLayout()
self.tabwidget = QtWidgets.QTabWidget()
layout.addWidget(self.tabwidget)
self.setLayout(layout)
self.widgetlist = []
for data, title, comment in datalist:
if len(data[0]) == 3:
widget = FormComboWidget(data, comment=comment, parent=self)
else:
widget = FormWidget(data, comment=comment, parent=self)
index = self.tabwidget.addTab(widget, title)
self.tabwidget.setTabToolTip(index, comment)
self.widgetlist.append(widget)
def setup(self):
for widget in self.widgetlist:
widget.setup()
def get(self):
return [widget.get() for widget in self.widgetlist]
class FormDialog(QtWidgets.QDialog):
"""Form Dialog"""
def __init__(self, data, title="", comment="", icon=None, parent=None, apply=None):
QtWidgets.QDialog.__init__(self, parent)
self.apply_callback = apply
# Form
if isinstance(data[0][0], (list, tuple)):
self.formwidget = FormTabWidget(data, comment=comment, parent=self)
elif len(data[0]) == 3:
self.formwidget = FormComboWidget(data, comment=comment, parent=self)
else:
self.formwidget = FormWidget(data, comment=comment, parent=self)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.formwidget)
self.float_fields = []
self.formwidget.setup()
# Button box
self.bbox = bbox = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
self.formwidget.update_buttons.connect(self.update_buttons)
if self.apply_callback is not None:
apply_btn = bbox.addButton(QtWidgets.QDialogButtonBox.Apply)
apply_btn.clicked.connect(self.apply)
bbox.accepted.connect(self.accept)
bbox.rejected.connect(self.reject)
layout.addWidget(bbox)
self.setLayout(layout)
self.setWindowTitle(title)
if not isinstance(icon, QtGui.QIcon):
icon = QtWidgets.QWidget().style().standardIcon(QtWidgets.QStyle.SP_MessageBoxQuestion)
self.setWindowIcon(icon)
def register_float_field(self, field):
self.float_fields.append(field)
def update_buttons(self):
valid = True
for field in self.float_fields:
if not is_edit_valid(field):
valid = False
for btn_type in (QtWidgets.QDialogButtonBox.Ok,
QtWidgets.QDialogButtonBox.Apply):
btn = self.bbox.button(btn_type)
if btn is not None:
btn.setEnabled(valid)
def accept(self):
self.data = self.formwidget.get()
QtWidgets.QDialog.accept(self)
def reject(self):
self.data = None
QtWidgets.QDialog.reject(self)
def apply(self):
self.apply_callback(self.formwidget.get())
def get(self):
"""Return form result"""
return self.data
def fedit(data, title="", comment="", icon=None, parent=None, apply=None):
"""
Create form dialog and return result
(if Cancel button is pressed, return None)
data: datalist, datagroup
title: string
comment: string
icon: QIcon instance
parent: parent QWidget
apply: apply callback (function)
datalist: list/tuple of (field_name, field_value)
datagroup: list/tuple of (datalist *or* datagroup, title, comment)
-> one field for each member of a datalist
-> one tab for each member of a top-level datagroup
-> one page (of a multipage widget, each page can be selected with a combo
box) for each member of a datagroup inside a datagroup
Supported types for field_value:
- int, float, str, unicode, bool
- colors: in Qt-compatible text form, i.e. in hex format or name (red,...)
(automatically detected from a string)
- list/tuple:
* the first element will be the selected index (or value)
* the other elements can be couples (key, value) or only values
"""
# Create a QApplication instance if no instance currently exists
# (e.g., if the module is used directly from the interpreter)
if QtWidgets.QApplication.startingUp():
_app = QtWidgets.QApplication([])
dialog = FormDialog(data, title, comment, icon, parent, apply)
if dialog.exec_():
return dialog.get()
if __name__ == "__main__":
# def create_datalist_example():
# return [('str', 'this is a string'),
# ('list', [0, '1', '3', '4']),
# ('list2', ['--', ('none', 'None'), ('--', 'Dashed'),
# ('-.', 'DashDot'), ('-', 'Solid'),
# ('steps', 'Steps'), (':', 'Dotted')]),
# ('float', 1.2),
# (None, 'Other:'),
# ('int', 12),
# ('font', ('Arial', 10, False, True)),
# ('color', '#123409'),
# ('bool', True),
# ('date', datetime.date(2010, 10, 10)),
# ('datetime', datetime.datetime(2010, 10, 10)),
# ]
#
# def create_datagroup_example():
# datalist = create_datalist_example()
# return ((datalist, "Category 1", "Category 1 comment"),
# (datalist, "Category 2", "Category 2 comment"),
# (datalist, "Category 3", "Category 3 comment"))
#
# #--------- datalist example
# datalist = create_datalist_example()
#
# def apply_test(data):
# print("data:", data)
# print("result:", fedit(datalist, title="Example",
# comment="This is just an <b>example</b>.",
# apply=apply_test))
# --------- datagroup example
# datagroup = create_datagroup_example()
# print("result:", fedit(datagroup, "Global title"))
#--------- datagroup inside a datagroup example
# datalist = create_datalist_example()
# datagroup = create_datagroup_example()
# print("result:", fedit(((datagroup, "Title 1", "Tab 1 comment"),
# (datalist, "Title 2", "Tab 2 comment"),
# (datalist, "Title 3", "Tab 3 comment")),
# "Global title"))
# MY TEST
data = [('str', 'this is a string'),
('str', 'this is a string'),
('str', 'this is a string'),
('list', [0, '1', '3', '4']),
('list', [2, '1', '3', '4']),
('list2', ['--', ('none', 'None'), ('--', 'Dashed'),
('-.', 'DashDot'), ('-', 'Solid'),
('steps', 'Steps'), (':', 'Dotted')]),
('float', 1.2),
(None, 'Other:'),
('int', 12),
('font', ('Arial', 10, False, True)),
('color', '#123409'),
('bool', True),
('date', datetime.date(2010, 10, 10)),
('datetime', datetime.datetime(2010, 10, 10)),
]
def apply_test(a):
print(a)
fedit(data, title='henlo', comment='haahha', apply=apply_test)
|
chilleo/ALPHA
|
raxmlOutputWindows/matplotlibCustomBackend/customFormlayout.py
|
Python
|
mit
| 20,667 | 0.000919 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from PIL import Image
def graf2png(weburl, username, password, timeout, imgname, hwin, wwin, onlypanel):
driver = webdriver.PhantomJS()
driver.set_window_size(hwin, wwin)
driver.get(weburl)
    # Enter the username
in_user = driver.find_element_by_name('username')
in_user.clear()
in_user.send_keys(username)
    # Enter the password
in_pass = driver.find_element_by_id('inputPassword')
in_pass.clear()
in_pass.send_keys(password)
in_pass.send_keys(Keys.ENTER)
    # Wait for the query to load
time.sleep(timeout)
    # Timestamp to avoid overwriting screenshots
currtime = time.strftime("%y%m%d%H%M%S", time.localtime())
imgname = imgname + currtime + '.png'
    # Take the screenshot
    driver.save_screenshot(imgname)
    print("Screenshot saved as: " + imgname)
    # Crop the panel(?)
    # Only works with panels whose class is 'panel-fullscreen',
    # which is the default class for panels when you generate
    # a share link. (Share Panel > Link > Copy)
if (onlypanel):
panel = driver.find_element_by_class_name('panel-fullscreen')
plocation = panel.location
psize = panel.size
left = plocation['x']
top = plocation['y']
right = plocation['x'] + psize['width']
bottom = plocation['y'] + psize['height']
pimg = Image.open(imgname)
pimg = pimg.crop((left, top, right, bottom))
pimgname = 'panel_' + imgname
pimg.save(pimgname)
print("Panel recortado guardado como: " + pimgname)
|
andoniaf/DefGrafana.py
|
graf2png.py
|
Python
|
gpl-3.0
| 1,737 | 0.000576 |
# -*- coding: utf-8 -*-
# Copyright © 2014-2018 GWHAT Project Contributors
# https://github.com/jnsebgosselin/gwhat
#
# This file is part of GWHAT (Ground-Water Hydrograph Analysis Toolbox).
# Licensed under the terms of the GNU General Public License.
# ---- Standard library imports
import os
import csv
import sys
# ---- Third party imports
import numpy as np
from xlrd import xldate_as_tuple
# ---- Local imports
from gwhat.brf_mod import __install_dir__
def produce_BRFInputtxt(well, time, wl, bp, et):
comment = 'No comment men'
wlu = 'feet'
bpu = 'feet'
etu = 'NONE'
sampleinterval = time[1]-time[0]
timeunits = 'days'
N = len(time)
yr, mth, day, hr, mn, sec = xldate_as_tuple(time[0], 0)
dst = '%02d/%02d/%d, %02d:%02d:%02d' % (yr, mth, day, hr, mn, sec)
yr, mth, day, hr, mn, sec = xldate_as_tuple(time[-1], 0)
det = '%02d/%02d/%d, %02d:%02d:%02d' % (yr, mth, day, hr, mn, sec)
fcontent = []
fcontent.append(['Comment: %s' % comment])
fcontent.append(['Well: %s' % well])
fcontent.append(['WL Units: %s' % wlu])
fcontent.append(['BP Units: %s' % bpu])
fcontent.append(['ET Units: %s' % etu])
fcontent.append(['Sample Interval: %f' % sampleinterval])
fcontent.append(['Time Units: %s' % timeunits])
fcontent.append(['Data Start Time: %s' % dst])
fcontent.append(['Data End Time: %s' % det])
fcontent.append(['Number of Data: %d' % N])
fcontent.append(['Time WL BP ET'])
# Add the data to the file content.
wl = (100 - wl) * 3.28084
bp = bp * 3.28084
t = time - time[0]
fcontent.extend([[time[i], wl[i], bp[i], et[i]] for i in range(N)])
filename = os.path.join(__install_dir__, 'BRFInput.txt')
with open(filename, 'w', encoding='utf8') as f:
        writer = csv.writer(f, delimiter='\t', lineterminator='\n')
writer.writerows(fcontent)
def produce_par_file(lagBP, lagET, detrend_waterlevels=True,
correct_waterlevels=True):
"""
Create the parameter file requires by the KGS_BRF program.
"""
brfinput = os.path.join(__install_dir__, 'BRFInput.txt')
brfoutput = os.path.join(__install_dir__, 'BRFOutput.txt')
wlcinput = os.path.join(__install_dir__, 'WLCInput.txt')
wlcoutput = os.path.join(__install_dir__, 'WLCOutput.txt')
detrend = 'Yes' if detrend_waterlevels else 'No'
correct = 'Yes' if correct_waterlevels else 'No'
par = []
par.append(['BRF Option (C[ompute] or R[ead]): Compute'])
par.append(['BRF Input Data File: %s' % brfinput])
par.append(['Number of BP Lags: %d' % lagBP])
par.append(['Number of BP ET: %d' % lagET])
par.append(['BRF Output Data File: %s' % brfoutput])
par.append(['Detrend data? (Y[es] or N[o]): %s' % detrend])
par.append(['Correct WL? (Y[es] or N[o]): %s' % correct])
par.append(['WLC Input Data File: %s' % wlcinput])
par.append(['WLC Output Data File: %s' % wlcoutput])
filename = os.path.join(__install_dir__, 'kgs_brf.par')
with open(filename, 'w', encoding='utf8') as f:
writer = csv.writer(f, delimiter='\t', lineterminator='\n')
writer.writerows(par)
def run_kgsbrf():
exename = os.path.join(__install_dir__, 'kgs_brf.exe')
parname = os.path.join(__install_dir__, 'kgs_brf.par')
if os.path.exists(exename) and os.path.exists(parname):
if os.name == 'nt':
os.system('""%s" < "%s""' % (exename, parname))
def read_brf_output():
"""
Read the barometric response function from the output file produced
by kgs_brf.exe.
"""
filename = os.path.join(__install_dir__, 'BRFOutput.txt')
with open(filename, 'r') as f:
reader = list(csv.reader(f))
header = []
for row in reader:
header.append(row)
if 'LagNo Lag A sdA SumA sdSumA B sdB SumB sdSumB' in row[0]:
break
# well = header[2][0].split()[-1]
# date0 = header[8][0].split()[-1]
# date1 = header[9][0].split()[-1]
data = reader[len(header):]
dataf = []
count = 1
for row in data:
if count == 1:
dataf.append([float(i) for i in row[0].split()])
count += 1
elif count in [2, 3]:
dataf[-1].extend([float(i) for i in row[0].split()])
count += 1
elif count == 4:
dataf[-1].extend([float(i) for i in row[0].split()])
count = 1
# Remove non valid data.
dataf = [row for row in dataf if row[4] > -999]
# Format data into numpy arrays
dataf = np.array(dataf)
lag = dataf[:, 1]
A = dataf[:, 4]
err = dataf[:, 5]
return lag, A, err
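# Illustrative sketch, not part of the original module: the usual KGS_BRF
# workflow chains the helpers above. The lag values are assumptions; well,
# time, wl, bp and et are expected to come from the GWHAT datasets.
def _example_brf_workflow(well, time, wl, bp, et):
    produce_BRFInputtxt(well, time, wl, bp, et)
    produce_par_file(lagBP=100, lagET=10)
    run_kgsbrf()
    return read_brf_output()  # (lag, A, err) numpy arrays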
if __name__ == "__main__":
# plt.close('all')
# produce_par_file()
run_kgsbrf()
    lag, A, err = read_brf_output()
# plt.show()
|
jnsebgosselin/WHAT
|
gwhat/brf_mod/kgs_brf.py
|
Python
|
gpl-3.0
| 4,831 | 0.000414 |
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tasks around VIOS-backed 'physical' fibre channel disks."""
import itertools
from lxml import etree
from oslo_log import log as logging
from pypowervm import const as c
import pypowervm.entities as ent
import pypowervm.exceptions as pexc
from pypowervm.i18n import _
import pypowervm.tasks.storage as tsk_stg
import pypowervm.utils.transaction as tx
from pypowervm.wrappers import job as pvm_job
from pypowervm.wrappers import virtual_io_server as pvm_vios
LOG = logging.getLogger(__name__)
_LUA_CMD_VERSION = '3'
_LUA_VERSION = '2.0'
_LUA_RECOVERY = 'LUARecovery'
_RM_HDISK = 'RemoveDevice'
_MGT_CONSOLE = 'ManagementConsole'
class LUAType(object):
"""LUA Vendors."""
IBM = "IBM"
EMC = "EMC"
NETAPP = "NETAPP"
HDS = "HDS"
HP = "HP"
OTHER = "OTHER"
class LUAStatus(object):
"""LUA Recovery status codes."""
DEVICE_IN_USE = '1'
ITL_NOT_RELIABLE = '2'
DEVICE_AVAILABLE = '3'
STORAGE_NOT_INTEREST = '4'
LUA_NOT_INTEREST = '5'
INCORRECT_ITL = '6'
FOUND_DEVICE_UNKNOWN_UDID = '7'
FOUND_ITL_ERR = '8'
def normalize_lun(scsi_id):
"""Normalize the lun id to Big Endian
:param scsi_id: Volume lun id
:return: Converted LUN id in Big Endian as per the RFC 4455
"""
# PowerVM keeps LUN identifiers in hex format.
lun = '%x' % int(scsi_id)
    # For drivers that support complex LUA lun-ids exceeding 2 bytes we
    # need to append 8 zeros, otherwise 12 zeros, to pass an 8-byte lun-id.
if len(lun) == 8:
lun += "00000000"
else:
lun += "000000000000"
return lun
class ITL(object):
"""The Nexus ITL.
See SCSI ITL. This is the grouping of the SCSI initiator, target and
LUN.
"""
def __init__(self, initiator, target, lun):
"""Create the ITL.
:param initiator: The initiator WWPN.
:param target: The target WWPN.
:param lun: The LUN identifier. Ex. 2 (an int). The identifier will
be formatted from a generic integer LUN ID to match
PowerVM's LUN Identifier format.
"""
self.initiator = initiator.lower().replace(':', '')
self.target = target.lower().replace(':', '')
self.lun = normalize_lun(lun)
def __eq__(self, other):
if other is None or not isinstance(other, ITL):
return False
return (self.initiator == other.initiator and
self.target == other.target and
self.lun == other.lun)
def __hash__(self):
return hash(self.initiator) ^ hash(self.target) ^ hash(self.lun)
def __ne__(self, other):
return not self.__eq__(other)
def good_discovery(status, device_name):
"""Checks the hdisk discovery results for a good discovery.
Acceptable LUA discovery statuses are :-
DEVICE_AVAILABLE: hdisk discovered on all the ITL paths and available.
DEVICE_IN_USE: hdisk discovered on all the ITL paths and is in-use by
the server.
    FOUND_ITL_ERR: hdisk discovered on some of the ITL paths and available.
    This can happen when multiple ITL nexus paths are passed and the hdisk is
    discovered on only a few of them, e.g. when multiple target wwpns and vios
    wwpns exist but only a few are connected. If the hdisk can be discovered
    on ANY of the paths, it is considered a good discovery.
"""
return device_name is not None and status in [
LUAStatus.DEVICE_AVAILABLE, LUAStatus.DEVICE_IN_USE,
LUAStatus.FOUND_ITL_ERR]
def build_itls(i_wwpns, t_wwpns, lun):
"""This method builds the list of ITLs for all of the permutations.
An ITL is specific to an initiator, target, and LUN. However, with multi
pathing, there are several scenarios where a given LUN will have many ITLs
because of multiple initiators or targets.
The initiators should be tied to a given Virtual I/O Server (or perhaps
specific WWPNs within a VIOS).
:param i_wwpns: List or set of initiator WWPNs.
:param t_wwpns: List or set of target WWPNs.
:param lun: The LUN identifier. Ex. 2 (an int). The identifier will be
formatted from a generic integer LUN ID to match PowerVM's
LUN Identifier format.
:return: List of all the ITL permutations.
"""
return [ITL(i, t, lun) for i, t in itertools.product(i_wwpns, t_wwpns)]
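# Illustrative sketch, not part of the original module: the WWPNs and LUN id
# are made up. build_itls yields one ITL per initiator/target pair, so two
# initiators and one target give two ITLs for the same LUN.
def _example_build_itls():
    itls = build_itls(['21000024ff449104', '21000024ff449105'],
                      ['500507680245cac0'], 2)
    assert len(itls) == 2
    assert all(itl.lun == normalize_lun(2) for itl in itls)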
def discover_hdisk(adapter, vios_uuid, itls, vendor=LUAType.OTHER,
device_id=None):
"""Attempt to discover a hard disk attached to a Virtual I/O Server.
See lua_recovery. This method attempts that call and analyzes the
results. On certain failure conditions (see below), this method will find
stale LPARs, scrub storage artifacts associated with them, and then retry
lua_recovery. The retry is only attempted once; that result is returned
regardless.
The main objective of this method is to resolve errors resulting from
incomplete cleanup of previous LPARs. The stale LPAR's storage mappings
can cause hdisk discovery to fail because it thinks the hdisk is already in
use.
Retry conditions: The scrub-and-retry will be triggered if:
o dev_name is None; or
o status is anything other than DEVICE_AVAILABLE or FOUND_ITL_ERR. (The
latter is acceptable because it means we discovered some, but not all, of
the ITLs. This is okay as long as dev_name is set.)
:param adapter: The pypowervm adapter.
:param vios_uuid: The Virtual I/O Server UUID.
:param itls: A list of ITL objects.
:param vendor: The vendor for the LUN. See the LUAType.* constants.
:param device_id: The device ID parameter in the LUA input XML.
Typically the base 64 encoded pg83 value.
:return status: The status code from the discover process.
See LUAStatus.* constants.
:return dev_name: The name of the discovered hdisk.
:return udid: The UDID of the device.
"""
# First attempt
status, devname, udid = lua_recovery(adapter, vios_uuid, itls,
vendor=vendor, device_id=device_id)
# Do we need to scrub and retry?
if not good_discovery(status, devname):
vwrap = pvm_vios.VIOS.get(adapter, uuid=vios_uuid,
xag=(c.XAG.VIO_SMAP, c.XAG.VIO_FMAP))
scrub_ids = tsk_stg.find_stale_lpars(vwrap)
if scrub_ids:
# Detailed warning message by _log_lua_status
LOG.warning(_("hdisk discovery failed; will scrub stale storage "
"for LPAR IDs %s and retry."), scrub_ids)
# Scrub from just the VIOS in question.
scrub_task = tx.FeedTask('scrub_vios_%s' % vios_uuid, [vwrap])
tsk_stg.add_lpar_storage_scrub_tasks(scrub_ids, scrub_task)
scrub_task.execute()
status, devname, udid = lua_recovery(adapter, vios_uuid, itls,
vendor=vendor,
device_id=device_id)
return status, devname, udid
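# Illustrative sketch, not part of the original module: a typical discovery
# flow combining the helpers above. The adapter, VIOS UUID, WWPNs and LUN are
# assumed to be supplied by the caller.
def _example_discover(adapter, vios_uuid, i_wwpns, t_wwpns, lun):
    itls = build_itls(i_wwpns, t_wwpns, lun)
    status, dev_name, udid = discover_hdisk(adapter, vios_uuid, itls)
    if good_discovery(status, dev_name):
        return dev_name, udid
    LOG.warning(_("hdisk for LUN %s was not discovered"), lun)
    return None, None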
def lua_recovery(adapter, vios_uuid, itls, vendor=LUAType.OTHER,
device_id=None):
"""Logical Unit Address Recovery - discovery of a FC-attached hdisk.
When a new disk is created externally (say on a block device), the Virtual
I/O Server may or may not discover it immediately. This method forces a
discovery on a given Virtual I/O Server.
:param adapter: The pypowervm adapter.
:param vios_uuid: The Virtual I/O Server UUID.
:param itls: A list of ITL objects.
:param vendor: The vendor for the LUN. See the LUAType.* constants.
:param device_id: The device ID parameter in the LUA input XML.
Typically the base 64 encoded pg83 value.
:return status: The status code from the discover process.
See LUAStatus.* constants.
:return dev_name: The name of the discovered hdisk.
:return udid: The UDID of the device.
"""
# Reduce the ITLs to ensure no duplicates
itls = set(itls)
# Build the LUA recovery XML
lua_xml = _lua_recovery_xml(itls, adapter, vendor=vendor,
device_id=device_id)
# Build up the job & invoke
resp = adapter.read(
pvm_vios.VIOS.schema_type, root_id=vios_uuid,
suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_LUA_RECOVERY)
job_wrapper = pvm_job.Job.wrap(resp)
job_parms = [job_wrapper.create_job_parameter('inputXML', lua_xml,
cdata=True)]
job_wrapper.run_job(vios_uuid, job_parms=job_parms)
# Get the job result, and parse the output.
job_result = job_wrapper.get_job_results_as_dict()
status, devname, udid = _process_lua_result(job_result)
return status, devname, udid
def _lua_recovery_xml(itls, adapter, vendor=LUAType.OTHER, device_id=None):
"""Builds the XML that is used as input for the lua_recovery job.
The lua_recovery provides a very quick way for the system to discover
an hdisk on the system. This method builds the input into the lua_recovery
job.
:param itls: The list of ITL objects that define the various connections
between the server port (initiator), disk port (target) and
disk itself.
:param vendor: The LUA vendor. See the LUAType.* Constants.
:param device_id: The device ID parameter in the LUA input XML.
Typically the base 64 encoded pg83 value.
:return: The CDATA XML that is used for the lua_recovery job.
"""
# Used for building the internal XML.
root = ent.Element("XML_LIST", adapter, ns='')
# The general attributes
# TODO(IBM) Need to determine value of making these constants modifiable
general = ent.Element("general", adapter, ns='')
general.append(ent.Element("cmd_version", adapter, text=_LUA_CMD_VERSION,
ns=''))
general.append(ent.Element("version", adapter, text=_LUA_VERSION, ns=''))
root.append(general)
# TODO(IBM) This can be re-evaluated. Set to true if you know for sure
# the ITLs are alive. If there are any bad ITLs, this should be false.
root.append(ent.Element("reliableITL", adapter, text="false", ns=''))
# There is only one device in the device list.
device_list = ent.Element("deviceList", adapter, ns='')
device = ent.Element("device", adapter, ns='')
device.append(ent.Element("vendor", adapter, text=vendor, ns=''))
if device_id:
device.append(ent.Element("deviceID", adapter, text=device_id, ns=''))
device.append(ent.Element("deviceTag", adapter, text="1", ns=''))
itl_list = ent.Element("itlList", adapter, ns='')
itl_list.append(ent.Element("number", adapter, text="%d" % (len(itls)),
ns=''))
for itl in itls:
itl_elem = ent.Element("itl", adapter, ns='')
itl_elem.append(ent.Element("Iwwpn", adapter, text=itl.initiator,
ns=''))
itl_elem.append(ent.Element("Twwpn", adapter, text=itl.target, ns=''))
itl_elem.append(ent.Element("lua", adapter, text=itl.lun, ns=''))
itl_list.append(itl_elem)
device.append(itl_list)
device_list.append(device)
root.append(device_list)
return root.toxmlstring().decode('utf-8')
def _process_lua_result(result):
"""Processes the Output XML returned by LUARecovery.
:return status: The status code from the discover process.
See LUAStatus.* constants.
:return dev_name: The name of the discovered hdisk.
:return udid: The UDID of the device.
"""
if result is None:
return None, None, None
# The result may push to StdOut or to OutputXML (different versions push
# to different locations).
xml_resp = result.get('OutputXML')
if xml_resp is None:
xml_resp = result.get('StdOut')
# If still none, nothing to do.
if xml_resp is None:
return None, None, None
# The response is an XML block. Put into an XML structure and get
# the data out of it.
root = etree.fromstring(xml_resp)
base = 'deviceList/device/'
estatus, edev_name, eudid, emessage = (
root.find(base + x)
for x in ('status', 'pvName', 'udid', 'msg/msgText'))
status, dev_name, udid, message = (
y.text if y is not None else None
for y in (estatus, edev_name, eudid, emessage))
_log_lua_status(status, dev_name, message)
return status, dev_name, udid
def _log_lua_status(status, dev_name, message):
"""Logs any issues with the LUA."""
if status == LUAStatus.DEVICE_AVAILABLE:
LOG.info(_("LUA Recovery Successful. Device Found: %s"),
dev_name)
elif status == LUAStatus.FOUND_ITL_ERR:
# Message is already set.
LOG.warning(_("ITL Error encountered: %s"), message)
elif status == LUAStatus.DEVICE_IN_USE:
LOG.warning(_("%s Device is currently in use."), dev_name)
elif status == LUAStatus.FOUND_DEVICE_UNKNOWN_UDID:
LOG.warning(_("%s Device discovered with unknown UDID."), dev_name)
elif status == LUAStatus.INCORRECT_ITL:
LOG.warning(_("Failed to Discover the Device : %s"), dev_name)
def remove_hdisk(adapter, host_name, dev_name, vios_uuid):
"""Command to remove the device from the VIOS.
:param adapter: The pypowervm adapter.
:param host_name: The name of the host.
:param dev_name: The name of the device to remove.
:param vios_uuid: The Virtual I/O Server UUID.
"""
if adapter.traits.rmdev_job_available:
_remove_hdisk_job(adapter, dev_name, vios_uuid)
else:
_remove_hdisk_classic(adapter, host_name, dev_name, vios_uuid)
def _remove_hdisk_job(adapter, dev_name, vios_uuid):
"""Runs the PowerVM Job to remove a hdisk.
:param adapter: The pypowervm adapter.
:param dev_name: The name of the device to remove.
:param vios_uuid: The Virtual I/O Server UUID.
"""
# Build up the job & invoke
resp = adapter.read(
pvm_vios.VIOS.schema_type, root_id=vios_uuid,
suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_RM_HDISK)
job_wrapper = pvm_job.Job.wrap(resp)
job_parms = [job_wrapper.create_job_parameter('devName', dev_name)]
# Run the job. If the hdisk removal failed, the job will raise an
# exception. No output otherwise.
job_wrapper.run_job(vios_uuid, job_parms=job_parms)
def _remove_hdisk_classic(adapter, host_name, dev_name, vios_uuid):
"""Command to remove the device from the VIOS.
Runs a remote command to perform the action.
:param adapter: The pypowervm adapter.
:param host_name: The name of the host.
:param dev_name: The name of the device to remove.
:param vios_uuid: The Virtual I/O Server UUID.
"""
try:
# Execute a read on the vios to get the vios name
resp = adapter.read(pvm_vios.VIOS.schema_type, root_id=vios_uuid)
vios_w = pvm_vios.VIOS.wrap(resp)
# build command
rm_cmd = ('viosvrcmd -m ' + host_name + ' -p ' + vios_w.name +
' -c \"rmdev -dev ' + dev_name + '\"')
LOG.debug('RMDEV Command Input: %s' % rm_cmd)
# Get the response for the CLIRunner command
resp = adapter.read(_MGT_CONSOLE, None,
suffix_type=c.SUFFIX_TYPE_DO,
suffix_parm='CLIRunner')
# Create the job parameters
job_wrapper = pvm_job.Job.wrap(resp)
ack_parm = 'acknowledgeThisAPIMayGoAwayInTheFuture'
job_parms = [job_wrapper.create_job_parameter('cmd', rm_cmd),
job_wrapper.create_job_parameter(ack_parm,
'true')]
job_wrapper.run_job(None, job_parms=job_parms)
return job_wrapper.job_status()
except pexc.JobRequestFailed as error:
LOG.warning(_('CLIRunner Error: %s') % error)
def get_pg83_via_job(adapter, vios_uuid, udid):
"""Inventory call to fetch the encoded SCSI Page 0x83 descriptor for a PV.
:param adapter: The pypowervm adapter through which to run the Job.
:param vios_uuid: The UUID of the Virtual I/O Server owning the PV.
:param udid: The UDID of the PV to query.
:return: SCSI PG83 NAA descriptor, base64-encoded. May be None.
"""
# TODO(efried): Remove this method once VIOS supports pg83 in Events
# Build the hdisk inventory input XML
lua_xml = ('<uom:VIO xmlns:uom="http://www.ibm.com/xmlns/systems/power/fir'
'mware/uom/mc/2012_10/" version="1.21" xmlns=""><uom:Request ac'
'tion_str="QUERY_INVENTORY"><uom:InventoryRequest inventoryType'
'="base"><uom:VioTypeFilter type="PV"/><uom:VioUdidFilter udid='
'"%s"/></uom:InventoryRequest></uom:Request></uom:VIO>' % udid)
# Build up the job & invoke
job_wrapper = pvm_job.Job.wrap(adapter.read(
pvm_vios.VIOS.schema_type, root_id=vios_uuid,
suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_LUA_RECOVERY))
job_wrapper.run_job(vios_uuid, job_parms=[
job_wrapper.create_job_parameter('inputXML', lua_xml, cdata=True)])
# Get the job result, and parse the output.
result = job_wrapper.get_job_results_as_dict()
# The result may push to StdOut or to OutputXML (different versions push
# to different locations).
if not result or not any((k in result for k in ('OutputXML', 'StdOut'))):
LOG.warning(_('QUERY_INVENTORY LUARecovery Job succeeded, but result '
'contained neither OutputXML nor StdOut.'))
return None
xml_resp = result.get('OutputXML', result.get('StdOut'))
LOG.debug('QUERY_INVENTORY result: %s' % xml_resp)
return _parse_pg83_xml(xml_resp)
def _parse_pg83_xml(xml_resp):
"""Parse LUARecovery XML response, looking for pg83 descriptor.
:param xml_resp: Tuple containing OutputXML and StdOut results of the
LUARecovery Job
:return: pg83 descriptor text, or None if not found.
"""
# QUERY_INVENTORY response may contain more than one element. Each will be
# delimited by its own <?xml?> tag. etree will only parse one at a time.
for chunk in xml_resp.split('<?xml version="1.0"?>'):
if not chunk:
continue
try:
parsed = etree.fromstring(chunk)
except etree.XMLSyntaxError as e:
LOG.warning(_('QUERY_INVENTORY produced invalid chunk of XML '
'(%(chunk)s). Error: %(err)s'),
{'chunk': chunk, 'err': e.args[0]})
continue
for elem in parsed.getiterator():
if (etree.QName(elem.tag).localname == 'PhysicalVolume_base' and
elem.attrib.get('desType') == "NAA"):
return elem.attrib.get('descriptor')
LOG.warning(_('Failed to find pg83 descriptor in XML output:\n%s'),
xml_resp)
return None
|
powervm/pypowervm
|
pypowervm/tasks/hdisk/_fc.py
|
Python
|
apache-2.0
| 19,741 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implemenations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
# TODO(pekowsk): Remove import cfg and below comment in Havana.
# This import should no longer be needed when the amqp_rpc_single_reply_queue
# option is removed.
from oslo.config import cfg
from cinder.openstack.common import excutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import local
from cinder.openstack.common import log as logging
from cinder.openstack.common.rpc import common as rpc_common
# TODO(pekowski): Remove this option in Havana.
amqp_opts = [
cfg.BoolOpt('amqp_rpc_single_reply_queue',
default=False,
help='Enable a fast single reply queue if using AMQP based '
'RPC like RabbitMQ or Qpid.'),
]
cfg.CONF.register_opts(amqp_opts)
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug(_('Pool creating new connection'))
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
# Force a new connection pool to be created.
# Note that this was added due to failing unit test cases. The issue
# is the above "while loop" gets all the cached connections from the
# pool and closes them, but never returns them to the pool, a pool
# leak. The unit tests hang waiting for an item to be returned to the
        # pool. The unit tests get here via the tearDown() method. In the run
# time code, it gets here via cleanup() and only appears in service.py
# just before doing a sys.exit(), so cleanup() only happens once and
# the leakage is not a problem.
self.connection_cls.pool = None
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the caller of
create_connection(). This is essentially a wrapper around
Connection that supports 'with'. It can also return a new
Connection, or one from a pool. The function will also catch
when an instance of this class is to be deleted. With that
we can return Connections to the pool on exceptions and so
forth without making the caller be responsible for catching
them. If possible the function makes sure to return a
connection to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool"""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
if pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self"""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
exchange_name)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance"""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
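# Illustrative sketch, not part of the original module: the 'with' support
# described above returns a pooled connection to the pool on exit. The conf,
# connection pool and queue name are assumed to come from the rpc driver.
def _example_pooled_direct_send(conf, connection_pool, queue_name, msg):
    with ConnectionContext(conf, connection_pool) as conn:
        conn.direct_send(queue_name, rpc_common.serialize_msg(msg))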
class ReplyProxy(ConnectionContext):
""" Connection class for RPC replies / callbacks """
def __init__(self, conf, connection_pool):
self._call_waiters = {}
self._num_call_waiters = 0
self._num_call_waiters_wrn_threshhold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data)
self.consume_in_thread()
def _process_data(self, message_data):
msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id)
if not waiter:
LOG.warn(_('no calling threads waiting for msg_id : %s'
', message : %s') % (msg_id, message_data))
else:
waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1
if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
LOG.warn(_('Number of call waiters is greater than warning '
                       'threshold: %d. There could be a MulticallProxyWaiter '
'leak.') % self._num_call_waiters_wrn_threshhold)
self._num_call_waiters_wrn_threshhold *= 2
self._call_waiters[msg_id] = waiter
def del_call_waiter(self, msg_id):
self._num_call_waiters -= 1
del self._call_waiters[msg_id]
def get_reply_q(self):
return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
failure=None, ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
try:
msg = {'result': reply, 'failure': failure}
except TypeError:
msg = {'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems()),
'failure': failure}
if ending:
msg['ending'] = True
_add_unique_id(msg)
# If a reply_q exists, add the msg_id to the reply and pass the
# reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibility.
if reply_q:
msg['_msg_id'] = msg_id
conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
else:
conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call"""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.reply_q = kwargs.pop('reply_q', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
values['reply_q'] = self.reply_q
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
reply, failure, ending, log_failure)
if ending:
self.msg_id = None
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
# NOTE(vish): Some versions of python don't like unicode keys
# in kwargs.
key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
return ctx
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.to_dict().iteritems()])
msg.update(context_d)
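# Illustrative sketch, not part of the original module: pack_context() flattens
# the caller's context into '_context_*' keys so it rides inside the message,
# and unpack_context() rebuilds an RpcContext on the consumer side. The context
# argument is assumed to be a CommonRpcContext instance.
def _example_context_roundtrip(conf, context):
    msg = {'method': 'echo', 'args': {'value': 42}}
    pack_context(msg, context)        # adds '_context_<key>' entries to msg
    return unpack_context(conf, msg)  # consumer side: rebuild the RpcContext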
class _MsgIdCache(object):
"""This class checks any duplicate messages."""
    # NOTE: This value could be made a configuration item, but
    #       it rarely needs to be changed in practice,
    #       so leave it static for now.
DUP_MSG_CHECK_SIZE = 16
def __init__(self, **kwargs):
self.prev_msgids = collections.deque([],
maxlen=self.DUP_MSG_CHECK_SIZE)
def check_duplicate_message(self, message_data):
"""AMQP consumers may read same message twice when exceptions occur
before ack is returned. This method prevents doing it.
"""
if UNIQUE_ID in message_data:
msg_id = message_data[UNIQUE_ID]
if msg_id not in self.prev_msgids:
self.prev_msgids.append(msg_id)
else:
raise rpc_common.DuplicateMessageError(msg_id=msg_id)
def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
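# Illustrative sketch, not part of the original module: how _add_unique_id()
# and _MsgIdCache cooperate to reject a redelivered message.
def _example_duplicate_check():
    msg = {'method': 'echo', 'args': {'value': 42}}
    _add_unique_id(msg)                     # stamps msg[UNIQUE_ID]
    cache = _MsgIdCache()
    cache.check_duplicate_message(msg)      # first delivery passes
    try:
        cache.check_duplicate_message(msg)  # redelivery raises
    except rpc_common.DuplicateMessageError:
        pass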
class _ThreadPoolWithWait(object):
"""Base class for a delayed invocation manager used by
the Connection class to start up green threads
to handle incoming messages.
"""
def __init__(self, conf, connection_pool):
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def wait(self):
"""Wait for all callback threads to exit."""
self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
"""Wraps a straight callback to allow it to be invoked in a green
thread.
"""
def __init__(self, conf, callback, connection_pool):
"""
:param conf: cfg.CONF instance
:param callback: a callable (probably a function)
:param connection_pool: connection pool as returned by
get_connection_pool()
"""
super(CallbackWrapper, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.callback = callback
def __call__(self, message_data):
self.pool.spawn_n(self.callback, message_data)
class ProxyCallback(_ThreadPoolWithWait):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
super(ProxyCallback, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.proxy = proxy
self.msg_id_cache = _MsgIdCache()
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
Parses the message for validity and fires off a thread to call the
proxy object method.
Message data should be a dictionary with two keys:
method: string representing the method to call
args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}}
"""
# It is important to clear the context here, because at this point
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
version = message_data.get('version')
namespace = message_data.get('namespace')
if not method:
LOG.warn(_('no method for message: %s') % message_data)
ctxt.reply(_('No method for message: %s') % message_data,
connection_pool=self.connection_pool)
return
self.pool.spawn_n(self._process_data, ctxt, version, method,
namespace, args)
def _process_data(self, ctxt, version, method, namespace, args):
"""Process a message in a new thread.
If the proxy object we have has a dispatch method
(see rpc.dispatcher.RpcDispatcher), pass it the version,
method, and args and let it dispatch as appropriate. If not, use
the old behavior of magically calling the specified method on the
proxy we have here.
"""
ctxt.update_store()
try:
rval = self.proxy.dispatch(ctxt, version, method, namespace,
**args)
# Check if the result was a generator
if inspect.isgenerator(rval):
for x in rval:
ctxt.reply(x, None, connection_pool=self.connection_pool)
else:
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except rpc_common.ClientException as e:
LOG.debug(_('Expected exception during message handling (%s)') %
e._exc_info[1])
ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool,
log_failure=False)
except Exception:
# sys.exc_info() is deleted by LOG.exception().
exc_info = sys.exc_info()
LOG.error(_('Exception during message handling'),
exc_info=exc_info)
ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
def __init__(self, conf, msg_id, timeout, connection_pool):
self._msg_id = msg_id
self._timeout = timeout or conf.rpc_response_timeout
self._reply_proxy = connection_pool.reply_proxy
self._done = False
self._got_ending = False
self._conf = conf
self._dataqueue = queue.LightQueue()
# Add this caller to the reply proxy's call_waiters
self._reply_proxy.add_call_waiter(self, self._msg_id)
self.msg_id_cache = _MsgIdCache()
def put(self, data):
self._dataqueue.put(data)
def done(self):
if self._done:
return
self._done = True
# Remove this caller from reply proxy's call_waiters
self._reply_proxy.del_call_waiter(self._msg_id)
def _process_data(self, data):
result = None
self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
result = data['result']
return result
def __iter__(self):
"""Return a result until we get a reply with an 'ending" flag"""
if self._done:
raise StopIteration
while True:
try:
data = self._dataqueue.get(timeout=self._timeout)
result = self._process_data(data)
except queue.Empty:
self.done()
raise rpc_common.Timeout()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
if isinstance(result, Exception):
self.done()
raise result
yield result
#TODO(pekowski): Remove MulticallWaiter() in Havana.
class MulticallWaiter(object):
def __init__(self, conf, connection, timeout):
self._connection = connection
self._iterator = connection.iterconsume(timeout=timeout or
conf.rpc_response_timeout)
self._result = None
self._done = False
self._got_ending = False
self._conf = conf
self.msg_id_cache = _MsgIdCache()
def done(self):
if self._done:
return
self._done = True
self._iterator.close()
self._iterator = None
self._connection.close()
def __call__(self, data):
"""The consume() callback will call this. Store the result."""
self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
self._result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
self._result = data['result']
def __iter__(self):
"""Return a result until we get a 'None' response from consumer"""
if self._done:
raise StopIteration
while True:
try:
self._iterator.next()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
result = self._result
if isinstance(result, Exception):
self.done()
raise result
yield result
def create_connection(conf, new, connection_pool):
"""Create a connection"""
return ConnectionContext(conf, connection_pool, pooled=not new)
_reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
# TODO(pekowski): Remove all these comments in Havana.
# For amqp_rpc_single_reply_queue = False,
# Can't use 'with' for multicall, as it returns an iterator
# that will continue to use the connection. When it's done,
# connection.close() will get called which will put it back into
# the pool
# For amqp_rpc_single_reply_queue = True,
# The 'with' statement is mandatory for closing the connection
LOG.debug(_('Making synchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
_add_unique_id(msg)
pack_context(msg, context)
# TODO(pekowski): Remove this flag and the code under the if clause
# in Havana.
if not conf.amqp_rpc_single_reply_queue:
conn = ConnectionContext(conf, connection_pool)
wait_msg = MulticallWaiter(conf, conn, timeout)
conn.declare_direct_consumer(msg_id, wait_msg)
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
else:
with _reply_proxy_create_sem:
if not connection_pool.reply_proxy:
connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
return wait_msg
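# Added commentary (not part of the original module): after the updates above, the
# message handed to topic_send() carries an envelope along the lines of
#     {'method': 'echo', 'args': {'value': 42},   # caller-supplied payload
#      '_msg_id': '<uuid4 hex>',                  # set by multicall()
#      '_unique_id': '<uuid4 hex>',               # set by _add_unique_id()
#      '_reply_q': '<reply proxy queue name>',    # only when amqp_rpc_single_reply_queue=True
#      ...}                                       # plus the context keys added by pack_context()
# The exact context key names and reply-queue naming come from helpers defined earlier
# in this module; the values shown here are purely illustrative.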
def call(conf, context, topic, msg, timeout, connection_pool):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout, connection_pool)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
if envelope:
msg = rpc_common.serialize_msg(msg)
conn.notify_send(topic, msg)
def cleanup(connection_pool):
if connection_pool:
connection_pool.empty()
def get_control_exchange(conf):
return conf.control_exchange
|
maelnor/cinder
|
cinder/openstack/common/rpc/amqp.py
|
Python
|
apache-2.0
| 25,306 | 0.00004 |
# ----------------------------------------------------------------------
# Copyright (C) 2012 Numenta Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
import os
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
VERBOSITY = 0
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
'fields': [(u'c1', 'first'), (u'c0', 'first')],
},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : VERBOSITY,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'c0_timeOfDay': { 'fieldname': u'c0',
'name': u'c0_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
u'c0_dayOfWeek': { 'dayOfWeek': (21, 1),
'fieldname': u'c0',
'name': u'c0_dayOfWeek',
'type': 'DateEncoder'},
u'c0_weekend': { 'fieldname': u'c0',
'name': u'c0_weekend',
'type': 'DateEncoder',
'weekend': 21},
u'c1': { 'clipInput': True,
'fieldname': u'c1',
'n': 100,
'name': u'c1',
'type': 'AdaptiveScalarEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
            #  Valid keys are the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : VERBOSITY,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActivePerInhArea': 40,
'seed': 1956,
# coincInputPoolPct
            # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose coincInputPoolPct * (2*coincInputRadius+1)^2
'coincInputPoolPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : VERBOSITY,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '24',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
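# Worked example (added for illustration, not part of the generated template): if the
# aggregation period were one hour and predictAheadTime described 24 hours,
# aggregationDivide() would return 24, predictionSteps would be 24, and
# clParams['steps'] would end up as '24' -- the same value used as the default above
# when predictAheadTime is None.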
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
dataPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data.csv'))
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { 'aggregation': config['aggregationInfo'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'streams': [ { u'columns': [u'c0', u'c1'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'source': 'file://%s' % (dataPath),
u'first_record': config['firstRecord'],
u'last_record': config['lastRecord'],
u'types': [u'datetime', u'float']}],
u'timeField': u'c0',
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'c1', u'predictionSteps': [24]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'c1', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [24], 'errorMetric': 'altMAPE'}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
################################################################################
################################################################################
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
|
chetan51/nupic
|
tests/integration/nupic/opf/opf_checkpoint_test/experiments/non_temporal_multi_step/base.py
|
Python
|
gpl-3.0
| 14,616 | 0.003626 |
# -*- coding: utf-8 -*-
from ihm.main_window import launch
if __name__ == '__main__':
launch()
|
ThibaultGigant/Crosswords
|
run.py
|
Python
|
gpl-3.0
| 101 | 0 |
"""
This component provides HA cover support for Abode Security System.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.abode/
"""
import logging
from homeassistant.components.abode import AbodeDevice, DOMAIN as ABODE_DOMAIN
from homeassistant.components.cover import CoverDevice
DEPENDENCIES = ['abode']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up Abode cover devices."""
import abodepy.helpers.constants as CONST
data = hass.data[ABODE_DOMAIN]
devices = []
for device in data.abode.get_devices(generic_type=CONST.TYPE_COVER):
if data.is_excluded(device):
continue
devices.append(AbodeCover(data, device))
data.devices.extend(devices)
add_devices(devices)
class AbodeCover(AbodeDevice, CoverDevice):
"""Representation of an Abode cover."""
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return not self._device.is_open
def close_cover(self, **kwargs):
"""Issue close command to cover."""
self._device.close_cover()
def open_cover(self, **kwargs):
"""Issue open command to cover."""
self._device.open_cover()
|
ewandor/home-assistant
|
homeassistant/components/cover/abode.py
|
Python
|
apache-2.0
| 1,326 | 0 |
import os
import re
import shutil
import zipfile
import requests
import json
from shutil import copy2
from urllib.request import urlretrieve, urlopen
# Input parameters
version_param = os.environ.get('RELEASE_VERSION')
is_latest_param = True if version_param == "master" else False
# build constants
m2repo_path = '/m2repo'
tmp_path = './tmp/%s' % version_param
policies_path = "%s/policies" % tmp_path
resources_path = "%s/resources" % tmp_path
fetchers_path = "%s/fetchers" % tmp_path
services_path = "%s/services" % tmp_path
reporters_path = "%s/reporters" % tmp_path
repositories_path = "%s/repositories" % tmp_path
connectors_path = "%s/connectors" % tmp_path
snapshotPattern = re.compile('.*-SNAPSHOT')
def clean():
if os.path.exists(tmp_path):
shutil.rmtree(tmp_path)
os.makedirs(tmp_path, exist_ok=True)
os.makedirs(policies_path, exist_ok=True)
os.makedirs(fetchers_path, exist_ok=True)
os.makedirs(resources_path, exist_ok=True)
os.makedirs(services_path, exist_ok=True)
os.makedirs(reporters_path, exist_ok=True)
os.makedirs(repositories_path, exist_ok=True)
os.makedirs(connectors_path, exist_ok=True)
def get_policies(release_json):
components = release_json['components']
search_pattern = re.compile('gravitee-policy-.*')
policies = []
for component in components:
if search_pattern.match(component['name']) and 'gravitee-policy-api' != component['name']:
policies.append(component)
if "gravitee-policy-ratelimit" == component['name']:
policies.append({"name": "gravitee-policy-quota", "version": component['version']})
if int(component['version'].replace(".", "").replace("-SNAPSHOT", "")) >= 1100:
policies.append({"name": "gravitee-policy-spikearrest", "version": component['version']})
return policies
def get_resources(release_json):
components_name = [
"gravitee-resource-cache",
"gravitee-resource-oauth2-provider-generic",
"gravitee-resource-oauth2-provider-am"
]
resources = []
for component_name in components_name:
resources.append(get_component_by_name(release_json, component_name))
return resources
def get_fetchers(release_json):
components = release_json['components']
search_pattern = re.compile('gravitee-fetcher-.*')
fetchers = []
for component in components:
if search_pattern.match(component['name']) and 'gravitee-fetcher-api' != component['name']:
fetchers.append(component)
return fetchers
def get_reporters(release_json):
components_name = [
"gravitee-reporter-file",
"gravitee-reporter-tcp",
"gravitee-elasticsearch"
]
reporters = []
for component_name in components_name:
reporters.append(get_component_by_name(release_json, component_name))
return reporters
def get_repositories(release_json):
components_name = [
"gravitee-repository-mongodb",
"gravitee-repository-jdbc",
"gravitee-elasticsearch",
"gravitee-repository-gateway-bridge-http"
]
repositories = []
for component_name in components_name:
repositories.append(get_component_by_name(release_json, component_name))
return repositories
def get_services(release_json):
components_name = [
"gravitee-service-discovery-consul"
]
components = release_json['components']
search_pattern = re.compile('gravitee-policy-ratelimit')
services = []
for component in components:
if search_pattern.match(component['name']):
service = component.copy()
service['name'] = 'gravitee-gateway-services-ratelimit'
services.append(service)
break
for component_name in components_name:
services.append(get_component_by_name(release_json, component_name))
return services
def get_connectors(release_json):
components = release_json['components']
search_pattern = re.compile('gravitee-.*-connectors-ws')
connectors = []
for component in components:
if search_pattern.match(component['name']):
connectors.append(component)
return connectors
def get_component_by_name(release_json, component_name):
components = release_json['components']
search_pattern = re.compile(component_name)
for component in components:
if search_pattern.match(component['name']):
return component
def get_download_url(group_id, artifact_id, version, t):
m2path = "%s/%s/%s/%s/%s-%s.%s" % (m2repo_path, group_id.replace(".", "/"), artifact_id, version, artifact_id, version, t)
if os.path.exists(m2path):
return m2path
else:
sonatypeUrl = "https://oss.sonatype.org/service/local/artifact/maven/redirect?r=%s&g=%s&a=%s&v=%s&e=%s" % (
("snapshots" if snapshotPattern.match(version) else "releases"), group_id.replace(".", "/"), artifact_id, version, t)
f = urlopen(sonatypeUrl)
return f.geturl()
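# Example (illustrative only; the coordinates below are made up): for
# group_id="io.gravitee.policy", artifact_id="gravitee-policy-mock", version="1.0.0"
# and t="zip", the function first checks
#     /m2repo/io/gravitee/policy/gravitee-policy-mock/1.0.0/gravitee-policy-mock-1.0.0.zip
# and, if that local file does not exist, falls back to the oss.sonatype.org redirect
# URL built above (the "releases" repository, or "snapshots" for -SNAPSHOT versions).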
def get_suffix_path_by_name(name):
if name.find("policy") == -1:
suffix = name[name.find('-') + 1:name.find('-', name.find('-') + 1)]
if suffix == "gateway":
return "services"
if suffix == "repository":
return "repositories"
if suffix == "cockpit":
return "connectors"
return suffix + "s"
else:
return "policies"
def download(name, filename_path, url):
    print('\nDownloading %s\n%s' % (name, url))
if url.startswith("http"):
filename_path = tmp_path + "/" + get_suffix_path_by_name(name) + url[url.rfind('/'):]
urlretrieve(url, filename_path)
else:
copy2(url, filename_path)
    print('\nDownloaded in %s' % filename_path)
return filename_path
def unzip(files):
unzip_dirs = []
dist_dir = get_dist_dir_name()
for file in files:
with zipfile.ZipFile(file) as zip_file:
zip_file.extractall("%s/%s" % (tmp_path, dist_dir))
unzip_dir = "%s/%s/%s" % (tmp_path, dist_dir, sorted(zip_file.namelist())[0])
unzip_dirs.append(unzip_dir)
preserve_permissions(unzip_dir)
return sorted(unzip_dirs)
def preserve_permissions(d):
search_bin_pattern = re.compile(".*/bin$")
search_gravitee_pattern = re.compile("gravitee(\.bat)?")
perm = 0o0755
for dirname, subdirs, files in os.walk(d):
if search_bin_pattern.match(dirname):
for file in files:
if search_gravitee_pattern.match(file):
file_path = "%s/%s" % (dirname, file)
print(" set permission %o to %s" % (perm, file_path))
os.chmod(file_path, perm)
def copy_files_into(src_dir, dest_dir, exclude_pattern=None):
if exclude_pattern is None:
exclude_pattern = []
filenames = [os.path.join(src_dir, fn) for fn in next(os.walk(src_dir))[2]]
print(" copy")
print(" %s" % filenames)
print(" into")
print(" %s" % dest_dir)
for file in filenames:
to_exclude = False
for pattern in exclude_pattern:
search_pattern = re.compile(pattern)
if search_pattern.match(file):
to_exclude = True
break
if to_exclude:
print("[INFO] %s is excluded from files." % file)
continue
copy2(file, dest_dir)
def download_policies(policies):
paths = []
for policy in policies:
if policy['name'] != "gravitee-policy-core":
url = get_download_url("io.gravitee.policy", policy['name'], policy['version'], "zip")
paths.append(
download(policy['name'], '%s/%s-%s.zip' % (policies_path, policy['name'], policy['version']), url))
return paths
def download_management_api(mgmt_api, default_version):
v = default_version if 'version' not in mgmt_api else mgmt_api['version']
url = get_download_url("io.gravitee.management.standalone", "gravitee-management-api-standalone-distribution-zip",
v, "zip")
return download(mgmt_api['name'], '%s/%s-%s.zip' % (tmp_path, mgmt_api['name'], v), url)
def download_managementV3_api(mgmt_api, default_version):
v = default_version if 'version' not in mgmt_api else mgmt_api['version']
url = get_download_url("io.gravitee.rest.api.standalone.distribution", "gravitee-rest-api-standalone-distribution-zip",
v, "zip")
return download(mgmt_api['name'], '%s/%s-%s.zip' % (tmp_path, mgmt_api['name'], v), url)
def download_gateway(gateway, default_version):
v = default_version if 'version' not in gateway else gateway['version']
url = get_download_url("io.gravitee.gateway.standalone", "gravitee-gateway-standalone-distribution-zip",
v, "zip")
return download(gateway['name'], '%s/%s-%s.zip' % (tmp_path, gateway['name'], v), url)
def download_fetchers(fetchers):
paths = []
for fetcher in fetchers:
url = get_download_url("io.gravitee.fetcher", fetcher['name'], fetcher['version'], "zip")
paths.append(
download(fetcher['name'], '%s/%s-%s.zip' % (fetchers_path, fetcher['name'], fetcher['version']), url))
return paths
def download_resources(resources):
paths = []
for resource in resources:
url = get_download_url("io.gravitee.resource", resource['name'], resource['version'], "zip")
paths.append(
download(resource['name'], '%s/%s-%s.zip' % (resources_path, resource['name'], resource['version']), url))
return paths
def download_services(services):
paths = []
for service in services:
# for release < 1.22
if service is not None:
if service['name'] == "gravitee-gateway-services-ratelimit":
url = get_download_url("io.gravitee.policy", service['name'], service['version'], "zip")
else:
url = get_download_url("io.gravitee.discovery", service['name'], service['version'], "zip")
paths.append(
download(service['name'], '%s/%s-%s.zip' % (services_path, service['name'], service['version']), url))
return paths
def download_connectors(connectors):
paths = []
for connector in connectors:
url = get_download_url("io.gravitee.cockpit", connector['name'], connector['version'], "zip")
paths.append(
download(connector['name'], '%s/%s-%s.zip' % (resources_path, connector['name'], connector['version']), url))
return paths
def download_ui(ui, default_version):
v = default_version if 'version' not in ui else ui['version']
url = get_download_url("io.gravitee.management", ui['name'], v, "zip")
return download(ui['name'], '%s/%s-%s.zip' % (tmp_path, ui['name'], v), url)
def download_portal_ui(ui, default_version):
v = default_version if 'version' not in ui else ui['version']
url = get_download_url("io.gravitee.portal", ui['name'], v, "zip")
return download(ui['name'], '%s/%s-%s.zip' % (tmp_path, ui['name'], v), url)
def download_reporters(reporters):
paths = []
for reporter in reporters:
name = "gravitee-reporter-elasticsearch" if "gravitee-elasticsearch" == reporter['name'] else reporter['name']
url = get_download_url("io.gravitee.reporter", name, reporter['version'], "zip")
paths.append(
download(name, '%s/%s-%s.zip' % (reporters_path, name, reporter['version']), url))
return paths
def download_repositories(repositories):
paths = []
for repository in repositories:
if repository['name'] != "gravitee-repository-gateway-bridge-http":
name = "gravitee-repository-elasticsearch" if "gravitee-elasticsearch" == repository['name'] else repository['name']
url = get_download_url("io.gravitee.repository", name, repository['version'], "zip")
paths.append(download(name, '%s/%s-%s.zip' % (repositories_path, name, repository['version']), url))
else:
for name in ["gravitee-repository-gateway-bridge-http-client", "gravitee-repository-gateway-bridge-http-server"]:
url = get_download_url("io.gravitee.gateway", name, repository['version'], "zip")
paths.append(download(name, '%s/%s-%s.zip' % (repositories_path, name, repository['version']), url))
return paths
def prepare_gateway_bundle(gateway):
print("==================================")
print("Prepare %s" % gateway)
bundle_path = unzip([gateway])[0]
print(" bundle_path: %s" % bundle_path)
copy_files_into(policies_path, bundle_path + "plugins")
copy_files_into(resources_path, bundle_path + "plugins")
copy_files_into(repositories_path, bundle_path + "plugins", [".*gravitee-repository-elasticsearch.*"])
copy_files_into(reporters_path, bundle_path + "plugins")
copy_files_into(services_path, bundle_path + "plugins")
copy_files_into(connectors_path, bundle_path + "plugins")
os.makedirs(bundle_path + "plugins/ext/repository-jdbc", exist_ok=True)
def prepare_ui_bundle(ui):
print("==================================")
print("Prepare %s" % ui)
bundle_path = unzip([ui])[0]
print(" bundle_path: %s" % bundle_path)
def prepare_mgmt_bundle(mgmt):
print("==================================")
print("Prepare %s" % mgmt)
bundle_path = unzip([mgmt])[0]
print(" bundle_path: %s" % bundle_path)
copy_files_into(policies_path, bundle_path + "plugins")
copy_files_into(resources_path, bundle_path + "plugins")
copy_files_into(fetchers_path, bundle_path + "plugins")
copy_files_into(repositories_path, bundle_path + "plugins", [".*gravitee-repository-ehcache.*", ".*gravitee-repository-gateway-bridge-http-client.*", ".*gravitee-repository-gateway-bridge-http-server.*"])
copy_files_into(services_path, bundle_path + "plugins", [".*gravitee-gateway-services-ratelimit.*"])
copy_files_into(connectors_path, bundle_path + "plugins")
os.makedirs(bundle_path + "plugins/ext/repository-jdbc", exist_ok=True)
def prepare_policies(version):
print("==================================")
print("Prepare Policies")
dist_dir = get_dist_dir_name()
policies_dist_path = "%s/%s/gravitee-policies-%s" % (tmp_path, dist_dir, version)
os.makedirs(policies_dist_path, exist_ok=True)
copy_files_into(policies_path, policies_dist_path)
copy_files_into(services_path, policies_dist_path)
def package(version, release_json):
print("==================================")
print("Packaging")
packages = []
exclude_from_full_zip_list = [re.compile(".*graviteeio-policies.*")]
dist_dir = get_dist_dir_name()
full_zip_name = "graviteeio-full-%s" % version
# how to create a symbolic link ?
#if jdbc:
# full_zip_name = "graviteeio-full-jdbc-%s" % version
full_zip_path = "%s/%s/%s.zip" % (tmp_path, dist_dir, full_zip_name)
dirs = [os.path.join("%s/%s/" % (tmp_path, dist_dir), fn) for fn in next(os.walk("%s/%s/" % (tmp_path, dist_dir)))[1]]
# add release.json
jsonfile_name = "release.json"
jsonfile_absname = os.path.join("%s/%s/%s" % (tmp_path, dist_dir, jsonfile_name))
jsonfile = open(jsonfile_absname, "w")
jsonfile.write("%s" % json.dumps(release_json, indent=4))
jsonfile.close()
with zipfile.ZipFile(full_zip_path, "w", zipfile.ZIP_DEFLATED) as full_zip:
print("Create %s" % full_zip_path)
packages.append(full_zip_path)
full_zip.write(jsonfile_absname, jsonfile_name)
for d in dirs:
with zipfile.ZipFile("%s.zip" % d, "w", zipfile.ZIP_DEFLATED) as bundle_zip:
print("Create %s.zip" % d)
packages.append("%s.zip" % d)
dir_abs_path = os.path.abspath(d)
dir_name = os.path.split(dir_abs_path)[1]
for dirname, subdirs, files in os.walk(dir_abs_path):
exclude_from_full_zip = False
for pattern in exclude_from_full_zip_list:
if pattern.match(d):
exclude_from_full_zip = True
break
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(dir_abs_path) - len(dir_name):]
bundle_zip.write(absname, arcname)
if exclude_from_full_zip is False:
full_zip.write(absname, "%s/%s" % (full_zip_name, arcname))
if len(files) == 0:
absname = os.path.abspath(dirname)
arcname = absname[len(dir_abs_path) - len(dir_name):]
bundle_zip.write(absname, arcname)
if exclude_from_full_zip is False:
full_zip.write(absname, "%s/%s" % (full_zip_name, arcname))
return packages
def rename(string):
return string.replace("gravitee", "graviteeio") \
.replace("management-standalone", "management-api") \
.replace("management-webui", "management-ui") \
.replace("portal-webui", "portal-ui") \
.replace("standalone-", "")
def clean_dir_names():
print("==================================")
print("Clean directory names")
dirs = [os.path.join("%s/%s/" % (tmp_path, get_dist_dir_name()), fn) for fn in next(os.walk("%s/%s/" % (tmp_path, get_dist_dir_name())))[1]]
for d in dirs:
os.rename(d, rename(d))
def response_pretty_print(r):
print("###########################################################")
print("STATUS %s" % r.status_code)
print("HEADERS \n%s" % r.headers)
print("RESPONSE \n%s" % r.text)
print("###########################################################\n\n")
r.raise_for_status()
def get_dist_dir_name():
dist_dir = "dist"
return dist_dir
def main():
if is_latest_param:
release_json_url = "https://raw.githubusercontent.com/gravitee-io/release/master/release.json"
else:
release_json_url = "https://raw.githubusercontent.com/gravitee-io/release/%s/release.json" % version_param
print(release_json_url)
release_json = requests.get(release_json_url)
print(release_json)
release_json = release_json.json()
version = release_json['version']
print("Create bundles for Gravitee.io v%s" % version)
clean()
v3 = int(version[0]) > 1
if v3:
portal_ui = download_portal_ui(get_component_by_name(release_json, "gravitee-portal-webui"), version)
mgmt_api = download_managementV3_api(get_component_by_name(release_json, "gravitee-management-rest-api"), version)
else:
mgmt_api = download_management_api(get_component_by_name(release_json, "gravitee-management-rest-api"), version)
ui = download_ui(get_component_by_name(release_json, "gravitee-management-webui"), version)
gateway = download_gateway(get_component_by_name(release_json, "gravitee-gateway"), version)
download_policies(get_policies(release_json))
download_resources(get_resources(release_json))
download_fetchers(get_fetchers(release_json))
download_services(get_services(release_json))
download_reporters(get_reporters(release_json))
download_repositories(get_repositories(release_json))
if int(version.replace(".", "").replace("-SNAPSHOT", "")) > 354:
download_connectors(get_connectors(release_json))
if v3:
prepare_ui_bundle(portal_ui)
prepare_gateway_bundle(gateway)
prepare_ui_bundle(ui)
prepare_mgmt_bundle(mgmt_api)
prepare_policies(version)
clean_dir_names()
package(version, release_json)
main()
|
gravitee-io/jenkins-scripts
|
src/main/python/package_bundles.py
|
Python
|
apache-2.0
| 19,869 | 0.003372 |
import pandas as pd
from sqlalchemy import create_engine
from bcpp_rdb.private_settings import Rdb
class CCC(object):
"""CDC data for close clinical cohort."""
def __init__(self):
self.engine = create_engine('postgresql://{user}:{password}@{host}/{db}'.format(
user=Rdb.user, password=Rdb.password, host=Rdb.host, db=Rdb.name),
connect_args={})
with self.engine.connect() as conn, conn.begin():
self.df_enrolled = pd.read_sql_query(self.sql_enrolled, conn)
    @property
    def sql_enrolled(self):
        """
        * If patient is from BCPP survey, oc_study_id is a BHP identifier.
* ssid is the CDC allocated identifier of format NNN-NNNN.
"""
return """select ssid as cdcid, oc_study_id as subject_identifier,
appt_date from dw.oc_crf_ccc_enrollment"""
    @property
    def sql_refused(self):
        """
        * If patient is from BCPP survey, oc_study_id is a BHP identifier.
* ssid is the CDC allocated identifier of format NNN-NNNN.
"""
return """select ssid as cdcid, oc_study_id as subject_identifier,
appt_date from dw.oc_crf_ccc_enrollment"""
|
botswana-harvard/edc-rdb
|
bcpp_rdb/dataframes/ccc.py
|
Python
|
gpl-2.0
| 1,163 | 0.00086 |
class Information(object):
def __init__(self, pygame):
self.pygame = pygame
self.display_fps = False
def _show_fps(self, clock, screen):
font = self.pygame.font.SysFont('Calibri', 25, True, False)
text = font.render("fps: {0:.2f}".format(clock.get_fps()), True, (0, 0, 0))
screen.blit(text, [0, 0])
def show_fps(self, clock, screen):
if self.display_fps:
self._show_fps(clock, screen)
def toggle_fps(self):
self.display_fps = not self.display_fps
|
evasilchenko/castle
|
core/info.py
|
Python
|
mit
| 536 | 0.001866 |
#!/usr/bin/env python
from __future__ import division
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import linalg
import csv
import codecs
import copy
def comparison(trajOne, trajTwo):
segmentOne=np.array(trajOne)
segmentTwo=np.array(trajTwo)
for i in range(2,5):
segmentOne[:,i]= segmentOne[:,i] - segmentOne[0,i]
segmentTwo[:,i]= segmentTwo[:,i] - segmentTwo[0,i]
dist=0
for i in range(min(len(trajOne), len(trajTwo))):
dist = dist + np.linalg.norm(segmentOne[i,2:]-segmentTwo[i,2:])
return dist
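# Added note: comparison() first re-expresses both segments relative to their own
# starting joint configuration (columns 2 onward hold the joint angles; columns 0-1
# are index/timestamp data) and then accumulates the per-sample Euclidean distance
# between the two relative trajectories over the length of the shorter segment, so a
# small value means the two segments trace a similar motion shape regardless of where
# in joint space they started.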
def plotTraj(jointTrajectory):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.tick_params(labelsize=28)
ax.set_xlabel("$\Theta_{1}$ [deg]", size=30)
ax.set_ylabel("$\Theta_{2}$ [deg]", size=30)
ax.set_zlabel("$\Theta_{3}$ [deg]", size=30)
# ax.plot(jointTrajectory[:,2], jointTrajectory[:,3], jointTrajectory[:,4], lw=2,color='red',label='Human-Guided Random Trajectory')
ax.plot(jointTrajectory[:,2], jointTrajectory[:,3], jointTrajectory[:,4], lw=2,color='red',label='Human-Guided Random Trajectory .')
ax.legend(prop={'size':30})
plt.show()
def plotDistances(trajOne, trajTwo):
segmentOne=np.array(trajOne)
segmentTwo=np.array(trajTwo)
for i in range(2,5):
segmentOne[:,i]= segmentOne[:,i] - segmentOne[0,i]
segmentTwo[:,i]= segmentTwo[:,i] - segmentTwo[0,i]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.tick_params(labelsize=30)
ax.set_xlabel("$\Theta_{1}$ [deg]", size=30)
ax.set_ylabel("$\Theta_{2}$ [deg]", size=30)
ax.set_zlabel("$\Theta_{3}$ [deg]", size=30)
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.set_zticklabels('')
for i in range(len(segmentOne)):
if i==0:
ax.plot([segmentOne[i,2], segmentTwo[i,2]],[ segmentOne[i,3], segmentTwo[i,3]], [segmentOne[i,4], segmentTwo[i,4]], lw=2,color='blue',label='Distances')
else:
ax.plot([segmentOne[i,2], segmentTwo[i,2]],[ segmentOne[i,3], segmentTwo[i,3]], [segmentOne[i,4], segmentTwo[i,4]], lw=2,color='blue')
ax.plot(segmentOne[:,2], segmentOne[:,3], segmentOne[:,4], lw=3,color='red',label='Segment 1')
ax.plot(segmentTwo[:,2], segmentTwo[:,3], segmentTwo[:,4], lw=3,color='green',label='Segment 2')
ax.legend(prop={'size':30})
plt.show()
def plotSingle(trajOne):
segmentOne=np.array(trajOne)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.set_zticklabels('')
ax.plot(segmentOne[:,2], segmentOne[:,3], segmentOne[:,4], lw=5,color='red',label='Segment 1')
plt.show()
def plot2DXiP():
x = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170]
y = [94, 86, 72, 58, 46, 41, 38, 31, 27, 22, 13, 10, 8, 6, 5, 3, 2, 1]
plt.plot(x, y, linewidth=4)
plt.ylim([0,94])
plt.xlim([0,170])
plt.xlabel(u"\u03BE", fontsize=30)
plt.ylabel('Number of Primitives', fontsize=30)
plt.tick_params(labelsize=25)
#plt.title('Relation between ' + u'\u03BE' + ' and primitives', fontsize=30)
plt.grid(True)
plt.show()
def saveToCSV(primitives, tau, xi):
fileName='db_var/db_'+ 'tau' +str(tau) + '_xi'+ str(xi)
# to write in CSV
with open(fileName, 'wb') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_MINIMAL)
for k in range(len(primitives)):
wr.writerow(primitives[k])
# to replace '"'
contents = codecs.open(fileName, encoding='utf-8').read()
s = contents.replace('"', '')
with open(fileName, "wb") as f:
f.write(s.encode("UTF-8"))
def main():
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42 # type 42 fonts (truetype) for IEEE papercept system
#325, 350
tau=0.5 # length in seconds
xi=50
jointTrajectory= np.loadtxt('../../main/app/record/recordedTeo/state/data.log')
x=list(jointTrajectory)
plotTraj(jointTrajectory)
numberOfSlices=0
realDataMatrix=[]
slicesList=[]
temp0=np.array(x).astype('float')
#slices by time
newTimeValue= np.ceil(temp0[-1][1] - temp0[0][1])
numberOfSlices = int(newTimeValue/tau)
X = np.array(x)
## OVERLOAD
for h in range(numberOfSlices):
initial=(X.shape[0]/numberOfSlices)*h
final=(X.shape[0]/numberOfSlices)*(h+1)
if X[initial:final].shape[0] == 0:
print 'NO POINTS IN THIS SET. PROBABLY NUMBEROFPOINTS < NUMBEROFSLICES'
else:
slicesList.append(X[initial:final].tolist())
plotDistances(slicesList[20],slicesList[50])
primitives=[]
primitives.append(slicesList[0])
for i in range(numberOfSlices):
for k in range(len(primitives)):
jay = comparison(slicesList[i],primitives[k])
if jay < xi:
#print 'Similar to primitive', k,'in database. Jay', jay
jay=-1
break
if jay !=-1:
#print 'Adding segment', i,'to database'
primitives.append(slicesList[i])
print 'xi:', xi
print 'tau:', tau
print 'number of primitives:', len(primitives)
# making a copy to be modified
relativePrims=copy.deepcopy(primitives)
# making them relative
for a in range(len(primitives)):
for b in range(len(primitives[a])):
for c in range(2,5):
relativePrims[a][b][c]= primitives[a][b][c] - primitives[a][b-1][c]
if b==0:
relativePrims[a][b][c]= primitives[a][b][c] - primitives[a][0][c]
#saveToCSV(relativePrims, tau, xi)
if __name__ == '__main__':
main()
|
smorante/continuous-goal-directed-actions
|
guided-motor-primitives/src/roman14_primitive.py
|
Python
|
mit
| 5,788 | 0.023151 |
import os
import sys
import numpy as np
import math
def findBinIndexFor(aFloatValue, binsList):
#print "findBinIndexFor: %s" % aFloatValue
returnIndex = -1
for i in range(len(binsList)):
thisBin = binsList[i]
if (aFloatValue >= thisBin[0]) and (aFloatValue < thisBin[1]):
returnIndex = i
break
return returnIndex
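# Example (illustrative): findBinIndexFor(2.5, [(0.0, 1.0), (1.0, 3.0)]) returns 1,
# since each bin is treated as a half-open interval [low, high); a value that falls
# in no bin yields -1.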
def compute_joint_prob(joint_list, vals1, vals2, bins1=None, bins2=None, asFreq=False):
returnDict = {}
for rec in joint_list:
val1 = rec[0]
val2 = rec[1]
#Find name by which first val should appear
dictName1 = val1
if bins1 is not None:
dictName1 = findBinIndexFor(val1, bins1)
#Find name by which second val should appear
dictName2 = val2
if bins2 is not None:
dictName2 = findBinIndexFor(val2, bins2)
#If first name is not present in dict,
#then initialize it
if dictName1 not in returnDict:
returnDict[dictName1] = {}
for val in vals2:
#Determine name under which
#y-values should appear (i.e. as bin names
#or as given names)
asDictName = val
if bins2 is not None:
asDictName = findBinIndexFor(val, bins2)
returnDict[dictName1][asDictName] = 0
returnDict[dictName1][dictName2]+=1
if not asFreq:
#Normalize values
for key in returnDict:
for secondKey in returnDict[key]:
returnDict[key][secondKey] = float(returnDict[key][secondKey]) / len(joint_list)
return returnDict
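# Example (illustrative): compute_joint_prob([('a', 1), ('a', 2), ('b', 1)], ['a', 'b'], [1, 2])
# returns {'a': {1: 1/3., 2: 1/3.}, 'b': {1: 1/3., 2: 0.0}}; with asFreq=True the raw
# counts are kept instead of being normalised by the number of records.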
def getXForFixedY(joint_prob_dist, yVal):
returnList = []
for key in joint_prob_dist:
returnList.append( joint_prob_dist[key][yVal])
return returnList
def compute_h(floatsList):
returnFloat = None
acc = 0
for f in floatsList:
if f != 0:
acc = acc - f * math.log(f, 2)
returnFloat = acc
return returnFloat
# Computes the conditional entropy H(X|Y) of the joint distribution:
# H(X|Y) = -sum over x,y of P(x,y) * log2( P(x,y) / P(y) )
def conditional_entropy(joint_prob_dist, xVals, yVals):
returnFloat = None
h_acc = 0
marginal_y_dist = getYMarginalDist(joint_prob_dist)
for x in xVals:
for y in yVals:
joint_xy = 0
marginal_y = 0
if not x in joint_prob_dist or y not in joint_prob_dist[x]:
joint_xy = 0
else:
joint_xy = joint_prob_dist[x][y]
if not y in marginal_y_dist:
marginal_y = 0
else:
marginal_y = marginal_y_dist[y]
if joint_xy!=0 and marginal_y!=0:
h_acc-=joint_xy*math.log(joint_xy/marginal_y, 2)
# for yVal in yVals:
# new_xDist = getXForFixedY(joint_prob_dist, yVal)
# h_yVal = compute_h(new_xDist)
# p_yVal = reduce(lambda x, y: x+y, new_xDist)
# h_acc+=p_yVal * h_yVal
returnFloat = h_acc
return returnFloat
def getYMarginalDist(joint_prob_dist):
returnDict = {}
for xKey in joint_prob_dist:
for yKey in joint_prob_dist[xKey]:
if not yKey in returnDict:
returnDict[yKey] = 0
returnDict[yKey]+=joint_prob_dist[xKey][yKey]
return returnDict
def getXMarginalDist(joint_prob_dist):
returnDict = {}
for key in joint_prob_dist:
yVals = joint_prob_dist[key]
marginalVal = reduce(lambda x,y: x+y, [yVals[e] for e in yVals])
returnDict[key] = marginalVal
return returnDict
def entropy_loss(joint_prob_dist, xVals, yVals):
returnFloat = None
priorsDict = getXMarginalDist(joint_prob_dist)
priors = priorsDict.values()
h_prior = compute_h(priors)
h_conditional = conditional_entropy(joint_prob_dist, xVals, yVals)
returnFloat = h_prior - h_conditional
return returnFloat
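# Minimal usage sketch (added for illustration; the toy data below is invented and not
# part of the original module). entropy_loss() returns the information gain
# I(X;Y) = H(X) - H(X|Y) for the joint distribution estimated by compute_joint_prob(),
# roughly 0.31 bits for this example.
def _example_information_gain():
    joint_list = [('yes', 'hot'), ('yes', 'hot'), ('no', 'cold'), ('no', 'hot')]
    x_vals = ['yes', 'no']
    y_vals = ['hot', 'cold']
    joint = compute_joint_prob(joint_list, x_vals, y_vals)
    return entropy_loss(joint, x_vals, y_vals)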
|
omoju/Fundamentals
|
Data/twitterDataAnalysis/info_gain.py
|
Python
|
gpl-3.0
| 3,353 | 0.037578 |
from __future__ import division
import numpy as np
import tensorflow as tf
from cost_functions.huber_loss import huber_loss
from data_providers.data_provider_33_price_history_autoencoder import PriceHistoryAutoEncDataProvider
from interfaces.neural_net_model_interface import NeuralNetModelInterface
from mylibs.batch_norm import BatchNormer, batchNormWrapper, fully_connected_layer_with_batch_norm_and_l2, \
fully_connected_layer_with_batch_norm
from mylibs.jupyter_notebook_helper import DynStats, getRunTime
from tensorflow.contrib import rnn
from collections import OrderedDict
from mylibs.py_helper import merge_dicts
from mylibs.tf_helper import generate_weights_var, fully_connected_layer
from os import system
from fastdtw import fastdtw
from matplotlib import pyplot as plt
from plotter.price_hist import renderRandomMultipleTargetsVsPredictions
# DYNAMIC SEQUENCES - HEAVY MODEL
class PriceHistoryAutoencoder(NeuralNetModelInterface):
"""
NECESSARY FOR MULTIPLE SEQS:
- Make it with dynamic inputs
IDEAS FOR IMPROVEMENT:
0) introduce extra layers
1) Add the mobile attributes per instance
    2) MAKE OUTPUT DEPEND ON PREVIOUS OUTPUT
3) use EOS
4) Add dropout
    *) Also make input depend on previous input ??
"""
DATE_FEATURE_LEN = 6
INPUT_FEATURE_LEN = DATE_FEATURE_LEN + 1
TS_INPUT_IND = 0 # if feature len is multi
TARGET_FEATURE_LEN = 1
ADAM_DEFAULT_LEARNING_RATE = 1e-3
SEED = 16011984
DEFAULT_KEEP_PROB = 1.
DEFAULT_LAMDA2 = 0.
DEFAULT_ARR_LAMDA2 = [DEFAULT_LAMDA2] * 3
BATCH_NORM_ENABLED_BY_DEFAULT = True
DIM_REDUCTION = 2
class DECODER_FIRST_INPUT(object):
PREVIOUS_INPUT = "PREVIOUS_INPUT"
ZEROS = "ZEROS"
def __init__(self, rng, dtype, config):
super(PriceHistoryAutoencoder, self).__init__()
self.rng = rng
self.dtype = dtype
self.config = config
self.train_data = None
self.valid_data = None
self.init = None
self.error = None
self.inputs = None
self.predictions = None
self.train_step = None
self.is_training = None
self.decoder_extra_inputs = None
self.keep_prob_rnn_out = None
self.keep_prob_readout = None
self.twod = None
self.sequence_lens = None
self.sequence_len_mask = None
@staticmethod
def DEFAULT_ACTIVATION_RNN():
return tf.nn.tanh # tf.nn.elu
def run(self, npz_path, epochs, batch_size, enc_num_units, dec_num_units, ts_len,
hidden_enc_num_units,
hidden_enc_dim,
hidden_dec_dim,
hidden_dec_num_units,
learning_rate=ADAM_DEFAULT_LEARNING_RATE,
preds_gather_enabled=True,
):
graph = self.getGraph(batch_size=batch_size, verbose=False, enc_num_units=enc_num_units,
dec_num_units=dec_num_units, ts_len=ts_len,
learning_rate=learning_rate, hidden_enc_num_units=hidden_enc_num_units,
hidden_enc_dim=hidden_enc_dim,
hidden_dec_dim=hidden_dec_dim,
hidden_dec_num_units=hidden_dec_num_units)
# input_keep_prob=input_keep_prob, hidden_keep_prob=hidden_keep_prob,
train_data = PriceHistoryAutoEncDataProvider(npz_path=npz_path, batch_size=batch_size, rng=self.rng,
which_set='train')
# during cross validation we execute our experiment multiple times and we get a score at the end
# so this means that we need to retrain the model one final time in order to output the predictions
# from this training procedure
preds_dp = PriceHistoryAutoEncDataProvider(npz_path=npz_path, batch_size=batch_size, rng=self.rng,
shuffle_order=False,
which_set='test',
) if preds_gather_enabled else None
self.__print_hyperparams(learning_rate=learning_rate, epochs=epochs, enc_num_units=enc_num_units,
dec_num_units=dec_num_units)
return self.train_validate(train_data=train_data, valid_data=None, graph=graph, epochs=epochs,
preds_gather_enabled=preds_gather_enabled, preds_dp=preds_dp,
batch_size=batch_size)
def train_validate(self, train_data, valid_data, **kwargs):
graph = kwargs['graph']
epochs = kwargs['epochs']
batch_size = kwargs['batch_size']
verbose = kwargs['verbose'] if 'verbose' in kwargs.keys() else True
preds_dp = kwargs['preds_dp'] if 'preds_dp' in kwargs.keys() else None
preds_gather_enabled = kwargs['preds_gather_enabled'] if 'preds_gather_enabled' in kwargs.keys() else True
test_error = None
preds_dict = None
twod_dict = None
with tf.Session(graph=graph, config=self.config) as sess:
sess.run(self.init) # sess.run(tf.initialize_all_variables())
dynStats = DynStats(validation=valid_data is not None)
for epoch in range(epochs):
train_error, runTime = getRunTime(
lambda:
self.trainEpoch(
sess=sess,
data_provider=train_data,
extraFeedDict={
self.is_training: True,
}
)
)
if np.isnan(train_error):
raise Exception('do something with your learning rate because it is extremely high')
if valid_data is None:
if verbose:
# print 'EndEpoch%02d(%.3f secs):err(train)=%.4f,acc(train)=%.2f,err(valid)=%.2f,acc(valid)=%.2f, ' % \
# (epoch + 1, runTime, train_error, train_accuracy, valid_error, valid_accuracy)
print 'End Epoch %02d (%.3f secs): err(train) = %.6f' % (
epoch + 1, runTime, train_error)
dynStats.gatherStats(train_error=train_error)
else:
# if (epoch + 1) % 1 == 0:
valid_error = self.validateEpoch(
sess=sess,
data_provider=valid_data,
extraFeedDict={self.is_training: False},
)
if np.isnan(valid_error):
raise Exception('do something with your learning rate because it is extremely high')
if verbose:
print 'End Epoch %02d (%.3f secs): err(train) = %.6f, err(valid)=%.6f' % (
epoch + 1, runTime, train_error, valid_error)
dynStats.gatherStats(train_error=train_error, valid_error=valid_error)
preds_dict, test_error, twod_dict = self.getPredictions(batch_size=batch_size, data_provider=preds_dp,
sess=sess) if preds_gather_enabled else (
None, None, None)
if verbose:
if preds_gather_enabled:
print "total test error: {}".format(test_error)
print
if preds_gather_enabled:
return dynStats, self.trimPredsDict(preds_dict,
data_provider=preds_dp), preds_dp.get_targets_dict_trimmed(), twod_dict
else:
return dynStats
def getGraph(self,
batch_size,
enc_num_units,
hidden_enc_num_units,
hidden_enc_dim,
hidden_dec_dim,
hidden_dec_num_units,
dec_num_units,
ts_len,
learning_rate=ADAM_DEFAULT_LEARNING_RATE, # default of Adam is 1e-3
verbose=True):
# momentum = 0.5
# tf.reset_default_graph() #kind of redundant statement
graph = tf.Graph() # create new graph
with graph.as_default():
with tf.name_scope('parameters'):
self.is_training = tf.placeholder(tf.bool, name="is_training")
with tf.name_scope('data'):
inputs = tf.placeholder(dtype=self.dtype,
shape=(batch_size, ts_len, self.INPUT_FEATURE_LEN), name="inputs")
targets = inputs[:, :, self.TS_INPUT_IND]
if verbose:
print "targets"
print targets
print
decoder_extra_inputs = tf.placeholder(dtype=self.dtype,
shape=(batch_size, ts_len, self.DATE_FEATURE_LEN),
name="decoder_extra_inputs")
self.decoder_extra_inputs = decoder_extra_inputs
sequence_lens = tf.placeholder(tf.int32, shape=(batch_size,), name="sequence_lens_placeholder")
self.sequence_lens = sequence_lens
sequence_len_mask = tf.placeholder(tf.int32, shape=(batch_size, ts_len),
name="sequence_len_mask_placeholder")
self.sequence_len_mask = sequence_len_mask
with tf.name_scope('encoder_rnn_layer'):
encoder_outputs, encoder_final_state = tf.nn.dynamic_rnn(
cell=tf.contrib.rnn.GRUCell(num_units=enc_num_units, activation=self.DEFAULT_ACTIVATION_RNN()),
inputs=inputs,
initial_state=None,
dtype=self.dtype,
sequence_length=sequence_lens
)
if verbose:
print encoder_outputs
print encoder_final_state
print
with tf.variable_scope('hidden_encoder_rnn_layer'):
hidden_encoder_outputs, hidden_encoder_final_state = tf.nn.dynamic_rnn(
cell=tf.contrib.rnn.GRUCell(num_units=hidden_enc_num_units,
activation=self.DEFAULT_ACTIVATION_RNN()),
inputs=encoder_outputs,
initial_state=encoder_final_state,
dtype=self.dtype,
sequence_length=sequence_lens
)
if verbose:
print hidden_encoder_outputs
print hidden_encoder_final_state
print
with tf.name_scope('encoder_state_out_hidden_process'):
hidden_enc_layer = fully_connected_layer_with_batch_norm(fcId='encoder_state_out_hidden_process',
inputs=hidden_encoder_final_state,
input_dim=hidden_enc_num_units,
output_dim=hidden_enc_dim,
is_training=self.is_training,
nonlinearity=tf.nn.elu)
if verbose:
print hidden_enc_layer
print
with tf.name_scope('encoder_state_out_process'):
# don't really care for encoder outputs, but only for its final state
# the encoder consumes all the input to get a sense of the trend of price history
# fully_connected_layer_with_batch_norm_and_l2(fcId='encoder_state_out_process',
# inputs=encoder_final_state,
# input_dim=enc_num_units, output_dim=self.DIM_REDUCTION,
# is_training=self.is_training, lamda2=0)
ww_enc_out = generate_weights_var(ww_id='encoder_state_out_process', input_dim=hidden_enc_dim,
output_dim=self.DIM_REDUCTION,
dtype=self.dtype)
nonlinearity = tf.nn.elu
avoidDeadNeurons = 0.1 if nonlinearity == tf.nn.relu else 0. # prevent zero when relu
bb_enc_out = tf.Variable(avoidDeadNeurons * tf.ones([self.DIM_REDUCTION]),
name='biases_{}'.format('encoder_state_out_process'))
# out_affine = tf.matmul(inputs, weights) + biases
affine_enc_out = tf.add(tf.matmul(hidden_enc_layer, ww_enc_out), bb_enc_out)
self.twod = affine_enc_out ######### HERE WE GET THE TWO DIM REPRESENTATION OF OUR TIMESERIES ##########
batchNorm = batchNormWrapper('encoder_state_out_process', affine_enc_out, self.is_training)
nonlinear_enc_out = nonlinearity(batchNorm)
if verbose:
print nonlinear_enc_out
print
with tf.name_scope('decoder_state_in_hidden_process'):
hidden_dec_layer = fully_connected_layer_with_batch_norm(fcId='decoder_state_in_hidden_process',
inputs=nonlinear_enc_out,
input_dim=self.DIM_REDUCTION,
output_dim=hidden_dec_dim,
is_training=self.is_training,
nonlinearity=tf.nn.elu)
if verbose:
print hidden_dec_layer
print
with tf.name_scope('decoder_state_in_process'):
dec_init_state = fully_connected_layer_with_batch_norm(fcId='decoder_state_in_process',
inputs=hidden_dec_layer,
input_dim=hidden_dec_dim,
output_dim=hidden_dec_num_units,
is_training=self.is_training,
nonlinearity=tf.nn.elu)
if verbose:
print dec_init_state
print
with tf.variable_scope('hidden_decoder_rnn_layer'):
hidden_decoder_outputs, hidden_decoder_final_state = tf.nn.dynamic_rnn(
cell=tf.contrib.rnn.GRUCell(num_units=hidden_dec_num_units,
activation=self.DEFAULT_ACTIVATION_RNN()),
inputs=decoder_extra_inputs,
initial_state=dec_init_state,
dtype=self.dtype,
sequence_length=sequence_lens
)
if verbose:
print hidden_decoder_outputs
print hidden_decoder_final_state
print
with tf.variable_scope('decoder_rnn_layer'):
decoder_outputs, decoder_final_state = tf.nn.dynamic_rnn(
cell=tf.contrib.rnn.GRUCell(num_units=dec_num_units, activation=self.DEFAULT_ACTIVATION_RNN()),
inputs=hidden_decoder_outputs,
initial_state=hidden_decoder_final_state,
dtype=self.dtype,
sequence_length=sequence_lens
)
if verbose:
print decoder_outputs
print decoder_final_state
print
# No gathering because in this situation we want to keep the entire sequence
# along with whatever the dynamic_rnn pads at the end
with tf.name_scope('decoder_outs'):
flattened_dec_outs = tf.reshape(decoder_outputs, shape=(-1, dec_num_units))
if verbose:
print flattened_dec_outs
print
with tf.name_scope('readout_affine'):
processed_dec_outs = fully_connected_layer(inputs=flattened_dec_outs,
input_dim=dec_num_units,
output_dim=self.TARGET_FEATURE_LEN,
nonlinearity=tf.identity)
outputs = tf.reshape(processed_dec_outs, shape=(batch_size, ts_len))
if verbose:
print processed_dec_outs
print outputs
print
with tf.name_scope('error'):
losses = huber_loss(y_true=targets, y_pred=outputs) # both have shape: (batch_size, target_len)
# CONVENTION: loss is the error/loss seen by the optimizer, while error is the error reported
# in the outside world (to us) and usually these two are the same
lossed_fixed = losses * tf.cast(sequence_len_mask, tf.float32)
if verbose:
print lossed_fixed
print
loss = tf.reduce_mean(lossed_fixed)
error = loss
if verbose:
print loss
print error
print
with tf.name_scope('training_step'):
train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
init = tf.global_variables_initializer()
self.init = init
self.inputs = inputs
self.error = error
self.train_step = train_step
self.predictions = outputs
return graph
def trimPredsDict(self, preds_dict, data_provider):
assert np.all(np.array(list(data_provider.current_order)) == np.array(list(preds_dict.keys())))
preds_dict_trimmed = OrderedDict()
for seqlen, (key, preds) in zip(data_provider.seqlens, preds_dict.iteritems()):
preds_dict_trimmed[key] = preds[:seqlen]
return preds_dict_trimmed
def getPredictions(self, sess, data_provider, batch_size, extraFeedDict=None):
if extraFeedDict is None:
extraFeedDict = {}
        assert data_provider.data_len % batch_size == 0  # the provider cannot serve partial batches; data_len must be an exact multiple of batch_size
total_error = 0.
instances_order = data_provider.current_order
target_len = data_provider.targets.shape[1]
all_predictions = np.zeros(shape=(data_provider.data_len, target_len))
all_two_dims = np.zeros(shape=(data_provider.data_len, self.DIM_REDUCTION))
for inst_ind, (input_batch, dec_extra_ins, seq_lens, seq_len_mask) in enumerate(data_provider):
cur_error, cur_preds, cur_twod = sess.run(
[self.error, self.predictions, self.twod],
feed_dict=merge_dicts({self.inputs: input_batch,
self.decoder_extra_inputs: dec_extra_ins,
self.sequence_lens: seq_lens,
self.sequence_len_mask: seq_len_mask,
self.is_training: False,
}, extraFeedDict))
assert np.all(instances_order == data_provider.current_order), \
"making sure that the order does not change as we iterate over our batches"
cur_batch_slice = slice(inst_ind * batch_size, (inst_ind + 1) * batch_size)
all_predictions[cur_batch_slice, :] = cur_preds
all_two_dims[cur_batch_slice, :] = cur_twod
total_error += cur_error
total_error /= data_provider.num_batches
if np.any(all_predictions == 0):
            print "all predictions are expected to be something other than absolute zero".upper()
            system('play --no-show-progress --null --channels 1 synth {} sine {}'.format(0.5, 800))
        # assert np.all(all_predictions != 0), "all predictions are expected to be something other than absolute zero"
preds_dict = OrderedDict(zip(instances_order, all_predictions))
twod_dict = OrderedDict(zip(instances_order, all_two_dims))
return preds_dict, total_error, twod_dict
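    # Note (illustrative, not part of the original source): preds_dict maps each instance key in
    # data_provider.current_order to its full predicted sequence, while twod_dict maps the same
    # keys to the DIM_REDUCTION-sized bottleneck vector (self.twod) computed in
    # 'encoder_state_out_process', which is what makes this autoencoder usable for low-dimensional
    # plots of the price-history space.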
def validateEpoch(self, sess, data_provider, extraFeedDict=None):
if extraFeedDict is None:
extraFeedDict = {}
total_error = 0.
num_batches = data_provider.num_batches
for step, (input_batch, dec_extra_ins, seq_lens, seq_len_mask) in enumerate(data_provider):
feed_dic = merge_dicts({self.inputs: input_batch,
self.decoder_extra_inputs: dec_extra_ins,
self.sequence_lens: seq_lens,
self.sequence_len_mask: seq_len_mask,
}, extraFeedDict)
batch_error = sess.run(self.error, feed_dict=feed_dic)
total_error += batch_error
total_error /= num_batches
return total_error
def trainEpoch(self, sess, data_provider, extraFeedDict=None):
if extraFeedDict is None:
extraFeedDict = {}
train_error = 0.
num_batches = data_provider.num_batches
for step, (input_batch, dec_extra_ins, seq_lens, seq_len_mask) in enumerate(data_provider):
feed_dic = merge_dicts({self.inputs: input_batch,
self.decoder_extra_inputs: dec_extra_ins,
self.sequence_lens: seq_lens,
self.sequence_len_mask: seq_len_mask,
}, extraFeedDict)
_, batch_error = sess.run([self.train_step, self.error], feed_dict=feed_dic)
train_error += batch_error
train_error /= num_batches
return train_error
@staticmethod
def __print_hyperparams(**kwargs):
for key in kwargs:
print "{}: {}".format(key, kwargs[key])
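if __name__ == '__main__':
    # Minimal standalone sketch (not part of the original model): it mirrors the masked
    # Huber-loss averaging from the 'error' scope above using plain numpy. The delta of 1.0
    # and the exact behaviour of the repository's huber_loss helper are assumptions, since
    # that helper is defined elsewhere.
    import numpy as np

    def huber_np(y_true, y_pred, delta=1.0):
        err = np.abs(y_true - y_pred)
        quadratic = 0.5 * err ** 2
        linear = delta * err - 0.5 * delta ** 2
        return np.where(err <= delta, quadratic, linear)

    targets_demo = np.array([[1.0, 2.0, 0.0], [3.0, 1.0, 2.0]])
    preds_demo = np.array([[1.5, 2.0, 9.0], [3.0, 0.5, 2.0]])
    # 0 marks padded timesteps, exactly like sequence_len_mask in the graph
    mask_demo = np.array([[1, 1, 0], [1, 1, 1]])
    masked_losses = huber_np(targets_demo, preds_demo) * mask_demo
    # like tf.reduce_mean(lossed_fixed) above, the mean is taken over every position,
    # so padded steps contribute zero to the numerator
    print masked_losses.mean()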
|
pligor/predicting-future-product-prices
|
04_time_series_prediction/models/model_34_price_history_autoencoder.py
|
Python
|
agpl-3.0
| 22,721 | 0.004181 |
import sys
sys.path.insert(1,"../../../")
import h2o, tests
def deepLearningDemo():
# Training data
train_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_model.csv"))
train_data = train_data.drop('Site')
train_data['Angaus'] = train_data['Angaus'].asfactor()
print train_data.describe()
train_data.head()
# Testing data
test_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_eval.csv"))
test_data['Angaus'] = test_data['Angaus'].asfactor()
print test_data.describe()
test_data.head()
# Run GBM
gbm = h2o.gbm(x = train_data[1:],
y = train_data['Angaus'],
validation_x= test_data [1:] ,
validation_y= test_data ['Angaus'],
ntrees=100,
distribution="bernoulli")
gbm.show()
# Run DeepLearning
dl = h2o.deeplearning(x = train_data[1:],
y = train_data['Angaus'],
validation_x= test_data [1:] ,
validation_y= test_data ['Angaus'],
loss = 'CrossEntropy',
epochs = 1000,
hidden = [20, 20, 20])
dl.show()
if __name__ == "__main__":
tests.run_test(sys.argv, deepLearningDemo)
|
kyoren/https-github.com-h2oai-h2o-3
|
h2o-py/tests/testdir_algos/deeplearning/pyunit_demoDeeplearning.py
|
Python
|
apache-2.0
| 1,322 | 0.04236 |
import os
import textwrap
import pytest
from pip._internal.status_codes import ERROR
from tests.lib.path import Path
def fake_wheel(data, wheel_path):
data.packages.join(
'simple.dist-0.1-py2.py3-none-any.whl'
).copy(data.packages.join(wheel_path))
@pytest.mark.network
def test_download_if_requested(script):
"""
It should download (in the scratch path) and not install if requested.
"""
result = script.pip(
'download', '-d', 'pip_downloads', 'INITools==0.1', expect_error=True
)
assert Path('scratch') / 'pip_downloads' / 'INITools-0.1.tar.gz' \
in result.files_created
assert script.site_packages / 'initools' not in result.files_created
@pytest.mark.network
def test_basic_download_setuptools(script):
"""
It should download (in the scratch path) and not install if requested.
"""
result = script.pip('download', 'setuptools')
setuptools_prefix = str(Path('scratch') / 'setuptools')
assert any(
path.startswith(setuptools_prefix) for path in result.files_created
)
def test_download_wheel(script, data):
"""
Test using "pip download" to download a *.whl archive.
"""
result = script.pip(
'download',
'--no-index',
'-f', data.packages,
'-d', '.', 'meta'
)
assert (
Path('scratch') / 'meta-1.0-py2.py3-none-any.whl'
in result.files_created
)
assert script.site_packages / 'piptestpackage' not in result.files_created
@pytest.mark.network
def test_single_download_from_requirements_file(script):
"""
It should support download (in the scratch path) from PyPi from a
requirements file
"""
script.scratch_path.join("test-req.txt").write(textwrap.dedent("""
INITools==0.1
"""))
result = script.pip(
'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
expect_error=True,
)
assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created
assert script.site_packages / 'initools' not in result.files_created
@pytest.mark.network
def test_basic_download_should_download_dependencies(script):
"""
It should download dependencies (in the scratch path)
"""
result = script.pip(
'download', 'Paste[openid]==1.7.5.1', '-d', '.', expect_error=True,
)
assert Path('scratch') / 'Paste-1.7.5.1.tar.gz' in result.files_created
openid_tarball_prefix = str(Path('scratch') / 'python-openid-')
assert any(
path.startswith(openid_tarball_prefix) for path in result.files_created
)
assert script.site_packages / 'openid' not in result.files_created
def test_download_wheel_archive(script, data):
"""
It should download a wheel archive path
"""
wheel_filename = 'colander-0.9.9-py2.py3-none-any.whl'
wheel_path = '/'.join((data.find_links, wheel_filename))
result = script.pip(
'download', wheel_path,
'-d', '.', '--no-deps'
)
assert Path('scratch') / wheel_filename in result.files_created
def test_download_should_download_wheel_deps(script, data):
"""
It should download dependencies for wheels(in the scratch path)
"""
wheel_filename = 'colander-0.9.9-py2.py3-none-any.whl'
dep_filename = 'translationstring-1.1.tar.gz'
wheel_path = '/'.join((data.find_links, wheel_filename))
result = script.pip(
'download', wheel_path,
'-d', '.', '--find-links', data.find_links, '--no-index'
)
assert Path('scratch') / wheel_filename in result.files_created
assert Path('scratch') / dep_filename in result.files_created
@pytest.mark.network
def test_download_should_skip_existing_files(script):
"""
It should not download files already existing in the scratch dir
"""
script.scratch_path.join("test-req.txt").write(textwrap.dedent("""
INITools==0.1
"""))
result = script.pip(
'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
expect_error=True,
)
assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created
assert script.site_packages / 'initools' not in result.files_created
# adding second package to test-req.txt
script.scratch_path.join("test-req.txt").write(textwrap.dedent("""
INITools==0.1
python-openid==2.2.5
"""))
# only the second package should be downloaded
result = script.pip(
'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
expect_error=True,
)
openid_tarball_prefix = str(Path('scratch') / 'python-openid-')
assert any(
path.startswith(openid_tarball_prefix) for path in result.files_created
)
assert Path('scratch') / 'INITools-0.1.tar.gz' not in result.files_created
assert script.site_packages / 'initools' not in result.files_created
assert script.site_packages / 'openid' not in result.files_created
@pytest.mark.network
def test_download_vcs_link(script):
"""
It should allow -d flag for vcs links, regression test for issue #798.
"""
result = script.pip(
'download', '-d', '.', 'git+git://github.com/pypa/pip-test-package.git'
)
assert (
Path('scratch') / 'pip-test-package-0.1.1.zip'
in result.files_created
)
assert script.site_packages / 'piptestpackage' not in result.files_created
def test_only_binary_set_then_download_specific_platform(script, data):
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--only-binary=:all:`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
def test_no_deps_set_then_download_specific_platform(script, data):
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--no-deps`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--no-deps',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
def test_download_specific_platform_fails(script, data):
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--no-deps`` or ``--only-binary=:all:`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
expect_error=True,
)
assert '--only-binary=:all:' in result.stderr
def test_no_binary_set_then_download_specific_platform_fails(script, data):
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--only-binary=:all:`` is set without ``--no-binary``.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--no-binary=fake',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
expect_error=True,
)
assert '--only-binary=:all:' in result.stderr
def test_download_specify_platform(script, data):
"""
Test using "pip download --platform" to download a .whl archive
supported for a specific platform
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
# Confirm that universal wheels are returned even for specific
# platforms.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'macosx_10_9_x86_64',
'fake'
)
data.reset()
fake_wheel(data, 'fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl')
fake_wheel(data, 'fake-2.0-py2.py3-none-linux_x86_64.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'macosx_10_10_x86_64',
'fake'
)
assert (
Path('scratch') /
'fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl'
in result.files_created
)
# OSX platform wheels are not backward-compatible.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'macosx_10_8_x86_64',
'fake',
expect_error=True,
)
# No linux wheel provided for this version.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake==1',
expect_error=True,
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake==2'
)
assert (
Path('scratch') / 'fake-2.0-py2.py3-none-linux_x86_64.whl'
in result.files_created
)
def test_download_platform_manylinux(script, data):
"""
Test using "pip download --platform" to download a .whl archive
supported for a specific platform.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
# Confirm that universal wheels are returned even for specific
# platforms.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
data.reset()
fake_wheel(data, 'fake-1.0-py2.py3-none-manylinux1_x86_64.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'manylinux1_x86_64',
'fake',
)
assert (
Path('scratch') /
'fake-1.0-py2.py3-none-manylinux1_x86_64.whl'
in result.files_created
)
# When specifying the platform, manylinux1 needs to be the
# explicit platform--it won't ever be added to the compatible
# tags.
data.reset()
fake_wheel(data, 'fake-1.0-py2.py3-none-linux_x86_64.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
expect_error=True,
)
def test_download_specify_python_version(script, data):
"""
Test using "pip download --python-version" to download a .whl archive
supported for a specific interpreter
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '2',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '3',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '27',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '33',
'fake'
)
data.reset()
fake_wheel(data, 'fake-1.0-py2-none-any.whl')
fake_wheel(data, 'fake-2.0-py3-none-any.whl')
# No py3 provided for version 1.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '3',
'fake==1.0',
expect_error=True,
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '2',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '26',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '3',
'fake'
)
assert (
Path('scratch') / 'fake-2.0-py3-none-any.whl'
in result.files_created
)
def test_download_specify_abi(script, data):
"""
Test using "pip download --abi" to download a .whl archive
supported for a specific abi
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--abi', 'fake_abi',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--abi', 'none',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--abi', 'cp27m',
'fake',
expect_error=True,
)
data.reset()
fake_wheel(data, 'fake-1.0-fk2-fakeabi-fake_platform.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '2',
'--implementation', 'fk',
'--platform', 'fake_platform',
'--abi', 'fakeabi',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-fk2-fakeabi-fake_platform.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--platform', 'fake_platform',
'--abi', 'none',
'fake',
expect_error=True,
)
def test_download_specify_implementation(script, data):
"""
Test using "pip download --abi" to download a .whl archive
supported for a specific abi
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
data.reset()
fake_wheel(data, 'fake-1.0-fk2.fk3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-fk2.fk3-none-any.whl'
in result.files_created
)
data.reset()
fake_wheel(data, 'fake-1.0-fk3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--python-version', '3',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-fk3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--python-version', '2',
'fake',
expect_error=True,
)
def test_download_exit_status_code_when_no_requirements(script):
"""
Test download exit status code when no requirements specified
"""
result = script.pip('download', expect_error=True)
assert (
"You must give at least one requirement to download" in result.stderr
)
assert result.returncode == ERROR
def test_download_exit_status_code_when_blank_requirements_file(script):
"""
Test download exit status code when blank requirements file specified
"""
script.scratch_path.join("blank.txt").write("\n")
script.pip('download', '-r', 'blank.txt')
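def test_download_cross_platform_sketch(script, data):
    """
    Illustrative sketch (not part of the upstream suite) combining the cross-platform
    flags exercised above: a universal py2.py3-none-any wheel should still be selected
    when a foreign platform, interpreter version, implementation and ABI are requested,
    provided ``--only-binary=:all:`` is set as the constraint checks require.
    """
    fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--platform', 'manylinux1_x86_64',
        '--python-version', '3',
        '--implementation', 'cp',
        '--abi', 'none',
        'fake'
    )
    assert (
        Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
        in result.files_created
    )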
|
zvezdan/pip
|
tests/functional/test_download.py
|
Python
|
mit
| 17,722 | 0 |
from bottle import route, default_app
app = default_app()
data = {
"id": 78874,
"seriesName": "Firefly",
"aliases": [
"Serenity"
],
"banner": "graphical/78874-g3.jpg",
"seriesId": "7097",
"status": "Ended",
"firstAired": "2002-09-20",
"network": "FOX (US)",
"networkId": "",
"runtime": "45",
"genre": [
"Drama",
"Science-Fiction"
],
"overview": "In the far-distant future, Captain Malcolm \"Mal\" Reynolds is a renegade former brown-coat sergeant, now turned smuggler & rogue, "
"who is the commander of a small spacecraft, with a loyal hand-picked crew made up of the first mate, Zoe Warren; the pilot Hoban \"Wash\" Washburn; "
"the gung-ho grunt Jayne Cobb; the engineer Kaylee Frye; the fugitives Dr. Simon Tam and his psychic sister River. "
"Together, they travel the far reaches of space in search of food, money, and anything to live on.",
"lastUpdated": 1486759680,
"airsDayOfWeek": "",
"airsTime": "",
"rating": "TV-14",
"imdbId": "tt0303461",
"zap2itId": "EP00524463",
"added": "",
"addedBy": None,
"siteRating": 9.5,
"siteRatingCount": 472,
}
@route('/api')
def api():
return data
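# Usage sketch (assumption, not part of the benchmark itself): `app` is a plain WSGI
# application, so any WSGI server can host it, and Bottle serialises the dict returned
# by the /api route to JSON.
if __name__ == '__main__':
    from bottle import run
    run(app, host='127.0.0.1', port=8080)  # then: curl http://127.0.0.1:8080/api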
|
romanvm/WsgiBoostServer
|
benchmarks/test_app.py
|
Python
|
mit
| 1,229 | 0.003255 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
Copyright (C) 2016-2018 plugin.video.youtube
SPDX-License-Identifier: GPL-2.0-only
See LICENSES/GPL-2.0-only for more information.
"""
from . import const_settings as setting
from . import const_localize as localize
from . import const_sort_methods as sort_method
from . import const_content_types as content_type
from . import const_paths as paths
__all__ = ['setting', 'localize', 'sort_method', 'content_type', 'paths']
|
jdf76/plugin.video.youtube
|
resources/lib/youtube_plugin/kodion/constants/__init__.py
|
Python
|
gpl-2.0
| 526 | 0 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# beta.1 EPG FórmulaTV.com
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#------------------------------------------------------------
# Thanks to the plugintools library by Jesús (www.mimediacenter.info)
#------------------------------------------------------------
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import time
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
import time
from datetime import datetime
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
tmp = xbmc.translatePath(os.path.join('special://userdata/playlists/tmp', ''))
LIST = "list"
THUMBNAIL = "thumbnail"
MOVIES = "movies"
TV_SHOWS = "tvshows"
SEASONS = "seasons"
EPISODES = "episodes"
FANART = "fanart"
OTHER = "other"
MUSIC = "music"
def epg_ftv(title):
plugintools.log('[%s %s].epg_ftv %s' % (addonName, addonVersion, title))
channel = title.lower()
channel = channel.replace("Opción 1", "").replace("HD", "").replace("720p", "").replace("1080p", "").replace("SD", "").replace("HQ", "").replace("LQ", "").strip()
channel = channel.replace("Opción 2", "")
channel = channel.replace("Opción 3", "")
channel = channel.replace("Op. 1", "")
channel = channel.replace("Op. 2", "")
channel = channel.replace("Op. 3", "")
plugintools.log("Canal: "+channel)
params = plugintools.get_params()
params["url"]='http://www.formulatv.com/programacion/'
if channel == "la 1" or channel == "la 1 hd":
channel = "la 1"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "la 2":
channel = "la 2"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "antena 3" or channel == "antena 3 hd":
channel = "antena 3 televisión"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cuatro" or channel == "cuatro hd":
channel = "cuatro"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "telecinco hd" or channel == "telecinco":
        channel = "telecinco"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "la sexta" or channel == "la sexta hd":
channel = "lasexta"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+1" or channel == "canal+ 1" or channel == "canal plus" or channel == "canal+ hd":
channel = "canal+1"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+2" or channel == "canal+ 2" or channel == "canal plus 2" or channel == "canal+ 2 hd":
channel = "canal+ 2"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ 1 ...30" or channel == "canal+ 1... 30":
channel = "canal+ 1 ...30"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ series":
channel = "canal+ series"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "goltv" or channel == "golt":
channel = "gol televisión"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "40 TV":
channel = "40 tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal sur" or channel == "andalucia tv":
channel = "canal sur"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "aragón tv" or channel == "aragon tv":
channel = "aragon-television"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "axn" or channel == "axn hd":
channel = "axn"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "axn white":
channel = "axn white"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "xtrm":
channel = "xtrm"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "bio":
channel = "bio"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "calle 13" or channel == "calle 13 hd":
channel = "calle 13"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "amc" or channel == "amc españa":
channel = "amc (españa)"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal barça" or channel == "canal barca":
channel = "barça tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "andalucía tv" or channel == "andalucia tv":
channel = "andalucia-tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "aragón tv" or channel == "aragon tv":
channel = "aragon-television"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "axn" or channel == "axn hd":
channel = "axn"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "bio":
channel = "bio"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal barça" or channel == "canal barca":
channel = "canal barca"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ 30" or channel == "canal+ ...30" or channel == "canal plus 30":
channel = "canal+ 1... 30"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ accion" or channel == "canal+ acción" or channel=="canal plus accion":
channel = "canal+ acción"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ comedia" or channel == "canal plus comedia":
channel = "canal+ comedia"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ decine" or channel == "canal plus decine":
channel = "canal+ dcine"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ deporte" or channel == "canal plus deporte":
channel = "canal+ deporte"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ futbol" or channel == "canal+ fútbol" or channel == "canal plus fútbol" or channel == "canal plus futbol":
channel = "canal+ fútbol"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ liga":
channel = "canal+ liga"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ golf" or channel == "canal plus golf":
channel = "golf+"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ toros" or channel == "canal plus toros":
channel = "canal+ toros"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ extra" or channel=="canal+ xtra":
channel = "canal+ xtra"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal 33" or channel == "canal33":
channel = "canal33"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal cocina":
channel = "canal cocina"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cartoon network" or channel == "cartoon network hd":
channel = "cartoon network"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "castilla-la mancha televisión" or channel == "castilla-la mancha tv":
channel = "castilla-la-mancha"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "caza y pesca":
channel = "caza-y-pesca"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "clan" or channel == "clan tve 50" or channel == "clan tve":
channel = "clan tve"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "nickelodeon":
channel = "nickelodeon"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "boing":
channel = "boing"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cnbc":
channel = "cnbc"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cnn-international" or channel == "cnn int":
channel = "cnn international"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cosmopolitan" or channel == "cosmopolitan tv":
channel = "cosmopolitan"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "a&e" or channel == "a&e españa":
channel = "a&e españa"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ dcine" or channel == "canal plus dcine":
channel = "dcine espanol"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "decasa":
channel = "decasa"
epg_channel = epg_formulatv(params, channel)
return epg_channel
    elif channel == "discovery channel":
        channel = "discovery channel"
        epg_channel = epg_formulatv(params, channel)
        return epg_channel
elif channel == "national geographic":
channel = "national geographic"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "discovery max":
channel = "discovery max"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "disney channel":
channel = "disney channel"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "disney-cinemagic":
channel = "disney cinemagic"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "disney xd":
channel = "disney xd"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "disney junior" or channel == "disney jr":
channel = "disney junior"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "divinity":
channel = "divinity"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "energy":
channel = "energy"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "etb1" or channel == "etb 1":
channel = "euskal telebista 1"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "etb 2" or channel == "etb2":
        channel = "euskal telebista 2"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "factoría de ficción" or channel == "factoria de ficcion" or channel == "fdf":
channel = "fdf"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "buzz":
channel = "buzz"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "fox" or channel == "fox hd":
channel = "fox españa"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "fox life":
channel = "fox life"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "fox news":
channel = "fox news"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "historia" or channel == "historia hd":
channel = "canal de historia"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "natura" or channel == "canal natura":
channel = "canal natura"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cosmopolitan" or channel == "cosmopolitan tv":
channel = "cosmopolitan"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "hollywood" or channel == "hollywood channel":
channel = "canal hollywood"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "ib3 televisio" or channel == "ib3 televisió":
channel = "ib3 televisio"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "intereconomia" or channel == "intereconomía" or channel == "intereconomía tv":
channel = "intereconomia"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "mtv" or channel == "mtv españa" or channel == "mtv espana":
channel = "mtv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "nat geo wild":
channel = "nat geo wild"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "neox":
channel = "neox"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "nick jr." or channel == "nick jr":
channel = "nick jr."
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "odisea" or channel == "odisea hd":
channel = "odisea"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "nova":
channel = "nova"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "panda":
channel = "panda"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "paramount channel":
channel = "paramount channel"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "playboy tv":
channel = "playboy tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "playhouse disney":
channel = "playhouse disney"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "rtv murcia 7" or channel == "radiotelevisión de murcia" or channel == "rtv murcia":
channel = "7 región de murcia"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "real madrid tv":
channel = "real madrid tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "syfy" or channel== "syfy españa":
channel = "syfy españa"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "sony entertainment":
channel = "sony entertainment"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "sportmania" or channel == "sportmania hd":
channel = "sportmania"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "tcm":
channel = "tcm"
epg_channel = epg_formulatv(params, channel)
return epg_channel
    elif channel == "teledeporte":
channel = "teledeporte"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "telemadrid" or channel == "telemadrid hd":
channel = "telemadrid"
epg_channel = epg_formulatv(params, channel)
return epg_channel
    elif channel == "televisión canaria" or channel == "television canaria":
channel = "television canaria"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "televisión de galicia" or channel == "television de galicia" or channel == "tvg":
channel = "tvg"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "tnt" or channel == "tnt hd":
channel = "tnt españa"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "tv3" or channel == "tv3 hd":
channel = "tv3"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "vh1":
channel = "vh1"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "viajar":
channel = "canal viajar"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "baby tv":
channel = "baby tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal panda":
channel = "canal panda"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "arenasports 1":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-arena-sport-1')
return epg_channel
elif channel == "arenasports 2":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-arena-sport-2')
return epg_channel
elif channel == "arenasports 3":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-arena-sport-3')
return epg_channel
elif channel == "arenasports 4":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-arena-sport-4')
return epg_channel
elif channel == "arenasports 5":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-arena-sport-5')
return epg_channel
elif channel == "sportklub 1" or channel == "sport klub 1":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-sport-klub-1')
return epg_channel
elif channel == "sportklub 2" or channel == "sport klub 2":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-sport-klub-2')
return epg_channel
else:
return False
def epg_formulatv(params, channel):
plugintools.log('[%s %s].epg_formulatv %s' % (addonName, addonVersion, repr(params)))
thumbnail = params.get("thumbnail")
fanart = params.get("extra")
canal_buscado = channel
canal_buscado= canal_buscado.replace(" hd", "")
epg_channel = []
params["plot"]=""
backup_ftv = tmp + 'backup_ftv.txt'
if os.path.exists(backup_ftv):
pass
else:
backup_epg = open(backup_ftv, "a")
data = plugintools.read(params.get("url"))
backup_epg.write(data)
backup_epg.close()
    # Open the backup file
backup_epg = open(backup_ftv, "r")
data = backup_epg.read()
#plugintools.log("data= "+data)
    # Compute the current time
ahora = datetime.now()
minutejo = str(ahora.minute)
    if ahora.minute < 10:  # prepend a zero to the current minute when it is below ten
minuto_ahora = '0'+str(ahora.minute)
else:
minuto_ahora = str(ahora.minute)
hora_ahora = str(ahora.hour)+":"+minuto_ahora
epg_channel.append(hora_ahora) # index 0
    # Read the data source
body = plugintools.find_multiple_matches(data, '<td class="prga-i">(.*?)</tr>')
for entry in body:
channel = plugintools.find_single_match(entry, 'alt=\"([^"]+)')
channel = channel.lower()
plugintools.log("Buscando canal: "+canal_buscado)
plugintools.log("Channel: "+channel)
if channel == canal_buscado:
print 'channel',channel
evento_ahora = plugintools.find_single_match(entry, '<p>(.*?)</p>')
epg_channel.append(evento_ahora) # index 1
hora_luego = plugintools.find_single_match(entry, 'class="fec1">(.*)</span>')
hora_luego = hora_luego.split("</span>")
hora_luego = hora_luego[0]
#print 'hora_luego',hora_luego
epg_channel.append(hora_luego) # index 2
diff_luego = plugintools.find_single_match(entry, 'class="fdiff">([^<]+)').strip()
#print 'diff_luego',diff_luego
epg_channel.append(diff_luego) # index 3
evento_luego = plugintools.find_single_match(entry, '<span class="tprg1">(.*?)</span>')
#print 'evento_luego',evento_luego
epg_channel.append(evento_luego) # index 4
hora_mastarde = plugintools.find_single_match(entry, 'class="fec2">(.*)</span>')
hora_mastarde = hora_mastarde.split("</span>")
hora_mastarde = hora_mastarde[0]
epg_channel.append(hora_mastarde) # index 5
evento_mastarde = plugintools.find_single_match(entry, '<span class="tprg2">(.*?)</span>')
#print 'evento_mastarde',evento_mastarde
epg_channel.append(evento_mastarde) # index 6
sinopsis = '[COLOR lightgreen][I]('+diff_luego+') [/I][/COLOR][COLOR white][B]'+hora_luego+' [/COLOR][/B]'+evento_luego+'[CR][COLOR white][B][CR]'+hora_mastarde+' [/COLOR][/B] '+evento_mastarde
plugintools.log("Sinopsis: "+sinopsis)
datamovie = {}
datamovie["Plot"]=sinopsis
#plugintools.add_item(action="", title= '[COLOR orange][B]'+channel+' [/B][COLOR lightyellow]'+ahora+'[/COLOR] [COLOR lightgreen][I]('+diff_luego+') [/I][/COLOR][COLOR white][B]'+hora_luego+' [/COLOR][/B] '+evento_luego, info_labels = datamovie , thumbnail = thumbnail , fanart = fanart , folder = False, isPlayable = False)
#plugintools.log("entry= "+entry)
return epg_channel
# Request the URL
def gethttp_headers(params):
plugintools.log('[%s %s].gethttp_headers %s' % (addonName, addonVersion, repr(params)))
url = params.get("url")
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
request_headers.append(["Referer",'http://www.digitele.com/pluginfiles/canales/'])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
plugintools.log("body= "+body)
return body
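# Usage sketch (requires a Kodi/XBMC environment for the imports above): epg_ftv(title)
# normalises the channel label, scrapes formulatv.com and returns a list laid out as in
# epg_formulatv(): [0] current time "HH:MM", [1] programme on air now, [2] start time of
# the next programme, [3] time remaining until it, [4] next programme, [5] start time of
# the one after that, [6] its title. The helper below is purely illustrative and is not
# called by the plugin.
def format_epg_sketch(epg_channel):
    if not epg_channel or len(epg_channel) < 7:
        return ""
    return "{0} {1} | {2} ({3}) {4}".format(*epg_channel[:5])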
|
iptvgratis/TUPLAY
|
resources/tools/epg_formulatv.py
|
Python
|
gpl-3.0
| 24,492 | 0.007035 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
#import struct
from pycket import impersonators as imp
from pycket import values, values_string
from pycket.cont import continuation, loop_label, call_cont
from pycket.arity import Arity
from pycket import values_parameter
from pycket import values_struct
from pycket import values_regex
from pycket import vector as values_vector
from pycket.error import SchemeException, UserException
from pycket.foreign import W_CPointer, W_CType
from pycket.hash.equal import W_EqualHashTable
from pycket.hash.base import W_HashTable
from pycket.hash.simple import (W_EqImmutableHashTable, W_EqvImmutableHashTable, W_EqMutableHashTable, W_EqvMutableHashTable, make_simple_immutable_table)
from pycket.prims.expose import (unsafe, default, expose, expose_val, prim_env,
procedure, define_nyi, subclass_unsafe, make_procedure)
from pycket.prims.primitive_tables import *
from pycket.prims import string
from pycket.racket_paths import racket_sys_paths
from pycket.env import w_global_config
from rpython.rlib import jit, objectmodel, unroll, rgc
from rpython.rlib.rsre import rsre_re as re
# import for side effects
from pycket.prims import control
from pycket.prims import continuation_marks
from pycket.prims import char
from pycket.prims import box
from pycket.prims import equal as eq_prims
from pycket.prims import foreign
from pycket.prims import hash
from pycket.prims import impersonator
from pycket.prims import input_output
from pycket.prims import logging
from pycket.prims import numeric
from pycket.prims import parameter
from pycket.prims import random
from pycket.prims import regexp
from pycket.prims import string
from pycket.prims import struct_structinfo
from pycket.prims import undefined
from pycket.prims import vector
from rpython.rlib import jit
def make_pred(name, cls):
@expose(name, [values.W_Object], simple=True)
def predicate_(a):
return values.W_Bool.make(isinstance(a, cls))
predicate_.__name__ += cls.__name__
def make_dummy_char_pred(name):
@expose(name, [values.W_Character], simple=True)
def predicate_(a):
return values.w_false
predicate_.__name__ += name
def make_pred_eq(name, val):
typ = type(val)
@expose(name, [values.W_Object], simple=True)
def pred_eq(a):
return values.W_Bool.make(a is val)
for args in [
("output-port?", values.W_OutputPort),
("pair?", values.W_Cons),
("mpair?", values.W_MCons),
("number?", values.W_Number),
("complex?", values.W_Number),
("fixnum?", values.W_Fixnum),
("flonum?", values.W_Flonum),
("vector?", values.W_MVector),
("string?", values_string.W_String),
("symbol?", values.W_Symbol),
("boolean?", values.W_Bool),
("inspector?", values_struct.W_StructInspector),
("struct-type?", values_struct.W_StructType),
("struct-constructor-procedure?", values_struct.W_StructConstructor),
("struct-predicate-procedure?", values_struct.W_StructPredicate),
("struct-type-property?", values_struct.W_StructProperty),
("struct-type-property-accessor-procedure?",
values_struct.W_StructPropertyAccessor),
("box?", values.W_Box),
("variable-reference?", values.W_VariableReference),
("thread-cell?", values.W_ThreadCell),
("thread-cell-values?", values.W_ThreadCellValues),
("semaphore?", values.W_Semaphore),
("semaphore-peek-evt?", values.W_SemaphorePeekEvt),
("path?", values.W_Path),
("bytes?", values.W_Bytes),
("pseudo-random-generator?", values.W_PseudoRandomGenerator),
("char?", values.W_Character),
("continuation?", values.W_Continuation),
("continuation-mark-set?", values.W_ContinuationMarkSet),
("continuation-mark-key?", values.W_ContinuationMarkKey),
("primitive?", values.W_Prim),
("keyword?", values.W_Keyword),
("weak-box?", values.W_WeakBox),
("ephemeron?", values.W_Ephemeron),
("placeholder?", values.W_Placeholder),
("hash-placeholder?", values.W_HashTablePlaceholder),
("module-path-index?", values.W_ModulePathIndex),
("resolved-module-path?", values.W_ResolvedModulePath),
("impersonator-property-accessor-procedure?",
imp.W_ImpPropertyAccessor),
("impersonator-property?", imp.W_ImpPropertyDescriptor),
("parameter?", values_parameter.W_BaseParameter),
("parameterization?", values_parameter.W_Parameterization),
("hash?", W_HashTable),
("cpointer?", W_CPointer),
("ctype?", W_CType),
("continuation-prompt-tag?", values.W_ContinuationPromptTag),
("logger?", values.W_Logger),
("log-receiver?", values.W_LogReciever),
("evt?", values.W_Evt),
("unquoted-printing-string?", values.W_UnquotedPrintingString),
("port?", values.W_Port),
("security-guard?", values.W_SecurityGuard),
# FIXME
("will-executor?", values.W_WillExecutor),
("bytes-converter?", values.W_Impossible),
("fsemaphore?", values.W_Impossible),
("thread-group?", values.W_Impossible),
("udp?", values.W_Impossible),
("extflonum?", values.W_ExtFlonum),
("custodian-box?", values.W_Impossible),
("custodian?", values.W_Impossible),
("future?", values.W_Impossible),
]:
make_pred(*args)
for args in [
("void?", values.w_void),
("false?", values.w_false),
("null?", values.w_null),
]:
make_pred_eq(*args)
@expose("hash-weak?", [values.W_Object], simple=True)
def hash_weak_huh(obj):
# FIXME
return values.w_false
@expose("hash-strong?", [values.W_Object], simple=True)
def hash_strong_huh(obj):
# FIXME: /pypy/rpython/rlib/rweakref.py
return values.W_Bool.make(isinstance(obj, W_HashTable))
@expose("hash-ephemeron?", [values.W_Object], simple=True)
def hash_ephemeron_huh(obj):
# FIXME
return values.w_false
@expose("hash-equal?", [values.W_Object], simple=True)
def hash_equal_huh(obj):
inner = obj
if isinstance(obj, imp.W_ImpHashTable) or isinstance(obj, imp.W_ChpHashTable):
inner = obj.get_proxied()
return values.W_Bool.make(isinstance(inner, W_EqualHashTable))
@expose("hash-eq?", [values.W_Object], simple=True)
def hash_eq(obj):
inner = obj
if isinstance(obj, imp.W_ImpHashTable) or isinstance(obj, imp.W_ChpHashTable):
inner = obj.get_proxied()
eq_mutable = isinstance(inner, W_EqMutableHashTable)
eq_immutable = isinstance(inner, W_EqImmutableHashTable)
return values.W_Bool.make(eq_mutable or eq_immutable)
@expose("hash-eqv?", [values.W_Object], simple=True)
def hash_eqv(obj):
inner = obj
if isinstance(obj, imp.W_ImpHashTable) or isinstance(obj, imp.W_ChpHashTable):
inner = obj.get_proxied()
eqv_mutable = isinstance(inner, W_EqvMutableHashTable)
eqv_immutable = isinstance(inner, W_EqvImmutableHashTable)
return values.W_Bool.make(eqv_mutable or eqv_immutable)
def struct_port_huh(w_struct):
w_in, w_out = struct_port_prop_huh(w_struct)
return (w_in is not None) or (w_out is not None)
def struct_port_prop_huh(w_struct):
w_type = w_struct.struct_type()
in_property = out_property = None
for property in w_type.properties:
w_property, w_value = property
if w_property is values_struct.w_prop_input_port:
in_property = w_value
elif w_property is values_struct.w_prop_output_port:
out_property = w_value
return in_property, out_property
def struct_input_port_huh(w_struct):
w_in, w_out = struct_port_prop_huh(w_struct)
return w_in is not None
def struct_output_port_huh(w_struct):
w_in, w_out = struct_port_prop_huh(w_struct)
return w_out is not None
@expose("input-port?", [values.W_Object], simple=True)
def input_port_huh(a):
if isinstance(a, values.W_InputPort):
return values.w_true
elif isinstance(a, values_struct.W_Struct):
if struct_input_port_huh(a):
return values.w_true
return values.w_false
@expose("datum-intern-literal", [values.W_Object])
def datum_intern_literal(v):
return v
@expose("byte?", [values.W_Object])
def byte_huh(val):
if isinstance(val, values.W_Fixnum):
return values.W_Bool.make(0 <= val.value <= 255)
return values.w_false
@expose("regexp?", [values.W_Object])
def regexp_huh(r):
if isinstance(r, values_regex.W_Regexp) or isinstance(r, values_regex.W_PRegexp):
return values.w_true
return values.w_false
@expose("pregexp?", [values.W_Object])
def pregexp_huh(r):
if isinstance(r, values_regex.W_PRegexp):
return values.w_true
return values.w_false
@expose("byte-regexp?", [values.W_Object])
def byte_regexp_huh(r):
if isinstance(r, values_regex.W_ByteRegexp) or isinstance(r, values_regex.W_BytePRegexp):
return values.w_true
return values.w_false
@expose("byte-pregexp?", [values.W_Object])
def byte_pregexp_huh(r):
if isinstance(r, values_regex.W_BytePRegexp):
return values.w_true
return values.w_false
@expose("true-object?", [values.W_Object])
def true_object_huh(val):
if val is values.w_true:
return values.w_true
return values.w_false
@expose("procedure?", [values.W_Object])
def procedurep(n):
return values.W_Bool.make(n.iscallable())
@expose("syntax-original?", [values.W_Object], only_old=True)
def syntax_original(v):
return values.w_false
@expose("syntax-tainted?", [values.W_Object], only_old=True)
def syntax_tainted(v):
return values.w_false
@expose("syntax-source-module", [values.W_Object, default(values.W_Object, values.w_false)], only_old=True)
def syntax_source_module(stx, src):
# XXX Obviously not correct
return values.W_ResolvedModulePath(values.W_Symbol.make("fake symbol"))
@expose("srcloc->string", [values.W_Object])
def srcloc_to_string(obj):
return values.w_false
expose_val("null", values.w_null)
expose_val("true", values.w_true)
expose_val("false", values.w_false)
expose_val("break-enabled-key", values.break_enabled_key)
expose_val("exception-handler-key", values.exn_handler_key)
# FIXME: need stronger guards for all of these
for name in ["prop:evt",
"prop:impersonator-of",
"prop:method-arity-error"]:
expose_val(name, values_struct.W_StructProperty(
values.W_Symbol.make(name), values.w_false))
for name in ["exn:srclocs",
"custom-print-quotable"]:
prop = values_struct.W_StructProperty(values.W_Symbol.make(name), values.w_false)
expose_val("prop:"+name, prop)
expose_val(name+"?", values_struct.W_StructPropertyPredicate(prop))
expose_val(name+"-accessor", values_struct.W_StructPropertyAccessor(prop))
expose_val("prop:authentic", values_struct.w_prop_authentic)
expose_val("prop:sealed", values_struct.w_prop_sealed)
expose_val("prop:object-name", values_struct.w_prop_object_name)
expose_val("prop:procedure", values_struct.w_prop_procedure)
expose_val("prop:checked-procedure", values_struct.w_prop_checked_procedure)
expose_val("prop:arity-string", values_struct.w_prop_arity_string)
expose_val("prop:incomplete-arity", values_struct.w_prop_incomplete_arity)
expose_val("prop:custom-write", values_struct.w_prop_custom_write)
expose_val("prop:equal+hash", values_struct.w_prop_equal_hash)
expose_val("prop:chaperone-unsafe-undefined",
values_struct.w_prop_chaperone_unsafe_undefined)
expose_val("prop:set!-transformer", values_struct.w_prop_set_bang_transformer, only_old=True)
expose_val("prop:rename-transformer", values_struct.w_prop_rename_transformer, only_old=True)
expose_val("prop:expansion-contexts", values_struct.w_prop_expansion_contexts, only_old=True)
expose_val("prop:output-port", values_struct.w_prop_output_port)
expose_val("prop:input-port", values_struct.w_prop_input_port)
@continuation
def check_cont(proc, v, v1, v2, app, env, cont, _vals):
from pycket.interpreter import check_one_val, return_value
val = check_one_val(_vals)
if val is not values.w_false:
return v.ref_with_extra_info(1, app, env, cont)
return proc.call([v, v1, v2], env, cont)
@continuation
def receive_first_field(proc, v, v1, v2, app, env, cont, _vals):
from pycket.interpreter import check_one_val
first_field = check_one_val(_vals)
return first_field.call([v1, v2], env,
check_cont(proc, v, v1, v2, app, env, cont))
@expose("checked-procedure-check-and-extract",
[values_struct.W_StructType, values.W_Object, procedure,
values.W_Object, values.W_Object], simple=False, extra_info=True)
@jit.unroll_safe
def do_checked_procedure_check_and_extract(type, v, proc, v1, v2, env, cont, calling_app):
from pycket.interpreter import check_one_val, return_value
if isinstance(v, values_struct.W_RootStruct):
struct_type = jit.promote(v.struct_type())
if type.has_subtype(struct_type):
offset = struct_type.get_offset(type)
assert offset != -1
return v.ref_with_extra_info(offset, calling_app, env,
receive_first_field(proc, v, v1, v2, calling_app, env, cont))
return proc.call([v, v1, v2], env, cont)
################################################################
# printing
@expose("system-library-subpath", [default(values.W_Object, values.w_false)])
def sys_lib_subpath(mode):
# Pycket is 64bit only a.t.m.
if w_system_sym == w_windows_sym:
return values.W_Path(r"win32\\x86_64")
elif w_system_sym == w_macosx_sym:
return values.W_Path("x86_64-macosx")
else:
# FIXME: pretend all unicies are linux for now
return values.W_Path("x86_64-linux")
@expose("primitive-closure?", [values.W_Object])
def prim_clos(v):
return values.w_false
################################################################
# built-in struct types
def define_struct(name, w_super=values.w_null, fields=[]):
immutables = range(len(fields))
symname = values.W_Symbol.make(name)
w_struct_type = values_struct.W_StructType.make_simple(
w_name=symname,
w_super_type=w_super,
init_field_count=len(fields),
auto_field_count=0,
immutables=immutables)
expose_val("struct:" + name, w_struct_type)
expose_val(name, w_struct_type.constructor)
# this is almost always also provided
expose_val("make-" + name, w_struct_type.constructor)
expose_val(name + "?", w_struct_type.predicate)
struct_acc = w_struct_type.accessor
for field, field_name in enumerate(fields):
w_name = values.W_Symbol.make(field_name)
acc = values_struct.W_StructFieldAccessor(struct_acc, field, w_name)
expose_val(name + "-" + field_name, acc)
return w_struct_type
exn = \
define_struct("exn", values.w_null, ["message", "continuation-marks"])
exn_fail = \
define_struct("exn:fail", exn)
exn_fail_contract = \
define_struct("exn:fail:contract", exn_fail)
exn_fail_contract_arity = \
    define_struct("exn:fail:contract:arity", exn_fail_contract)
exn_fail_contract_divide_by_zero = \
    define_struct("exn:fail:contract:divide-by-zero", exn_fail_contract)
exn_fail_contract_non_fixnum_result = \
    define_struct("exn:fail:contract:non-fixnum-result", exn_fail_contract)
exn_fail_contract_continuation = \
    define_struct("exn:fail:contract:continuation", exn_fail_contract)
exn_fail_contract_variable = \
    define_struct("exn:fail:contract:variable", exn_fail_contract, ["id"])
exn_fail_syntax = \
define_struct("exn:fail:syntax", exn_fail, ["exprs"])
exn_fail_syntax_unbound = \
define_struct("exn:fail:syntax:unbound", exn_fail_syntax)
exn_fail_syntax_missing_module = \
define_struct("exn:fail:syntax:missing-module", exn_fail_syntax, ["path"])
exn_fail_read = \
define_struct("exn:fail:read", exn_fail, ["srclocs"])
exn_fail_read_eof = \
define_struct("exn:fail:read:eof", exn_fail_read)
exn_fail_read_non_char = \
define_struct("exn:fail:read:non-char", exn_fail_read)
exn_fail_fs = \
define_struct("exn:fail:filesystem", exn_fail)
exn_fail_fs_exists = \
define_struct("exn:fail:filesystem:exists", exn_fail_fs)
exn_fail_fs_version = \
define_struct("exn:fail:filesystem:version", exn_fail_fs)
exn_fail_fs_errno = \
define_struct("exn:fail:filesystem:errno", exn_fail_fs, ["errno"])
exn_fail_fs_missing_module = \
define_struct("exn:fail:filesystem:missing-module", exn_fail_fs, ["path"])
exn_fail_network = \
define_struct("exn:fail:network", exn_fail)
exn_fail_network_errno = \
define_struct("exn:fail:network:errno", exn_fail_network, ["errno"])
exn_fail_out_of_memory = \
define_struct("exn:fail:out-of-memory", exn_fail)
exn_fail_unsupported = \
define_struct("exn:fail:unsupported", exn_fail)
exn_fail_user = \
define_struct("exn:fail:user", exn_fail)
exn_break = \
define_struct("exn:break", exn)
exn_break_hang_up = \
define_struct("exn:break:hang-up", exn_break)
exn_break_terminate = \
define_struct("exn:break:terminate", exn_break)
srcloc = define_struct("srcloc",
fields=["source", "line", "column", "position", "span"])
date_struct = define_struct("date", fields=["second",
"minute",
"hour",
"day",
"month",
"year",
"week-day",
"year-day",
"dst?"
"time-zone-offset"])
date_star_struct = define_struct("date*", date_struct,
fields=["nanosecond", "time-zone-name"])
arity_at_least = define_struct("arity-at-least", values.w_null, ["value"])
for args in [ ("char-symbolic?",),
("char-graphic?",),
("char-blank?",),
("char-iso-control?",),
("char-punctuation?",),
("char-upper-case?",),
("char-title-case?",),
("char-lower-case?",),
]:
make_dummy_char_pred(*args)
for args in [ ("subprocess?",),
("file-stream-port?",),
("terminal-port?",),
("byte-ready?",),
("char-ready?",),
("handle-evt?",),
("thread?",),
("thread-running?",),
("thread-dead?",),
("semaphore-try-wait?",),
("link-exists?",),
("chaperone-channel",),
("impersonate-channel",),
]:
define_nyi(*args)
@expose("unsafe-make-place-local", [values.W_Object])
def unsafe_make_place_local(v):
return values.W_MBox(v)
@expose("unsafe-place-local-ref", [values.W_MBox], simple=False)
def unsafe_place_local_ref(p, env, cont):
return p.unbox(env, cont)
@expose("unsafe-place-local-set!", [values.W_MBox, values.W_Object], simple=False)
def unsafe_place_local_set(p, v, env, cont):
return p.set_box(v, env, cont)
@expose("set!-transformer?", [values.W_Object], only_old=True)
def set_bang_transformer(v):
if isinstance(v, values.W_AssignmentTransformer):
return values.w_true
elif isinstance(v, values_struct.W_RootStruct):
w_property = v.struct_type().read_property(
values_struct.w_prop_set_bang_transformer)
return values.W_Bool.make(w_property is not None)
else:
return values.w_false
@expose("object-name", [values.W_Object])
def object_name(v):
if isinstance(v, values.W_Prim):
return v.name
elif isinstance(v, values_regex.W_AnyRegexp) or isinstance(v, values.W_Port):
return v.obj_name()
return values_string.W_String.fromstr_utf8(v.tostring()) # XXX really?
@expose("find-main-config", [])
def find_main_config():
return values.w_false
@expose("version", [])
def version():
from pycket.env import w_version
version = w_version.get_version()
if version == '':
version = "old-pycket"
return values_string.W_String.fromascii("unknown version" if version is None else version)
@continuation
def sem_post_cont(sem, env, cont, vals):
sem.post()
from pycket.interpreter import return_multi_vals
return return_multi_vals(vals, env, cont)
@expose("call-with-semaphore", simple=False, extra_info=True)
def call_with_sem(args, env, cont, extra_call_info):
if len(args) < 2:
raise SchemeException("error call-with-semaphore")
sem = args[0]
f = args[1]
if len(args) == 2:
new_args = []
fail = None
else:
new_args = args[3:]
if args[2] is values.w_false:
fail = None
else:
fail = args[2]
assert isinstance(sem, values.W_Semaphore)
assert f.iscallable()
sem.wait()
return f.call_with_extra_info(new_args, env, sem_post_cont(sem, env, cont), extra_call_info)
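# Usage sketch (added for illustration, not part of the original source):
# (call-with-semaphore s (lambda () ...)) waits on s, calls the thunk with any
# extra arguments, and posts s again via sem_post_cont once the thunk's
# continuation delivers its values.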
c_thread = values.W_Thread()
@expose("current-thread", [])
def current_thread():
return c_thread
# FIXME : implementation
@expose("current-memory-use", [default(values.W_Object, values.w_false)])
def current_memory_use(mode):
# mode is : (or/c #f 'cumulative custodian?)
return values.W_Fixnum(1)
@expose("semaphore-post", [values.W_Semaphore])
def sem_post(s):
s.post()
@expose("semaphore-wait", [values.W_Semaphore])
def sem_wait(s):
s.wait()
@expose("procedure-rename", [procedure, values.W_Object])
def procedure_rename(p, n):
return p
@expose("procedure->method", [procedure])
def procedure_to_method(proc):
# TODO provide a real implementation
return proc
@jit.unroll_safe
def make_arity_list(arity, extra=None):
jit.promote(arity)
acc = values.w_null
if extra is not None:
acc = values.W_Cons.make(extra, acc)
for item in reversed(arity.arity_list):
i = values.W_Fixnum(item)
acc = values.W_Cons.make(i, acc)
return acc
@continuation
def proc_arity_cont(arity, env, cont, _vals):
from pycket.interpreter import check_one_val, return_value
val = check_one_val(_vals)
if not arity.arity_list:
return return_value(val, env, cont)
result = make_arity_list(arity, val)
return return_value(result, env, cont)
def arity_to_value(arity, env, cont):
from pycket.interpreter import return_value
if arity.at_least != -1:
val = [values.W_Fixnum(arity.at_least)]
constructor = arity_at_least.constructor
return constructor.call(val, env, proc_arity_cont(arity, env, cont))
if len(arity.arity_list) == 1:
item = values.W_Fixnum(arity.arity_list[0])
return return_value(item, env, cont)
result = make_arity_list(arity)
return return_value(result, env, cont)
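# Behavioural sketch (added for illustration, not part of the original source):
# following Racket's procedure-arity convention, a fixed one-argument procedure
# maps to the fixnum 1, an "n or more arguments" procedure maps to an
# (arity-at-least n) struct, and a procedure accepting e.g. 1 or 3 arguments maps
# to the list '(1 3), with any at-least tail appended last via proc_arity_cont above.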
@expose("procedure-arity", [procedure], simple=False)
@jit.unroll_safe
def do_procedure_arity(proc, env, cont):
arity = proc.get_arity()
return arity_to_value(arity, env, cont)
@expose("procedure-arity-mask", [procedure], simple=True)
@jit.unroll_safe
def do_procedure_arity_mask(proc):
arity = proc.get_arity()
return arity.arity_bits()
@make_procedure("default-read-handler",[values.W_InputPort, default(values.W_Object, None)], simple=False)
def default_read_handler(ip, src, env, cont):
# default to the "read" and "read-syntax" defined in the expander linklet
if src is None:
return prim_env[values.W_Symbol.make("read")].call([ip], env, cont)
else:
return prim_env[values.W_Symbol.make("read-syntax")].call([ip, src], env, cont)
@continuation
def get_read_handler_cont(env, cont, _vals):
from pycket.interpreter import check_one_val, return_value
ip = check_one_val(_vals)
assert isinstance(ip, values.W_InputPort)
if ip.get_read_handler():
return return_value(ip.get_read_handler(), env, cont)
else:
return return_value(default_read_handler, env, cont)
@expose("port-read-handler", [values.W_Object, default(values.W_Procedure, None)], simple=False)
def do_port_read_handler(ip, proc, env, cont):
from pycket.interpreter import return_value
if not isinstance(ip, values.W_InputPort):
assert isinstance(ip, values_struct.W_Struct)
st = ip.struct_type()
return st.accessor.call([ip, values.W_Fixnum(0)], env, get_read_handler_cont(env, cont))
if proc is None:
#get
if ip.get_read_handler():
return return_value(ip.get_read_handler(), env, cont)
else:
return return_value(default_read_handler, env, cont)
else:
#set
if proc is default_read_handler:
ip.set_read_handler(default_read_handler)
else:
ip.set_read_handler(proc)
return return_value(values.w_void, env, cont)
@expose("procedure-arity?", [values.W_Object])
@jit.unroll_safe
def do_is_procedure_arity(n):
if isinstance(n, values.W_Fixnum):
return values.W_Bool.make(n.value >= 0)
elif (isinstance(n, values_struct.W_RootStruct) and
n.struct_type() is arity_at_least):
return values.w_true
elif isinstance(n, values.W_List) and n.is_proper_list():
for item in values.from_list_iter(n):
if not (isinstance(item, values.W_Fixnum) or
(isinstance(item, values_struct.W_RootStruct) and
item.struct_type() is arity_at_least)):
return values.w_false
return values.w_true
return values.w_false
@expose("procedure-arity-includes?",
[procedure, values.W_Integer, default(values.W_Object, values.w_false)])
def procedure_arity_includes(proc, k, kw_ok):
if kw_ok is values.w_false and isinstance(proc, values_struct.W_RootStruct):
w_prop_val = proc.struct_type().read_property(values_struct.w_prop_incomplete_arity)
if w_prop_val is not None:
return values.w_false
if isinstance(k, values.W_Integer):
try:
k_val = k.toint()
except OverflowError:
pass
else:
arity = proc.get_arity(promote=True)
return values.W_Bool.make(arity.arity_includes(k_val))
return values.w_false
@expose("procedure-result-arity", [procedure], simple=False)
def procedure_result_arity(proc, env, cont):
from pycket.interpreter import return_multi_vals
arity = proc.get_result_arity()
if arity is None:
return return_multi_vals(values.w_false, env, cont)
return arity_to_value(arity, env, cont)
@expose("procedure-reduce-arity", [procedure, values.W_Object, default(values.W_Object, None)])
def procedure_reduce_arity(proc, arity, e):
# FIXME : this code is all wrong
#assert isinstance(arity, Arity)
#proc.set_arity(arity)
return proc
@expose("procedure-reduce-arity-mask", [procedure, values.W_Fixnum, default(values.W_Object, values.w_false)])
def procedure_reduce_arity_mask(proc, mask, name):
import math
return proc # FIXME: do this without mutation
v = mask.value
# turn the given mask into an arity
if v < 0:
# it's an at least value
ar_value = int(math.log(abs(v))/math.log(2))
# for some reason the 2 argument log doesn't exist
ar = Arity([], ar_value)
else:
ar_value = int(math.log(v)/math.log(2))
ar = Arity([ar_value], -1)
# FIXME: what if the mask represents a list? see math_arity_cont
# FIXME: mutation is wrong!
proc.set_arity(ar)
return proc
@expose("procedure-struct-type?", [values_struct.W_StructType])
def do_is_procedure_struct_type(struct_type):
return values.W_Bool.make(struct_type.prop_procedure is not None)
@expose("procedure-extract-target", [procedure], simple=False)
def do_procedure_extract_target(proc, env, cont):
from pycket.interpreter import return_value
if not isinstance(proc, values_struct.W_RootStruct):
return return_value(values.w_false, env, cont)
struct_type = proc.struct_type()
prop_procedure = struct_type.prop_procedure
if isinstance(prop_procedure, values.W_Fixnum):
idx = prop_procedure.value
return struct_type.accessor.access(proc, idx, env, cont)
return return_value(values.w_false, env, cont)
@expose("variable-reference-constant?",
[values.W_VariableReference], simple=False)
def varref_const(varref, env, cont):
from pycket.interpreter import return_value
return return_value(values.W_Bool.make(not(varref.varref.is_mutable(env))),
env, cont)
@expose("variable-reference->resolved-module-path",
[values.W_VariableReference], only_old=True)
def varref_rmp(varref):
return values.W_ResolvedModulePath(values.W_Path(varref.varref.path))
@expose("variable-reference->module-source", [values.W_VariableReference], only_old=True)
def varref_ms(varref):
# FIXME: not implemented
return values.W_Symbol.make("dummy_module")
@expose("variable-reference->module-path-index", [values.W_VariableReference], only_old=True)
def varref_to_mpi(ref):
from pycket.interpreter import ModuleVar
if not isinstance(ref, ModuleVar):
return values.w_false
return values.W_ModulePathIndex()
@expose("variable-reference->module-base-phase", [values.W_VariableReference], only_old=True)
def varref_to_mbp(ref):
# XXX Obviously not correct
return values.W_Fixnum.ZERO
@expose("resolved-module-path-name", [values.W_ResolvedModulePath], only_old=True)
def rmp_name(rmp):
return rmp.name
def is_module_path(v):
if isinstance(v, values.W_Symbol):
# FIXME: not always right
return True
if isinstance(v, values.W_Path):
return True
if isinstance(v, values_string.W_String):
return True
if isinstance(v, values.W_List):
vs = values.from_list(v)
for p in vs:
if not is_module_path(p):
return False
return True
# FIXME
return False
@expose("module-path?", [values.W_Object], only_old=True)
def module_pathp(v):
return values.W_Bool.make(is_module_path(v))
@expose("values")
def do_values(args_w):
return values.Values.make(args_w)
@expose("call-with-values", [procedure] * 2, simple=False, extra_info=True)
def call_with_values (producer, consumer, env, cont, extra_call_info):
# FIXME: check arity
return producer.call_with_extra_info([], env, call_cont(consumer, env, cont), extra_call_info)
@continuation
def time_apply_cont(initial, initial_user, initial_gc, env, cont, vals):
from pycket.interpreter import return_multi_vals
final = time.time()
final_gc = current_gc_time()
final_user = time.clock()
ms = values.W_Fixnum(int((final - initial) * 1000))
ms_gc = values.W_Fixnum(int((final_gc - initial_gc)))
ms_user = values.W_Fixnum(int((final_user - initial_user) * 1000))
vals_w = vals.get_all_values()
results = values.Values.make([values.to_list(vals_w),
ms_user, ms, ms_gc])
return return_multi_vals(results, env, cont)
@jit.dont_look_inside
def current_gc_time():
if objectmodel.we_are_translated():
memory = rgc.get_stats(rgc.TOTAL_GC_TIME)
else:
memory = 0
return memory
@expose("time-apply", [procedure, values.W_List], simple=False, extra_info=True)
def time_apply(a, args, env, cont, extra_call_info):
initial = time.time()
initial_user = time.clock()
initial_gc = current_gc_time()
return a.call_with_extra_info(values.from_list(args),
env, time_apply_cont(initial, initial_user, initial_gc, env, cont),
extra_call_info)
@expose("apply", simple=False, extra_info=True)
def apply(args, env, cont, extra_call_info):
if len(args) < 2:
raise SchemeException("apply expected at least 2 arguments, given %s" % len(args))
fn = args[0]
if not fn.iscallable():
raise SchemeException("apply expected a procedure, got something else")
lst = args[-1]
try:
fn_arity = fn.get_arity(promote=True)
if fn_arity is Arity.unknown or fn_arity.at_least == -1:
unroll_to = 3
elif fn_arity.arity_list:
unroll_to = fn_arity.arity_list[-1]
else:
unroll_to = fn_arity.at_least + 7
rest = values.from_list(lst, unroll_to=unroll_to, force=True)
except SchemeException:
raise SchemeException(
"apply expected a list as the last argument, got something else")
args_len = len(args) - 1
assert args_len >= 0
others = args[1:args_len]
new_args = others + rest
return fn.call_with_extra_info(new_args, env, cont, extra_call_info)
@expose("make-semaphore", [default(values.W_Fixnum, values.W_Fixnum.ZERO)])
def make_semaphore(n):
return values.W_Semaphore(n.value)
@expose("semaphore-peek-evt", [values.W_Semaphore])
def sem_peek_evt(s):
return values.W_SemaphorePeekEvt(s)
@expose("not", [values.W_Object])
def notp(a):
return values.W_Bool.make(a is values.w_false)
@jit.elidable
def elidable_length(lst):
n = 0
while isinstance(lst, values.W_Cons):
n += 1
lst = lst.cdr()
return n
@objectmodel.always_inline
def unroll_pred(lst, idx, unroll_to=0):
if not jit.we_are_jitted():
return False
return not jit.isvirtual(lst) and idx > unroll_to
@jit.unroll_safe
def virtual_length(lst, unroll_to=0):
n = 0
while isinstance(lst, values.W_Cons):
if unroll_pred(lst, n, unroll_to):
return elidable_length(lst) + n
n += 1
lst = lst.cdr()
return n
@expose("length", [values.W_List])
def length(a):
if not a.is_proper_list():
raise SchemeException("length: not given a proper list (either cyclic or not null terminated)")
return values.W_Fixnum(virtual_length(a, unroll_to=2))
@expose("list")
def do_list(args):
return values.to_list(args)
@expose("list*")
def do_liststar(args):
if not args:
raise SchemeException("list* expects at least one argument")
return values.to_improper(args[:-1], args[-1])
@expose("assq", [values.W_Object, values.W_List])
def assq(a, b):
while isinstance(b, values.W_Cons):
head, b = b.car(), b.cdr()
if not isinstance(head, values.W_Cons):
raise SchemeException("assq: found a non-pair element")
if eq_prims.eqp_logic(a, head.car()):
return head
if b is not values.w_null:
raise SchemeException("assq: reached a non-pair")
return values.w_false
@expose("memq", [values.W_Object, values.W_List])
def memq(w_o, w_l):
while isinstance(w_l, values.W_Cons):
if eq_prims.eqp_logic(w_o, w_l.car()):
return w_l
w_l = w_l.cdr()
return values.w_false
@expose("memv", [values.W_Object, values.W_List])
def memv(w_o, w_l):
while isinstance(w_l, values.W_Cons):
if w_o.eqv(w_l.car()):
return w_l
w_l = w_l.cdr()
return values.w_false
@expose("cons", [values.W_Object, values.W_Object])
def do_cons(a, b):
return values.W_Cons.make(a, b)
def make_list_eater(name):
"""
For generating car, cdr, caar, cadr, etc...
"""
spec = name[1:-1]
unrolled = unroll.unrolling_iterable(reversed(spec))
contract = "pair?"
for letter in spec[1::-1]:
if letter == 'a':
contract = "(cons/c %s any/c)" % contract
elif letter == 'd':
contract = "(cons/c any/c %s)" % contract
else:
assert False, "Bad list eater specification"
@expose(name, [values.W_Object])
def process_list(_lst):
lst = _lst
for letter in unrolled:
if not isinstance(lst, values.W_Cons):
raise SchemeException("%s: expected %s given %s" % (name, contract, _lst))
if letter == 'a':
lst = lst.car()
elif letter == 'd':
lst = lst.cdr()
else:
assert False, "Bad list eater specification"
return lst
process_list.__name__ = "do_" + name
return process_list
def list_eater_names(n):
names = []
for i in range(n):
names = [n + 'a' for n in names] + [n + 'd' for n in names] + ['a', 'd']
return ["c%sr" % name for name in names]
for name in list_eater_names(4):
make_list_eater(name)
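# Illustration (added, not in the original source): list_eater_names(4) yields
# "car", "cdr", "caar", "cadr", ..., "cddddr"; each generated primitive walks its
# spec right-to-left, so for example (cadr '(1 2 3)) takes the cdr first and then
# the car, returning 2.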
@expose("mlist")
def do_mlist(args):
return values.to_mlist(args)
@expose("mcons", [values.W_Object, values.W_Object])
def do_mcons(a, b):
return values.W_MCons(a,b)
@expose("mcar", [values.W_MCons])
def do_mcar(a):
return a.car()
@expose("mcdr", [values.W_MCons])
def do_mcdr(a):
return a.cdr()
@expose("set-mcar!", [values.W_MCons, values.W_Object])
def do_set_mcar(a, b):
a.set_car(b)
@expose("set-mcdr!", [values.W_MCons, values.W_Object])
def do_set_mcdr(a, b):
a.set_cdr(b)
@expose("map", simple=False, arity=Arity.geq(2))
def do_map(args, env, cont):
# XXX this is currently not properly jitted
if len(args) < 2:
raise SchemeException("map expected at least two argument, got %s"%len(args))
fn, lists = args[0], args[1:]
if not fn.iscallable():
raise SchemeException("map expected a procedure, got something else")
# FIXME: more errorchecking
assert len(args) >= 0
return map_loop(fn, lists, env, cont)
@loop_label
def map_loop(f, lists, env, cont):
from pycket.interpreter import return_value
lists_new = []
args = []
for l in lists:
if not isinstance(l, values.W_Cons):
if l is not values.w_null:
raise SchemeException("map: not given a proper list")
return return_value(values.w_null, env, cont)
args.append(l.car())
lists_new.append(l.cdr())
return f.call(args, env, map_first_cont(f, lists_new, env, cont))
@continuation
def map_first_cont(f, lists, env, cont, _vals):
from pycket.interpreter import check_one_val
val = check_one_val(_vals)
return map_loop(f, lists, env, map_cons_cont(f, lists, val, env, cont))
@continuation
def map_cons_cont(f, lists, val, env, cont, _vals):
from pycket.interpreter import check_one_val, return_value
rest = check_one_val(_vals)
return return_value(values.W_Cons.make(val, rest), env, cont)
@expose("for-each", simple=False, arity=Arity.geq(2))
@jit.unroll_safe
def for_each(args, env, cont):
from pycket.interpreter import return_value
if len(args) < 2:
raise SchemeException("for-each: expected at least a procedure and a list")
f = args[0]
if not f.iscallable():
raise SchemeException("for-each: expected a procedure, but got %s" % f)
ls = args[1:]
for l in ls:
if not l.is_proper_list():
raise SchemeException("for-each: expected a list, but got %s" % l)
return for_each_loop(f, ls, env, cont)
@loop_label
@jit.unroll_safe
def for_each_loop(func, args, env, cont):
from pycket.interpreter import return_value
nargs = jit.promote(len(args))
heads = [None] * nargs
tails = [None] * nargs
for i in range(nargs):
arg = args[i]
if arg is values.w_null:
for v in args:
if v is not values.w_null:
raise SchemeException("for-each: all lists must have same size")
return return_value(values.w_void, env, cont)
assert isinstance(arg, values.W_Cons)
heads[i] = arg.car()
tails[i] = arg.cdr()
return func.call(heads, env,
for_each_cont(func, tails, env, cont))
@continuation
def for_each_cont(func, tails, env, cont, _vals):
return for_each_loop(func, tails, env, cont)
@expose("andmap", simple=False, arity=Arity.geq(2))
def andmap(args, env, cont):
from pycket.interpreter import return_value
if len(args) < 2:
raise SchemeException("andmap: expected at least a procedure and a list")
f = args[0]
if not f.iscallable():
raise SchemeException("andmap: expected a procedure, but got %s"%f)
ls = args[1:]
for l in ls:
if not isinstance(l, values.W_List):
raise SchemeException("andmap: expected a list, but got %s"%l)
return return_value(values.w_void, env, andmap_cont(f, ls, env, cont))
@continuation
def andmap_cont(f, ls, env, cont, vals):
# XXX this is currently not properly jitted
from pycket.interpreter import return_value, check_one_val
val = check_one_val(vals)
if val is values.w_false:
return return_value(val, env, cont)
for l in ls:
if l is values.w_null:
return return_value(values.w_true, env, cont)
cars = [l.car() for l in ls]
cdrs = [l.cdr() for l in ls]
return f.call(cars, env, andmap_cont(f, cdrs, env, cont))
@expose("ormap", simple=False, arity=Arity.geq(2))
def ormap(args, env, cont):
from pycket.interpreter import return_value
if len(args) < 2:
raise SchemeException("ormap: expected at least a procedure and a list")
f = args[0]
if not f.iscallable():
raise SchemeException("ormap: expected a procedure, but got %s"%f)
ls = args[1:]
for l in ls:
if not isinstance(l, values.W_List):
raise SchemeException("ormap: expected a list, but got %s"%l)
return return_value(values.w_false, env, ormap_cont(f, ls, env, cont))
@continuation
def ormap_cont(f, ls, env, cont, vals):
# XXX this is currently not properly jitted
from pycket.interpreter import return_value, check_one_val
val = check_one_val(vals)
if val is not values.w_false:
return return_value(val, env, cont)
for l in ls:
if l is values.w_null:
return return_value(values.w_false, env, cont)
cars = [l.car() for l in ls]
cdrs = [l.cdr() for l in ls]
return f.call(cars, env, ormap_cont(f, cdrs, env, cont))
@expose("append", arity=Arity.geq(0))
@jit.look_inside_iff(
lambda l: jit.loop_unrolling_heuristic(l, len(l), values.UNROLLING_CUTOFF))
def append(lists):
if not lists:
return values.w_null
acc = lists[-1]
for i in range(len(lists) - 2, -1, -1):
curr = lists[i]
if not curr.is_proper_list():
raise SchemeException("append: expected proper list")
acc = append_two(curr, acc)
return acc
def append_two(l1, l2):
first = None
last = None
while isinstance(l1, values.W_Cons):
v = l1.clone()
if first is None:
first = v
else:
last._unsafe_set_cdr(v)
last = v
l1 = l1.cdr()
if last is None:
return l2
last._unsafe_set_cdr(l2)
return first
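# Behavioural note (added for illustration): append copies every argument except
# the last one cell-by-cell via clone()/_unsafe_set_cdr, while the final list is
# shared unchanged -- e.g. (append '(1 2) tail) allocates fresh pairs for 1 and 2
# and splices tail in as-is, matching Racket's sharing behaviour for append.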
@expose("reverse", [values.W_List])
def reverse(w_l):
acc = values.w_null
while isinstance(w_l, values.W_Cons):
val, w_l = w_l.car(), w_l.cdr()
acc = values.W_Cons.make(val, acc)
if w_l is not values.w_null:
raise SchemeException("reverse: not given proper list")
return acc
@expose("void", arity=Arity.geq(0))
def do_void(args):
return values.w_void
@expose("make-ephemeron", [values.W_Object] * 2)
def make_ephemeron(key, val):
return values.W_Ephemeron(key, val)
@expose("ephemeron-value",
[values.W_Ephemeron, default(values.W_Object, values.w_false)])
def ephemeron_value(ephemeron, default):
v = ephemeron.get()
return v if v is not None else default
@expose("make-placeholder", [values.W_Object])
def make_placeholder(val):
return values.W_Placeholder(val)
@expose("placeholder-set!", [values.W_Placeholder, values.W_Object])
def placeholder_set(ph, datum):
ph.value = datum
return values.w_void
@expose("placeholder-get", [values.W_Placeholder])
def placeholder_get(ph):
return ph.value
@expose("make-hash-placeholder", [values.W_List])
def make_hash_placeholder(vals):
return values.W_HashTablePlaceholder([], [])
@expose("make-hasheq-placeholder", [values.W_List])
def make_hasheq_placeholder(vals):
return values.W_HashTablePlaceholder([], [])
@expose("make-hasheqv-placeholder", [values.W_List])
def make_hasheqv_placeholder(vals):
return values.W_HashTablePlaceholder([], [])
@expose("list?", [values.W_Object])
def listp(v):
return values.W_Bool.make(v.is_proper_list())
@expose("list-pair?", [values.W_Object])
def list_pair(v):
return values.W_Bool.make(isinstance(v, values.W_Cons) and v.is_proper_list())
def enter_list_ref_iff(lst, pos):
if jit.isconstant(lst) and jit.isconstant(pos):
return True
return jit.isconstant(pos) and pos <= 16
@jit.look_inside_iff(enter_list_ref_iff)
def list_ref_impl(lst, pos):
if pos < 0:
raise SchemeException("list-ref: negative index")
for i in range(pos):
lst = lst.cdr()
if not isinstance(lst, values.W_Cons):
raise SchemeException("list-ref: index out of range")
return lst.car()
@expose("list-ref", [values.W_Cons, values.W_Fixnum])
def list_ref(lst, pos):
return list_ref_impl(lst, pos.value)
@expose("unsafe-list-ref", [subclass_unsafe(values.W_Cons), values.W_Fixnum])
def unsafe_list_ref(lst, pos):
return list_ref_impl(lst, pos.value)
@expose("unsafe-list-tail", [subclass_unsafe(values.W_Object), values.W_Fixnum])
def unsafe_list_tail(lst, pos):
return list_tail_impl(lst, pos)
@expose("list-tail", [values.W_Object, values.W_Fixnum])
def list_tail(lst, pos):
return list_tail_impl(lst, pos)
def list_tail_impl(lst, pos):
start_pos = pos.value
while start_pos > 0:
if not isinstance(lst, values.W_Cons):
msg = "index too large for list" if lst is values.w_null else "index reaches a non-pair"
raise SchemeException("list-tail : %s\n -- lst : %s\n -- index : %s\n" % (msg, lst.tostring(), start_pos))
lst = lst.cdr()
start_pos -= 1
return lst
@expose("assoc", [values.W_Object, values.W_List, default(values.W_Object, values.w_false)])
def assoc(v, lst, is_equal):
if is_equal is not values.w_false:
raise SchemeException("assoc: using a custom equal? is not yet implemented")
while isinstance(lst, values.W_Cons):
c = lst.car()
        if not isinstance(c, values.W_Cons):
raise SchemeException("assoc: non-pair found in list: %s in %s" % (c.tostring(), lst.tostring()))
cc = c.car()
if v.equal(cc):
return c
lst = lst.cdr()
return values.w_false
@expose("current-seconds", [])
def current_seconds():
tick = int(time.time())
return values.W_Fixnum(tick)
@expose("current-inexact-milliseconds", [])
def curr_millis():
return values.W_Flonum(time.time() * 1000.0)
@expose("seconds->date", [values.W_Fixnum])
def seconds_to_date(s):
# TODO: Proper implementation
return values.w_false
def _error(args, is_user=False):
reason = ""
if len(args) == 1:
sym = args[0]
reason = "error: %s" % sym.tostring()
else:
first_arg = args[0]
if isinstance(first_arg, values_string.W_String):
from rpython.rlib.rstring import StringBuilder
msg = StringBuilder()
msg.append(first_arg.tostring())
v = args[1:]
for item in v:
msg.append(" %s" % item.tostring())
reason = msg.build()
else:
src = first_arg
form = args[1]
v = args[2:]
assert isinstance(src, values.W_Symbol)
assert isinstance(form, values_string.W_String)
reason = "%s: %s" % (
src.tostring(), input_output.format(form, v, "error"))
if is_user:
raise UserException(reason)
else:
raise SchemeException(reason)
@expose("error", arity=Arity.geq(1))
def error(args):
return _error(args, False)
@expose("raise-user-error", arity=Arity.geq(1))
def raise_user_error(args):
return _error(args, True)
@expose("raise-arity-error", arity=Arity.geq(2))
def raise_arity_error(args):
return _error(args, False)
@expose("raise-result-arity-error", arity=Arity.geq(3))
def raise_result_arity_error(args):
return _error(args, False)
@expose("list->vector", [values.W_List])
def list2vector(l):
return values_vector.W_Vector.fromelements(values.from_list(l))
# FIXME: make this work with chaperones/impersonators
@expose("vector->list", [values.W_MVector], simple=False)
def vector2list(v, env, cont):
from pycket.interpreter import return_value
if isinstance(v, values_vector.W_Vector):
# Fast path for unproxied vectors
result = values.vector_to_improper(v, values.w_null)
return return_value(result, env, cont)
return vector_to_list_loop(v, v.length() - 1, values.w_null, env, cont)
@loop_label
def vector_to_list_loop(vector, idx, acc, env, cont):
from pycket.interpreter import return_value
if idx < 0:
return return_value(acc, env, cont)
return vector.vector_ref(idx, env,
vector_to_list_read_cont(vector, idx, acc, env, cont))
@continuation
def vector_to_list_read_cont(vector, idx, acc, env, cont, _vals):
from pycket.interpreter import check_one_val, return_value
val = check_one_val(_vals)
acc = values.W_Cons.make(val, acc)
return vector_to_list_loop(vector, idx - 1, acc, env, cont)
# Unsafe pair ops
@expose("unsafe-car", [subclass_unsafe(values.W_Cons)])
def unsafe_car(p):
return p.car()
@expose("unsafe-mcar", [subclass_unsafe(values.W_MCons)])
def unsafe_mcar(p):
return p.car()
@expose("unsafe-cdr", [subclass_unsafe(values.W_Cons)])
def unsafe_cdr(p):
return p.cdr()
@expose("unsafe-mcdr", [subclass_unsafe(values.W_MCons)])
def unsafe_mcdr(p):
return p.cdr()
@continuation
def struct_port_loc_cont(input_huh, env, cont, _vals):
from pycket.interpreter import check_one_val, return_multi_vals
pr = check_one_val(_vals)
if not isinstance(pr, values.W_Port):
if input_huh:
# empty string input port is used for prop:input-port
pr = values.W_StringInputPort("")
else:
# a port that discards all data is used for prop:output-port
pr = values.W_StringOutputPort()
assert isinstance(pr, values.W_Port)
lin = pr.get_line()
col = pr.get_column()
pos = pr.get_position()
return return_multi_vals(values.Values.make([lin, col, pos]), env, cont)
@expose("port-next-location", [values.W_Object], simple=False)
def port_next_loc(p, env, cont):
from pycket.interpreter import return_multi_vals
lin = col = pos = values.w_false
if isinstance(p, values_struct.W_Struct):
i, o = struct_port_prop_huh(p)
if (i is None) and (o is None):
raise SchemeException("given struct doesn't have neither prop:input-port nor prop:output-port")
if i:
if isinstance(i, values.W_InputPort):
lin = i.get_line()
col = i.get_column()
pos = i.get_position()
elif isinstance(i, values.W_Fixnum):
port_index = i.value
return p.struct_type().accessor.call([p, values.W_Fixnum(port_index)], env, struct_port_loc_cont(True, env, cont))
else:
raise SchemeException("invalid value %s for prop:input-port of the given struct : %s" % (i, p.tostring()))
elif o:
if isinstance(o, values.W_OutputPort):
lin = o.get_line()
col = o.get_column()
pos = o.get_position()
elif isinstance(o, values.W_Fixnum):
port_index = o.value
return p.struct_type().accessor.call([p, values.W_Fixnum(port_index)], env, struct_port_loc_cont(False, env, cont))
else:
raise SchemeException("invalid value %s for prop:output-port of the given struct : %s" % (o, p.tostring()))
else:
assert isinstance(p, values.W_Port)
lin = p.get_line()
col = p.get_column()
pos = p.get_position()
return return_multi_vals(values.Values.make([lin, col, pos]), env, cont)
@expose("port-writes-special?", [values.W_Object])
def port_writes_special(v):
return values.w_false
@expose("port-writes-atomic?", [values.W_Object])
def port_writes_atomic(v):
return values.w_false
@expose("port-provides-progress-evts?", [values.W_Object])
def port_ppe(v):
return values.w_false
@expose("file-position*", [values.W_Object])
def file_pos_star(v):
return values.w_false
@expose("symbol-unreadable?", [values.W_Symbol])
def sym_unreadable(v):
if v.unreadable:
return values.w_true
return values.w_false
@expose("symbol-interned?", [values.W_Symbol])
def symbol_interned(v):
return values.W_Bool.make(v.is_interned())
@expose("symbol<?", arity=Arity.geq(1))
def symbol_lt(args):
name = "symbol<?"
if len(args) < 2:
raise SchemeException(name + ": requires at least 2 arguments")
head = args[0]
if not isinstance(head, values.W_Symbol):
raise SchemeException(name + ": not given a string")
for i in range(1, len(args)):
t = args[i]
if not isinstance(t, values.W_Symbol):
raise SchemeException(name + ": not given a string")
# FIXME: shouldn't need to convert to W_String
# but this is much easier than recreating the logic
if string.symbol_to_string_impl(head).cmp(string.symbol_to_string_impl(t)) >= 0:
return values.w_false
head = t
return values.w_true
@expose("immutable?", [values.W_Object])
def immutable(v):
return values.W_Bool.make(v.immutable())
@expose("make-thread-cell",
[values.W_Object, default(values.W_Bool, values.w_false)])
def make_thread_cell(v, pres):
return values.W_ThreadCell(v, False if pres is values.w_false else True)
@expose("thread-cell-ref", [values.W_ThreadCell])
def thread_cell_ref(cell):
return cell.value
@expose("thread-cell-set!", [values.W_ThreadCell, values.W_Object])
def thread_cell_set(cell, v):
cell.value = v
return values.w_void
@expose("current-preserved-thread-cell-values",
[default(values.W_ThreadCellValues, None)])
def current_preserved_thread_cell_values(v):
# Generate a new thread-cell-values object
if v is None:
return values.W_ThreadCellValues()
# Otherwise, we restore the values
for cell, val in v.assoc.iteritems():
assert cell.preserved
cell.value = val
return values.w_void
@expose("place-enabled?")
def do_is_place_enabled(args):
return values.w_false
@expose("gensym", [default(values.W_Object, values.W_Symbol.make("g"))])
def gensym(init):
from pycket.interpreter import Gensym
if not isinstance(init, values.W_Symbol) and not isinstance(init, values_string.W_String):
raise SchemeException("gensym exptected a string or symbol but got : %s" % init.tostring())
gensym_key = init.tostring()
return Gensym.gensym(gensym_key)
@expose("keyword<?", [values.W_Keyword, values.W_Keyword])
def keyword_less_than(a_keyword, b_keyword):
return values.W_Bool.make(a_keyword.value < b_keyword.value)
initial_env_vars = values.W_EnvVarSet({}, True)
expose_val("current-environment-variables", values_parameter.W_Parameter(initial_env_vars))
@expose("environment-variables-ref", [values.W_EnvVarSet, values.W_Bytes])
def env_var_ref(set, name):
r = set.get(name.as_str())
if r is None:
return values.w_false
else:
return values.W_Bytes.from_string(r)
@expose("environment-variables-set!", [values.W_EnvVarSet, values.W_Bytes, values.W_Bytes, default(values.W_Object, None)])
def env_var_set(set, name, val, fail):
return set.set(name.as_str(), val.as_str())
@expose("make-environment-variables")
def make_env_var(args):
return values.W_EnvVarSet({}, False)
@expose("environment-variables-names", [values.W_EnvVarSet])
def env_var_names(set):
names = set.get_names()
return values.to_list([values.W_Bytes.from_string(n) for n in names])
@expose("check-for-break", [])
def check_for_break():
return values.w_false
@expose("find-system-path", [values.W_Symbol], simple=True)
def find_sys_path(kind):
return racket_sys_paths.get_path(kind)
@expose("find-main-collects", [])
def find_main_collects():
return values.w_false
@expose("module-path-index-join",
[values.W_Object, values.W_Object, default(values.W_Object, None)], only_old=True)
def mpi_join(a, b, c):
return values.W_ModulePathIndex()
@expose("module-path-index-resolve",
[values.W_ModulePathIndex], only_old=True)
def mpi_resolve(a):
return values.W_ResolvedModulePath(values.W_Path("."))
# Loading
# FIXME: Proper semantics.
@expose("load", [values_string.W_String], simple=False, only_old=True)
def load(lib, env, cont):
from pycket.expand import ensure_json_ast_run
lib_name = lib.tostring()
json_ast = ensure_json_ast_run(lib_name)
if json_ast is None:
raise SchemeException(
"can't gernerate load-file for %s " % lib.tostring())
#ast = load_json_ast_rpython(json_ast)
raise NotImplementedError(
"would crash anyway when trying to interpret the Module")
#return ast, env, cont
expose_val("current-load-relative-directory", values_parameter.W_Parameter(values.w_false))
expose_val("current-write-relative-directory", values_parameter.W_Parameter(values.w_false))
initial_security_guard = values.W_SecurityGuard()
expose_val("current-security-guard", values_parameter.W_Parameter(initial_security_guard))
@expose("make-security-guard", [values.W_SecurityGuard, values.W_Procedure, values.W_Procedure, default(values.W_Procedure, values.w_false)])
def make_security_guard(parent, file, network, link):
return values.W_SecurityGuard()
@expose("unsafe-make-security-guard-at-root")
def unsafe_make_sec_guard(args):
return values.W_SecurityGuard()
@make_procedure("current-directory-guard", [values.W_Object], simple=False)
def current_directory_guard(path, env, cont):
from pycket.interpreter import return_value
# "cd"s at the os level
if not (isinstance(path, values_string.W_String) or isinstance(path, values.W_Path)):
raise SchemeException("current-directory: exptected a path-string? as argument 0, but got : %s" % path.tostring())
path_str = input_output.extract_path(path)
# if path is a complete-path?, set it
if path_str[0] == os.path.sep:
new_current_dir = path_str
else: # relative to the current one
current_dir = current_directory_param.get(cont)
current_path_str = input_output.extract_path(current_dir)
# let's hope that there's no symbolic links etc.
new_current_dir = os.path.normpath(os.path.sep.join([current_path_str, path_str]))
try:
os.chdir(new_current_dir)
except OSError:
raise SchemeException("path doesn't exist : %s" % path_str)
out_port = input_output.current_out_param.get(cont)
assert isinstance(out_port, values.W_OutputPort)
out_port.write("; now in %s\n" % new_current_dir)
return return_value(values.W_Path(new_current_dir), env, cont)
current_directory_param = values_parameter.W_Parameter(values.W_Path(os.getcwd()), current_directory_guard)
expose_val("current-directory", current_directory_param)
w_unix_sym = values.W_Symbol.make("unix")
w_windows_sym = values.W_Symbol.make("windows")
w_macosx_sym = values.W_Symbol.make("macosx")
_platform = sys.platform
def detect_platform():
if _platform == "darwin":
return w_macosx_sym
elif _platform in ['win32', 'cygwin']:
return w_windows_sym
else:
return w_unix_sym
w_system_sym = detect_platform()
w_os_sym = values.W_Symbol.make("os")
w_os_so_suffix = values.W_Symbol.make("so-suffix")
w_os_so_mode_sym = values.W_Symbol.make("so-mode")
w_fs_change_mode = values.W_Symbol.make("fs-change")
w_local_mode = values.W_Symbol.make("local")
w_unix_so_suffix = values.W_Bytes.from_string(".so")
w_word_sym = values.W_Symbol.make("word")
w_link_sym = values.W_Symbol.make("link")
w_vm_sym = values.W_Symbol.make("vm")
w_gc_sym = values.W_Symbol.make("gc")
w_machine_sym = values.W_Symbol.make("machine")
w_cross_sym = values.W_Symbol.make("cross")
w_fs_supported = values.W_Symbol.make("supported")
w_fs_scalable = values.W_Symbol.make("scalable")
w_fs_low_latency = values.W_Symbol.make("low-latency")
w_fs_file_level = values.W_Symbol.make("file-level")
w_target_machine_sym = values.W_Symbol.make("target-machine")
def system_type(w_what):
# os
if w_what is w_os_sym:
return w_system_sym
# word
if w_what is w_word_sym:
#return values.W_Fixnum(8*struct.calcsize("P"))
return values.W_Fixnum(64)
# vm
if w_what is w_vm_sym:
return values.W_Symbol.make("pycket")
# gc
if w_what is w_gc_sym:
return values.W_Symbol.make("3m") # ??
# link
#
# 'static (Unix)
# 'shared (Unix)
# 'dll (Windows)
# 'framework (Mac OS)
if w_what is w_link_sym:
return values.W_Symbol.make("static")
# machine
if w_what is w_machine_sym:
return values_string.W_String.make("further details about the current machine in a platform-specific format")
# so-suffix
if w_what is w_os_so_suffix:
return w_unix_so_suffix
# so-mode
if w_what is w_os_so_mode_sym:
return w_local_mode
# fs-change
if w_what is w_fs_change_mode:
from pycket.prims.vector import vector
w_f = values.w_false
# FIXME: Is there a way to get this info from sys or os?
if w_system_sym is w_unix_sym:
return vector([w_fs_supported, w_fs_scalable, w_f, w_fs_file_level])
else:
return vector([w_f, w_f, w_f, w_f])
# cross
if w_what is w_cross_sym:
return values.W_Symbol.make("infer")
    # target-machine
if w_what is w_target_machine_sym:
return values.W_Symbol.make("pycket")
raise SchemeException("unexpected system-type symbol '%s" % w_what.utf8value)
expose("system-type", [default(values.W_Symbol, w_os_sym)])(system_type)
def system_path_convention_type():
if w_system_sym is w_windows_sym:
return w_windows_sym
else:
return w_unix_sym
expose("system-path-convention-type", [])(system_path_convention_type)
@expose("bytes->path", [values.W_Bytes, default(values.W_Symbol, system_path_convention_type())])
def bytes_to_path(bstr, typ):
# FIXME : ignores the type, won't work for windows
return values.W_Path(bstr.as_str())
major_gc_sym = values.W_Symbol.make("major")
minor_gc_sym = values.W_Symbol.make("minor")
incremental_gc_sym = values.W_Symbol.make("incremental")
@expose("collect-garbage", [default(values.W_Symbol, major_gc_sym)])
@jit.dont_look_inside
def do_collect_garbage(request):
from rpython.rlib import rgc
rgc.collect()
return values.w_void
@continuation
def vec2val_cont(vals, vec, n, s, l, env, cont, new_vals):
from pycket.interpreter import return_multi_vals, check_one_val
new = check_one_val(new_vals)
vals[n] = new
if s+n+1 == l:
return return_multi_vals(values.Values.make(vals), env, cont)
else:
return vec.vector_ref(s+n+1, env, vec2val_cont(vals, vec, n+1, s, l, env, cont))
@expose("vector->values", [values_vector.W_Vector,
default(values.W_Fixnum, values.W_Fixnum.ZERO),
default(values.W_Fixnum, None)],
simple=False)
def vector_to_values(v, start, end, env, cont):
from pycket.interpreter import return_multi_vals
l = end.value if end else v.length()
s = start.value
if s == l:
return return_multi_vals(values.Values.make([]), env, cont)
else:
vals = [None] * (l - s)
return v.vector_ref(s, env, vec2val_cont(vals, v, 0, s, l, env, cont))
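# Usage sketch (added, not part of the original source):
# (vector->values (vector 'a 'b 'c)) produces the three values 'a 'b 'c, and
# (vector->values (vector 'a 'b 'c) 1) produces 'b 'c; the element reads go through
# vector_ref with a continuation, so chaperoned or impersonated vectors are
# presumably handled the same way.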
class ReaderGraphBuilder(object):
def __init__(self):
self.state = {}
def reader_graph_loop_cons(self, v):
assert isinstance(v, values.W_Cons)
p = values.W_WrappedConsMaybe(values.w_unsafe_undefined, values.w_unsafe_undefined)
self.state[v] = p
car = self.reader_graph_loop(v.car())
cdr = self.reader_graph_loop(v.cdr())
p._car = car
p._cdr = cdr
# FIXME: should change this to say if it's a proper list now ...
return p
def reader_graph_loop_vector(self, v):
assert isinstance(v, values_vector.W_Vector)
len = v.length()
p = values_vector.W_Vector.fromelement(values.w_false, len)
self.state[v] = p
for i in range(len):
vi = v.ref(i)
p.set(i, self.reader_graph_loop(vi))
return p
def reader_graph_loop_struct(self, v):
assert isinstance(v, values_struct.W_Struct)
type = v.struct_type()
if not type.isprefab:
return v
size = v._get_size_list()
p = values_struct.W_Struct.make_n(size, type)
self.state[v] = p
for i in range(size):
val = self.reader_graph_loop(v._ref(i))
p._set_list(i, val)
return p
def reader_graph_loop_proxy(self, v):
assert v.is_proxy()
inner = self.reader_graph_loop(v.get_proxied())
p = v.replace_proxied(inner)
self.state[v] = p
return p
def reader_graph_loop_equal_hash(self, v):
from pycket.hash.equal import W_EqualHashTable
assert isinstance(v, W_EqualHashTable)
empty = v.make_empty()
self.state[v] = empty
for key, val in v.hash_items():
key = self.reader_graph_loop(key)
val = self.reader_graph_loop(val)
empty._set(key, val)
return empty
def reader_graph_loop(self, v):
assert v is not None
from pycket.hash.equal import W_EqualHashTable
if v in self.state:
return self.state[v]
if v.is_proxy():
return self.reader_graph_loop_proxy(v)
if isinstance(v, values.W_Cons):
return self.reader_graph_loop_cons(v)
if isinstance(v, values_vector.W_Vector):
return self.reader_graph_loop_vector(v)
if isinstance(v, values_struct.W_Struct):
return self.reader_graph_loop_struct(v)
if isinstance(v, W_EqualHashTable):
return self.reader_graph_loop_equal_hash(v)
if isinstance(v, values.W_Placeholder):
return self.reader_graph_loop(v.value)
# XXX FIXME: doesn't handle stuff
return v
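# Sketch of the intended Racket-level use (added for illustration; based on
# Racket's documented make-reader-graph behaviour):
#   (define ph (make-placeholder #f))
#   (define v (vector 1 ph))
#   (placeholder-set! ph v)
#   (make-reader-graph v)  ; a fresh vector whose second slot is the vector itself
# The state dict above maps already-visited nodes to their copies, which is what
# lets such cycles terminate.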
@expose("make-reader-graph", [values.W_Object])
@jit.dont_look_inside
def make_reader_graph(v):
from rpython.rlib.nonconst import NonConstant
builder = ReaderGraphBuilder()
if NonConstant(False):
        # XXX JIT seems to be generating questionable code when the argument of
        # make-reader-graph is a virtual cons cell. The car and cdr fields get
        # set by the generated code after the call, causing reader_graph_loop to
        # crash. I suspect the problem has to do with the translator's effect analysis.
# Example:
# p29 = new_with_vtable(descr=<SizeDescr 24>)
# p31 = call_r(ConstClass(make_reader_graph), p29, descr=<Callr 8 r EF=5>)
# setfield_gc(p29, p15, descr=<FieldP pycket.values.W_WrappedCons.inst__car 8 pure>)
# setfield_gc(p29, ConstPtr(ptr32), descr=<FieldP pycket.values.W_WrappedCons.inst__cdr 16 pure>)
if isinstance(v, values.W_WrappedCons):
print v._car.tostring()
print v._cdr.tostring()
return builder.reader_graph_loop(v)
@expose("procedure-specialize", [procedure])
def procedure_specialize(proc):
from pycket.ast_visitor import copy_ast
# XXX This is the identity function simply for compatibility.
# Another option is to wrap closures in a W_PromotableClosure, which might
# get us a similar effect from the RPython JIT.
if not isinstance(proc, values.W_Closure1AsEnv):
return proc
code = copy_ast(proc.caselam)
vals = proc._get_full_list()
new_closure = values.W_Closure1AsEnv.make(vals, code, proc._prev)
return proc
@expose("processor-count", [])
def processor_count():
return values.W_Fixnum.ONE
cached_values = {}
@continuation
def thunk_cont(index, env, cont, _vals):
from pycket.interpreter import check_one_val, return_value
val = check_one_val(_vals)
cached_values[index] = val
return return_value(val, env, cont)
@expose("cache-configuration", [values.W_Fixnum, values.W_Object], simple=False)
def cache_configuration(index, proc, env, cont):
from pycket.interpreter import return_value
if index in cached_values:
return return_value(cached_values[index], env, cont)
return proc.call([], env, thunk_cont(index, env, cont))
@expose("make-readtable", [values.W_Object, values.W_Character, values.W_Symbol, procedure], only_old=True)
def make_readtable(parent, char, sym, proc):
print "making readtable", [parent, char, sym, proc]
return values.W_ReadTable(parent, char, sym, proc)
@expose("read/recursive", only_old=True)
def read_recursive(args):
return values.w_false
def make_stub_predicate(name):
    # build the message here so each generated predicate closes over its own copy;
    # creating the closure directly inside the caller's loop would make every stub
    # print the message for the last name only
    message = "%s: not yet implemented" % name
    @expose(name, [values.W_Object])
    def predicate(obj):
        if not objectmodel.we_are_translated():
            print message
        return values.w_false
    predicate.__name__ = "stub_predicate(%s)" % name
def make_stub_predicates(names):
    for name in names:
        make_stub_predicate(name)
def make_stub_predicates_no_linklet():
STUB_PREDICATES_NO_LINKLET = ["namespace-anchor?",
"rename-transformer?",
"readtable?",
"liberal-define-context?",
"compiled-expression?",
"special-comment?",
"internal-definition-context?",
"namespace?",
"compiled-module-expression?"]
make_stub_predicates(STUB_PREDICATES_NO_LINKLET)
if not w_global_config.is_expander_loaded():
make_stub_predicates_no_linklet()
@expose("unsafe-start-atomic", [])
def unsafe_start_atomic():
return values.w_void
@expose("unsafe-start-breakable-atomic", [])
def unsafe_start_breakable_atomic():
return values.w_void
@expose("unsafe-end-breakable-atomic", [])
def unsafe_end_breakable_atomic():
return values.w_void
@expose("unsafe-end-atomic", [])
def unsafe_end_atomic():
return values.w_void
@expose("__dummy-function__", [])
def __dummy__():
from rpython.rlib.rbigint import ONERBIGINT
from rpython.rlib.runicode import str_decode_utf_8
ex = ONERBIGINT.touint()
print ex
@expose("primitive-table", [values.W_Object])
def primitive_table(v):
if v not in select_prim_table:
return values.w_false
if v in prim_table_cache:
return prim_table_cache[v]
expose_env = {}
for prim_name_sym in select_prim_table[v]:
if prim_name_sym in prim_env:
expose_env[prim_name_sym] = prim_env[prim_name_sym]
table = make_simple_immutable_table(W_EqImmutableHashTable,
expose_env.keys(),
expose_env.values())
prim_table_cache[v] = table
return table
@expose("unquoted-printing-string", [values_string.W_String])
def up_string(s):
return values.W_UnquotedPrintingString(s)
@expose("unquoted-printing-string-value", [values.W_UnquotedPrintingString])
def ups_val(v):
return v.string
# Any primitive on Pycket can use "w_global_config.is_debug_active()"
# to control debug outputs (or breakpoints in the interpreter) (with
# an even greater output control with the console_log with verbosity
# levels)
@expose("pycket:activate-debug", [])
def activate_debug():
w_global_config.activate_debug()
@expose("pycket:deactivate-debug", [])
def deactivate_debug():
w_global_config.deactivate_debug()
@expose("pycket:is-debug-active", [])
def debug_status():
return values.W_Bool.make(w_global_config.is_debug_active())
# Maybe we should do it with just one Racket level parameter
@expose("pycket:get-verbosity", [])
def get_verbosity():
lvl = w_global_config.get_config_val('verbose')
return values.W_Fixnum(lvl)
@expose("pycket:set-verbosity", [values.W_Fixnum])
def set_verbosity(v):
w_global_config.set_config_val('verbose', v.value)
@expose("pycket:activate-keyword", [values.W_Symbol])
def activate_debug_keyword(v):
w_global_config.activate_keyword(v.variable_name())
@expose("pycket:deactivate-keyword", [values.W_Symbol])
def deactivate_debug_keyword(v):
w_global_config.deactivate_keyword(v.variable_name())
@expose("pycket:report-undefined-prims", [])
def report_undefined_prims():
from pycket.prims.primitive_tables import report_undefined_prims
report_undefined_prims()
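# Usage sketch from the Racket side (added for illustration; 'linklet is a
# hypothetical keyword name):
#   (pycket:activate-debug)            ; turn debug output on
#   (pycket:set-verbosity 2)           ; raise console_log verbosity
#   (pycket:activate-keyword 'linklet) ; enable one debug keyword
#   (pycket:report-undefined-prims)    ; list primitives that are still stubs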
addr_sym = values.W_Symbol.make("mem-address")
@expose("pycket:print", [values.W_Object, default(values.W_Symbol, addr_sym)])
def pycket_print(o, sym):
from pycket.util import console_log
if sym is addr_sym:
console_log("PYCKET:PRINT : %s" % o, debug=True)
else:
console_log("PYCKET:PRINT : %s" % o.tostring(), debug=True)
@expose("pycket:eq?", [values.W_Object, values.W_Object])
def pycket_eq(o1, o2):
return values.W_Bool.make(o1 is o2)
expose_val("error-print-width", values_parameter.W_Parameter(values.W_Fixnum.make(256)))
@expose("banner", [])
def banner():
from pycket.env import w_version
version = w_version.get_version()
return values_string.W_String.make("Welcome to Pycket %s.\n"%version)
executable_yield_handler = values_parameter.W_Parameter(do_void.w_prim)
expose_val("executable-yield-handler", executable_yield_handler)
current_load_extension = values_parameter.W_Parameter(do_void.w_prim)
expose_val("current-load-extension", current_load_extension)
@expose("system-language+country", [])
def lang_country():
return values_string.W_String.make("en_US.UTF-8")
@expose("unsafe-add-post-custodian-shutdown", [values.W_Object])
def add_post(p):
return values.w_void
@expose("make-will-executor", [])
def make_will_exec():
return values.W_WillExecutor()
@expose("will-register", [values.W_WillExecutor, values.W_Object, values.W_Object])
def will_register(w, v, p):
return values.w_void
@expose("will-execute", [values.W_WillExecutor])
def will_exec(w):
return values.w_void
@expose("will-try-execute", [values.W_WillExecutor, default(values.W_Object, values.w_false)])
def will_try_exec(w, v):
return v
@expose("thread", [values.W_Object])
def thread(p):
return values.W_Thread()
@expose("thread/suspend-to-kill", [values.W_Object])
def thread_susp(p):
return values.W_Thread()
@expose("make-channel", [])
def make_channel():
return values.W_Channel()
@expose("primitive-lookup", [values.W_Symbol], simple=True)
def primitive_lookup(sym):
return prim_env.get(sym, values.w_false)
|
pycket/pycket
|
pycket/prims/general.py
|
Python
|
mit
| 75,231 | 0.004878 |
import sys
from resources.datatables import Options
from resources.datatables import StateStatus
def addPlanetSpawns(core, planet):
stcSvc = core.staticService
objSvc = core.objectService
#junkdealer
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5694), float(6.5), float(4182), float(0.707), float(-0.707))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5717), float(6.5), float(4159), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5086), float(6), float(4142), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5147), float(6.5), float(4158), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5114), float(6.5), float(4161), float(0.71), float(-0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5222), float(6), float(4217), float(0.71), float(-0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5127), float(6), float(4239), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5761), float(6.6), float(4234), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5475), float(6), float(4105), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-4999), float(6), float(4119), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5883), float(6), float(4214), float(0.71), float(0.71))
return
|
ProjectSWGCore/NGECore2
|
scripts/static_spawns/naboo/theed.py
|
Python
|
lgpl-3.0
| 1,499 | 0.02068 |
#!/usr/bin/env python
#
# Copyright 2016 timercrack
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def str_to_number(s):
try:
if not isinstance(s, str):
return s
return int(s)
except ValueError:
return float(s)
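# Example behaviour (added for illustration): str_to_number("42") -> 42,
# str_to_number("3.14") -> 3.14, and non-string inputs such as 7 are returned
# unchanged; a string that is neither an int nor a float still raises ValueError
# from the float() call.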
|
timercrack/pydatacoll
|
pydatacoll/utils/__init__.py
|
Python
|
apache-2.0
| 755 | 0 |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from collections import Counter
import logging
import itertools
import json
import time
from botocore.exceptions import ClientError
from concurrent.futures import as_completed
from dateutil.parser import parse as parse_date
from c7n.actions import BaseAction
from c7n.exceptions import PolicyValidationError
from c7n.filters import (
CrossAccountAccessFilter, Filter, AgeFilter, ValueFilter,
ANNOTATION_KEY)
from c7n.filters.health import HealthEventFilter
from c7n.filters.related import RelatedResourceFilter
from c7n.manager import resources
from c7n.resources.kms import ResourceKmsKeyAlias
from c7n.resources.securityhub import PostFinding
from c7n.query import QueryResourceManager, TypeInfo
from c7n.tags import Tag, coalesce_copy_user_tags
from c7n.utils import (
camelResource,
chunks,
get_retry,
local_session,
select_keys,
set_annotation,
type_schema,
QueryParser,
)
from c7n.resources.ami import AMI
log = logging.getLogger('custodian.ebs')
@resources.register('ebs-snapshot')
class Snapshot(QueryResourceManager):
class resource_type(TypeInfo):
service = 'ec2'
arn_type = 'snapshot'
enum_spec = (
'describe_snapshots', 'Snapshots', None)
id = 'SnapshotId'
id_prefix = 'snap-'
filter_name = 'SnapshotIds'
filter_type = 'list'
name = 'SnapshotId'
date = 'StartTime'
default_report_fields = (
'SnapshotId',
'VolumeId',
'tag:InstanceId',
'VolumeSize',
'StartTime',
'State',
)
def resources(self, query=None):
qfilters = SnapshotQueryParser.parse(self.data.get('query', []))
query = query or {}
if qfilters:
query['Filters'] = qfilters
if query.get('OwnerIds') is None:
query['OwnerIds'] = ['self']
if 'MaxResults' not in query:
query['MaxResults'] = 1000
return super(Snapshot, self).resources(query=query)
def get_resources(self, ids, cache=True, augment=True):
if cache:
resources = self._get_cached_resources(ids)
if resources is not None:
return resources
while ids:
try:
return self.source.get_resources(ids)
except ClientError as e:
bad_snap = ErrorHandler.extract_bad_snapshot(e)
if bad_snap:
ids.remove(bad_snap)
continue
raise
return []
class ErrorHandler:
@staticmethod
def remove_snapshot(rid, resource_set):
found = None
for r in resource_set:
if r['SnapshotId'] == rid:
found = r
break
if found:
resource_set.remove(found)
@staticmethod
def extract_bad_snapshot(e):
"""Handle various client side errors when describing snapshots"""
msg = e.response['Error']['Message']
error = e.response['Error']['Code']
e_snap_id = None
if error == 'InvalidSnapshot.NotFound':
e_snap_id = msg[msg.find("'") + 1:msg.rfind("'")]
log.warning("Snapshot not found %s" % e_snap_id)
elif error == 'InvalidSnapshotID.Malformed':
e_snap_id = msg[msg.find('"') + 1:msg.rfind('"')]
log.warning("Snapshot id malformed %s" % e_snap_id)
return e_snap_id
@staticmethod
def extract_bad_volume(e):
"""Handle various client side errors when describing volumes"""
msg = e.response['Error']['Message']
error = e.response['Error']['Code']
e_vol_id = None
if error == 'InvalidVolume.NotFound':
e_vol_id = msg[msg.find("'") + 1:msg.rfind("'")]
log.warning("Volume not found %s" % e_vol_id)
elif error == 'InvalidVolumeID.Malformed':
e_vol_id = msg[msg.find('"') + 1:msg.rfind('"')]
log.warning("Volume id malformed %s" % e_vol_id)
return e_vol_id
class SnapshotQueryParser(QueryParser):
QuerySchema = {
'description': str,
'owner-alias': ('amazon', 'amazon-marketplace', 'microsoft'),
'owner-id': str,
'progress': str,
'snapshot-id': str,
'start-time': str,
'status': ('pending', 'completed', 'error'),
'tag': str,
'tag-key': str,
'volume-id': str,
'volume-size': str,
}
type_name = 'EBS'
@Snapshot.action_registry.register('tag')
class SnapshotTag(Tag):
permissions = ('ec2:CreateTags',)
def process_resource_set(self, client, resource_set, tags):
while resource_set:
try:
return super(SnapshotTag, self).process_resource_set(
client, resource_set, tags)
except ClientError as e:
bad_snap = ErrorHandler.extract_bad_snapshot(e)
if bad_snap:
ErrorHandler.remove_snapshot(bad_snap, resource_set)
continue
raise
@Snapshot.filter_registry.register('age')
class SnapshotAge(AgeFilter):
"""EBS Snapshot Age Filter
Filters an EBS snapshot based on the age of the snapshot (in days)
:example:
.. code-block:: yaml
policies:
- name: ebs-snapshots-week-old
resource: ebs-snapshot
filters:
- type: age
days: 7
op: ge
"""
schema = type_schema(
'age',
days={'type': 'number'},
op={'$ref': '#/definitions/filters_common/comparison_operators'})
date_attribute = 'StartTime'
# Module-level helper shared by SnapshotSkipAmiSnapshots and SnapshotDelete
# ('self' is the calling filter/action instance).
def _filter_ami_snapshots(self, snapshots):
    if not self.data.get('value', True):
        return snapshots
    # Try using the cache first to get a listing of all AMI snapshots and
    # compare resources to that list. This will populate the cache.
    amis = self.manager.get_resource_manager('ami').resources()
    ami_snaps = []
    for i in amis:
        for dev in i.get('BlockDeviceMappings'):
            if 'Ebs' in dev and 'SnapshotId' in dev['Ebs']:
                ami_snaps.append(dev['Ebs']['SnapshotId'])
    matches = []
    for snap in snapshots:
        if snap['SnapshotId'] not in ami_snaps:
            matches.append(snap)
    return matches
@Snapshot.filter_registry.register('cross-account')
class SnapshotCrossAccountAccess(CrossAccountAccessFilter):
permissions = ('ec2:DescribeSnapshotAttribute',)
def process(self, resources, event=None):
self.accounts = self.get_accounts()
results = []
client = local_session(self.manager.session_factory).client('ec2')
with self.executor_factory(max_workers=3) as w:
futures = []
for resource_set in chunks(resources, 50):
futures.append(w.submit(
self.process_resource_set, client, resource_set))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception checking cross account access \n %s" % (
f.exception()))
continue
results.extend(f.result())
return results
def process_resource_set(self, client, resource_set):
results = []
for r in resource_set:
attrs = self.manager.retry(
client.describe_snapshot_attribute,
SnapshotId=r['SnapshotId'],
Attribute='createVolumePermission')['CreateVolumePermissions']
shared_accounts = {
g.get('Group') or g.get('UserId') for g in attrs}
delta_accounts = shared_accounts.difference(self.accounts)
if delta_accounts:
r['c7n:CrossAccountViolations'] = list(delta_accounts)
results.append(r)
return results
@Snapshot.filter_registry.register('unused')
class SnapshotUnusedFilter(Filter):
"""Filters snapshots based on usage
true: snapshot is not used by launch-template, launch-config, or ami.
false: snapshot is being used by launch-template, launch-config, or ami.
:example:
.. code-block:: yaml
policies:
- name: snapshot-unused
resource: ebs-snapshot
filters:
- type: unused
value: true
"""
schema = type_schema('unused', value={'type': 'boolean'})
def get_permissions(self):
return list(itertools.chain(*[
self.manager.get_resource_manager(m).get_permissions()
for m in ('asg', 'launch-config', 'ami')]))
def _pull_asg_snapshots(self):
asgs = self.manager.get_resource_manager('asg').resources()
snap_ids = set()
lcfgs = set(a['LaunchConfigurationName'] for a in asgs if 'LaunchConfigurationName' in a)
lcfg_mgr = self.manager.get_resource_manager('launch-config')
if lcfgs:
for lc in lcfg_mgr.resources():
for b in lc.get('BlockDeviceMappings'):
if 'Ebs' in b and 'SnapshotId' in b['Ebs']:
snap_ids.add(b['Ebs']['SnapshotId'])
tmpl_mgr = self.manager.get_resource_manager('launch-template-version')
for tversion in tmpl_mgr.get_resources(
list(tmpl_mgr.get_asg_templates(asgs).keys())):
for bd in tversion['LaunchTemplateData'].get('BlockDeviceMappings', ()):
if 'Ebs' in bd and 'SnapshotId' in bd['Ebs']:
snap_ids.add(bd['Ebs']['SnapshotId'])
return snap_ids
def _pull_ami_snapshots(self):
amis = self.manager.get_resource_manager('ami').resources()
ami_snaps = set()
for i in amis:
for dev in i.get('BlockDeviceMappings'):
if 'Ebs' in dev and 'SnapshotId' in dev['Ebs']:
ami_snaps.add(dev['Ebs']['SnapshotId'])
return ami_snaps
def process(self, resources, event=None):
snaps = self._pull_asg_snapshots().union(self._pull_ami_snapshots())
if self.data.get('value', True):
return [r for r in resources if r['SnapshotId'] not in snaps]
return [r for r in resources if r['SnapshotId'] in snaps]
@Snapshot.filter_registry.register('skip-ami-snapshots')
class SnapshotSkipAmiSnapshots(Filter):
"""
Filter to remove snapshots of AMIs from results
This filter is 'true' by default.
:example:
implicit with no parameters, 'true' by default
.. code-block:: yaml
policies:
- name: delete-ebs-stale-snapshots
resource: ebs-snapshot
filters:
- type: age
days: 28
op: ge
- skip-ami-snapshots
:example:
explicit with parameter
.. code-block:: yaml
policies:
- name: delete-snapshots
resource: ebs-snapshot
filters:
- type: age
days: 28
op: ge
- type: skip-ami-snapshots
value: false
"""
schema = type_schema('skip-ami-snapshots', value={'type': 'boolean'})
def get_permissions(self):
return AMI(self.manager.ctx, {}).get_permissions()
def process(self, snapshots, event=None):
resources = _filter_ami_snapshots(self, snapshots)
return resources
@Snapshot.filter_registry.register('volume')
class SnapshotVolumeFilter(RelatedResourceFilter):
"""
Filter EBS snapshots by their volume attributes.
.. code-block:: yaml
policies:
- name: snapshot-with-no-volume
description: Find any snapshots that do not have a corresponding volume.
resource: aws.ebs-snapshot
filters:
- type: volume
key: VolumeId
value: absent
- name: find-snapshots-from-volume
resource: aws.ebs-snapshot
filters:
- type: volume
key: VolumeId
value: vol-foobarbaz
"""
RelatedResource = 'c7n.resources.ebs.EBS'
RelatedIdsExpression = 'VolumeId'
AnnotationKey = 'Volume'
schema = type_schema(
'volume', rinherit=ValueFilter.schema)
@Snapshot.action_registry.register('delete')
class SnapshotDelete(BaseAction):
"""Deletes EBS snapshots
:example:
.. code-block:: yaml
policies:
- name: delete-stale-snapshots
resource: ebs-snapshot
filters:
- type: age
days: 28
op: ge
actions:
- delete
"""
schema = type_schema(
'delete', **{'skip-ami-snapshots': {'type': 'boolean'}})
permissions = ('ec2:DeleteSnapshot',)
def process(self, snapshots):
self.image_snapshots = set()
        # Be careful with image snapshots: we filter them out by default
        # to keep things safe, albeit we'd get an error anyway if we
        # did try to delete a snapshot associated with an image.
pre = len(snapshots)
snapshots = list(filter(None, _filter_ami_snapshots(self, snapshots)))
post = len(snapshots)
log.info("Deleting %d snapshots, auto-filtered %d ami-snapshots",
post, pre - post)
client = local_session(self.manager.session_factory).client('ec2')
with self.executor_factory(max_workers=2) as w:
futures = []
for snapshot_set in chunks(reversed(snapshots), size=50):
futures.append(
w.submit(self.process_snapshot_set, client, snapshot_set))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception deleting snapshot set \n %s" % (
f.exception()))
return snapshots
def process_snapshot_set(self, client, snapshots_set):
retry = get_retry((
'RequestLimitExceeded', 'Client.RequestLimitExceeded'))
for s in snapshots_set:
if s['SnapshotId'] in self.image_snapshots:
continue
try:
retry(client.delete_snapshot,
SnapshotId=s['SnapshotId'],
DryRun=self.manager.config.dryrun)
except ClientError as e:
if e.response['Error']['Code'] == "InvalidSnapshot.NotFound":
continue
raise
@Snapshot.action_registry.register('copy')
class CopySnapshot(BaseAction):
"""Copy a snapshot across regions
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html
:example:
.. code-block:: yaml
policies:
- name: copy-snapshot-east-west
resource: ebs-snapshot
filters:
- type: age
days: 7
op: le
actions:
- type: copy
target_region: us-west-2
target_key: target_kms_key
encrypted: true
"""
schema = type_schema(
'copy',
target_region={'type': 'string'},
target_key={'type': 'string'},
encrypted={'type': 'boolean'},
)
permissions = (
'ec2:CreateTags', 'ec2:CopySnapshot', 'ec2:DescribeSnapshots')
def validate(self):
if self.data.get('encrypted', True):
key = self.data.get('target_key')
if not key:
raise PolicyValidationError(
"Encrypted snapshot copy requires kms key on %s" % (
self.manager.data,))
return self
def process(self, resources):
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_resource_set, chunks(resources, 20)))
def process_resource_set(self, resource_set):
client = self.manager.session_factory(
region=self.data['target_region']).client('ec2')
cross_region = self.data['target_region'] != self.manager.config.region
params = {}
params['Encrypted'] = self.data.get('encrypted', True)
if params['Encrypted']:
params['KmsKeyId'] = self.data['target_key']
for snapshot_set in chunks(resource_set, 5):
for r in snapshot_set:
snapshot_id = client.copy_snapshot(
SourceRegion=self.manager.config.region,
SourceSnapshotId=r['SnapshotId'],
Description=r.get('Description', ''),
**params)['SnapshotId']
if r.get('Tags'):
client.create_tags(
Resources=[snapshot_id], Tags=r['Tags'])
r['c7n:CopiedSnapshot'] = snapshot_id
if not cross_region or len(snapshot_set) < 5:
continue
copy_ids = [r['c7n:CopiedSnapshot'] for r in snapshot_set]
self.log.debug(
"Waiting on cross-region snapshot copy %s", ",".join(copy_ids))
waiter = client.get_waiter('snapshot_completed')
waiter.config.delay = 60
waiter.config.max_attempts = 60
waiter.wait(SnapshotIds=copy_ids)
self.log.debug(
"Cross region copy complete %s", ",".join(copy_ids))
@Snapshot.action_registry.register('set-permissions')
class SetPermissions(BaseAction):
"""Action to set permissions for creating volumes from a snapshot
Use the 'add' and 'remove' parameters to control which accounts to
add or remove respectively. The default is to remove any create
volume permissions granted to other AWS accounts.
Combining this action with the 'cross-account' filter allows you
greater control over which accounts will be removed, e.g. using a
whitelist:
:example:
.. code-block:: yaml
policies:
- name: ebs-dont-share-cross-account
resource: ebs-snapshot
filters:
- type: cross-account
whitelist:
- '112233445566'
actions:
- type: set-permissions
remove: matched
"""
schema = type_schema(
'set-permissions',
remove={
'oneOf': [
{'enum': ['matched']},
{'type': 'array', 'items': {
'type': 'string', 'minLength': 12, 'maxLength': 12}},
]},
add={
'type': 'array', 'items': {
'type': 'string', 'minLength': 12, 'maxLength': 12}},
)
permissions = ('ec2:ModifySnapshotAttribute',)
def validate(self):
if self.data.get('remove') == 'matched':
found = False
for f in self.manager.iter_filters():
if isinstance(f, SnapshotCrossAccountAccess):
found = True
break
if not found:
raise PolicyValidationError(
"policy:%s filter:%s with matched requires cross-account filter" % (
self.manager.ctx.policy.name, self.type))
def process(self, snapshots):
client = local_session(self.manager.session_factory).client('ec2')
for i in snapshots:
self.process_image(client, i)
def process_image(self, client, snapshot):
add_accounts = self.data.get('add', [])
remove_accounts = self.data.get('remove', [])
if not add_accounts and not remove_accounts:
return client.reset_snapshot_attribute(
SnapshotId=snapshot['SnapshotId'], Attribute="createVolumePermission")
if remove_accounts == 'matched':
remove_accounts = snapshot.get(
'c7n:' + SnapshotCrossAccountAccess.annotation_key)
remove = []
remove.extend([{'UserId': a} for a in remove_accounts if a != 'all'])
if 'all' in remove_accounts:
remove.append({'Group': 'all'})
remove_accounts.remove('all')
add = [{'UserId': a} for a in add_accounts]
if remove:
client.modify_snapshot_attribute(
SnapshotId=snapshot['SnapshotId'],
CreateVolumePermission={'Remove': remove},
OperationType='remove')
if add:
client.modify_snapshot_attribute(
SnapshotId=snapshot['SnapshotId'],
CreateVolumePermission={'Add': add},
OperationType='add')
@resources.register('ebs')
class EBS(QueryResourceManager):
class resource_type(TypeInfo):
service = 'ec2'
arn_type = 'volume'
enum_spec = ('describe_volumes', 'Volumes', None)
name = id = 'VolumeId'
id_prefix = 'vol-'
filter_name = 'VolumeIds'
filter_type = 'list'
date = 'createTime'
dimension = 'VolumeId'
metrics_namespace = 'AWS/EBS'
cfn_type = config_type = "AWS::EC2::Volume"
default_report_fields = (
'VolumeId',
'Attachments[0].InstanceId',
'Size',
'VolumeType',
'KmsKeyId'
)
def get_resources(self, ids, cache=True, augment=True):
if cache:
resources = self._get_cached_resources(ids)
if resources is not None:
return resources
while ids:
try:
return self.source.get_resources(ids)
except ClientError as e:
bad_vol = ErrorHandler.extract_bad_volume(e)
if bad_vol:
ids.remove(bad_vol)
continue
raise
return []
@EBS.action_registry.register('post-finding')
class EBSPostFinding(PostFinding):
resource_type = 'AwsEc2Volume'
def format_resource(self, r):
envelope, payload = self.format_envelope(r)
details = select_keys(
r, ['KmsKeyId', 'Size', 'SnapshotId', 'Status', 'CreateTime', 'Encrypted'])
details['CreateTime'] = details['CreateTime'].isoformat()
self.filter_empty(details)
for attach in r.get('Attachments', ()):
details.setdefault('Attachments', []).append(
self.filter_empty({
'AttachTime': attach['AttachTime'].isoformat(),
'InstanceId': attach.get('InstanceId'),
'DeleteOnTermination': attach['DeleteOnTermination'],
'Status': attach['State']}))
payload.update(details)
return envelope
@EBS.action_registry.register('detach')
class VolumeDetach(BaseAction):
"""
Detach an EBS volume from an Instance.
If 'Force' Param is True, then we'll do a forceful detach
of the Volume. The default value for 'Force' is False.
:example:
.. code-block:: yaml
policies:
- name: detach-ebs-volumes
resource: ebs
filters:
- VolumeId : volumeid
actions:
- detach
"""
schema = type_schema('detach', force={'type': 'boolean'})
permissions = ('ec2:DetachVolume',)
def process(self, volumes, event=None):
client = local_session(self.manager.session_factory).client('ec2')
for vol in volumes:
for attachment in vol.get('Attachments', []):
client.detach_volume(InstanceId=attachment['InstanceId'],
VolumeId=attachment['VolumeId'],
Force=self.data.get('force', False))
@EBS.filter_registry.register('instance')
class AttachedInstanceFilter(ValueFilter):
"""Filter volumes based on filtering on their attached instance
:example:
.. code-block:: yaml
policies:
- name: instance-ebs-volumes
resource: ebs
filters:
- type: instance
key: tag:Name
value: OldManBySea
"""
schema = type_schema('instance', rinherit=ValueFilter.schema)
schema_alias = False
def get_permissions(self):
return self.manager.get_resource_manager('ec2').get_permissions()
def process(self, resources, event=None):
original_count = len(resources)
resources = [r for r in resources if r.get('Attachments')]
self.log.debug('Filtered from %d volumes to %d attached volumes' % (
original_count, len(resources)))
self.instance_map = self.get_instance_mapping(resources)
return list(filter(self, resources))
def __call__(self, r):
instance = self.instance_map[r['Attachments'][0]['InstanceId']]
if self.match(instance):
r['Instance'] = instance
set_annotation(r, ANNOTATION_KEY, "instance-%s" % self.k)
return True
def get_instance_mapping(self, resources):
instance_ids = [r['Attachments'][0]['InstanceId'] for r in resources]
instances = self.manager.get_resource_manager(
'ec2').get_resources(instance_ids)
self.log.debug("Queried %d instances for %d volumes" % (
len(instances), len(resources)))
return {i['InstanceId']: i for i in instances}
@EBS.filter_registry.register('kms-alias')
class KmsKeyAlias(ResourceKmsKeyAlias):
def process(self, resources, event=None):
return self.get_matching_aliases(resources)
@EBS.filter_registry.register('fault-tolerant')
class FaultTolerantSnapshots(Filter):
"""
This filter will return any EBS volume that does/does not have a
snapshot within the last 7 days. 'Fault-Tolerance' in this instance
means that, in the event of a failure, the volume can be restored
from a snapshot with (reasonable) data loss
.. code-block:: yaml
policies:
- name: ebs-volume-tolerance
resource: ebs
filters:
- type: fault-tolerant
tolerant: True
"""
schema = type_schema('fault-tolerant', tolerant={'type': 'boolean'})
check_id = 'H7IgTzjTYb'
permissions = ('support:RefreshTrustedAdvisorCheck',
'support:DescribeTrustedAdvisorCheckResult')
def pull_check_results(self):
result = set()
client = local_session(self.manager.session_factory).client('support')
client.refresh_trusted_advisor_check(checkId=self.check_id)
results = client.describe_trusted_advisor_check_result(
checkId=self.check_id, language='en')['result']
for r in results['flaggedResources']:
result.update([r['metadata'][1]])
return result
def process(self, resources, event=None):
flagged = self.pull_check_results()
if self.data.get('tolerant', True):
return [r for r in resources if r['VolumeId'] not in flagged]
return [r for r in resources if r['VolumeId'] in flagged]
@EBS.filter_registry.register('health-event')
class HealthFilter(HealthEventFilter):
schema_alias = False
schema = type_schema(
'health-event',
types={'type': 'array', 'items': {
'type': 'string',
'enum': ['AWS_EBS_DEGRADED_EBS_VOLUME_PERFORMANCE',
'AWS_EBS_VOLUME_LOST']}},
statuses={'type': 'array', 'items': {
'type': 'string',
'enum': ['open', 'upcoming', 'closed']
}})
permissions = HealthEventFilter.permissions + (
'config:GetResourceConfigHistory',)
def process(self, resources, event=None):
if 'AWS_EBS_VOLUME_LOST' not in self.data['types']:
return super(HealthFilter, self).process(resources, event)
if not resources:
return resources
client = local_session(self.manager.session_factory).client(
'health', region_name='us-east-1')
f = self.get_filter_parameters()
resource_map = {}
paginator = client.get_paginator('describe_events')
events = list(itertools.chain(
*[p['events']for p in paginator.paginate(filter=f)]))
entities = self.process_event(client, events)
event_map = {e['arn']: e for e in events}
config = local_session(self.manager.session_factory).client('config')
for e in entities:
rid = e['entityValue']
if not resource_map.get(rid):
resource_map[rid] = self.load_resource(config, rid)
resource_map[rid].setdefault(
'c7n:HealthEvent', []).append(event_map[e['eventArn']])
return list(resource_map.values())
def load_resource(self, config, rid):
resources_histories = config.get_resource_config_history(
resourceType='AWS::EC2::Volume',
resourceId=rid,
limit=2)['configurationItems']
for r in resources_histories:
if r['configurationItemStatus'] != u'ResourceDeleted':
return camelResource(json.loads(r['configuration']))
return {"VolumeId": rid}
@EBS.action_registry.register('copy-instance-tags')
class CopyInstanceTags(BaseAction):
"""Copy instance tags to its attached volume.
Useful for cost allocation to ebs volumes and tracking usage
info for volumes.
    Mostly useful for volumes not set to delete on termination, which
    are otherwise candidates for garbage collection. Copying the
    instance tags gives us more semantic information to determine if
    they're useful, as well as letting us know the last time the volume
    was actually used.
:example:
.. code-block:: yaml
policies:
- name: ebs-copy-instance-tags
resource: ebs
filters:
- type: value
key: "Attachments[0].Device"
value: not-null
actions:
- type: copy-instance-tags
tags:
- Name
"""
schema = type_schema(
'copy-instance-tags',
tags={'type': 'array', 'items': {'type': 'string'}})
def get_permissions(self):
perms = self.manager.get_resource_manager('ec2').get_permissions()
perms.append('ec2:CreateTags')
return perms
def process(self, volumes):
vol_count = len(volumes)
volumes = [v for v in volumes if v['Attachments']]
if len(volumes) != vol_count:
self.log.warning(
"ebs copy tags action implicitly filtered from %d to %d",
vol_count, len(volumes))
self.initialize(volumes)
client = local_session(self.manager.session_factory).client('ec2')
with self.executor_factory(max_workers=10) as w:
futures = []
for instance_set in chunks(sorted(
self.instance_map.keys(), reverse=True), size=100):
futures.append(
w.submit(self.process_instance_set, client, instance_set))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception copying instance tags \n %s" % (
f.exception()))
def initialize(self, volumes):
instance_vol_map = {}
for v in volumes:
instance_vol_map.setdefault(
v['Attachments'][0]['InstanceId'], []).append(v)
instance_map = {
i['InstanceId']: i for i in
self.manager.get_resource_manager('ec2').get_resources(
list(instance_vol_map.keys()))}
self.instance_vol_map = instance_vol_map
self.instance_map = instance_map
def process_instance_set(self, client, instance_ids):
for i in instance_ids:
try:
self.process_instance_volumes(
client,
self.instance_map[i],
self.instance_vol_map[i])
except Exception as e:
self.log.exception(
"Error copy instance:%s tags to volumes: %s \n %s",
i, ",".join([v['VolumeId'] for v in self.instance_vol_map[i]]),
e)
def process_instance_volumes(self, client, instance, volumes):
for v in volumes:
copy_tags = self.get_volume_tags(v, instance, v['Attachments'][0])
if not copy_tags:
continue
            # Can't add more tags than the resource supports; we could try
            # to delete extant ones inline, or else use a trim-tags action.
if len(copy_tags) > 40:
log.warning(
"action:%s volume:%s instance:%s too many tags to copy" % (
self.__class__.__name__.lower(),
v['VolumeId'], instance['InstanceId']))
continue
try:
self.manager.retry(
client.create_tags,
Resources=[v['VolumeId']],
Tags=copy_tags,
DryRun=self.manager.config.dryrun)
except ClientError as e:
if e.response['Error']['Code'] == "InvalidVolume.NotFound":
continue
raise
def get_volume_tags(self, volume, instance, attachment):
only_tags = self.data.get('tags', []) # specify which tags to copy
copy_tags = []
extant_tags = dict([
(t['Key'], t['Value']) for t in volume.get('Tags', [])])
for t in instance.get('Tags', ()):
if only_tags and not t['Key'] in only_tags:
continue
if t['Key'] in extant_tags and t['Value'] == extant_tags[t['Key']]:
continue
if t['Key'].startswith('aws:'):
continue
copy_tags.append(t)
# Don't add attachment tags if we're already current
if 'LastAttachInstance' in extant_tags \
and extant_tags['LastAttachInstance'] == attachment['InstanceId']:
return copy_tags
copy_tags.append(
{'Key': 'LastAttachTime',
'Value': attachment['AttachTime'].isoformat()})
copy_tags.append(
{'Key': 'LastAttachInstance', 'Value': attachment['InstanceId']})
return copy_tags
@EBS.action_registry.register('encrypt-instance-volumes')
class EncryptInstanceVolumes(BaseAction):
"""Encrypt extant volumes attached to an instance
- Requires instance restart
- Not suitable for autoscale groups.
Multistep process:
- Stop instance (if running)
- For each volume
- Create snapshot
- Wait on snapshot creation
- Copy Snapshot to create encrypted snapshot
- Wait on snapshot creation
- Create encrypted volume from snapshot
- Wait on volume creation
- Delete transient snapshots
- Detach Unencrypted Volume
- Attach Encrypted Volume
- Set DeleteOnTermination instance attribute equal to source volume
- For each volume
- Delete unencrypted volume
- Start Instance (if originally running)
- For each newly encrypted volume
- Delete transient tags
:example:
.. code-block:: yaml
policies:
- name: encrypt-unencrypted-ebs
resource: ebs
filters:
- Encrypted: false
actions:
- type: encrypt-instance-volumes
key: alias/encrypted
"""
schema = type_schema(
'encrypt-instance-volumes',
required=['key'],
key={'type': 'string'},
delay={'type': 'number'},
verbose={'type': 'boolean'})
permissions = (
'ec2:CopySnapshot',
'ec2:CreateSnapshot',
'ec2:CreateVolume',
'ec2:DescribeInstances',
'ec2:DescribeSnapshots',
'ec2:DescribeVolumes',
'ec2:StopInstances',
'ec2:StartInstances',
'ec2:ModifyInstanceAttribute',
'ec2:DeleteTags')
def validate(self):
self.verbose = self.data.get('verbose', False)
return self
def process(self, volumes):
original_count = len(volumes)
volumes = [v for v in volumes
if not v['Encrypted'] or not v['Attachments']]
log.debug(
"EncryptVolumes filtered from %d to %d "
" unencrypted attached volumes" % (
original_count, len(volumes)))
# Group volumes by instance id
instance_vol_map = {}
for v in volumes:
instance_id = v['Attachments'][0]['InstanceId']
instance_vol_map.setdefault(instance_id, []).append(v)
# Query instances to find current instance state
self.instance_map = {
i['InstanceId']: i for i in
self.manager.get_resource_manager('ec2').get_resources(
list(instance_vol_map.keys()), cache=False)}
client = local_session(self.manager.session_factory).client('ec2')
with self.executor_factory(max_workers=3) as w:
futures = {}
for instance_id, vol_set in instance_vol_map.items():
futures[w.submit(
self.process_volume, client,
instance_id, vol_set)] = instance_id
for f in as_completed(futures):
if f.exception():
instance_id = futures[f]
log.error(
"Exception processing instance:%s volset: %s \n %s" % (
instance_id, instance_vol_map[instance_id],
f.exception()))
def process_volume(self, client, instance_id, vol_set):
"""Encrypt attached unencrypted ebs volumes
vol_set corresponds to all the unencrypted volumes on a given instance.
"""
key_id = self.get_encryption_key()
if self.verbose:
self.log.debug("Using encryption key: %s" % key_id)
# Only stop and start the instance if it was running.
instance_running = self.stop_instance(client, instance_id)
if instance_running is None:
return
# Create all the volumes before patching the instance.
paired = []
for v in vol_set:
vol_id = self.create_encrypted_volume(client, v, key_id, instance_id)
paired.append((v, vol_id))
# Next detach and reattach
for v, vol_id in paired:
client.detach_volume(
InstanceId=instance_id, VolumeId=v['VolumeId'])
# 5/8/2016 The detach isn't immediately consistent
time.sleep(self.data.get('delay', 15))
client.attach_volume(
InstanceId=instance_id, VolumeId=vol_id,
Device=v['Attachments'][0]['Device'])
# Set DeleteOnTermination attribute the same as source volume
if v['Attachments'][0]['DeleteOnTermination']:
client.modify_instance_attribute(
InstanceId=instance_id,
BlockDeviceMappings=[
{
'DeviceName': v['Attachments'][0]['Device'],
'Ebs': {
'VolumeId': vol_id,
'DeleteOnTermination': True
}
}
]
)
if instance_running:
client.start_instances(InstanceIds=[instance_id])
if self.verbose:
self.log.debug(
"Deleting unencrypted volumes for: %s" % instance_id)
for v in vol_set:
client.delete_volume(VolumeId=v['VolumeId'])
# Clean-up transient tags on newly created encrypted volume.
for v, vol_id in paired:
client.delete_tags(
Resources=[vol_id],
Tags=[
{'Key': 'maid-crypt-remediation'},
{'Key': 'maid-origin-volume'},
{'Key': 'maid-instance-device'}
]
)
def stop_instance(self, client, instance_id):
instance_state = self.instance_map[instance_id]['State']['Name']
if instance_state in ('shutting-down', 'terminated'):
self.log.debug('Skipping terminating instance: %s' % instance_id)
return
elif instance_state in ('running',):
client.stop_instances(InstanceIds=[instance_id])
self.wait_on_resource(client, instance_id=instance_id)
return True
return False
def create_encrypted_volume(self, ec2, v, key_id, instance_id):
# Create a current snapshot
results = ec2.create_snapshot(
VolumeId=v['VolumeId'],
Description="maid transient snapshot for encryption",)
transient_snapshots = [results['SnapshotId']]
ec2.create_tags(
Resources=[results['SnapshotId']],
Tags=[
{'Key': 'maid-crypto-remediation', 'Value': 'true'}])
self.wait_on_resource(ec2, snapshot_id=results['SnapshotId'])
# Create encrypted snapshot from current
results = ec2.copy_snapshot(
SourceSnapshotId=results['SnapshotId'],
SourceRegion=v['AvailabilityZone'][:-1],
Description='maid transient snapshot for encryption',
Encrypted=True,
KmsKeyId=key_id)
transient_snapshots.append(results['SnapshotId'])
ec2.create_tags(
Resources=[results['SnapshotId']],
Tags=[
{'Key': 'maid-crypto-remediation', 'Value': 'true'}
])
self.wait_on_resource(ec2, snapshot_id=results['SnapshotId'])
# Create encrypted volume, also tag so we can recover
results = ec2.create_volume(
Size=v['Size'],
VolumeType=v['VolumeType'],
SnapshotId=results['SnapshotId'],
AvailabilityZone=v['AvailabilityZone'],
Encrypted=True)
ec2.create_tags(
Resources=[results['VolumeId']],
Tags=[
{'Key': 'maid-crypt-remediation', 'Value': instance_id},
{'Key': 'maid-origin-volume', 'Value': v['VolumeId']},
{'Key': 'maid-instance-device',
'Value': v['Attachments'][0]['Device']}])
# Wait on encrypted volume creation
self.wait_on_resource(ec2, volume_id=results['VolumeId'])
# Delete transient snapshots
for sid in transient_snapshots:
ec2.delete_snapshot(SnapshotId=sid)
return results['VolumeId']
def get_encryption_key(self):
kms = local_session(self.manager.session_factory).client('kms')
key_alias = self.data.get('key')
result = kms.describe_key(KeyId=key_alias)
key_id = result['KeyMetadata']['KeyId']
return key_id
def wait_on_resource(self, *args, **kw):
# Sigh this is dirty, but failure in the middle of our workflow
# due to overly long resource creation is complex to unwind,
# with multi-volume instances. Wait up to three times (actual
        # wait time is a per-resource-type configuration).
# Note we wait for all resource creation before attempting to
# patch an instance, so even on resource creation failure, the
# instance is not modified
try:
return self._wait_on_resource(*args, **kw)
except Exception:
try:
return self._wait_on_resource(*args, **kw)
except Exception:
return self._wait_on_resource(*args, **kw)
def _wait_on_resource(
self, client, snapshot_id=None, volume_id=None, instance_id=None):
# boto client waiters poll every 15 seconds up to a max 600s (5m)
if snapshot_id:
if self.verbose:
self.log.debug(
"Waiting on snapshot completion %s" % snapshot_id)
waiter = client.get_waiter('snapshot_completed')
waiter.wait(SnapshotIds=[snapshot_id])
if self.verbose:
self.log.debug("Snapshot: %s completed" % snapshot_id)
elif volume_id:
if self.verbose:
self.log.debug("Waiting on volume creation %s" % volume_id)
waiter = client.get_waiter('volume_available')
waiter.wait(VolumeIds=[volume_id])
if self.verbose:
self.log.debug("Volume: %s created" % volume_id)
elif instance_id:
if self.verbose:
self.log.debug("Waiting on instance stop")
waiter = client.get_waiter('instance_stopped')
waiter.wait(InstanceIds=[instance_id])
if self.verbose:
self.log.debug("Instance: %s stopped" % instance_id)
@EBS.action_registry.register('snapshot')
class CreateSnapshot(BaseAction):
"""Snapshot an EBS volume.
Tags may be optionally added to the snapshot during creation.
- `copy-volume-tags` copies all the tags from the specified
volume to the corresponding snapshot.
- `copy-tags` copies the listed tags from each volume
to the snapshot. This is mutually exclusive with
`copy-volume-tags`.
- `tags` allows new tags to be added to each snapshot. If
no tags are specified, then the tag `custodian_snapshot`
is added.
The default behavior is `copy-volume-tags: true`.
:example:
.. code-block:: yaml
policies:
- name: snapshot-volumes
resource: ebs
filters:
- Attachments: []
- State: available
actions:
- type: snapshot
copy-tags:
- Name
tags:
custodian_snapshot: True
"""
schema = type_schema(
'snapshot',
**{'copy-tags': {'type': 'array', 'items': {'type': 'string'}},
'copy-volume-tags': {'type': 'boolean'},
'tags': {'type': 'object'}})
permissions = ('ec2:CreateSnapshot', 'ec2:CreateTags',)
def validate(self):
if self.data.get('copy-tags') and 'copy-volume-tags' in self.data:
raise PolicyValidationError(
"Can specify copy-tags or copy-volume-tags, not both")
def process(self, volumes):
client = local_session(self.manager.session_factory).client('ec2')
retry = get_retry(['Throttled'], max_attempts=5)
for vol in volumes:
vol_id = vol['VolumeId']
tags = [{
'ResourceType': 'snapshot',
'Tags': self.get_snapshot_tags(vol)
}]
retry(self.process_volume, client=client, volume=vol_id, tags=tags)
def process_volume(self, client, volume, tags):
try:
client.create_snapshot(VolumeId=volume, TagSpecifications=tags)
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidVolume.NotFound':
return
raise
def get_snapshot_tags(self, resource):
user_tags = self.data.get('tags', {}) or {'custodian_snapshot': ''}
copy_tags = self.data.get('copy-tags', []) or self.data.get('copy-volume-tags', True)
return coalesce_copy_user_tags(resource, copy_tags, user_tags)
@EBS.action_registry.register('delete')
class Delete(BaseAction):
"""Delete an ebs volume.
If the force boolean is true, we will detach an attached volume
from an instance. Note this cannot be done for running instance
root volumes.
:example:
.. code-block:: yaml
policies:
- name: delete-unattached-volumes
resource: ebs
filters:
- Attachments: []
- State: available
actions:
- delete
"""
schema = type_schema('delete', force={'type': 'boolean'})
permissions = (
'ec2:DetachVolume', 'ec2:DeleteVolume', 'ec2:DescribeVolumes')
def process(self, volumes):
client = local_session(self.manager.session_factory).client('ec2')
with self.executor_factory(max_workers=3) as w:
futures = {}
for v in volumes:
futures[
w.submit(self.process_volume, client, v)] = v
for f in as_completed(futures):
v = futures[f]
if f.exception():
self.log.error(
"Error processing volume:%s error:%s",
v['VolumeId'], f.exception())
def process_volume(self, client, volume):
try:
if self.data.get('force') and len(volume['Attachments']):
client.detach_volume(VolumeId=volume['VolumeId'], Force=True)
waiter = client.get_waiter('volume_available')
waiter.wait(VolumeIds=[volume['VolumeId']])
self.manager.retry(
client.delete_volume, VolumeId=volume['VolumeId'])
except ClientError as e:
if e.response['Error']['Code'] == "InvalidVolume.NotFound":
return
raise
@EBS.filter_registry.register('modifyable')
class ModifyableVolume(Filter):
"""Check if an ebs volume is modifyable online.
Considerations:
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/limitations.html
Consideration Summary
- only current instance types are supported (one exception m3.medium)
Current Generation Instances (2017-2)
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#current-gen-instances
- older magnetic volume types are not supported
- shrinking volumes is not supported
- must wait at least 6hrs between modifications to the same volume.
- volumes must have been attached after nov 1st, 2016.
See :ref:`modify action <aws.ebs.actions.modify>` for examples.
"""
schema = type_schema('modifyable')
older_generation = {
'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge',
'c1.medium', 'c1.xlarge', 'cc2.8xlarge',
'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'cr1.8xlarge',
'hi1.4xlarge', 'hs1.8xlarge', 'cg1.4xlarge', 't1.micro',
# two legs good, not all current gen work either.
'm3.large', 'm3.xlarge', 'm3.2xlarge'
}
permissions = ("ec2:DescribeInstances",)
def process(self, resources, event=None):
results = []
filtered = []
attached = []
stats = Counter()
marker_date = parse_date('2016-11-01T00:00:00+00:00')
# Filter volumes
for r in resources:
# unsupported type
if r['VolumeType'] == 'standard':
stats['vol-type'] += 1
filtered.append(r['VolumeId'])
continue
# unattached are easy
if not r.get('Attachments'):
results.append(r)
continue
# check for attachment date older then supported date
if r['Attachments'][0]['AttachTime'] < marker_date:
stats['attach-time'] += 1
filtered.append(r['VolumeId'])
continue
attached.append(r)
# Filter volumes attached to unsupported instance types
ec2 = self.manager.get_resource_manager('ec2')
instance_map = {}
for v in attached:
instance_map.setdefault(
v['Attachments'][0]['InstanceId'], []).append(v)
instances = ec2.get_resources(list(instance_map.keys()))
for i in instances:
if i['InstanceType'] in self.older_generation:
stats['instance-type'] += len(instance_map[i['InstanceId']])
filtered.extend([v['VolumeId'] for v in instance_map.pop(i['InstanceId'])])
else:
results.extend(instance_map.pop(i['InstanceId']))
# Filter volumes that are currently under modification
client = local_session(self.manager.session_factory).client('ec2')
modifying = set()
# Re 197 - Max number of filters is 200, and we have to use
# three additional attribute filters.
for vol_set in chunks(list(results), 197):
vol_ids = [v['VolumeId'] for v in vol_set]
mutating = client.describe_volumes_modifications(
Filters=[
{'Name': 'volume-id',
'Values': vol_ids},
{'Name': 'modification-state',
'Values': ['modifying', 'optimizing', 'failed']}])
for vm in mutating.get('VolumesModifications', ()):
stats['vol-mutation'] += 1
filtered.append(vm['VolumeId'])
modifying.add(vm['VolumeId'])
self.log.debug(
"filtered %d of %d volumes due to %s",
len(filtered), len(resources), sorted(stats.items()))
return [r for r in results if r['VolumeId'] not in modifying]
@EBS.action_registry.register('modify')
class ModifyVolume(BaseAction):
"""Modify an ebs volume online.
**Note this action requires use of modifyable filter**
Intro Blog & Use Cases:
https://aws.amazon.com/blogs/aws/amazon-ebs-update-new-elastic-volumes-change-everything/
Docs:
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modify-volume.html
Considerations:
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/limitations.html
:example:
Find under utilized provisioned iops volumes older than a week
and change their type.
.. code-block:: yaml
policies:
- name: ebs-remove-piops
resource: ebs
filters:
- type: value
key: CreateDate
value_type: age
value: 7
op: greater-than
- VolumeType: io1
- type: metrics
name: VolumeConsumedReadWriteOps
statistics: Maximum
value: 100
op: less-than
days: 7
- modifyable
actions:
- type: modify
volume-type: gp2
`iops-percent` and `size-percent` can be used to modify
respectively iops on io1 volumes and volume size.
When converting to io1, `iops-percent` is used to set the iops
allocation for the new volume against the extant value for the old
volume.
:example:
Double storage and quadruple iops for all io1 volumes.
.. code-block:: yaml
policies:
- name: ebs-upsize-piops
resource: ebs
filters:
- VolumeType: io1
- modifyable
actions:
- type: modify
size-percent: 200
iops-percent: 400
**Note** resizing down aka shrinking requires OS and FS support
and potentially additional preparation, else data-loss may occur.
To prevent accidents, shrinking must be explicitly enabled by also
setting `shrink: true` on the action.
"""
schema = type_schema(
'modify',
**{'volume-type': {'enum': ['io1', 'gp2', 'gp3', 'st1', 'sc1']},
'shrink': False,
'size-percent': {'type': 'number'},
'iops-percent': {'type': 'number'}})
# assumptions as its the closest i can find.
permissions = ("ec2:ModifyVolumeAttribute",)
def validate(self):
if 'modifyable' not in self.manager.data.get('filters', ()):
raise PolicyValidationError(
"modify action requires modifyable filter in policy")
if self.data.get('size-percent', 100) < 100 and not self.data.get('shrink', False):
raise PolicyValidationError((
"shrinking volumes requires os/fs support "
"or data-loss may ensue, use `shrink: true` to override"))
return self
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
for resource_set in chunks(resources, 50):
self.process_resource_set(client, resource_set)
def process_resource_set(self, client, resource_set):
vtype = self.data.get('volume-type')
psize = self.data.get('size-percent')
piops = self.data.get('iops-percent')
for r in resource_set:
params = {'VolumeId': r['VolumeId']}
if piops and ('io1' in (vtype, r['VolumeType'])):
# default here if we're changing to io1
params['Iops'] = max(int(r.get('Iops', 10) * piops / 100.0), 100)
if psize:
params['Size'] = max(int(r['Size'] * psize / 100.0), 1)
if vtype:
params['VolumeType'] = vtype
self.manager.retry(client.modify_volume, **params)
|
thisisshi/cloud-custodian
|
c7n/resources/ebs.py
|
Python
|
apache-2.0
| 57,596 | 0.00026 |
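A sketch of how the resources, filters, and actions registered above combine into a single Cloud Custodian policy. It is written as a plain Python dict that mirrors the YAML examples already shown in the docstrings; the policy name and thresholds are placeholders, not recommended values.
# Hypothetical policy document mirroring the YAML examples above; each entry maps
# to a class registered in this module (names and numbers are placeholders).
stale_snapshot_policy = {
    'policies': [{
        'name': 'delete-stale-ebs-snapshots',             # arbitrary policy name
        'resource': 'ebs-snapshot',                        # Snapshot resource manager
        'filters': [
            {'type': 'age', 'days': 28, 'op': 'ge'},       # SnapshotAge
            {'type': 'skip-ami-snapshots', 'value': True}, # SnapshotSkipAmiSnapshots
            {'type': 'unused', 'value': True},             # SnapshotUnusedFilter
        ],
        'actions': ['delete'],                             # SnapshotDelete
    }]
}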
from django_comments.forms import CommentForm
from bogofilter.models import BogofilterComment
import time
class BogofilterCommentForm(CommentForm):
def get_comment_model(self):
return BogofilterComment
|
stefantalpalaru/django-bogofilter
|
bogofilter/forms.py
|
Python
|
bsd-3-clause
| 216 | 0.009259 |
import subprocess
import pynotify
import time
def notify_with_subprocess(title, message):
subprocess.Popen(['notify-send', title, message])
return
def notify_with_pynotify(title, message):
pynotify.init("Test")
notice = pynotify.Notification(title, message)
notice.show()
return
def update_with_pynotify():
pynotify.init("app_name")
n = pynotify.Notification("", "message A", icon='some_icon')
n.set_urgency(pynotify.URGENCY_CRITICAL)
n.set_timeout(10)
n.show()
n.update("","message B")
n.show()
def callback_function(notification=None, action=None, data=None):
print "It worked!"
pynotify.init("app_name")
n = pynotify.Notification("Title", "body")
n.set_urgency(pynotify.URGENCY_NORMAL)
n.set_timeout(100)
n.show()
#n.add_action("clicked","Button text", callback_function, None)
#n.update("Notification", "Update for you")
#n.show()
#update_with_pynotify()
|
cloud-engineering/xfc-email-notifier
|
snippets/snippet_notfication.py
|
Python
|
mit
| 939 | 0.015974 |
#!/usr/bin/env python
# Creates and saves a JSON file to update the D3.js graphs
import MySQLdb
import MySQLdb.cursors
import json
import Reference as r
import logging
def CreateSentimentIndex(NegativeWords, PositiveWords, TotalWords):
''' Creates a sentiment value for the word counts'''
if TotalWords != 0:
Sentiment = ((PositiveWords - NegativeWords)/float(TotalWords))
return Sentiment
def CreateJsonData(QueryResults):
''' Creates a list of dictionaries containing the dates and sentiment indexes'''
Output = []
for Row in QueryResults:
RowDate = Row['DateTime'].strftime('%Y-%m-%d %H:%M:%S')
RowSentiment = CreateSentimentIndex(Row['Negative'], Row['Positive'], Row['TotalWords'])
Output.append({"date" : RowDate, "index" : RowSentiment})
return Output
def OutputJsonFile(InputDictionary):
'''Saves a dictionary to an output file in a JSON format'''
JsonOutput = json.dumps(InputDictionary)
OutputFileName = 'json/twittermetrics_sentiment.js'
FileOutput = open(OutputFileName,'w')
print >> FileOutput, JsonOutput
return True
def CreateJsonFile():
'''Extracts data from the database and saves a JSON file to the server'''
FN_NAME = "CreateJsonFile"
dbDict = MySQLdb.connect(
host=r.DB_HOST,
user=r.DB_USER,
passwd=r.DB_PASSWORD,
db=r.DB_NAME,
cursorclass=MySQLdb.cursors.DictCursor
)
curDict = dbDict.cursor()
Query = "SELECT " + r.KR_FIELD_TOTALWORDS + ", " + r.KR_FIELD_POSITIVE + ", " + r.KR_FIELD_NEGATIVE + ", " + r.KR_FIELD_DATETIME + " FROM " + r.DB_TABLE_KEYWORDSRESULTS + ";"
logging.debug(FN_NAME, Query)
curDict.execute(Query)
QueryResults = curDict.fetchall()
Output = CreateJsonData(QueryResults)
ProcessResult = OutputJsonFile(Output)
logging.info('%s - JSON file created and saved to server with result %s', FN_NAME, ProcessResult)
dbDict.close
return ProcessResult
|
AdamDynamic/TwitterMetrics
|
CreateJson.py
|
Python
|
gpl-2.0
| 2,191 | 0.016431 |
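A worked example of the sentiment index computed above, (positive - negative) / total words, with invented counts; it assumes the functions in this module are importable.
# Worked example of CreateSentimentIndex: (30 - 10) / 200.0 == 0.1 (counts are invented).
index = CreateSentimentIndex(NegativeWords=10, PositiveWords=30, TotalWords=200)
assert abs(index - 0.1) < 1e-9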
#
# DBus interface for payload Repo files image source.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from dasbus.server.interface import dbus_interface
from pyanaconda.modules.common.constants.interfaces import PAYLOAD_SOURCE_REPO_FILES
from pyanaconda.modules.payloads.source.source_base_interface import PayloadSourceBaseInterface
@dbus_interface(PAYLOAD_SOURCE_REPO_FILES.interface_name)
class RepoFilesSourceInterface(PayloadSourceBaseInterface):
"""Interface for the payload Repo files image source."""
pass
|
jkonecny12/anaconda
|
pyanaconda/modules/payloads/source/repo_files/repo_files_interface.py
|
Python
|
gpl-2.0
| 1,434 | 0.001395 |
import pandas as pd
import numpy as np
import sklearn.preprocessing
from sklearn.linear_model import LinearRegression, LogisticRegression
FILENAME = 'BrainMets.xlsx'
MONTHS_TO_LIVE = 9
N_TRAIN = 250
def categorical_indices(values):
"""
When we have a categorical feature like 'cancer type', we want to transform its unique values
to indices in some range [0, ..., n-1] where n is the number of categories
"""
unique = values.unique()
indices = np.zeros(len(values), dtype=int)
for (i, v) in enumerate(sorted(unique)):
indices[np.array(values == v)] = i
return indices
def load_dataframe(filename = FILENAME):
df = pd.read_excel(filename, 'DATA', header=1)
df['cancer type'] = df['cancer type'].str.lower().str.strip()
# df['cancer type'] = categorical_indices(cancer_type)
df['Brain Tumor Sx'] = df['Brain Tumor Sx'].astype('float')
# df['Brain Tumor Sx'] = categorical_indices(brain_tumor_sx)
return df
def get_expert_predictions(df):
expert_predictions = {}
experts = [
'Prediction(Cleveland Clinic)',
' Prediction (Lanie Francis)',
'Prediction(Flickinger)',
'Prediction(Loefler',
'Prediction(Knisely)',
'Prediction(Lunsford)',
'Prediction (Tahrini)',
'Prediction (Sheehan)',
'Prediction (Linskey)',
'Prediction(friedman)',
'Prediction(Stupp)',
'Prediction(Rakfal)',
'Prediction(Rush)',
' Prediction( Kondziolka)'
]
for expert in experts:
expert_predictions[expert] = df[expert]
return expert_predictions
def feature_selection(df, Y, training_set_mask):
Y_training = Y[training_set_mask]
df_training = df.ix[training_set_mask]
fields = []
n_tumors = df['# of tumors']
n_tumors_training = n_tumors[training_set_mask]
def impute(X, df, name, model, postprocess = lambda x: x, maxval = None):
Y = df[name]
missing = np.array(Y.isnull())
X_train = X[~(missing)]
Y_train = Y[~missing]
X_test = X[missing]
model.fit(X_train, Y_train)
Y_test = model.predict(X_test)
Y_test = postprocess(Y_test)
if maxval:
Y_test = np.minimum(Y_test, maxval)
Y_filled = Y.copy()
Y_filled[missing] = Y_test
df[name] = Y_filled
def impute_missing_features(df):
input_fields = df[[
'Brain Tumor Sx',
'RPA',
'ECOG',
'Prior WBRT',
'Diagnosis of Primary at the same time as Brain tumor'
]]
X = np.array(input_fields)
missing = df['Extracranial Disease Status'].isnull()
impute(X, df, 'Extracranial Disease Status', LogisticRegression())
impute(X, df, 'K Score', LinearRegression(), lambda x: 10*(x.astype('int')/10), maxval = 100)
return df
def extract_features(df, binarize_categorical):
df = df.copy()
df['log_age']= np.log2(df['age'])
df = impute_missing_features(df)
df['# of tumors > 1'] = df['# of tumors'] > 1
df['# of tumors > 4'] = df['# of tumors'] > 4
df['# of tumors > 10'] = df['# of tumors'] > 10
df['age <45'] = df['age'] < 45
df['age 45-55'] = (df['age'] >= 45) & (df['age'] < 55)
df['age 55-65'] = (df['age'] >= 55) & (df['age'] < 65)
df['age 65-75'] = (df['age'] >= 65) & (df['age'] < 75)
df['age >=75'] = (df['age'] >= 75)
df['age <40'] = df['age'] < 40
df['age 40-50'] = (df['age'] >= 40) & (df['age'] < 50)
df['age 50-60'] = (df['age'] >= 50) & (df['age'] < 60)
df['age 50-70'] = (df['age'] >= 50) & (df['age'] < 70)
df['age 60-70'] = (df['age'] >= 60) & (df['age'] < 70)
df['age 70-80'] = (df['age'] >= 70) & (df['age'] < 80)
df['age >=80'] = (df['age'] >= 80)
df['age >=70'] =df['age'] >= 70
df['age 45-60'] = (df['age'] >= 45) & (df['age'] < 60)
df['Normalized K Score'] = df['K Score'] / 100.0
continuous_fields = [
'# of tumors > 1',
'age 50-70',
'age >=70',
'Normalized K Score',
]
binary_fields = [
'Prior WBRT',
'Diagnosis of Primary at the same time as Brain tumor'
]
    # 9, 12, 14, 15, 16, 18, 20, 22, 25
categorical_fields = [
'Extracranial Disease Status',
'cancer type',
'Brain Tumor Sx',
'RPA',
'ECOG',
]
vectors = []
for field in continuous_fields + binary_fields:
v = np.array(df[field]).astype('float')
vectors.append(v)
for field in categorical_fields:
values = df[field]
if binarize_categorical:
unique = np.unique(values)
print "Expanding %s into %d indicator variables: %s" % (field, len(unique), unique)
for i, v in enumerate(sorted(unique)):
print len(vectors), field, v, np.sum(values == v)
vec = np.zeros(len(values), dtype='float')
vec[np.array(values == v)] = 1
vectors.append(vec)
else:
vectors.append(categorical_indices(values))
X = np.vstack(vectors).T
print X.dtype, X.shape
return X
def make_dataset(df, binarize_categorical):
"""
Load dataset with continuous outputs
"""
dead = np.array(df['Dead'] == 1)
Y = np.array(np.array(df['SurvivalMonths']))
expert_predictions = get_expert_predictions(df)
test_set_mask = np.zeros(len(df), dtype=bool)
# training set is any data point for which we have no expert
# predictions
for expert_Y in expert_predictions.values():
test_set_mask |= ~expert_Y.isnull()
X = extract_features(df, binarize_categorical)
return X, Y, dead, expert_predictions, test_set_mask
def make_labeled_dataset(df, months_to_live = MONTHS_TO_LIVE, binarize_categorical = True):
X, Y_continuous, dead, expert_predictions, test_set_mask = make_dataset(df, binarize_categorical)
# get rid of patients for whom we don't have a long enough history
mask = np.array(dead | (Y_continuous >= months_to_live))
X = X[mask]
Y = dead[mask] & (Y_continuous[mask] < months_to_live)
return X, Y
# TODO: fill in missing cancer types
def annotate_5year_survival(df):
five_year_survival = {
'breast': 25,
'nsclc': 4,
'sclc' : None,
'rcc' : 12.1,
'melanoma' : 16.1,
'carcinoid' : None,
'endometrial' : 17.5,
'sarcoma' : None,
'colon' : 12.9,
'rectal' : None,
'prostate' : 28,
'uterine' : None ,
'nasopharyngeal' : None,
'thyroid' : 54.7,
}
def load_dataset(filename = FILENAME, binarize_categorical = True):
df = load_dataframe(filename)
return make_dataset(df, binarize_categorical = binarize_categorical)
def load_labeled_dataset(filename = FILENAME, months_to_live = MONTHS_TO_LIVE, binarize_categorical = True):
df = load_dataframe(filename)
return make_labeled_dataset(df, months_to_live, binarize_categorical = binarize_categorical)
def split_labeled_dataset(df, months_to_live = MONTHS_TO_LIVE, n_train = N_TRAIN, binarize_categorical = True, shuffle = True, verbose = True):
X, y = make_labeled_dataset(df, months_to_live = months_to_live, binarize_categorical = binarize_categorical)
if shuffle:
idx = np.arange(len(y))
np.random.shuffle(idx)
y = y[idx]
X = X[idx]
Xtrain = X[:n_train]
Ytrain = y[:n_train]
Xtest = X[n_train:]
Ytest = y[n_train:]
if verbose:
print Xtest[[0,1,2], :]
print Ytest[[0,1,2]]
print np.mean(Ytrain)
print np.mean(Ytest)
print Xtrain.shape
print Xtest.shape
return Xtrain, Ytrain, Xtest, Ytest
def load_dataset_splits(filename = FILENAME, months_to_live = MONTHS_TO_LIVE, n_train = N_TRAIN):
df = load_dataframe(filename)
    return split_labeled_dataset(df, months_to_live, n_train)
|
iskandr/brainmets
|
data.py
|
Python
|
apache-2.0
| 7,077 | 0.046489 |
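A short sketch of how the labeled loader above might be driven; it assumes BrainMets.xlsx is present in the working directory and keeps the module's Python 2 print syntax.
# Hypothetical driver for the loaders above (requires BrainMets.xlsx on disk).
X, y = load_labeled_dataset(filename=FILENAME, months_to_live=MONTHS_TO_LIVE)
print X.shape, y.mean()   # Python 2 print, matching the module above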
# -*- coding: utf-8 -*-
from rdflib import Namespace
ONTOLEX = Namespace("http://www.w3.org/ns/lemon/ontolex#")
LEXINFO = Namespace("http://www.lexinfo.net/ontology/2.0/lexinfo#")
DECOMP = Namespace("http://www.w3.org/ns/lemon/decomp#")
ISOCAT = Namespace("http://www.isocat.org/datcat/")
LIME = Namespace("http://www.w3.org/ns/lemon/lime#")
|
wimmuskee/ontolex-db
|
format/namespace.py
|
Python
|
mit
| 343 | 0 |
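A small sketch of how these rdflib Namespace objects are typically used to mint URIRefs; the term names are illustrative picks from the ontolex/lexinfo vocabularies, not something defined in this module.
# Example term construction with rdflib namespaces (term names are illustrative).
lexical_entry = ONTOLEX.LexicalEntry        # attribute access yields a URIRef
part_of_speech = LEXINFO['partOfSpeech']    # item access works the same way
print(lexical_entry)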
'''
'''
import sys
import os
import gzip
import regex
# 'borrowed' from CGAT - we may not need this functionality
# ultimately. When finalised, if req., make clear source
def openFile(filename, mode="r", create_dir=False):
'''open file called *filename* with mode *mode*.
    gzip-compressed files are recognized by the
    suffix ``.gz`` and opened transparently.
    Note that there are differences in the file-like
    objects returned, for example in the
    ability to seek.
Arguments
---------
filename : string
mode : string
File opening mode
create_dir : bool
If True, the directory containing filename
will be created if it does not exist.
Returns
-------
File or file-like object in case of gzip compressed files.
'''
_, ext = os.path.splitext(filename)
if create_dir:
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if ext.lower() in (".gz", ".z"):
if sys.version_info.major >= 3:
if mode == "r":
return gzip.open(filename, 'rt', encoding="ascii")
elif mode == "w":
return gzip.open(filename, 'wt', encoding="ascii")
else:
raise NotImplementedError(
"mode '{}' not implemented".format(mode))
else:
return gzip.open(filename, mode)
else:
return open(filename, mode)
def checkError(barcode, whitelist, limit=1):
near_matches = set()
comp_regex = regex.compile("(%s){e<=1}" % barcode)
comp_regex2 = regex.compile("(%s){e<=1}" % barcode[:-1])
b_length = len(barcode)
for whitelisted_barcode in whitelist:
w_length = len(whitelisted_barcode)
if barcode == whitelisted_barcode:
continue
if (max(b_length, w_length) > (min(b_length, w_length) + 1)):
continue
if comp_regex.match(whitelisted_barcode) or comp_regex2.match(whitelisted_barcode):
near_matches.add(whitelisted_barcode)
if len(near_matches) > limit:
return near_matches
return near_matches
# partially 'borrowed' from CGAT - we may not need this functionality
# ultimately. When finalised, if req., make clear source
def FastqIterator(infile):
'''iterate over contents of fastq file.'''
while 1:
line1 = infile.readline()
if not line1:
break
if not line1.startswith('@'):
raise ValueError("parsing error: expected '@' in line %s" % line1)
line2 = infile.readline()
line3 = infile.readline()
if not line3.startswith('+'):
raise ValueError("parsing error: expected '+' in line %s" % line3)
line4 = infile.readline()
# incomplete entry
if not line4:
raise ValueError("incomplete entry for %s" % line1)
read_id, seq, qualities = line1[:-1], line2[:-1], line4[:-1]
yield ("", read_id, seq, qualities)
|
k3yavi/alevin
|
testing/src-py/Utilities.py
|
Python
|
gpl-3.0
| 3,040 | 0.000987 |
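A sketch of the barcode check above on a tiny, invented whitelist; it only assumes this module (and its regex dependency) is importable.
# Hypothetical use of checkError (barcodes are invented examples).
whitelist = {"ACGTACGT", "ACGTACGA", "TTTTCCCC"}
near = checkError("ACGTACGT", whitelist)   # whitelisted barcodes within edit distance 1
print(near)                                # the single near match: ACGTACGA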
from comics.aggregator.crawler import CrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "PartiallyClips"
language = "en"
url = "http://partiallyclips.com/"
start_date = "2002-01-01"
rights = "Robert T. Balder"
active = False
class Crawler(CrawlerBase):
def crawl(self, pub_date):
pass
|
jodal/comics
|
comics/comics/partiallyclips.py
|
Python
|
agpl-3.0
| 375 | 0 |
# !/usr/bin/env python
"""Testing a sprite.
The ball should bounce off the sides of the window. You may resize the
window.
This test should just run without failing.
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import unittest
from pyglet.gl import glClear
import pyglet.window
import pyglet.window.event
from pyglet import clock
from scene2d import Sprite, Image2d, FlatView
from scene2d.image import TintEffect
from scene2d.camera import FlatCamera
ball_png = os.path.join(os.path.dirname(__file__), 'ball.png')
class BouncySprite(Sprite):
def update(self):
# move, check bounds
p = self.properties
self.x += p['dx']
self.y += p['dy']
if self.left < 0:
self.left = 0
p['dx'] = -p['dx']
elif self.right > 320:
self.right = 320
p['dx'] = -p['dx']
if self.bottom < 0:
self.bottom = 0
p['dy'] = -p['dy']
elif self.top > 320:
self.top = 320
p['dy'] = -p['dy']
class SpriteOverlapTest(unittest.TestCase):
def test_sprite(self):
w = pyglet.window.Window(width=320, height=320)
image = Image2d.load(ball_png)
ball1 = BouncySprite(0, 0, 64, 64, image, properties=dict(dx=10, dy=5))
ball2 = BouncySprite(288, 0, 64, 64, image,
properties=dict(dx=-10, dy=5))
view = FlatView(0, 0, 320, 320, sprites=[ball1, ball2])
view.fx, view.fy = 160, 160
clock.set_fps_limit(60)
e = TintEffect((.5, 1, .5, 1))
while not w.has_exit:
clock.tick()
w.dispatch_events()
ball1.update()
ball2.update()
if ball1.overlaps(ball2):
if 'overlap' not in ball2.properties:
ball2.properties['overlap'] = e
ball2.add_effect(e)
elif 'overlap' in ball2.properties:
ball2.remove_effect(e)
del ball2.properties['overlap']
view.clear()
view.draw()
w.flip()
w.close()
if __name__ == '__main__':
    unittest.main()
|
bitcraft/pyglet
|
contrib/scene2d/tests/scene2d/SPRITE_OVERLAP.py
|
Python
|
bsd-3-clause
| 2,162 | 0 |
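The bounce rule in BouncySprite.update is a simple clamp-and-reflect; the following dependency-free sketch shows the same idea for a single point. The 320-pixel box and the step values are illustrative assumptions (the test moves a 64-pixel sprite, so it clamps the sprite edges rather than a point).
# Move a point inside a 0..320 box, reversing direction at the walls.
def step(x, y, dx, dy, size=320):
    x, y = x + dx, y + dy
    if x < 0 or x > size:
        x = min(max(x, 0), size)   # clamp back onto the wall
        dx = -dx                   # and reverse the horizontal velocity
    if y < 0 or y > size:
        y = min(max(y, 0), size)
        dy = -dy
    return x, y, dx, dy
x, y, dx, dy = 0.0, 0.0, 10.0, 5.0
for _ in range(200):
    x, y, dx, dy = step(x, y, dx, dy)
print(x, y)   # still inside the 320x320 box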
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "itf.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
valasek/taekwondo
|
manage.py
|
Python
|
gpl-3.0
| 801 | 0 |
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestScale(TestCase):
def testRegression(self):
inputSize = 1024
input = range(inputSize)
factor = 0.5
expected = [factor * n for n in input]
output = Scale(factor=factor, clipping=False)(input)
self.assertEqualVector(output, expected)
def testZero(self):
inputSize = 1024
input = [0] * inputSize
expected = input[:]
output = Scale()(input)
self.assertEqualVector(output, input)
def testEmpty(self):
input = []
expected = input[:]
output = Scale()(input)
self.assertEqualVector(output, input)
def testClipping(self):
inputSize = 1024
maxAbsValue= 10
factor = 1
input = [n + maxAbsValue for n in range(inputSize)]
expected = [maxAbsValue] * inputSize
output = Scale(factor=factor, clipping=True, maxAbsValue=maxAbsValue)(input)
self.assertEqualVector(output, expected)
def testInvalidParam(self):
self.assertConfigureFails(Scale(), { 'maxAbsValue': -1 })
suite = allTests(TestScale)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
arseneyr/essentia
|
test/src/unittest/standard/test_scale.py
|
Python
|
agpl-3.0
| 2,054 | 0.003408 |
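Taken together, the tests pin down Scale's contract: multiply every sample by factor and, when clipping is enabled, limit magnitudes to maxAbsValue (a negative maxAbsValue is rejected at configure time). A pure-Python sketch of that contract follows; it is an illustration, not the Essentia implementation, and the default maxAbsValue here is an assumption.
def scale(values, factor=1.0, clipping=False, maxAbsValue=1e6):
    # Multiply each sample by factor; when clipping, bound |x| by maxAbsValue.
    if maxAbsValue < 0:
        raise ValueError("maxAbsValue must be non-negative")
    out = [factor * v for v in values]
    if clipping:
        out = [max(-maxAbsValue, min(v, maxAbsValue)) for v in out]
    return out
assert scale(range(4), factor=0.5) == [0.0, 0.5, 1.0, 1.5]
assert scale([15, 20], factor=1, clipping=True, maxAbsValue=10) == [10, 10]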
# PyParticles : Particles simulation in python
# Copyright (C) 2012 Simone Riva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pyparticles.pset.boundary as bd
class ReboundBoundary( bd.Boundary ):
def __init__( self , bound=(-1,1) , dim=3 ):
self.set_boundary( bound , dim )
self.set_normals()
def set_normals( self ):
self.__N = np.zeros( ( 2*self.dim , self.dim ) )
#print( self.__N )
if self.dim >= 2 :
self.__N[0,:2] = np.array( [1,0] )
self.__N[1,:2] = np.array( [-1,0] )
self.__N[2,:2] = np.array( [0,1] )
self.__N[3,:2] = np.array( [0,-1] )
if self.dim == 3 :
self.__N[4,:] = np.array( [0,0,1] )
self.__N[5,:] = np.array( [0,0,-1] )
def boundary( self , p_set ):
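        """Reflect particles that crossed a face of the bounding box back
        inside it and flip the corresponding velocity component (rebound)."""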
v_mi = np.zeros((3))
v_mx = np.zeros((3))
for i in range( self.dim ) :
j = 2*i
v_mi[:] = 0.0
v_mx[:] = 0.0
#delta = self.bound[i,1] - self.bound[i,0]
b_mi = p_set.X[:,i] < self.bound[i,0]
b_mx = p_set.X[:,i] > self.bound[i,1]
v_mi[i] = self.bound[i,0]
v_mx[i] = self.bound[i,1]
p_set.X[b_mi,:] = p_set.X[b_mi,:] + 2.0 * self.__N[j,:] * ( v_mi - p_set.X[b_mi,:] )
p_set.X[b_mx,:] = p_set.X[b_mx,:] + 2.0 * self.__N[j,:] * ( v_mx - p_set.X[b_mx,:] )
p_set.V[b_mi,i] = -p_set.V[b_mi,i]
p_set.V[b_mx,i] = -p_set.V[b_mx,i]
|
simon-r/PyParticles
|
pyparticles/pset/rebound_boundary.py
|
Python
|
gpl-3.0
| 2,289 | 0.042813 |
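A minimal usage sketch for ReboundBoundary. It assumes the PyParticles package is importable under the path shown; MinimalPSet is a hypothetical stand-in exposing only the X (positions) and V (velocities) arrays that boundary() reads and writes.
import numpy as np
from pyparticles.pset.rebound_boundary import ReboundBoundary
class MinimalPSet:
    # Stand-in particle set: three particles, each outside the unit box on one axis.
    def __init__(self):
        self.X = np.array([[ 1.5, 0.0, 0.0],
                           [-1.2, 0.5, 0.0],
                           [ 0.0, 0.0, 1.8]])
        self.V = np.ones_like(self.X)
pset = MinimalPSet()
ReboundBoundary(bound=(-1, 1), dim=3).boundary(pset)
print(pset.X)   # every coordinate reflected back into [-1, 1]
print(pset.V)   # the offending velocity components have changed sign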
#!/usr/bin/python3
from urllib.parse import urlencode
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
from argparse import ArgumentParser
from threading import Thread
import re
class HackAlunoOnline:
def __init__( self , matricula , full_search = False ):
        # Default output covers matricula/name/course/status/period/CRA;
        # full_search additionally retrieves the remaining personal information.
# Main url
self.aluno_online_url = 'https://www.alunoonline.uerj.br'
# parameters
self.matricula = matricula
self.full_search = full_search
# Main html
self.main_html = self._get_aluno_online_html( '/requisicaoaluno/requisicao.php' , { 'requisicao': 'SinteseFormacao' } )
# Main data
self.nome = self._extract_nome()
self.cra = self._extract_cra()
self.curso = self._extract_curso()
self.situacao = self._extract_situacao()
self.periodo = self._extract_periodo()
# get and extract personal data
if ( self.full_search ):
            # contact data
self.dados_contato_html = self._get_aluno_online_html( '/recadastramento_dados_contato/recadastramento_dados_contato.php' )
self.telefone = self._extract_telefone()
self.email = self._extract_email()
self.endereco = self._extract_endereco()
self.cep = self._extract_cep()
            # personal data
self.dados_pessoais_html = self._get_aluno_online_html( '/recadastramento_dados_pessoais/recadastramento_dados_pessoais.php' )
self.nascimento = self._extract_nascimento()
self.sexo = self._extract_sexo()
self.estado_civil = self._extract_estado_civil()
self.naturalidade = self._extract_naturalidade()
self.nacionalidade = self._extract_nacionalidade()
self.pai = self._extract_pai()
self.mae = self._extract_mae()
self.cpf = self._extract_cpf()
            self.rg = self._extract_rg()  # number, issuing body, state (UF), country, issue date, expiry date
            self.titulo_eleitor = self._extract_titulo_eleitor()  # number, electoral zone, section, state (UF), issue date
            self.certificado_reservista = self._extract_certificado_reservista()  # number, serial number, issuing body, type, issue date, state (UF)
            self.ensino_medio = self._extract_ensino_medio()  # school name, country, state (UF), type of education, completion date
            # courses taken
self.disciplinas_realizadas_html = self._get_aluno_online_html( '/requisicaoaluno/requisicao.php' , { 'requisicao': 'DisciplinasRealizadas' } )
self.disciplinas = self._extract_disciplinas()
def _get_aluno_online_html( self , endpoint , parameters = {} ):
result = None
try:
parameters.update( { 'matricula': self.matricula } )
data = urlencode( parameters )
request = Request( self.aluno_online_url + endpoint , data.encode( 'ascii' ) )
response = urlopen( request )
result = BeautifulSoup( response.read() , 'html.parser' )
except:
pass
return result
def _extract_nome( self ):
try:
nome = self.main_html.find( id = "table_cabecalho_rodape" ).find_all( 'font' )[2].string[15:]
except:
nome = ''
return nome
def _extract_cra( self ):
try:
cra = float( self.main_html.find_all( 'div' )[7].text[16:].replace( ',' , '.' ) )
except:
cra = ''
return cra
def _extract_curso( self ):
try:
curso = self.main_html.find_all( 'div' )[6].text[8:]
except:
curso = ''
return curso
def _extract_situacao( self ):
try:
situacao = self.main_html.find_all( 'div' )[4].text[11:]
except:
situacao = ''
return situacao
def _extract_periodo( self ):
try:
for element in self.main_html.select( 'div > b' ):
if ( element.text == "Períodos Utilizados/Em Uso para Integralização Curricular:" ):
periodo = int( element.parent.text[59:] )
except:
periodo = ''
return periodo
def _format_telefone( self , ddd , tel , ramal ):
return '({0}) {1} [{2}]'.format( ddd , tel[:4] + '-' + tel[4:] , ( 'Sem Ramal' if not ramal else ( 'Ramal ' + ramal ) ) )
def _extract_telefone( self ):
telefone = []
# Tel 1..2
for i in range( 1 , 3 ):
try:
ddd = self.dados_contato_html.find( 'input' , { 'name': 'num_ddd_' + str( i ) + '_pag' } ).get( 'value' )
tel = self.dados_contato_html.find( 'input' , { 'name': 'num_tel_' + str( i ) + '_pag' } ).get( 'value' )
ramal = self.dados_contato_html.find( 'input' , { 'name': 'num_ramal_' + str( i ) + '_pag' } ).get( 'value' )
telefone.append( self._format_telefone( ddd , tel , ramal ) )
except:
pass
return telefone
def _extract_email( self ):
try:
email = self.dados_contato_html.find( 'input' , { 'name': 'dsc_email_pag' } ).get( 'value' )
except:
email = ''
return email
def _extract_endereco( self ):
try:
endereco = self.dados_contato_html.find( 'input' , { 'name': 'txt_end_pag' } ).get( 'value' )
endereco += ', ' + self.dados_contato_html.find( 'input' , { 'name': 'cod_bairro_input' } ).get( 'value' )
endereco += ', ' + self.dados_contato_html.select( 'select[name="cod_munic_pag"] option[selected]' )[0].text
endereco += ', ' + self.dados_contato_html.select( 'select[name="cod_uf_pag"] option[selected]' )[0].text
except:
endereco = ''
return endereco
def _extract_cep( self ):
try:
cep = self.dados_contato_html.find( 'input' , { 'name': 'num_cep_pag' } ).get( 'value' )
cep = cep[:5] + '-' + cep[5:]
except:
cep = ''
return cep
def _extract_nascimento( self ):
try:
nascimento = self.dados_pessoais_html.find_all( 'div' )[2].text[15:]
except:
nascimento = ''
return nascimento
def _extract_sexo( self ):
try:
sexo = self.dados_pessoais_html.find_all( 'div' )[3].text[6:]
except:
sexo = ''
return sexo
def _extract_estado_civil( self ):
try:
civil = self.dados_pessoais_html.find_all( 'div' )[4].text[12:]
except:
civil = ''
return civil
def _extract_naturalidade( self ):
try:
naturalidade = self.dados_pessoais_html.find_all( 'div' )[5].text[14:]
except:
naturalidade = ''
return naturalidade
def _extract_nacionalidade( self ):
try:
nacionalidade = self.dados_pessoais_html.find_all( 'div' )[6].text[15:]
except:
nacionalidade = ''
return nacionalidade
def _extract_pai( self ):
try:
pai = self.dados_pessoais_html.find_all( 'div' )[7].text[13:]
except:
pai = ''
return pai
def _extract_mae( self ):
try:
mae = self.dados_pessoais_html.find_all( 'div' )[8].text[13:]
except:
mae = ''
return mae
def _extract_cpf( self ):
try:
cpf = self.dados_pessoais_html.find_all( 'font' )[10].text
cpf = cpf[:3] + '.' + cpf[3:6] + '.' + cpf[6:9] + '-' + cpf[9:]
except:
cpf = ''
return cpf
def _extract_dados_pessoais_divs( self , start , end , cut ):
arrayReturn = []
try:
array = self.dados_pessoais_html.find_all( 'div' )[start:end]
arrayReturn.append( array[0].text[cut:] )
for data in array[1:]:
text = data.text.strip()
if ( ( not 'Não Informado' in text ) and ( not '__/__/____' in text ) ):
arrayReturn.append( text )
except:
arrayReturn = ''
return arrayReturn
def _extract_rg( self ):
return self._extract_dados_pessoais_divs( 9 , 14 , 8 )
def _extract_titulo_eleitor( self ):
return self._extract_dados_pessoais_divs( 15 , 19 , 8 )
def _extract_certificado_reservista( self ):
return self._extract_dados_pessoais_divs( 20 , 25 , 8 )
def _extract_ensino_medio( self ):
return self._extract_dados_pessoais_divs( 26 , 31 , 25 )
def _extract_disciplinas( self ):
disciplinas = []
try:
for linha in self.disciplinas_realizadas_html.find_all( 'div' , style = re.compile( '^width:100%;font-size=12px;' ) ):
conteudoLinha = []
for coluna in linha.children:
conteudoColuna = coluna.string.strip()
if ( conteudoColuna and not re.match( '\\d{4}/\\d' , conteudoColuna ) ):
conteudoLinha.append( conteudoColuna )
disciplinas.append( ( '{0:60} {1:2} {2:3} {3:15} {4:10}' + ( ' {5:6} {6:15}' if ( len( conteudoLinha ) > 5 ) else '' ) ).format( *conteudoLinha ) )
except:
disciplinas = ''
return disciplinas
def _truncate( self , string , width ):
if ( len( string ) > width ):
string = string[:( width - 3 )] + '...'
return string
def __str__( self ):
if self.full_search:
pattern = "\n{0:12} - {1:50}\n\nMatricula: {0}\nNome: {1}\nCurso: {2}\nSituacao: {3}\nPeriodo: {4}\nCRA: {5}\n"
pattern += "\n-Contato-\n\nTelefone: {6}\nE-mail: {7}\nEndereço: {8}\nCEP: {9}\n"
pattern += "\n-Informações Pessoais-\n\nData de Nascimento: {10}\nSexo: {11}\nEstado Civil: {12}\nNaturalidade: {13}\nNacionalidade: {14}\nNome do Pai: {15}\nNome da Mãe: {16}\nCPF: {17}\nRG: {18}\nTítulo de Eleitor: {19}\nCertificado de Reservista: {20}\nEnsino Médio: {21}\n"
pattern += "\n-Disciplinas Realizadas-\n\n{22}\n\n"
parameters = [ self.matricula , self.nome , self.curso , self.situacao , self.periodo , self.cra , ', '.join( self.telefone ) , self.email , self.endereco , self.cep , self.nascimento , self.sexo , self.estado_civil , self.naturalidade , self.nacionalidade , self.pai , self.mae , self.cpf , ', '.join( self.rg ) , ', '.join( self.titulo_eleitor ) , ', '.join( self.certificado_reservista ) , ', '.join( self.ensino_medio ) , '\n'.join( self.disciplinas ) ]
else:
pattern = "{0:12}\t{1:30}\t{2:20}\t{3:10}\t{4:3}\t{5:4}"
parameters = [ self.matricula , self._truncate( self.nome , 30 ) , self._truncate( self.curso , 20 ) , self._truncate( self.situacao , 10 ) , self.periodo , self.cra ]
return pattern.format( *parameters )
# End class
def get_registry_by_name( name , searchfile ):
matriculas = []
with open( searchfile , 'r' ) as arquivo:
for line in arquivo.readlines():
matricula, nomeArquivo = line.split( ':' )
if name in nomeArquivo.lower():
matriculas.append( matricula )
return matriculas
def get_data( matricula , full_search ):
hao = HackAlunoOnline( matricula , full_search )
print( hao )
# Program to retrieve a UERJ student's data through their registration number (matricula)
def Main():
parser = ArgumentParser( description = "Recupera informacoes de alunos da UERJ atraves de falhas do sistema academico Aluno Online" )
parser.add_argument( 'matricula' , help = "Matricula do aluno" )
parser.add_argument( '-i' , '--inputfile' , help = "Utilizar um arquivo contendo uma lista de matriculas com uma matricula por linha como entrada" , action = "store_true" )
parser.add_argument( '-r' , '--reverse' , help = "Procura reversa -> busca matricula por nome (para alunos do IPRJ)" , action = "store_true" )
parser.add_argument( '-f' , '--fullsearch' , help = "Busca completa por informações pessoais" , action = "store_true" )
parser.add_argument( '-s' , '--searchfile' , help = "Nome do arquivo contendo matricula:nome que deverá ser usado na busca reversa" , default = "matricula-nome.txt" )
args = parser.parse_args()
matriculas = []
if ( args.reverse and args.inputfile ):
with open( args.matricula , 'r' ) as arquivoNomes:
for nome in arquivoNomes:
matriculas.extend( get_registry_by_name( nome.strip( '\n' ) , args.searchfile ) )
elif args.reverse:
matriculas = get_registry_by_name( args.matricula.lower() , args.searchfile )
elif args.inputfile:
file = open( args.matricula , 'r' )
matriculas = file.readlines()
else:
matriculas.append( args.matricula )
if ( not matriculas ):
print( "Nao foram encontrados dados para esta matricula" )
else:
if ( not args.fullsearch ):
print( "{0:12}\t{1:30}\t{2:20}\t{3:10}\t{4:2}\t{5:4}".format( "Matricula", "Nome", "Curso" , "Situacao" , "Periodo" , "CRA" ) )
for matricula in matriculas:
thread = Thread( target = get_data , args = ( matricula.strip( '\n' ) , args.fullsearch ) )
thread.start()
# End Main
if __name__ == '__main__':
Main()
|
hammurabi13th/haopy
|
hao.py
|
Python
|
mit
| 12,161 | 0.068167 |
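For reference, typical invocations of the script above (the registration number is a placeholder, not a real matricula): "python3 hao.py 201510012345" prints the one-line summary, "python3 hao.py 201510012345 -f" runs the full personal-data search, "python3 hao.py matriculas.txt -i" reads one matricula per line from a file, and "python3 hao.py maria -r -s matricula-nome.txt" does a reverse lookup by name against the matricula:nome mapping file.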