text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.contrib import admin
from django.contrib.admin.views.main import ChangeList
from django.core.urlresolvers import reverse
from subtitles.models import (get_lineage, SubtitleLanguage,
SubtitleVersion)
class SubtitleVersionInline(admin.TabularInline):
    """Read-only inline listing a language's subtitle versions."""

    model = SubtitleVersion
    fields = ['version_number']
    max_num = 0  # no extra blank rows: versions can't be added here

    def has_delete_permission(self, request, obj=None):
        """Versions are immutable history; never allow deleting them."""
        return False
class SubtitleLanguageAdmin(admin.ModelAdmin):
    """Admin screen for SubtitleLanguage.

    Most of the extra columns surface denormalized signoff/collaborator
    counters so the change list can sort on them without extra queries.
    """

    list_display = ['video_title', 'language_code', 'version_count', 'tip',
                    'unofficial_signoffs',
                    'official_signoffs',
                    'pending_collaborators',
                    'expired_pending_collaborators',
                    'unexpired_pending_collaborators',
                    'is_forked']
    list_filter = ['created', 'language_code']
    inlines = [SubtitleVersionInline]
    search_fields = ['video__title', 'video__video_id', 'language_code']
    raw_id_fields = ['video']

    def unofficial_signoffs(self, lang):
        return lang.unofficial_signoff_count
    unofficial_signoffs.admin_order_field = 'unofficial_signoff_count'

    def official_signoffs(self, lang):
        return lang.official_signoff_count
    official_signoffs.admin_order_field = 'official_signoff_count'

    def pending_collaborators(self, lang):
        return lang.pending_signoff_count
    pending_collaborators.short_description = 'pending'
    pending_collaborators.admin_order_field = 'pending_signoff_count'

    def expired_pending_collaborators(self, lang):
        return lang.pending_signoff_expired_count
    expired_pending_collaborators.short_description = 'expired pending'
    expired_pending_collaborators.admin_order_field = \
        'pending_signoff_expired_count'

    def unexpired_pending_collaborators(self, lang):
        return lang.pending_signoff_unexpired_count
    unexpired_pending_collaborators.short_description = 'unexpired pending'
    unexpired_pending_collaborators.admin_order_field = \
        'pending_signoff_unexpired_count'

    def video_title(self, lang):
        return lang.video.title_display()
    video_title.short_description = 'video'

    def version_count(self, lang):
        # .full() includes versions the default query set hides
        return lang.subtitleversion_set.full().count()
    version_count.short_description = 'number of versions'

    def tip(self, lang):
        latest = lang.get_tip(full=True)
        return latest.version_number if latest else None
    tip.short_description = 'tip version'
class SubtitleVersionChangeList(ChangeList):
    """ChangeList that prefetches relations instead of joining them.

    select_related() here makes MySQL choose an absolutely insane query
    plan, so prefetch_related() is used as a workaround.
    """

    def get_query_set(self, request):
        base_qs = super(SubtitleVersionChangeList, self).get_query_set(request)
        return base_qs.prefetch_related('video', 'subtitle_language')
class SubtitleVersionAdmin(admin.ModelAdmin):
    """Admin screen for SubtitleVersion.

    Versions are treated as immutable history: deletion is disabled, and
    the parents M2M is excluded from the edit form because Django would
    call .all() on it, which is disabled on SubtitleVersion managers.
    """
    list_per_page = 20
    list_display = ['video_title', 'id', 'language', 'version_num',
                    'visibility', 'visibility_override',
                    'subtitle_count', 'created']
    list_select_related = False
    raw_id_fields = ['video', 'subtitle_language', 'parents', 'author']
    list_filter = ['created', 'visibility', 'visibility_override',
                   'language_code']
    list_editable = ['visibility', 'visibility_override']
    search_fields = ['video__video_id', 'video__title', 'title',
                     'language_code', 'description', 'note']
    # Unfortunately Django uses .all() on related managers instead of
    # .get_query_set().  We've disabled .all() on SubtitleVersion managers so
    # we can't let Django do this.  This means we can't edit parents in the
    # admin, but you should never be doing that anyway.
    exclude = ['parents', 'serialized_subtitles']
    readonly_fields = ['parent_versions']
    # don't allow deletion via the bulk-action dropdown either
    actions = []

    def get_changelist(self, request, **kwargs):
        """Use the ChangeList subclass that works around the MySQL plan bug."""
        return SubtitleVersionChangeList

    def has_delete_permission(self, request, obj=None):
        # subtitle versions should be immutable, don't allow deletion
        return False

    def version_num(self, sv):
        return '#' + str(sv.version_number)
    version_num.short_description = 'version #'

    def video_title(self, sv):
        return sv.video.title
    video_title.short_description = 'video'

    def language(self, sv):
        return sv.subtitle_language.get_language_code_display()

    def parent_versions(self, sv):
        """Render read-only links to parents (replaces the excluded widget)."""
        links = []
        for parent in sv.parents.full():
            href = reverse('admin:subtitles_subtitleversion_change',
                           args=(parent.pk,))
            links.append('<a href="%s">%s</a>' % (href, parent))
        return ', '.join(links)
    parent_versions.allow_tags = True

    # Hack to generate lineages properly when modifying versions in the admin
    # interface.  Maybe we should just disallow this entirely once the version
    # models are hooked up everywhere else?
    def _regenerate_lineage(self, obj):
        # Recompute and persist the lineage from the full parent set.
        # Shared by response_change/response_add, which previously
        # duplicated this code.
        obj.lineage = get_lineage(obj.parents.full())
        obj.save()

    def response_change(self, request, obj):
        response = super(SubtitleVersionAdmin, self).response_change(
            request, obj)
        self._regenerate_lineage(obj)
        return response

    def response_add(self, request, obj, *args, **kwargs):
        response = super(SubtitleVersionAdmin, self).response_add(request, obj)
        self._regenerate_lineage(obj)
        return response
# -----------------------------------------------------------------------------
# Hook the models up to their admin classes.
admin.site.register(SubtitleLanguage, SubtitleLanguageAdmin)
admin.site.register(SubtitleVersion, SubtitleVersionAdmin)
|
ofer43211/unisubs
|
apps/subtitles/admin.py
|
Python
|
agpl-3.0
| 6,564 | 0.001219 |
# -*- coding:utf-8 -*-
## src/common/connection_handlers_events.py
##
## Copyright (C) 2010-2014 Yann Leboulanger <asterix AT lagaule.org>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
import datetime
import sys
import os
from time import (localtime, time as time_time)
from calendar import timegm
import hmac
from common import atom
from common import nec
from common import helpers
from common import gajim
from common import i18n
import nbxmpp
from common import dataforms
from common import exceptions
from common.zeroconf import zeroconf
from common.logger import LOG_DB_PATH
from common.pep import SUPPORTED_PERSONAL_USER_EVENTS
from nbxmpp.protocol import NS_CHATSTATES
from common.jingle_transport import JingleTransportSocks5
from common.file_props import FilesProp
# OpenSSL bits are optional: only import when gajim detected PyOpenSSL.
if gajim.HAVE_PYOPENSSL:
    import OpenSSL.crypto
import logging
log = logging.getLogger('gajim.c.connection_handlers_events')
# Map MUC presence condition names to the numeric codes carried in
# <status code='...'/> elements (see the XEP-0045 status-code registry).
CONDITION_TO_CODE = {
    'realjid-public': 100,
    'affiliation-changed': 101,
    'unavailable-shown': 102,
    'unavailable-not-shown': 103,
    'configuration-changed': 104,
    'self-presence': 110,
    'logging-enabled': 170,
    'logging-disabled': 171,
    'non-anonymous': 172,
    'semi-anonymous': 173,
    'fully-anonymous': 174,
    'room-created': 201,
    'nick-assigned': 210,
    'banned': 301,
    'new-nick': 303,
    'kicked': 307,
    'removed-affiliation': 321,
    'removed-membership': 322,
    'removed-shutdown': 332,
}
class HelperEvent:
    """Mixin with stanza-parsing helpers shared by the event classes.

    Methods here set attributes on self (fjid, jid, resource, id_, ...)
    rather than returning values; events call them from generate().
    """
    def get_jid_resource(self, check_fake_jid=False):
        """Set self.fjid/self.jid/self.resource from the stanza.

        When check_fake_jid is True and this IQ id was recorded in
        conn.groupchat_jids, the stored (fake) JID is used instead of the
        stanza's from-address, and the mapping entry is consumed.
        """
        if check_fake_jid and hasattr(self, 'id_') and \
                self.id_ in self.conn.groupchat_jids:
            self.fjid = self.conn.groupchat_jids[self.id_]
            del self.conn.groupchat_jids[self.id_]
        else:
            self.fjid = helpers.get_full_jid_from_iq(self.stanza)
        self.jid, self.resource = gajim.get_room_and_nick_from_fjid(self.fjid)

    def get_id(self):
        """Set self.id_ to the stanza's id attribute."""
        self.id_ = self.stanza.getID()

    def get_gc_control(self):
        """Set self.gc_control to the groupchat control for self.jid."""
        self.gc_control = gajim.interface.msg_win_mgr.get_gc_control(self.jid,
            self.conn.name)
        # If gc_control is missing - it may be minimized. Try to get it
        # from there. If it's not there - then it's missing anyway and
        # will remain set to None.
        if not self.gc_control:
            minimized = gajim.interface.minimized_controls[self.conn.name]
            self.gc_control = minimized.get(self.jid)

    def _generate_timestamp(self, tag):
        """Set self.timestamp (a localtime struct) from a delay tag value."""
        tim = helpers.datetime_tuple(tag)
        self.timestamp = localtime(timegm(tim))

    def get_chatstate(self):
        """
        Extract chatstate from a <message/> stanza

        Requires self.stanza and self.msgtxt
        """
        self.chatstate = None
        # chatstates - look for chatstate tags in a message if not delayed
        delayed = self.stanza.getTag('x', namespace=nbxmpp.NS_DELAY) is not None
        if not delayed:
            children = self.stanza.getChildren()
            for child in children:
                if child.getNamespace() == NS_CHATSTATES:
                    self.chatstate = child.getName()
                    break
class HttpAuthReceivedEvent(nec.NetworkIncomingEvent):
    name = 'http-auth-received'
    base_network_events = []

    def generate(self):
        """Collect the HTTP-auth confirm request details from the stanza."""
        stanza = self.stanza
        self.opt = gajim.config.get_per('accounts', self.conn.name, 'http_auth')
        self.iq_id = stanza.getTagAttr('confirm', 'id')
        self.method = stanza.getTagAttr('confirm', 'method')
        self.url = stanza.getTagAttr('confirm', 'url')
        # In case it's a message with a body
        self.msg = stanza.getTagData('body')
        return True
class LastResultReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    """Reply to a last-activity query.

    Sets self.status (free-form text) and self.seconds (idle seconds; stays
    -1 for error replies).  Returning None drops the event.
    """
    name = 'last-result-received'
    base_network_events = []
    def generate(self):
        self.get_id()
        self.get_jid_resource(check_fake_jid=True)
        # stop tracking this pending query id
        if self.id_ in self.conn.last_ids:
            self.conn.last_ids.remove(self.id_)
        self.status = ''
        self.seconds = -1
        if self.stanza.getType() == 'error':
            return True
        qp = self.stanza.getTag('query')
        if not qp:
            # malformed reply without <query/>: drop the event
            return
        sec = qp.getAttr('seconds')
        self.status = qp.getData()
        try:
            self.seconds = int(sec)
        except Exception:
            # missing or non-numeric seconds attribute: drop the event
            return
        return True
class VersionResultReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    """Reply to a software-version query.

    Sets self.client_info ("name version") and self.os_info; both stay ''
    for error replies.
    """
    name = 'version-result-received'
    base_network_events = []

    def generate(self):
        self.get_id()
        self.get_jid_resource(check_fake_jid=True)
        # stop tracking this pending query id
        if self.id_ in self.conn.version_ids:
            self.conn.version_ids.remove(self.id_)
        self.client_info = ''
        self.os_info = ''
        if self.stanza.getType() == 'error':
            return True
        qp = self.stanza.getTag('query')
        if not qp:
            # Malformed reply without a <query/> payload: drop the event
            # instead of crashing with AttributeError (mirrors the guard in
            # LastResultReceivedEvent).
            return
        if qp.getTag('name'):
            self.client_info += qp.getTag('name').getData()
        if qp.getTag('version'):
            self.client_info += ' ' + qp.getTag('version').getData()
        if qp.getTag('os'):
            self.os_info += qp.getTag('os').getData()
        return True
class TimeResultReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    """Reply to an entity-time query.

    On success self.time_info holds the remote party's local time formatted
    with '%c'; on error or malformed replies it stays ''.
    """
    name = 'time-result-received'
    base_network_events = []

    def generate(self):
        self.get_id()
        self.get_jid_resource(check_fake_jid=True)
        # stop tracking this pending query id
        if self.id_ in self.conn.entity_time_ids:
            self.conn.entity_time_ids.remove(self.id_)
        self.time_info = ''
        if self.stanza.getType() == 'error':
            return True
        qp = self.stanza.getTag('time')
        if not qp:
            # wrong answer
            return
        # Guard against replies missing the mandatory <tzo/>/<utc/> children;
        # the old code crashed with AttributeError on such stanzas.
        tzo_tag = qp.getTag('tzo')
        utc_tag = qp.getTag('utc')
        if tzo_tag is None or utc_tag is None:
            # incomplete answer
            return
        tzo = tzo_tag.getData()
        if tzo.lower() == 'z':
            tzo = '0:0'
        tzoh, tzom = tzo.split(':')
        utc_time = utc_tag.getData()
        ZERO = datetime.timedelta(0)

        class UTC(datetime.tzinfo):
            def utcoffset(self, dt):
                return ZERO
            def tzname(self, dt):
                return "UTC"
            def dst(self, dt):
                return ZERO

        class contact_tz(datetime.tzinfo):
            def utcoffset(self, dt):
                return datetime.timedelta(hours=int(tzoh), minutes=int(tzom))
            def tzname(self, dt):
                return "remote timezone"
            def dst(self, dt):
                return ZERO

        # accept both second and sub-second UTC timestamp formats
        try:
            t = datetime.datetime.strptime(utc_time, '%Y-%m-%dT%H:%M:%SZ')
        except ValueError:
            try:
                t = datetime.datetime.strptime(utc_time,
                    '%Y-%m-%dT%H:%M:%S.%fZ')
            except ValueError as e:
                log.info('Wrong time format: %s' % str(e))
                return
        t = t.replace(tzinfo=UTC())
        self.time_info = t.astimezone(contact_tz()).strftime('%c')
        return True
class GMailQueryReceivedEvent(nec.NetworkIncomingEvent):
    """Parse a gmail <mailbox/> notification into gmail_messages_list."""
    name = 'gmail-notify'
    base_network_events = []
    def generate(self):
        if not self.stanza.getTag('mailbox'):
            return
        mb = self.stanza.getTag('mailbox')
        if not mb.getAttr('url'):
            return
        self.conn.gmail_url = mb.getAttr('url')
        if mb.getNamespace() != nbxmpp.NS_GMAILNOTIFY:
            return
        self.newmsgs = mb.getAttr('total-matched')
        if not self.newmsgs:
            return
        if self.newmsgs == '0':
            return
        # there are new messages
        self.gmail_messages_list = []
        if mb.getTag('mail-thread-info'):
            gmail_messages = mb.getTags('mail-thread-info')
            for gmessage in gmail_messages:
                # collect the display form of every still-unread sender
                unread_senders = []
                for sender in gmessage.getTag('senders').getTags(
                        'sender'):
                    if sender.getAttr('unread') != '1':
                        continue
                    if sender.getAttr('name'):
                        unread_senders.append(sender.getAttr('name') + \
                            '< ' + sender.getAttr('address') + '>')
                    else:
                        unread_senders.append(sender.getAttr('address'))
                if not unread_senders:
                    continue
                gmail_subject = gmessage.getTag('subject').getData()
                gmail_snippet = gmessage.getTag('snippet').getData()
                tid = int(gmessage.getAttr('tid'))
                # remember the highest thread id seen so far
                if not self.conn.gmail_last_tid or \
                        tid > self.conn.gmail_last_tid:
                    self.conn.gmail_last_tid = tid
                self.gmail_messages_list.append({
                    'From': unread_senders,
                    'Subject': gmail_subject,
                    'Snippet': gmail_snippet,
                    'url': gmessage.getAttr('url'),
                    'participation': gmessage.getAttr('participation'),
                    'messages': gmessage.getAttr('messages'),
                    'date': gmessage.getAttr('date')})
            self.conn.gmail_last_time = int(mb.getAttr('result-time'))
        self.jid = gajim.get_jid_from_account(self.name)
        log.debug(('You have %s new gmail e-mails on %s.') % (self.newmsgs,
            self.jid))
        return True
class RosterItemExchangeEvent(nec.NetworkIncomingEvent, HelperEvent):
    """Roster item exchange: build {jid: [name, groups]} of suggested items.

    Items the user already has (subscription both/to with identical groups)
    are skipped; the event is only dispatched when something remains.
    """
    name = 'roster-item-exchange-received'
    base_network_events = []
    def generate(self):
        self.get_id()
        self.get_jid_resource()
        self.exchange_items_list = {}
        # NOTE(review): this getTag('x') has no namespace filter while the
        # loop below uses NS_ROSTERX -- confirm they select the same tag
        items_list = self.stanza.getTag('x').getChildren()
        if not items_list:
            return
        self.action = items_list[0].getAttr('action')
        if self.action is None:
            self.action = 'add'
        for item in self.stanza.getTag('x', namespace=nbxmpp.NS_ROSTERX).\
                getChildren():
            try:
                jid = helpers.parse_jid(item.getAttr('jid'))
            except helpers.InvalidFormat:
                log.warning('Invalid JID: %s, ignoring it' % item.getAttr('jid'))
                continue
            name = item.getAttr('name')
            contact = gajim.contacts.get_contact(self.conn.name, jid)
            groups = []
            same_groups = True
            for group in item.getTags('group'):
                groups.append(group.getData())
                # check that all suggested groups are in the groups we have for
                # this contact
                # NOTE(review): 'group' here is a tag object, not its string
                # data -- confirm the membership test is intended
                if not contact or group not in contact.groups:
                    same_groups = False
            if contact:
                # check that all groups we have for this contact are in the
                # suggested groups
                for group in contact.groups:
                    if group not in groups:
                        same_groups = False
                if contact.sub in ('both', 'to') and same_groups:
                    continue
            self.exchange_items_list[jid] = []
            self.exchange_items_list[jid].append(name)
            self.exchange_items_list[jid].append(groups)
        if self.exchange_items_list:
            return True
# Simple marker events for incoming requests: no extra parsing is done
# here, the raw stanza is carried through to the handlers.
class VersionRequestEvent(nec.NetworkIncomingEvent):
    name = 'version-request-received'
    base_network_events = []

class LastRequestEvent(nec.NetworkIncomingEvent):
    name = 'last-request-received'
    base_network_events = []

class TimeRequestEvent(nec.NetworkIncomingEvent):
    name = 'time-request-received'
    base_network_events = []

class TimeRevisedRequestEvent(nec.NetworkIncomingEvent):
    name = 'time-revised-request-received'
    base_network_events = []
class RosterReceivedEvent(nec.NetworkIncomingEvent):
    """Roster arrival, either from the server (xmpp_roster set) or the DB.

    Sets self.roster ({jid: item-info}), self.version and
    self.received_from_server.
    """
    name = 'roster-received'
    base_network_events = []
    def generate(self):
        if hasattr(self, 'xmpp_roster'):
            self.version = self.xmpp_roster.version
            self.received_from_server = self.xmpp_roster.received_from_server
            self.roster = {}
            raw_roster = self.xmpp_roster.getRaw()
            our_jid = gajim.get_jid_from_account(self.conn.name)
            for jid in raw_roster:
                try:
                    j = helpers.parse_jid(jid)
                except Exception:
                    print(_('JID %s is not RFC compliant. It will not be added '
                        'to your roster. Use roster management tools such as '
                        'http://jru.jabberstudio.org/ to remove it') % jid,
                        file=sys.stderr)
                else:
                    infos = raw_roster[jid]
                    # an item with no subscription, no pending ask, no name
                    # and no groups carries no information at all
                    if jid != our_jid and (not infos['subscription'] or \
                            infos['subscription'] == 'none') and (not infos['ask'] or \
                            infos['ask'] == 'none') and not infos['name'] and \
                            not infos['groups']:
                        # remove this useless item, it won't be shown in roster
                        # anyway
                        self.conn.connection.getRoster().delItem(jid)
                    elif jid != our_jid: # don't add our jid
                        self.roster[j] = raw_roster[jid]
        else:
            # Roster comes from DB
            self.received_from_server = False
            self.version = gajim.config.get_per('accounts', self.conn.name,
                'roster_version')
            self.roster = gajim.logger.get_roster(gajim.get_jid_from_account(
                self.conn.name))
        return True
class RosterSetReceivedEvent(nec.NetworkIncomingEvent):
    """Incoming roster push: collect the changed items and ack the IQ."""
    name = 'roster-set-received'
    base_network_events = []
    def generate(self):
        self.version = self.stanza.getTagAttr('query', 'ver')
        self.items = {}
        for item in self.stanza.getTag('query').getChildren():
            try:
                jid = helpers.parse_jid(item.getAttr('jid'))
            except helpers.InvalidFormat:
                log.warning('Invalid JID: %s, ignoring it' % item.getAttr('jid'))
                continue
            name = item.getAttr('name')
            sub = item.getAttr('subscription')
            ask = item.getAttr('ask')
            groups = []
            for group in item.getTags('group'):
                groups.append(group.getData())
            self.items[jid] = {'name': name, 'sub': sub, 'ask': ask,
                'groups': groups}
        # acknowledge the push while the connection is still up
        if self.conn.connection and self.conn.connected > 1:
            reply = nbxmpp.Iq(typ='result', attrs={'id': self.stanza.getID()},
                to=self.stanza.getFrom(), frm=self.stanza.getTo(), xmlns=None)
            self.conn.connection.send(reply)
        return True
class RosterInfoEvent(nec.NetworkIncomingEvent):
    """Marker event; carries whatever attributes the emitter supplied."""
    name = 'roster-info'
    base_network_events = []
class MucOwnerReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    """MUC owner reply: extract the room configuration data form."""
    name = 'muc-owner-received'
    base_network_events = []
    def generate(self):
        self.get_jid_resource()
        qp = self.stanza.getQueryPayload()
        self.form_node = None
        for q in qp:
            if q.getNamespace() == nbxmpp.NS_DATA:
                self.form_node = q
                # NOTE(review): self.dataform is only set when an NS_DATA
                # child exists, and the last one wins -- confirm handlers
                # tolerate a missing dataform attribute
                self.dataform = dataforms.ExtendForm(node=self.form_node)
        return True
class MucAdminReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    """MUC admin reply: build {jid: {affiliation[, nick][, role][, reason]}}."""
    name = 'muc-admin-received'
    base_network_events = []
    def generate(self):
        self.get_jid_resource()
        items = self.stanza.getTag('query',
            namespace=nbxmpp.NS_MUC_ADMIN).getTags('item')
        self.users_dict = {}
        for item in items:
            # only items carrying both jid and affiliation are usable
            if item.has_attr('jid') and item.has_attr('affiliation'):
                try:
                    jid = helpers.parse_jid(item.getAttr('jid'))
                except helpers.InvalidFormat:
                    log.warning('Invalid JID: %s, ignoring it' % \
                        item.getAttr('jid'))
                    continue
                affiliation = item.getAttr('affiliation')
                self.users_dict[jid] = {'affiliation': affiliation}
                if item.has_attr('nick'):
                    self.users_dict[jid]['nick'] = item.getAttr('nick')
                if item.has_attr('role'):
                    self.users_dict[jid]['role'] = item.getAttr('role')
                reason = item.getTagData('reason')
                if reason:
                    self.users_dict[jid]['reason'] = reason
        return True
class PrivateStorageReceivedEvent(nec.NetworkIncomingEvent):
    name = 'private-storage-received'
    base_network_events = []

    def generate(self):
        """Pull the <storage/> node (and its namespace) out of the reply."""
        self.storage_node = self.stanza.getTag('query').getTag('storage')
        if self.storage_node:
            self.namespace = self.storage_node.getNamespace()
        return True
class BookmarksHelper:
    """Mixin that parses <conference/> bookmarks out of self.storage_node."""
    def parse_bookmarks(self):
        """Fill self.bookmarks with one dict per conference, deduped by jid."""
        self.bookmarks = []
        confs = self.storage_node.getTags('conference')
        for conf in confs:
            autojoin_val = conf.getAttr('autojoin')
            if autojoin_val is None: # not there (it's optional)
                autojoin_val = False
            minimize_val = conf.getAttr('minimize')
            if minimize_val is None: # not there (it's optional)
                minimize_val = False
            # fall back to the legacy element name
            print_status = conf.getTagData('print_status')
            if not print_status:
                print_status = conf.getTagData('show_status')
            try:
                jid = helpers.parse_jid(conf.getAttr('jid'))
            except helpers.InvalidFormat:
                log.warning('Invalid JID: %s, ignoring it' % conf.getAttr('jid'))
                continue
            bm = {'name': conf.getAttr('name'),
                'jid': jid,
                'autojoin': autojoin_val,
                'minimize': minimize_val,
                'password': conf.getTagData('password'),
                'nick': conf.getTagData('nick'),
                'print_status': print_status}
            # keep only the first bookmark seen for each jid
            bm_jids = [b['jid'] for b in self.bookmarks]
            if bm['jid'] not in bm_jids:
                self.bookmarks.append(bm)
class PrivateStorageBookmarksReceivedEvent(nec.NetworkIncomingEvent,
        BookmarksHelper):
    name = 'private-storage-bookmarks-received'
    base_network_events = ['private-storage-received']

    def generate(self):
        """Parse bookmarks when the private storage uses the bookmarks NS."""
        base = self.base_event
        self.conn = base.conn
        self.storage_node = base.storage_node
        if base.namespace == nbxmpp.NS_BOOKMARKS:
            self.parse_bookmarks()
            return True
class BookmarksReceivedEvent(nec.NetworkIncomingEvent):
    name = 'bookmarks-received'
    base_network_events = ['private-storage-bookmarks-received',
                           'pubsub-bookmarks-received']

    def generate(self):
        """Re-emit parsed bookmarks from whichever storage produced them."""
        base = self.base_event
        self.conn = base.conn
        self.bookmarks = base.bookmarks
        return True
class PrivateStorageRosternotesReceivedEvent(nec.NetworkIncomingEvent):
    """Parse roster annotations (NS_ROSTERNOTES) from private storage."""
    name = 'private-storage-rosternotes-received'
    base_network_events = ['private-storage-received']
    def generate(self):
        self.conn = self.base_event.conn
        if self.base_event.namespace != nbxmpp.NS_ROSTERNOTES:
            return
        notes = self.base_event.storage_node.getTags('note')
        self.annotations = {}
        for note in notes:
            try:
                jid = helpers.parse_jid(note.getAttr('jid'))
            except helpers.InvalidFormat:
                log.warning('Invalid JID: %s, ignoring it' % note.getAttr('jid'))
                continue
            annotation = note.getData()
            self.annotations[jid] = annotation
        # only dispatch when at least one annotation survived parsing
        if self.annotations:
            return True
class RosternotesReceivedEvent(nec.NetworkIncomingEvent):
    name = 'rosternotes-received'
    base_network_events = ['private-storage-rosternotes-received']

    def generate(self):
        """Re-emit the annotations parsed by the base event."""
        base = self.base_event
        self.conn = base.conn
        self.annotations = base.annotations
        return True
class PubsubReceivedEvent(nec.NetworkIncomingEvent):
    """Unwrap a pubsub stanza layer by layer down to the first payload child.

    Each guard drops the event when the expected layer is missing; the
    attributes set before the failing guard remain set.
    """
    name = 'pubsub-received'
    base_network_events = []
    def generate(self):
        self.pubsub_node = self.stanza.getTag('pubsub')
        if not self.pubsub_node:
            return
        self.items_node = self.pubsub_node.getTag('items')
        if not self.items_node:
            return
        self.item_node = self.items_node.getTag('item')
        if not self.item_node:
            return
        children = self.item_node.getChildren()
        if not children:
            return
        # only the first child of <item/> is treated as the payload
        self.node = children[0]
        return True
class PubsubBookmarksReceivedEvent(nec.NetworkIncomingEvent, BookmarksHelper):
    name = 'pubsub-bookmarks-received'
    base_network_events = ['pubsub-received']

    def generate(self):
        """Parse bookmarks delivered over pubsub (bookmarks namespace only)."""
        self.conn = self.base_event.conn
        self.storage_node = self.base_event.node
        if self.storage_node.getNamespace() != nbxmpp.NS_BOOKMARKS:
            return
        self.parse_bookmarks()
        return True
class SearchFormReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    name = 'search-form-received'
    base_network_events = []

    def generate(self):
        """Extract the search form: a data form, or legacy name->value fields."""
        self.get_jid_resource()
        self.data = None
        self.is_dataform = False
        query = self.stanza.getTag('query', namespace=nbxmpp.NS_SEARCH)
        if not query:
            return True
        self.data = query.getTag('x', namespace=nbxmpp.NS_DATA)
        if self.data:
            self.is_dataform = True
            return True
        # legacy search: plain child elements, collected as name -> value
        self.data = {field.getName(): field.getData()
                     for field in self.stanza.getQueryPayload()}
        return True
class SearchResultReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    """Search result: either a data form or a list of per-item field dicts."""
    name = 'search-result-received'
    base_network_events = []
    def generate(self):
        self.get_jid_resource()
        self.data = None
        self.is_dataform = False
        tag = self.stanza.getTag('query', namespace=nbxmpp.NS_SEARCH)
        if not tag:
            # no <query/> payload: dispatch anyway with data=None
            return True
        self.data = tag.getTag('x', namespace=nbxmpp.NS_DATA)
        if self.data:
            self.is_dataform = True
            return True
        self.data = []
        for item in tag.getTags('item'):
            # We also show attributes. jid is there
            # NOTE(review): this reuses (and mutates) item.attrs in place
            f = item.attrs
            for i in item.getPayload():
                f[i.getName()] = i.getData()
            self.data.append(f)
        return True
class IqErrorReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    name = 'iq-error-received'
    base_network_events = []

    def generate(self):
        """Record the error message/code along with the sender identity."""
        self.get_id()
        self.get_jid_resource(check_fake_jid=True)
        stanza = self.stanza
        self.errmsg = stanza.getErrorMsg()
        self.errcode = stanza.getErrorCode()
        return True
class GmailNewMailReceivedEvent(nec.NetworkIncomingEvent):
    name = 'gmail-new-mail-received'
    base_network_events = []

    def generate(self):
        """Dispatch only for <new-mail/> in the gmail-notify namespace."""
        new_mail = self.stanza.getTag('new-mail')
        if new_mail and new_mail.getNamespace() == nbxmpp.NS_GMAILNOTIFY:
            return True
class PingReceivedEvent(nec.NetworkIncomingEvent):
    """Marker event for an incoming ping; the stanza is passed through."""
    name = 'ping-received'
    base_network_events = []

class StreamReceivedEvent(nec.NetworkIncomingEvent):
    """Marker event for a stream-level stanza; base of the stream-* events."""
    name = 'stream-received'
    base_network_events = []
class StreamConflictReceivedEvent(nec.NetworkIncomingEvent):
    name = 'stream-conflict-received'
    base_network_events = ['stream-received']

    def generate(self):
        """Fire only when the stream stanza carries a <conflict/> child."""
        if not self.base_event.stanza.getTag('conflict'):
            return
        self.conn = self.base_event.conn
        return True
class StreamOtherHostReceivedEvent(nec.NetworkIncomingEvent):
    """Handle a <see-other-host/> redirect; sets self.redirected host/port."""
    name = 'stream-other-host-received'
    base_network_events = ['stream-received']
    def generate(self):
        self.conn = self.base_event.conn
        self.stanza = self.base_event.stanza
        other_host = self.stanza.getTag('see-other-host')
        # only follow the redirect when the current transport is encrypted
        if other_host and self.conn._current_type in ('ssl', 'tls'):
            host = other_host.getData()
            if ':' in host:
                host_l = host.split(':', 1)
                h = host_l[0]
                p = host_l[1]
            else:
                h = host
                p = 5222
            # strip IPv6 literal brackets
            if h.startswith('[') and h.endswith(']'):
                h = h[1:-1]
            # NOTE(review): 'port' is a str when parsed from "host:port" but
            # an int (5222) otherwise -- confirm consumers accept both
            self.redirected = {'host': h, 'port': p}
        return True
class PresenceHelperEvent:
    """Mixin normalizing presence <show/> and type into self.show/self.ptype."""

    def _generate_show(self):
        """Normalize <show/>: unknown values become '', absent ones map to
        'online'/'offline' depending on the (already generated) ptype."""
        self.show = self.stanza.getShow()
        if self.show not in ('chat', 'away', 'xa', 'dnd'):
            self.show = '' # We ignore unknown show
        if self.ptype == 'unavailable':
            self.show = 'offline'
        elif not self.ptype and not self.show:
            self.show = 'online'

    def _generate_ptype(self):
        """Normalize presence type: anything outside the RFC set, including
        the explicit 'available', becomes None (= available)."""
        rfc_types = ('unavailable', 'error', 'subscribe', 'subscribed',
                     'unsubscribe', 'unsubscribed')
        ptype = self.stanza.getType()
        if ptype == 'available' or (ptype and ptype not in rfc_types):
            ptype = None
        self.ptype = ptype
class PresenceReceivedEvent(nec.NetworkIncomingEvent, HelperEvent,
        PresenceHelperEvent):
    """Parse an incoming presence and fan it out to more specific events
    (groupchat, subscribe/subscribed/unsubscribed, our own show)."""
    name = 'presence-received'
    base_network_events = ['raw-pres-received']

    def _generate_keyID(self, sig_tag):
        """Verify the GPG signature (if any) against self.status."""
        self.keyID = ''
        if sig_tag and self.conn.USE_GPG and self.ptype != 'error':
            # error presences contain our own signature
            # verify
            sig_msg = sig_tag.getData()
            self.keyID = self.conn.gpg.verify(self.status, sig_msg)
            self.keyID = helpers.prepare_and_validate_gpg_keyID(self.conn.name,
                self.jid, self.keyID)

    def _generate_prio(self):
        """Set self.prio from the stanza priority, defaulting to 0."""
        self.prio = self.stanza.getPriority()
        try:
            self.prio = int(self.prio)
        except Exception:
            self.prio = 0

    def generate(self):
        self.conn = self.base_event.conn
        self.stanza = self.base_event.stanza
        self.need_add_in_roster = False
        self.need_redraw = False
        self.popup = False # Do we want to open chat window ?
        if not self.conn or self.conn.connected < 2:
            log.debug('account is no more connected')
            return
        self._generate_ptype()
        try:
            self.get_jid_resource()
        except Exception:
            log.warning('Invalid JID: %s, ignoring it' % self.stanza.getFrom())
            return
        jid_list = gajim.contacts.get_jid_list(self.conn.name)
        self.timestamp = None
        self.get_id()
        self.is_gc = False # is it a GC presence ?
        sig_tag = None
        self.avatar_sha = None
        # XEP-0172 User Nickname
        self.user_nick = self.stanza.getTagData('nick') or ''
        self.contact_nickname = None
        self.transport_auto_auth = False
        # XEP-0203
        delay_tag = self.stanza.getTag('delay', namespace=nbxmpp.NS_DELAY2)
        if delay_tag:
            self._generate_timestamp(self.stanza.getTimestamp2())
        # inspect every <x/> extension on the presence
        xtags = self.stanza.getTags('x')
        for x in xtags:
            namespace = x.getNamespace()
            if namespace.startswith(nbxmpp.NS_MUC):
                self.is_gc = True
            elif namespace == nbxmpp.NS_SIGNED:
                sig_tag = x
            elif namespace == nbxmpp.NS_VCARD_UPDATE:
                self.avatar_sha = x.getTagData('photo')
                self.contact_nickname = x.getTagData('nickname')
            elif namespace == nbxmpp.NS_DELAY and not self.timestamp:
                # XEP-0091
                self._generate_timestamp(self.stanza.getTimestamp())
            elif namespace == 'http://delx.cjb.net/protocol/roster-subsync':
                # see http://trac.gajim.org/ticket/326
                agent = gajim.get_server_from_jid(self.jid)
                if self.conn.connection.getRoster().getItem(agent):
                    # to be sure it's a transport contact
                    self.transport_auto_auth = True
        if not self.is_gc and self.id_ and self.id_.startswith('gajim_muc_') \
                and self.ptype == 'error':
            # Error presences may not include sent stanza, so we don't detect
            # it's a muc presence. So detect it by ID
            h = hmac.new(self.conn.secret_hmac, self.jid.encode('utf-8')).\
                hexdigest()[:6]
            if self.id_.split('_')[-1] == h:
                self.is_gc = True
        self.status = self.stanza.getStatus() or ''
        self._generate_show()
        self._generate_prio()
        self._generate_keyID(sig_tag)
        self.errcode = self.stanza.getErrorCode()
        self.errmsg = self.stanza.getErrorMsg()
        if self.is_gc:
            # groupchat presences are handled entirely by their own event
            gajim.nec.push_incoming_event(GcPresenceReceivedEvent(None,
                conn=self.conn, stanza=self.stanza, presence_obj=self))
            return
        if self.ptype == 'subscribe':
            gajim.nec.push_incoming_event(SubscribePresenceReceivedEvent(None,
                conn=self.conn, stanza=self.stanza, presence_obj=self))
        elif self.ptype == 'subscribed':
            # BE CAREFUL: no con.updateRosterItem() in a callback
            gajim.nec.push_incoming_event(SubscribedPresenceReceivedEvent(None,
                conn=self.conn, stanza=self.stanza, presence_obj=self))
        elif self.ptype == 'unsubscribe':
            log.debug(_('unsubscribe request from %s') % self.jid)
        elif self.ptype == 'unsubscribed':
            gajim.nec.push_incoming_event(UnsubscribedPresenceReceivedEvent(
                None, conn=self.conn, stanza=self.stanza, presence_obj=self))
        elif self.ptype == 'error':
            if self.errcode != '409': # conflict # See #5120
                self.show = 'error'
                self.status = self.errmsg
            return True
        if not self.ptype or self.ptype == 'unavailable':
            our_jid = gajim.get_jid_from_account(self.conn.name)
            if self.jid == our_jid and self.resource == \
                    self.conn.server_resource:
                # We got our own presence
                gajim.nec.push_incoming_event(OurShowEvent(None, conn=self.conn,
                    show=self.show))
            elif self.jid in jid_list or self.jid == our_jid:
                return True
class ZeroconfPresenceReceivedEvent(nec.NetworkIncomingEvent):
    name = 'presence-received'
    base_network_events = []

    def generate(self):
        """Fill in the attribute set PresenceReceivedEvent would provide,
        with fixed defaults suitable for the local (zeroconf) network."""
        self.jid, self.resource = gajim.get_room_and_nick_from_fjid(self.fjid)
        self.resource = 'local'
        self.ptype = 'unavailable' if self.show == 'offline' else None
        # static defaults: zeroconf presences carry none of this information
        self.prio = 0
        self.keyID = None
        self.timestamp = 0
        self.contact_nickname = None
        self.avatar_sha = None
        self.need_add_in_roster = False
        self.need_redraw = False
        self.is_gc = False
        self.user_nick = ''
        self.transport_auto_auth = False
        self.errcode = None
        self.errmsg = ''
        self.popup = False # Do we want to open chat window ?
        return True
class GcPresenceReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    # Presence received from an occupant of a groupchat (MUC) room.
    name = 'gc-presence-received'
    base_network_events = []

    def generate(self):
        """
        Extract occupant data (nick, show, status, role, affiliation, ...)
        from the parsed presence object and the raw stanza.

        Returns True when the event should be dispatched; returns None to
        drop it (ptype other than available/unavailable/error).
        """
        self.ptype = self.presence_obj.ptype
        self.fjid = self.presence_obj.fjid
        self.jid = self.presence_obj.jid
        self.room_jid = self.presence_obj.jid
        # In MUC the resource part of the full jid is the occupant nick.
        self.nick = self.presence_obj.resource
        self.show = self.presence_obj.show
        self.status = self.presence_obj.status
        self.avatar_sha = self.presence_obj.avatar_sha
        self.errcode = self.presence_obj.errcode
        self.errmsg = self.presence_obj.errmsg
        self.errcon = self.stanza.getError()
        self.get_gc_control()
        self.gc_contact = gajim.contacts.get_gc_contact(self.conn.name,
            self.room_jid, self.nick)

        if self.ptype == 'error':
            return True

        if self.ptype and self.ptype != 'unavailable':
            return
        # Optionally log the occupant's status change to the history DB,
        # appending the real jid when the room discloses it.
        if gajim.config.get('log_contact_status_changes') and \
        gajim.config.should_log(self.conn.name, self.room_jid):
            if self.gc_contact:
                jid = self.gc_contact.jid
            else:
                jid = self.stanza.getJid()
            st = self.status
            if jid:
                # we know real jid, save it in db
                st += ' (%s)' % jid
            try:
                gajim.logger.write('gcstatus', self.fjid, st,
                    self.show)
            except exceptions.PysqliteOperationalError as e:
                self.conn.dispatch('DB_ERROR', (_('Disk Write Error'),
                    str(e)))
            except exceptions.DatabaseMalformed:
                pritext = _('Database Error')
                sectext = _('The database file (%s) cannot be read. '
                    'Try to repair it (see '
                    'http://trac.gajim.org/wiki/DatabaseBackup) or '
                    'remove it (all history will be lost).') % \
                    LOG_DB_PATH
                self.conn.dispatch('DB_ERROR', (pritext, sectext))
        if self.avatar_sha == '':
            # contact has no avatar; drop any cached avatar files
            puny_nick = helpers.sanitize_filename(self.nick)
            gajim.interface.remove_avatar_files(self.room_jid, puny_nick)
        # NOTE: if it's a gc presence, don't ask vcard here.
        # We may ask it to real jid in gui part.
        self.status_code = []
        ns_muc_user_x = self.stanza.getTag('x', namespace=nbxmpp.NS_MUC_USER)
        if ns_muc_user_x:
            destroy = ns_muc_user_x.getTag('destroy')
        else:
            destroy = None
        if ns_muc_user_x and destroy:
            # Room has been destroyed. see
            # http://www.xmpp.org/extensions/xep-0045.html#destroyroom
            self.reason = _('Room has been destroyed')
            r = destroy.getTagData('reason')
            if r:
                self.reason += ' (%s)' % r
            if destroy.getAttr('jid'):
                # The destroy element may point to a replacement room.
                try:
                    jid = helpers.parse_jid(destroy.getAttr('jid'))
                    self.reason += '\n' + \
                        _('You can join this room instead: %s') % jid
                except helpers.InvalidFormat:
                    pass
            self.status_code = ['destroyed']
        else:
            self.reason = self.stanza.getReason()
            conditions = self.stanza.getStatusConditions()
            if conditions:
                # Map RFC error conditions to legacy MUC status codes.
                self.status_code = []
                for condition in conditions:
                    if condition in CONDITION_TO_CODE:
                        self.status_code.append(CONDITION_TO_CODE[condition])
            else:
                self.status_code = self.stanza.getStatusCode()
        self.role = self.stanza.getRole()
        self.affiliation = self.stanza.getAffiliation()
        self.real_jid = self.stanza.getJid()
        self.actor = self.stanza.getActor()
        self.new_nick = self.stanza.getNewNick()
        return True
class SubscribePresenceReceivedEvent(nec.NetworkIncomingEvent):
    """A contact asked to subscribe to our presence."""
    name = 'subscribe-presence-received'
    base_network_events = []

    def generate(self):
        # Mirror the relevant fields of the parsed presence object.
        src = self.presence_obj
        self.jid = src.jid
        self.fjid = src.fjid
        self.status = src.status
        self.transport_auto_auth = src.transport_auto_auth
        self.user_nick = src.user_nick
        return True
class SubscribedPresenceReceivedEvent(nec.NetworkIncomingEvent):
    """A contact accepted our subscription request."""
    name = 'subscribed-presence-received'
    base_network_events = []

    def generate(self):
        src = self.presence_obj
        self.jid = src.jid
        self.resource = src.resource
        return True
class UnsubscribedPresenceReceivedEvent(nec.NetworkIncomingEvent):
    """A contact cancelled (or denied) our presence subscription."""
    name = 'unsubscribed-presence-received'
    base_network_events = []

    def generate(self):
        # Only the bare jid of the sender is relevant here.
        self.jid = self.presence_obj.jid
        return True
class OurShowEvent(nec.NetworkIncomingEvent):
    # Marker event: our own show/status changed (payload set by the emitter).
    name = 'our-show'
    base_network_events = []

class BeforeChangeShowEvent(nec.NetworkIncomingEvent):
    # Marker event emitted just before we change our own show/status.
    name = 'before-change-show'
    base_network_events = []
class MessageReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    # Dispatched for every incoming <message/> stanza. Pre-parses address
    # headers, message carbons, invitations, session negotiation and
    # encryption markers before the message reaches the rest of the app.
    name = 'message-received'
    base_network_events = ['raw-message-received']

    def generate(self):
        """
        Parse the raw message stanza.

        Returns True when normal message processing should continue, False
        for forwarded E2E-encrypted messages, and None (event dropped) when
        the stanza was fully handled here (roster exchange, http-auth,
        feature/e-session negotiation) or its sender jid is invalid.
        """
        self.conn = self.base_event.conn
        self.stanza = self.base_event.stanza
        self.get_id()
        self.forwarded = False
        self.sent = False
        account = self.conn.name

        # check if the message is a roster item exchange (XEP-0144)
        if self.stanza.getTag('x', namespace=nbxmpp.NS_ROSTERX):
            gajim.nec.push_incoming_event(RosterItemExchangeEvent(None,
                conn=self.conn, stanza=self.stanza))
            return

        # check if the message is a XEP-0070 confirmation request
        if self.stanza.getTag('confirm', namespace=nbxmpp.NS_HTTP_AUTH):
            gajim.nec.push_incoming_event(HttpAuthReceivedEvent(None,
                conn=self.conn, stanza=self.stanza))
            return

        try:
            self.get_jid_resource()
        except helpers.InvalidFormat:
            gajim.nec.push_incoming_event(InformationEvent(None, conn=self.conn,
                level='error', pri_txt=_('Invalid Jabber ID'),
                sec_txt=_('A message from a non-valid JID arrived, it has been '
                'ignored.')))
            return

        # Extended addressing (XEP-0033): an 'ofrom' address may override
        # the sender, but only when the stanza came from our own bare jid.
        address_tag = self.stanza.getTag('addresses',
            namespace=nbxmpp.NS_ADDRESS)
        # Be sure it comes from one of our resource, else ignore address element
        if address_tag and self.jid == gajim.get_jid_from_account(account):
            address = address_tag.getTag('address', attrs={'type': 'ofrom'})
            if address:
                try:
                    self.fjid = helpers.parse_jid(address.getAttr('jid'))
                except helpers.InvalidFormat:
                    log.warning('Invalid JID: %s, ignoring it' %
                        address.getAttr('jid'))
                    return
                self.jid = gajim.get_jid_without_resource(self.fjid)

        # Message carbons (XEP-0280): unwrap 'sent'/'received' wrappers, but
        # only when they come from our own bare jid.
        carbon_marker = self.stanza.getTag('sent', namespace=nbxmpp.NS_CARBONS)
        if not carbon_marker:
            carbon_marker = self.stanza.getTag('received',
                namespace=nbxmpp.NS_CARBONS)
        # Be sure it comes from one of our resource, else ignore forward element
        if carbon_marker and self.jid == gajim.get_jid_from_account(account):
            forward_tag = carbon_marker.getTag('forwarded',
                namespace=nbxmpp.NS_FORWARD)
            if forward_tag:
                msg = forward_tag.getTag('message')
                self.stanza = nbxmpp.Message(node=msg)
                if carbon_marker.getName() == 'sent':
                    # Carbon of a message we sent from another resource:
                    # swap from/to so it is displayed as outgoing.
                    to = self.stanza.getTo()
                    frm = self.stanza.getFrom()
                    if not frm:
                        frm = gajim.get_jid_from_account(account)
                    self.stanza.setTo(frm)
                    self.stanza.setFrom(to)
                    self.sent = True
                try:
                    self.get_jid_resource()
                except helpers.InvalidFormat:
                    gajim.nec.push_incoming_event(InformationEvent(None,
                        conn=self.conn, level='error',
                        pri_txt=_('Invalid Jabber ID'),
                        sec_txt=_('A message from a non-valid JID arrived, it '
                        'has been ignored.')))
                    return
                self.forwarded = True

        self.enc_tag = self.stanza.getTag('x', namespace=nbxmpp.NS_ENCRYPTED)

        self.invite_tag = None
        self.decline_tag = None
        if not self.enc_tag:
            # Direct invitation?
            self.invite_tag = self.stanza.getTag('x',
                namespace=nbxmpp.NS_CONFERENCE)
            # Mediated invitation?
            if not self.invite_tag:
                self.invite_tag = self.stanza.getTag('x',
                    namespace=nbxmpp.NS_MUC_USER)
                if self.invite_tag and not self.invite_tag.getTag('invite'):
                    self.invite_tag = None

            self.decline_tag = self.stanza.getTag('x',
                namespace=nbxmpp.NS_MUC_USER)
            if self.decline_tag and not self.decline_tag.getTag('decline'):
                self.decline_tag = None

        self.thread_id = self.stanza.getThread()
        self.mtype = self.stanza.getType()
        if not self.mtype or self.mtype not in ('chat', 'groupchat', 'error'):
            self.mtype = 'normal'

        self.msgtxt = self.stanza.getBody()

        self.get_gc_control()

        if self.gc_control and self.jid == self.fjid:
            if self.mtype == 'error':
                self.msgtxt = _('error while sending %(message)s ( %(error)s )'\
                    ) % {'message': self.msgtxt,
                    'error': self.stanza.getErrorMsg()}
                if self.stanza.getTag('html'):
                    self.stanza.delChild('html')
            # message from a gc without a resource
            self.mtype = 'groupchat'

        self.session = None
        if self.mtype != 'groupchat':
            if gajim.interface.is_pm_contact(self.fjid, account) and \
            self.mtype == 'error':
                self.session = self.conn.find_session(self.fjid, self.thread_id)
                if not self.session:
                    self.session = self.conn.get_latest_session(self.fjid)
                if not self.session:
                    self.session = self.conn.make_new_session(self.fjid,
                        self.thread_id, type_='pm')
            else:
                self.session = self.conn.get_or_create_session(self.fjid,
                    self.thread_id)

            if self.thread_id and not self.session.received_thread_id:
                self.session.received_thread_id = True

            self.session.last_receive = time_time()

        # check if the message is a XEP-0020 feature negotiation request
        if not self.forwarded and self.stanza.getTag('feature',
        namespace=nbxmpp.NS_FEATURE):
            # BUGFIX: 'feature' must be bound before the HAVE_PYCRYPTO check;
            # previously it was only assigned in the PyCrypto branch, so the
            # 'else' reply path raised NameError.
            feature = self.stanza.getTag(name='feature',
                namespace=nbxmpp.NS_FEATURE)
            if gajim.HAVE_PYCRYPTO:
                form = nbxmpp.DataForm(node=feature.getTag('x'))
                if form['FORM_TYPE'] == 'urn:xmpp:ssn':
                    self.session.handle_negotiation(form)
            else:
                # No PyCrypto: politely refuse the negotiation.
                reply = self.stanza.buildReply()
                reply.setType('error')
                reply.addChild(feature)
                err = nbxmpp.ErrorNode('service-unavailable', typ='cancel')
                reply.addChild(node=err)
                self.conn.connection.send(reply)
            return

        if not self.forwarded and self.stanza.getTag('init',
        namespace=nbxmpp.NS_ESESSION_INIT):
            init = self.stanza.getTag(name='init',
                namespace=nbxmpp.NS_ESESSION_INIT)
            form = nbxmpp.DataForm(node=init.getTag('x'))
            self.session.handle_negotiation(form)
            return

        self._generate_timestamp(self.stanza.getTimestamp())

        self.encrypted = False
        xep_200_encrypted = self.stanza.getTag('c',
            namespace=nbxmpp.NS_STANZA_CRYPTO)
        if xep_200_encrypted:
            if self.forwarded:
                # Ignore E2E forwarded encrypted messages
                return False
            self.encrypted = 'xep200'

        return True
class ZeroconfMessageReceivedEvent(MessageReceivedEvent):
    """Message event for zeroconf links; resolves the sender from its IP."""
    name = 'message-received'
    base_network_events = []

    def get_jid_resource(self):
        self.fjid = self.stanza.getFrom()
        if self.fjid is None:
            # Stanza carries no 'from': look the sender up by IP address
            # in the zeroconf contact table.
            contacts = self.conn.connection.zeroconf.contacts
            for contact_key in contacts:
                if contacts[contact_key][zeroconf.C_ADDRESS] == self.ip:
                    self.fjid = contact_key
                    break

        self.jid, self.resource = gajim.get_room_and_nick_from_fjid(self.fjid)

    def generate(self):
        # Synthesize the base event the parent generate() expects.
        self.base_event = nec.NetworkIncomingEvent(None, conn=self.conn,
            stanza=self.stanza)
        return super(ZeroconfMessageReceivedEvent, self).generate()
class GcInvitationReceivedEvent(nec.NetworkIncomingEvent):
    """A groupchat invitation, either direct (XEP-0249) or mediated
    (XEP-0045)."""
    name = 'gc-invitation-received'
    base_network_events = []

    def generate(self):
        tag = self.msg_obj.invite_tag
        if tag.getNamespace() == nbxmpp.NS_CONFERENCE:
            # Direct invitation: the room jid is carried by the tag itself.
            try:
                self.room_jid = helpers.parse_jid(tag.getAttr('jid'))
            except helpers.InvalidFormat:
                log.warning('Invalid JID: %s, ignoring it' % tag.getAttr(
                    'jid'))
                return
            self.jid_from = self.msg_obj.fjid
            self.reason = tag.getAttr('reason')
            self.password = tag.getAttr('password')
            self.is_continued = tag.getAttr('continue') == 'true'
        else:
            # Mediated invitation: relayed through the room itself.
            self.room_jid = self.msg_obj.fjid
            item = self.msg_obj.invite_tag.getTag('invite')
            try:
                self.jid_from = helpers.parse_jid(item.getAttr('from'))
            except helpers.InvalidFormat:
                log.warning('Invalid JID: %s, ignoring it' % item.getAttr(
                    'from'))
                return
            self.reason = item.getTagData('reason')
            self.password = tag.getTagData('password')
            self.is_continued = item.getTag('continue') is not None

        if self.room_jid in gajim.gc_connected[self.conn.name] and \
        gajim.gc_connected[self.conn.name][self.room_jid]:
            # We are already in groupchat. Ignore invitation
            return
        jid = gajim.get_jid_without_resource(self.jid_from)
        ignore_unknown = gajim.config.get_per('accounts', self.conn.name,
            'ignore_unknown_contacts')
        if ignore_unknown and not gajim.contacts.get_contacts(self.conn.name,
        jid):
            return

        return True
class GcDeclineReceivedEvent(nec.NetworkIncomingEvent):
    """Somebody declined our mediated groupchat invitation (XEP-0045)."""
    name = 'gc-decline-received'
    base_network_events = []

    def generate(self):
        self.room_jid = self.msg_obj.fjid
        decline = self.msg_obj.decline_tag.getTag('decline')
        try:
            self.jid_from = helpers.parse_jid(decline.getAttr('from'))
        except helpers.InvalidFormat:
            log.warning('Invalid JID: %s, ignoring it' % decline.getAttr(
                'from'))
            return
        jid = gajim.get_jid_without_resource(self.jid_from)
        ignore_unknown = gajim.config.get_per('accounts', self.conn.name,
            'ignore_unknown_contacts')
        if ignore_unknown and not gajim.contacts.get_contacts(self.conn.name,
        jid):
            return
        self.reason = decline.getTagData('reason')
        return True
class DecryptedMessageReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    # Emitted once the message body is available in plaintext; carries all
    # metadata extracted from the (possibly rewritten) stanza.
    name = 'decrypted-message-received'
    base_network_events = []

    def generate(self):
        """
        Copy parsed fields from the originating message event, then extract
        receipts (XEP-0184), security labels (XEP-0258), attention
        (XEP-0224), XHTML, OOB data (XEP-0066) and corrections (XEP-0308).
        Always returns True.
        """
        self.stanza = self.msg_obj.stanza
        self.id_ = self.msg_obj.id_
        self.jid = self.msg_obj.jid
        self.fjid = self.msg_obj.fjid
        self.resource = self.msg_obj.resource
        self.mtype = self.msg_obj.mtype
        self.invite_tag = self.msg_obj.invite_tag
        self.decline_tag = self.msg_obj.decline_tag
        self.thread_id = self.msg_obj.thread_id
        self.msgtxt = self.msg_obj.msgtxt
        self.gc_control = self.msg_obj.gc_control
        self.session = self.msg_obj.session
        self.timestamp = self.msg_obj.timestamp
        self.encrypted = self.msg_obj.encrypted
        self.forwarded = self.msg_obj.forwarded
        self.sent = self.msg_obj.sent
        self.popup = False
        self.msg_id = None # id in log database
        self.attention = False # XEP-0224
        self.correct_id = None # XEP-0308

        self.receipt_request_tag = self.stanza.getTag('request',
            namespace=nbxmpp.NS_RECEIPTS)
        self.receipt_received_tag = self.stanza.getTag('received',
            namespace=nbxmpp.NS_RECEIPTS)

        self.subject = self.stanza.getSubject()

        self.displaymarking = None
        self.seclabel = self.stanza.getTag('securitylabel',
            namespace=nbxmpp.NS_SECLABEL)
        if self.seclabel:
            # Security label (XEP-0258) display marking, if present.
            self.displaymarking = self.seclabel.getTag('displaymarking')

        if self.stanza.getTag('attention', namespace=nbxmpp.NS_ATTENTION):
            # Attention only counts for live messages, not delayed ones
            # (XEP-0203 <x/> delay marker).
            delayed = self.stanza.getTag('x', namespace=nbxmpp.NS_DELAY) is not\
                None
            if not delayed:
                self.attention = True

        self.form_node = self.stanza.getTag('x', namespace=nbxmpp.NS_DATA)

        if gajim.config.get('ignore_incoming_xhtml'):
            self.xhtml = None
        else:
            self.xhtml = self.stanza.getXHTML()

        # XEP-0172 User Nickname
        self.user_nick = self.stanza.getTagData('nick') or ''

        self.get_chatstate()

        # Out-of-band data (XEP-0066): append the URL (preceded by its
        # description, when given) to the message text.
        oob_node = self.stanza.getTag('x', namespace=nbxmpp.NS_X_OOB)
        self.oob_url = None
        self.oob_desc = None
        if oob_node:
            self.oob_url = oob_node.getTagData('url')
            self.oob_desc = oob_node.getTagData('desc')
            if self.oob_url:
                self.msgtxt += '\n'
                if self.oob_desc:
                    self.msgtxt += self.oob_desc
                else:
                    self.msgtxt += _('URL:')
                self.msgtxt += ' ' + self.oob_url

        replace = self.stanza.getTag('replace', namespace=nbxmpp.NS_CORRECT)
        if replace:
            self.correct_id = replace.getAttr('id')

        return True
class ChatstateReceivedEvent(nec.NetworkIncomingEvent):
    """Chat-state notification (XEP-0085) extracted from a message."""
    name = 'chatstate-received'
    base_network_events = []

    def generate(self):
        # Mirror the fields of the originating message event.
        src = self.msg_obj
        self.stanza = src.stanza
        self.jid = src.jid
        self.fjid = src.fjid
        self.resource = src.resource
        self.chatstate = src.chatstate
        return True
class GcMessageReceivedEvent(nec.NetworkIncomingEvent):
    # A message arriving from a groupchat (MUC) room.
    name = 'gc-message-received'
    base_network_events = []

    def generate(self):
        """
        Parse a groupchat message. Subject changes and room-config changes
        are re-dispatched as their own events; captcha challenges may defer
        processing until BOB data is fetched. Returns True when the message
        should be displayed, None when it was handled or must be dropped.
        """
        self.stanza = self.msg_obj.stanza
        self.fjid = self.msg_obj.fjid
        self.msgtxt = self.msg_obj.msgtxt
        self.jid = self.msg_obj.jid
        self.room_jid = self.msg_obj.jid
        self.nickname = self.msg_obj.resource
        self.timestamp = self.msg_obj.timestamp
        self.xhtml_msgtxt = self.stanza.getXHTML()
        self.correct_id = None # XEP-0308
        if gajim.config.get('ignore_incoming_xhtml'):
            self.xhtml_msgtxt = None

        if self.msg_obj.resource:
            # message from someone
            self.nick = self.msg_obj.resource
        else:
            # message from server
            self.nick = ''

        self.has_timestamp = bool(self.stanza.timestamp)

        self.subject = self.stanza.getSubject()

        if self.subject is not None:
            # Subject changes are handled by a dedicated event.
            gajim.nec.push_incoming_event(GcSubjectReceivedEvent(None,
                conn=self.conn, msg_event=self))
            return

        conditions = self.stanza.getStatusConditions()
        if conditions:
            # Map RFC error conditions to legacy MUC status codes.
            self.status_code = []
            for condition in conditions:
                if condition in CONDITION_TO_CODE:
                    self.status_code.append(CONDITION_TO_CODE[condition])
        else:
            self.status_code = self.stanza.getStatusCode()

        if not self.stanza.getTag('body'): # no <body>
            # It could be a config change. See
            # http://www.xmpp.org/extensions/xep-0045.html#roomconfig-notify
            if self.stanza.getTag('x'):
                if self.status_code != []:
                    gajim.nec.push_incoming_event(GcConfigChangedReceivedEvent(
                        None, conn=self.conn, msg_event=self))
            if self.msg_obj.form_node:
                return True
            return

        self.displaymarking = None
        seclabel = self.stanza.getTag('securitylabel')
        if seclabel and seclabel.getNamespace() == nbxmpp.NS_SECLABEL:
            # Security label (XEP-0258) display marking, if present.
            self.displaymarking = seclabel.getTag('displaymarking')

        # Ignore message from room in which we are not
        if self.jid not in self.conn.last_history_time:
            return

        # Captcha challenge (XEP-0158): resolve cid: URIs against inline
        # BOB data (XEP-0231); missing data is fetched asynchronously and
        # the message re-dispatched later.
        self.captcha_form = None
        captcha_tag = self.stanza.getTag('captcha', namespace=nbxmpp.NS_CAPTCHA)
        if captcha_tag:
            self.captcha_form = captcha_tag.getTag('x',
                namespace=nbxmpp.NS_DATA)
            for field in self.captcha_form.getTags('field'):
                for media in field.getTags('media'):
                    for uri in media.getTags('uri'):
                        uri_data = uri.getData()
                        if uri_data.startswith('cid:'):
                            uri_data = uri_data[4:]
                            found = False
                            for data in self.stanza.getTags('data',
                            namespace=nbxmpp.NS_BOB):
                                if data.getAttr('cid') == uri_data:
                                    uri.setData(data.getData())
                                    found = True
                            if not found:
                                self.conn.get_bob_data(uri_data, self.fjid,
                                    self.conn._dispatch_gc_msg_with_captcha,
                                    [self.stanza, self.msg_obj], 0)
                                return

        replace = self.stanza.getTag('replace', namespace=nbxmpp.NS_CORRECT)
        if replace:
            self.correct_id = replace.getAttr('id')

        return True
class GcSubjectReceivedEvent(nec.NetworkIncomingEvent):
    """Room subject update, split out of a groupchat message."""
    name = 'gc-subject-received'
    base_network_events = []

    def generate(self):
        # Copy everything the subject handler needs from the message event.
        src = self.msg_event
        for attr in ('conn', 'stanza', 'room_jid', 'nickname', 'fjid',
        'subject', 'msgtxt', 'has_timestamp'):
            setattr(self, attr, getattr(src, attr))
        return True
class GcConfigChangedReceivedEvent(nec.NetworkIncomingEvent):
    """Room configuration change notification (MUC status codes)."""
    name = 'gc-config-changed-received'
    base_network_events = []

    def generate(self):
        src = self.msg_event
        self.conn = src.conn
        self.stanza = src.stanza
        self.room_jid = src.room_jid
        self.status_code = src.status_code
        return True
class MessageSentEvent(nec.NetworkIncomingEvent):
    # Marker event: a message was handed to the server (payload from emitter).
    name = 'message-sent'
    base_network_events = []

class MessageNotSentEvent(nec.NetworkIncomingEvent):
    # Marker event: sending a message failed.
    name = 'message-not-sent'
    base_network_events = []

class MessageErrorEvent(nec.NetworkIncomingEvent):
    # Marker event: an error reply was received for a message.
    name = 'message-error'
    base_network_events = []

class AnonymousAuthEvent(nec.NetworkIncomingEvent):
    # Marker event: anonymous authentication took place.
    name = 'anonymous-auth'
    base_network_events = []
class JingleRequestReceivedEvent(nec.NetworkIncomingEvent):
    """A peer asked to start a Jingle session with us."""
    name = 'jingle-request-received'
    base_network_events = []

    def generate(self):
        session = self.jingle_session
        self.fjid = session.peerjid
        self.jid, self.resource = gajim.get_room_and_nick_from_fjid(
            self.fjid)
        self.sid = session.sid
        return True
class JingleConnectedReceivedEvent(nec.NetworkIncomingEvent):
    """A Jingle session reached the connected state."""
    name = 'jingle-connected-received'
    base_network_events = []

    def generate(self):
        # Expose peer jid (full and split) and the session id.
        self.fjid = self.jingle_session.peerjid
        self.jid, self.resource = gajim.get_room_and_nick_from_fjid(
            self.fjid)
        self.sid = self.jingle_session.sid
        return True
class JingleDisconnectedReceivedEvent(nec.NetworkIncomingEvent):
    """A Jingle session was torn down."""
    name = 'jingle-disconnected-received'
    base_network_events = []

    def generate(self):
        session = self.jingle_session
        self.fjid = session.peerjid
        self.jid, self.resource = gajim.get_room_and_nick_from_fjid(
            self.fjid)
        self.sid = session.sid
        return True
class JingleTransferCancelledEvent(nec.NetworkIncomingEvent):
    """A Jingle file transfer was cancelled by the peer."""
    name = 'jingleFT-cancelled-received'
    base_network_events = []

    def generate(self):
        # Expose peer jid (full and split) and the session id.
        self.fjid = self.jingle_session.peerjid
        self.jid, self.resource = gajim.get_room_and_nick_from_fjid(
            self.fjid)
        self.sid = self.jingle_session.sid
        return True
class JingleErrorReceivedEvent(nec.NetworkIncomingEvent):
    """An error occurred within a Jingle session."""
    name = 'jingle-error-received'
    base_network_events = []

    def generate(self):
        session = self.jingle_session
        self.fjid = session.peerjid
        self.jid, self.resource = gajim.get_room_and_nick_from_fjid(
            self.fjid)
        self.sid = session.sid
        return True
class ArchivingReceivedEvent(nec.NetworkIncomingEvent):
    """Raw archiving iq; only result/set/error types are propagated."""
    name = 'archiving-received'
    base_network_events = []

    def generate(self):
        self.type_ = self.stanza.getType()
        # Any other iq type is silently dropped.
        if self.type_ in ('result', 'set', 'error'):
            return True
        return
class ArchivingErrorReceivedEvent(nec.NetworkIncomingEvent):
    """Archiving iq of type 'error'; extracts the error text."""
    name = 'archiving-error-received'
    base_network_events = ['archiving-received']

    def generate(self):
        base = self.base_event
        self.conn = base.conn
        self.stanza = base.stanza
        self.type_ = base.type_

        # Only actual errors are interesting; other types yield no event.
        if self.type_ == 'error':
            self.error_msg = self.stanza.getErrorMsg()
            return True
class ArchivingPreferencesChangedReceivedEvent(nec.NetworkIncomingEvent):
    # Server-side archiving preferences (XEP-0136) changed: parse either a
    # <pref/> payload (methods, defaults, per-jid items) or an
    # <itemremove/> payload (per-jid items deleted).
    name = 'archiving-preferences-changed-received'
    base_network_events = ['archiving-received']

    def generate(self):
        """
        Fill self.conf (global settings), self.new_items (per-jid settings)
        and self.removed_items (jids whose settings were removed).
        Returns True on result/set iqs, None otherwise.
        """
        self.conn = self.base_event.conn
        self.stanza = self.base_event.stanza
        self.type_ = self.base_event.type_

        if self.type_ not in ('result', 'set'):
            return

        self.conf = {}
        self.new_items = {}
        self.removed_items = []
        if self.stanza.getTag('pref'):
            pref = self.stanza.getTag('pref')

            if pref.getTag('auto'):
                self.conf['auto'] = pref.getTagAttr('auto', 'save')

            method_auto = pref.getTag('method', attrs={'type': 'auto'})
            if method_auto:
                self.conf['method_auto'] = method_auto.getAttr('use')

            method_local = pref.getTag('method', attrs={'type': 'local'})
            if method_local:
                self.conf['method_local'] = method_local.getAttr('use')

            method_manual = pref.getTag('method', attrs={'type': 'manual'})
            if method_manual:
                self.conf['method_manual'] = method_manual.getAttr('use')

            default = pref.getTag('default')
            if default:
                self.conf['default'] = {
                    'expire': default.getAttr('expire'),
                    'otr': default.getAttr('otr'),
                    'save': default.getAttr('save'),
                    'unset': default.getAttr('unset')}

            for item in pref.getTags('item'):
                self.new_items[item.getAttr('jid')] = {
                    'expire': item.getAttr('expire'),
                    'otr': item.getAttr('otr'),
                    'save': item.getAttr('save')}

        elif self.stanza.getTag('itemremove'):
            # BUGFIX: this branch previously iterated pref.getTags('item'),
            # but 'pref' is only bound in the <pref/> branch above, which
            # raised NameError. Iterate the <itemremove/> element instead.
            itemremove = self.stanza.getTag('itemremove')
            for item in itemremove.getTags('item'):
                self.removed_items.append(item.getAttr('jid'))
        return True
class AccountCreatedEvent(nec.NetworkIncomingEvent):
    # Marker event: in-band account registration succeeded.
    name = 'account-created'
    base_network_events = []

class AccountNotCreatedEvent(nec.NetworkIncomingEvent):
    # Marker event: in-band account registration failed.
    name = 'account-not-created'
    base_network_events = []
class NewAccountConnectedEvent(nec.NetworkIncomingEvent):
    # Account wizard: connection for a freshly created account succeeded.
    name = 'new-account-connected'
    base_network_events = []

    def generate(self):
        """
        Collect SSL details (error number/message, PEM certificate dump,
        SHA-1/SHA-256 fingerprints) from the underlying connection so the
        wizard can present them to the user. Always returns True.
        """
        try:
            self.errnum = self.conn.connection.Connection.ssl_errnum
        except AttributeError:
            self.errnum = 0 # we don't have an errnum
        self.ssl_msg = ''
        if self.errnum > 0:
            # Translate the OpenSSL error number into a readable message.
            from common.connection import ssl_error
            self.ssl_msg = ssl_error.get(self.errnum,
                _('Unknown SSL error: %d') % self.errnum)
        self.ssl_cert = ''
        self.ssl_fingerprint_sha1 = ''
        self.ssl_fingerprint_sha256 = ''
        if self.conn.connection.Connection.ssl_certificate:
            cert = self.conn.connection.Connection.ssl_certificate
            self.ssl_cert = OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_PEM, cert).decode('utf-8')
            self.ssl_fingerprint_sha1 = cert.digest('sha1').decode('utf-8')
            self.ssl_fingerprint_sha256 = cert.digest('sha256').decode('utf-8')
        return True
class NewAccountNotConnectedEvent(nec.NetworkIncomingEvent):
    # Marker event: connecting the freshly created account failed.
    name = 'new-account-not-connected'
    base_network_events = []

class ConnectionTypeEvent(nec.NetworkIncomingEvent):
    # Marker event: reports which connection type was established.
    name = 'connection-type'
    base_network_events = []

class VcardPublishedEvent(nec.NetworkIncomingEvent):
    # Marker event: our vCard was published successfully.
    name = 'vcard-published'
    base_network_events = []

class VcardNotPublishedEvent(nec.NetworkIncomingEvent):
    # Marker event: publishing our vCard failed.
    name = 'vcard-not-published'
    base_network_events = []

class StanzaReceivedEvent(nec.NetworkIncomingEvent):
    # Marker event: a raw stanza arrived (e.g. for the XML console).
    name = 'stanza-received'
    base_network_events = []

class StanzaSentEvent(nec.NetworkIncomingEvent):
    # Marker event: a raw stanza was sent (e.g. for the XML console).
    name = 'stanza-sent'
    base_network_events = []
class AgentRemovedEvent(nec.NetworkIncomingEvent):
    """A transport/agent was removed; collect the contacts it served."""
    name = 'agent-removed'
    base_network_events = []

    def generate(self):
        # Contacts hosted by the agent share its domain as jid suffix.
        suffix = '@' + self.agent
        self.jid_list = [jid
            for jid in gajim.contacts.get_jid_list(self.conn.name)
            if jid.endswith(suffix)]
        return True
class BadGPGPassphraseEvent(nec.NetworkIncomingEvent):
    """The supplied GPG passphrase was wrong for the account's key."""
    name = 'bad-gpg-passphrase'
    base_network_events = []

    def generate(self):
        account = self.conn.name
        self.account = account
        self.use_gpg_agent = gajim.config.get('use_gpg_agent')
        self.keyID = gajim.config.get_per('accounts', account, 'keyid')
        return True
class ConnectionLostEvent(nec.NetworkIncomingEvent):
    """Connection to the server was lost; forces our show to offline."""
    name = 'connection-lost'
    base_network_events = []

    def generate(self):
        gajim.nec.push_incoming_event(
            OurShowEvent(None, conn=self.conn, show='offline'))
        return True
class PingSentEvent(nec.NetworkIncomingEvent):
    # Marker event: an XMPP ping was sent.
    name = 'ping-sent'
    base_network_events = []

class PingReplyEvent(nec.NetworkIncomingEvent):
    # Marker event: a ping reply arrived.
    name = 'ping-reply'
    base_network_events = []

class PingErrorEvent(nec.NetworkIncomingEvent):
    # Marker event: a ping produced an error reply.
    name = 'ping-error'
    base_network_events = []
class CapsPresenceReceivedEvent(nec.NetworkIncomingEvent, HelperEvent,
PresenceHelperEvent):
    """Presence carrying entity-capabilities (XEP-0115) information."""
    name = 'caps-presence-received'
    base_network_events = ['raw-pres-received']

    def _extract_caps_from_presence(self):
        # The <c/> element advertises the hash method, node and ver hash.
        c_tag = self.stanza.getTag('c', namespace=nbxmpp.NS_CAPS)
        if c_tag is None:
            self.hash_method = self.node = self.caps_hash = None
        else:
            self.hash_method = c_tag['hash']
            self.node = c_tag['node']
            self.caps_hash = c_tag['ver']

    def generate(self):
        self.conn = self.base_event.conn
        self.stanza = self.base_event.stanza
        try:
            self.get_jid_resource()
        except Exception:
            # Unparseable sender: drop the event.
            return
        self._generate_ptype()
        self._generate_show()
        self._extract_caps_from_presence()
        return True
class CapsDiscoReceivedEvent(nec.NetworkIncomingEvent):
    # Marker event: disco#info answer relevant to entity capabilities.
    name = 'caps-disco-received'
    base_network_events = []
class CapsReceivedEvent(nec.NetworkIncomingEvent):
    """Entity capabilities resolved from presence or disco answers."""
    name = 'caps-received'
    base_network_events = ['caps-presence-received', 'caps-disco-received']

    def generate(self):
        # Propagate the fields of whichever base event triggered us.
        base = self.base_event
        for attr in ('conn', 'fjid', 'jid', 'resource', 'client_caps'):
            setattr(self, attr, getattr(base, attr))
        return True
class GPGTrustKeyEvent(nec.NetworkIncomingEvent):
    # Marker event: the user must decide whether to trust a GPG key.
    name = 'gpg-trust-key'
    base_network_events = []
class GPGPasswordRequiredEvent(nec.NetworkIncomingEvent):
    """The GPG key passphrase is needed from the user."""
    name = 'gpg-password-required'
    base_network_events = []

    def generate(self):
        account = self.conn.name
        self.keyid = gajim.config.get_per('accounts', account, 'keyid')
        return True
class PEPReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    # Personal Eventing Protocol (XEP-0163) notification.
    name = 'pep-received'
    base_network_events = []

    def generate(self):
        """
        Match the <event/> payload against the supported personal-event
        classes (mood, activity, ...). Returns True when a supported PEP
        type was found; otherwise dispatches any atom entries it contains
        and raises nbxmpp.NodeProcessed to stop further stanza handling.
        """
        if not self.stanza.getTag('event'):
            return
        if self.stanza.getTag('error'):
            log.debug('PEPReceivedEvent received error stanza. Ignoring')
            return

        try:
            self.get_jid_resource()
        except Exception:
            return

        self.event_tag = self.stanza.getTag('event')

        for pep_class in SUPPORTED_PERSONAL_USER_EVENTS:
            pep = pep_class.get_tag_as_PEP(self.fjid, self.conn.name,
                self.event_tag)
            if pep:
                self.pep_type = pep.type_
                return True

        items = self.event_tag.getTag('items')
        if items:
            # for each entry in feed (there shouldn't be more than one, but to
            # be sure...
            for item in items.getTags('item'):
                entry = item.getTag('entry', namespace=nbxmpp.NS_ATOM)
                if entry:
                    gajim.nec.push_incoming_event(AtomEntryReceived(None,
                        conn=self.conn, node=entry))
        raise nbxmpp.NodeProcessed
class AtomEntryReceived(nec.NetworkIncomingEvent):
    """An atom feed entry arrived via PEP."""
    name = 'atom-entry-received'
    base_network_events = []

    def generate(self):
        # Wrap the raw XML node into the old-style atom entry object.
        self.atom_entry = atom.OldEntry(node=self.node)
        return True
class PlainConnectionEvent(nec.NetworkIncomingEvent):
    # Marker event: the connection is unencrypted.
    name = 'plain-connection'
    base_network_events = []

class InsecurePasswordEvent(nec.NetworkIncomingEvent):
    # Marker event: the password would be sent insecurely.
    name = 'insecure-password'
    base_network_events = []

class InsecureSSLConnectionEvent(nec.NetworkIncomingEvent):
    # Marker event: the SSL connection is considered insecure.
    name = 'insecure-ssl-connection'
    base_network_events = []

class SSLErrorEvent(nec.NetworkIncomingEvent):
    # Marker event: an SSL error occurred during connect.
    name = 'ssl-error'
    base_network_events = []

class FingerprintErrorEvent(nec.NetworkIncomingEvent):
    # Marker event: the certificate fingerprint did not match.
    name = 'fingerprint-error'
    base_network_events = []

class UniqueRoomIdSupportedEvent(nec.NetworkIncomingEvent):
    # Marker event: the server supports unique room ids (XEP-0045).
    name = 'unique-room-id-supported'
    base_network_events = []

class UniqueRoomIdNotSupportedEvent(nec.NetworkIncomingEvent):
    # Marker event: the server lacks unique room id support.
    name = 'unique-room-id-not-supported'
    base_network_events = []

class PrivacyListsReceivedEvent(nec.NetworkIncomingEvent):
    # Marker event: the list of privacy lists arrived (XEP-0016).
    name = 'privacy-lists-received'
    base_network_events = []

class PrivacyListReceivedEvent(nec.NetworkIncomingEvent):
    # Marker event: a single privacy list arrived.
    name = 'privacy-list-received'
    base_network_events = []

class PrivacyListRemovedEvent(nec.NetworkIncomingEvent):
    # Marker event: a privacy list was removed.
    name = 'privacy-list-removed'
    base_network_events = []

class PrivacyListActiveDefaultEvent(nec.NetworkIncomingEvent):
    # Marker event: active/default privacy list information arrived.
    name = 'privacy-list-active-default'
    base_network_events = []
class VcardReceivedEvent(nec.NetworkIncomingEvent):
    """A vCard arrived; derives a display nickname and the full jid."""
    name = 'vcard-received'
    base_network_events = []

    def generate(self):
        vcard = self.vcard_dict
        # Prefer NICKNAME, fall back to the formatted name (FN), else None.
        if 'NICKNAME' in vcard:
            self.nickname = vcard['NICKNAME']
        elif 'FN' in vcard:
            self.nickname = vcard['FN']
        else:
            self.nickname = None
        self.jid = vcard['jid']
        self.resource = vcard['resource']
        self.fjid = self.jid + '/' + self.resource if self.resource \
            else self.jid
        return True
class PEPConfigReceivedEvent(nec.NetworkIncomingEvent):
    # Marker event: a PEP node configuration form arrived.
    name = 'pep-config-received'
    base_network_events = []
class MetacontactsReceivedEvent(nec.NetworkIncomingEvent):
    # Metacontact storage answer (XEP-0209).
    name = 'metacontacts-received'
    base_network_events = []

    def generate(self):
        """
        Parse the metacontact storage answer into self.meta_list
        (tag -> list of {'jid': ..., 'order': ...} dicts).

        Currently short-circuited: returns with an empty mapping; the
        parsing code below the early return is intentionally kept for when
        metacontacts are re-enabled.
        """
        # Metacontact tags
        # http://www.xmpp.org/extensions/xep-0209.html
        self.meta_list = {}
        # FIXME: disable metacontacts until they work correctly
        return True
        # --- unreachable on purpose (see FIXME above) ---
        query = self.stanza.getTag('query')
        storage = query.getTag('storage')
        metas = storage.getTags('meta')
        for meta in metas:
            try:
                jid = helpers.parse_jid(meta.getAttr('jid'))
            except helpers.InvalidFormat:
                continue
            tag = meta.getAttr('tag')
            data = {'jid': jid}
            order = meta.getAttr('order')
            try:
                order = int(order)
            except Exception:
                order = 0
            if order is not None:
                data['order'] = order
            if tag in self.meta_list:
                self.meta_list[tag].append(data)
            else:
                self.meta_list[tag] = [data]
        return True
class ZeroconfNameConflictEvent(nec.NetworkIncomingEvent):
    # Marker event: our zeroconf name collides with another on the network.
    name = 'zeroconf-name-conflict'
    base_network_events = []

class PasswordRequiredEvent(nec.NetworkIncomingEvent):
    # Marker event: the account password must be asked from the user.
    name = 'password-required'
    base_network_events = []

class Oauth2CredentialsRequiredEvent(nec.NetworkIncomingEvent):
    # Marker event: OAuth2 credentials must be obtained from the user.
    name = 'oauth2-credentials-required'
    base_network_events = []
class FailedDecryptEvent(nec.NetworkIncomingEvent):
    """Decryption of an incoming message failed."""
    name = 'failed-decrypt'
    base_network_events = []

    def generate(self):
        # Carry over the context of the message that failed to decrypt.
        src = self.msg_obj
        self.conn = src.conn
        self.fjid = src.fjid
        self.timestamp = src.timestamp
        self.session = src.session
        return True
class SignedInEvent(nec.NetworkIncomingEvent):
    # Marker event: the account finished signing in.
    name = 'signed-in'
    base_network_events = []

class RegisterAgentInfoReceivedEvent(nec.NetworkIncomingEvent):
    # Marker event: registration info for an agent/transport arrived.
    name = 'register-agent-info-received'
    base_network_events = []
class AgentItemsReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    # disco#items result from a service/agent.
    name = 'agent-items-received'
    base_network_events = []

    def generate(self):
        """
        Collect the item nodes of the query into self.items as attribute
        dicts (with validated 'jid'). Returns True for normal answers;
        returns None when the answer belongs to our own startup discovery
        walk, in which case each item is disco'd further instead.
        """
        q = self.stanza.getTag('query')
        self.node = q.getAttr('node')
        if not self.node:
            self.node = ''
        qp = self.stanza.getQueryPayload()
        self.items = []
        if not qp:
            qp = []
        for i in qp:
            # CDATA payload is not processed, only nodes
            if not isinstance(i, nbxmpp.simplexml.Node):
                continue
            attr = {}
            for key in i.getAttrs():
                attr[key] = i.getAttrs()[key]
            if 'jid' not in attr:
                continue
            try:
                attr['jid'] = helpers.parse_jid(attr['jid'])
            except helpers.InvalidFormat:
                # jid is not conform
                continue
            self.items.append(attr)
        self.get_jid_resource()
        hostname = gajim.config.get_per('accounts', self.conn.name, 'hostname')
        self.get_id()
        # Drop the id from the set of pending disco#items queries.
        if self.id_ in self.conn.disco_items_ids:
            self.conn.disco_items_ids.remove(self.id_)
        if self.fjid == hostname and self.id_[:6] == 'Gajim_':
            # Our own auto-discovery of the server: disco each child item
            # and do not emit this event.
            for item in self.items:
                self.conn.discoverInfo(item['jid'], id_prefix='Gajim_')
        else:
            return True
class AgentItemsErrorReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    """Error reply to a disco#items query."""
    name = 'agent-items-error-received'
    base_network_events = []

    def generate(self):
        self.get_jid_resource()
        self.get_id()
        # Drop the id from the set of pending disco#items queries.
        pending = self.conn.disco_items_ids
        if self.id_ in pending:
            pending.remove(self.id_)
        return True
class AgentInfoReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    # disco#info result from a service/agent.
    name = 'agent-info-received'
    base_network_events = []

    def generate(self):
        """
        Parse identities, features and data forms from the disco#info
        answer. Returns True on success, None when the iq has no id.
        """
        self.get_id()
        # Drop the id from the set of pending disco#info queries.
        if self.id_ in self.conn.disco_info_ids:
            self.conn.disco_info_ids.remove(self.id_)
        if self.id_ is None:
            log.warning('Invalid IQ received without an ID. Ignoring it: %s' % \
                self.stanza)
            return
        # According to XEP-0030:
        # For identity: category, type is mandatory, name is optional.
        # For feature: var is mandatory
        self.identities, self.features, self.data = [], [], []
        q = self.stanza.getTag('query')
        self.node = q.getAttr('node')
        if not self.node:
            self.node = ''
        qc = self.stanza.getQueryChildren()
        if not qc:
            qc = []
        for i in qc:
            if i.getName() == 'identity':
                attr = {}
                for key in i.getAttrs().keys():
                    attr[key] = i.getAttr(key)
                self.identities.append(attr)
            elif i.getName() == 'feature':
                var = i.getAttr('var')
                if var:
                    self.features.append(var)
            elif i.getName() == 'x' and i.getNamespace() == nbxmpp.NS_DATA:
                # Extended disco info (XEP-0128) data form.
                self.data.append(nbxmpp.DataForm(node=i))
        if not self.identities:
            # ejabberd doesn't send identities when we browse online users
            # see http://www.jabber.ru/bugzilla/show_bug.cgi?id=225
            self.identities = [{'category': 'server', 'type': 'im',
                'name': self.node}]
        self.get_jid_resource()
        return True
class AgentInfoErrorReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    """Error reply to a service-discovery info (disco#info) query."""
    name = 'agent-info-error-received'
    base_network_events = []
    def generate(self):
        """Resolve jid/resource and stop tracking the pending disco#info id."""
        self.get_jid_resource()
        self.get_id()
        try:
            self.conn.disco_info_ids.remove(self.id_)
        except ValueError:
            # This id was not being tracked; nothing to clean up.
            pass
        return True
class FileRequestReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    """Incoming file-transfer request, either a Jingle offer ('jingle' tag)
    or a stream-initiation offer ('si' tag).

    generate() fills self.file_props with the negotiated transfer metadata.
    """
    name = 'file-request-received'
    base_network_events = []
    def init(self):
        # Set by the Jingle layer before generate() runs; None for SI requests.
        self.jingle_content = None
        self.FT_content = None
    def generate(self):
        self.get_id()
        self.fjid = self.conn._ft_get_from(self.stanza)
        self.jid = gajim.get_jid_without_resource(self.fjid)
        if self.jingle_content:
            # --- Jingle file transfer ---
            secu = self.jingle_content.getTag('security')
            self.FT_content.use_security = bool(secu)
            if secu:
                fingerprint = secu.getTag('fingerprint')
                if fingerprint:
                    self.FT_content.x509_fingerprint = fingerprint.getData()
            if not self.FT_content.transport:
                # Fall back to a SOCKS5 transport when none was negotiated.
                self.FT_content.transport = JingleTransportSocks5()
                self.FT_content.transport.set_our_jid(
                    self.FT_content.session.ourjid)
                self.FT_content.transport.set_connection(
                    self.FT_content.session.connection)
            sid = self.stanza.getTag('jingle').getAttr('sid')
            self.file_props = FilesProp.getNewFileProp(self.conn.name, sid)
            self.file_props.transport_sid = self.FT_content.transport.sid
            self.FT_content.file_props = self.file_props
            self.FT_content.transport.set_file_props(self.file_props)
            self.file_props.streamhosts.extend(
                    self.FT_content.transport.remote_candidates)
            for host in self.file_props.streamhosts:
                host['initiator'] = self.FT_content.session.initiator
                host['target'] = self.FT_content.session.responder
            self.file_props.session_type = 'jingle'
            self.file_props.stream_methods = nbxmpp.NS_BYTESTREAM
            desc = self.jingle_content.getTag('description')
            if desc.getTag('offer'):
                # Peer offers us a file: we are the receiver.
                file_tag = desc.getTag('offer').getTag('file')
                self.file_props.sender = self.fjid
                self.file_props.receiver = self.conn._ft_get_our_jid()
            else:
                # Peer requests a file from us: we are the sender.
                file_tag = desc.getTag('request').getTag('file')
                h = file_tag.getTag('hash')
                h = h.getData() if h else None
                n = file_tag.getTag('name')
                n = n.getData() if n else None
                pjid = gajim.get_jid_without_resource(self.fjid)
                file_info = self.conn.get_file_info(pjid, hash_=h,
                    name=n,account=self.conn.name)
                self.file_props.file_name = file_info['file-name']
                self.file_props.sender = self.conn._ft_get_our_jid()
                self.file_props.receiver = self.fjid
                self.file_props.type_ = 's'
            for child in file_tag.getChildren():
                name = child.getName()
                val = child.getData()
                if val is None:
                    continue
                if name == 'name':
                    self.file_props.name = val
                if name == 'size':
                    self.file_props.size = int(val)
                if name == 'hash':
                    self.file_props.algo = child.getAttr('algo')
                    self.file_props.hash_ = val
                if name == 'date':
                    self.file_props.date = val
        else:
            # --- Stream-initiation (si) file transfer ---
            si = self.stanza.getTag('si')
            self.file_props = FilesProp.getNewFileProp(self.conn.name,
                si.getAttr('id'))
            profile = si.getAttr('profile')
            if profile != nbxmpp.NS_FILE:
                # Not a file-transfer profile: reject and stop processing.
                self.conn.send_file_rejection(self.file_props, code='400',
                    typ='profile')
                raise nbxmpp.NodeProcessed
            feature_tag = si.getTag('feature', namespace=nbxmpp.NS_FEATURE)
            if not feature_tag:
                return
            form_tag = feature_tag.getTag('x', namespace=nbxmpp.NS_DATA)
            if not form_tag:
                return
            self.dataform = dataforms.ExtendForm(node=form_tag)
            for f in self.dataform.iter_fields():
                if f.var == 'stream-method' and f.type_ == 'list-single':
                    values = [o[1] for o in f.options]
                    self.file_props.stream_methods = ' '.join(values)
                    if nbxmpp.NS_BYTESTREAM in values or \
                    nbxmpp.NS_IBB in values:
                        break
            else:
                # for-else: no supported stream method was offered -> reject.
                self.conn.send_file_rejection(self.file_props, code='400',
                    typ='stream')
                raise nbxmpp.NodeProcessed
            file_tag = si.getTag('file')
            for name, val in file_tag.getAttrs().items():
                if val is None:
                    continue
                if name == 'name':
                    self.file_props.name = val
                if name == 'size':
                    self.file_props.size = int(val)
            mime_type = si.getAttr('mime-type')
            if mime_type is not None:
                self.file_props.mime_type = mime_type
            self.file_props.sender = self.fjid
            self.file_props.receiver = self.conn._ft_get_our_jid()
        self.file_props.request_id = self.id_
        file_desc_tag = file_tag.getTag('desc')
        if file_desc_tag is not None:
            self.file_props.desc = file_desc_tag.getData()
        self.file_props.transfered_size = []
        return True
class FileRequestErrorEvent(nec.NetworkIncomingEvent):
    """Signals that a file-transfer request ended with an error."""
    name = 'file-request-error'
    base_network_events = []
    def generate(self):
        # Normalise to the bare JID; the resource part is irrelevant here.
        self.jid = gajim.get_jid_without_resource(self.jid)
        return True
class GatewayPromptReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
    """Reply to a transport/gateway prompt query."""
    name = 'gateway-prompt-received'
    base_network_events = []
    def generate(self):
        """Extract desc/prompt/jid from the query element; None when absent."""
        self.get_jid_resource()
        query = self.stanza.getTag('query')
        self.desc = query.getTagData('desc') if query else None
        self.prompt = query.getTagData('prompt') if query else None
        self.prompt_jid = query.getTagData('jid') if query else None
        return True
class NotificationEvent(nec.NetworkIncomingEvent):
    """Computes notification output (popup, sound, command) for an incoming
    message, group-chat message or presence change.

    generate() resets all output fields, detects which base event fired and
    delegates to the matching handle_incoming_*_event method, which sets the
    do_popup / do_sound / popup_* fields the GUI layer acts upon.
    """
    name = 'notification'
    base_network_events = ['decrypted-message-received', 'gc-message-received',
        'presence-received']
    def detect_type(self):
        """Map the triggering base event name onto a short type tag."""
        if self.base_event.name == 'decrypted-message-received':
            self.notif_type = 'msg'
        if self.base_event.name == 'gc-message-received':
            self.notif_type = 'gc-msg'
        if self.base_event.name == 'presence-received':
            self.notif_type = 'pres'
    def get_focused(self):
        """Set self.control_focused: True if the chat control is the active
        control of a window that currently has keyboard focus."""
        self.control_focused = False
        if self.control:
            parent_win = self.control.parent_win
            if parent_win and self.control == parent_win.get_active_control() \
            and parent_win.window.has_focus:
                self.control_focused = True
    def handle_incoming_msg_event(self, msg_obj):
        """Fill popup/sound output fields for a one-to-one chat message."""
        if not msg_obj.msgtxt:
            return
        self.jid = msg_obj.jid
        if msg_obj.session:
            self.control = msg_obj.session.control
        else:
            self.control = None
        self.get_focused()
        # This event has already been added to event list
        if not self.control and len(gajim.events.get_events(self.conn.name, \
        self.jid, [msg_obj.mtype])) <= 1:
            self.first_unread = True
        if msg_obj.mtype == 'pm':
            nick = msg_obj.resource
        else:
            nick = gajim.get_name_from_jid(self.conn.name, self.jid)
        if self.first_unread:
            self.sound_event = 'first_message_received'
        elif self.control_focused:
            self.sound_event = 'next_message_received_focused'
        else:
            self.sound_event = 'next_message_received_unfocused'
        if gajim.config.get('notification_preview_message'):
            self.popup_text = msg_obj.msgtxt
            if self.popup_text and (self.popup_text.startswith('/me ') or \
            self.popup_text.startswith('/me\n')):
                self.popup_text = '* ' + nick + self.popup_text[3:]
        else:
            # We don't want message preview, do_preview = False
            self.popup_text = ''
        if msg_obj.mtype == 'normal': # single message
            self.popup_msg_type = 'normal'
            self.popup_event_type = _('New Single Message')
            self.popup_image = 'gajim-single_msg_recv'
            self.popup_title = _('New Single Message from %(nickname)s') % \
                {'nickname': nick}
        elif msg_obj.mtype == 'pm':
            self.popup_msg_type = 'pm'
            self.popup_event_type = _('New Private Message')
            self.popup_image = 'gajim-priv_msg_recv'
            self.popup_title = _('New Private Message from group chat %s') % \
                msg_obj.jid
            if self.popup_text:
                self.popup_text = _('%(nickname)s: %(message)s') % \
                    {'nickname': nick, 'message': self.popup_text}
            else:
                self.popup_text = _('Messaged by %(nickname)s') % \
                    {'nickname': nick}
        else: # chat message
            self.popup_msg_type = 'chat'
            self.popup_event_type = _('New Message')
            self.popup_image = 'gajim-chat_msg_recv'
            self.popup_title = _('New Message from %(nickname)s') % \
                {'nickname': nick}
        # Popup policy: only the first unread message may pop up, and only
        # when allowed by config or the away/online state.
        if not gajim.config.get('notify_on_new_message') or \
        not self.first_unread:
            self.do_popup = False
        elif gajim.config.get('autopopupaway'):
            # always show notification
            self.do_popup = True
        elif gajim.connections[self.conn.name].connected in (2, 3):
            # we're online or chat
            self.do_popup = True
        if msg_obj.attention and not gajim.config.get(
        'ignore_incoming_attention'):
            # Attention (buzz) requests get a popup without timeout.
            self.popup_timeout = 0
            self.do_popup = True
        else:
            self.popup_timeout = gajim.config.get('notification_timeout')
        if msg_obj.attention and not gajim.config.get(
        'ignore_incoming_attention') and gajim.config.get_per('soundevents',
        'attention_received', 'enabled'):
            self.sound_event = 'attention_received'
            self.do_sound = True
        elif self.first_unread and helpers.allow_sound_notification(
        self.conn.name, 'first_message_received'):
            self.do_sound = True
        elif not self.first_unread and self.control_focused and \
        helpers.allow_sound_notification(self.conn.name,
        'next_message_received_focused'):
            self.do_sound = True
        elif not self.first_unread and not self.control_focused and \
        helpers.allow_sound_notification(self.conn.name,
        'next_message_received_unfocused'):
            self.do_sound = True
    def handle_incoming_gc_msg_event(self, msg_obj):
        """Fill sound output fields for a group-chat message; never pops up."""
        if not msg_obj.msg_obj.gc_control:
            # we got a message from a room we're not in? ignore it
            return
        sound = msg_obj.msg_obj.gc_control.highlighting_for_message(
            msg_obj.msgtxt, msg_obj.timestamp)[1]
        if msg_obj.nickname != msg_obj.msg_obj.gc_control.nick:
            # Message from someone else: sound depends on highlighting.
            self.do_sound = True
            if sound == 'received':
                self.sound_event = 'muc_message_received'
            elif sound == 'highlight':
                self.sound_event = 'muc_message_highlight'
            else:
                self.do_sound = False
        else:
            # Our own reflected message: stay silent.
            self.do_sound = False
        self.do_popup = False
    def get_path_to_generic_or_avatar(self, generic, jid=None, suffix=None):
        """
        Choose between avatar image and default image

        Returns full path to the avatar image if it exists, otherwise returns full
        path to the image. generic must be with extension and suffix without
        """
        if jid:
            # we want an avatar
            puny_jid = helpers.sanitize_filename(jid)
            path_to_file = os.path.join(gajim.AVATAR_PATH, puny_jid) + suffix
            path_to_local_file = path_to_file + '_local'
            # Locally-set avatars take precedence over server-side ones.
            for extension in ('.png', '.jpeg'):
                path_to_local_file_full = path_to_local_file + extension
                if os.path.exists(path_to_local_file_full):
                    return path_to_local_file_full
            for extension in ('.png', '.jpeg'):
                path_to_file_full = path_to_file + extension
                if os.path.exists(path_to_file_full):
                    return path_to_file_full
        return os.path.abspath(generic)
    def handle_incoming_pres_event(self, pres_obj):
        """Fill popup/sound output fields for a contact's presence change.

        Returns True early (no notification) for transports, when another
        resource or metacontact of the contact is still connected, or for
        plain status changes that need no popup.
        """
        if gajim.jid_is_transport(pres_obj.jid):
            return True
        account = pres_obj.conn.name
        self.jid = pres_obj.jid
        resource = pres_obj.resource or ''
        # It isn't an agent
        for c in pres_obj.contact_list:
            if c.resource == resource:
                # we look for other connected resources
                continue
            if c.show not in ('offline', 'error'):
                return True
        # no other resource is connected, let's look in metacontacts
        family = gajim.contacts.get_metacontacts_family(account, self.jid)
        for info in family:
            acct_ = info['account']
            jid_ = info['jid']
            c_ = gajim.contacts.get_contact_with_highest_priority(acct_, jid_)
            if not c_:
                continue
            if c_.jid == self.jid:
                continue
            if c_.show not in ('offline', 'error'):
                return True
        if pres_obj.old_show < 2 and pres_obj.new_show > 1:
            # Contact went from offline/error to a connected show: sign-in.
            event = 'contact_connected'
            show_image = 'online.png'
            suffix = '_notif_size_colored'
            server = gajim.get_server_from_jid(self.jid)
            account_server = account + '/' + server
            block_transport = False
            if account_server in gajim.block_signed_in_notifications and \
            gajim.block_signed_in_notifications[account_server]:
                block_transport = True
            if helpers.allow_showing_notification(account, 'notify_on_signin') \
            and not gajim.block_signed_in_notifications[account] and \
            not block_transport:
                self.do_popup = True
            if gajim.config.get_per('soundevents', 'contact_connected',
            'enabled') and not gajim.block_signed_in_notifications[account] and\
            not block_transport and helpers.allow_sound_notification(account,
            'contact_connected'):
                self.sound_event = event
                self.do_sound = True
        elif pres_obj.old_show > 1 and pres_obj.new_show < 2:
            # Contact went from connected to offline/error: sign-out.
            event = 'contact_disconnected'
            show_image = 'offline.png'
            suffix = '_notif_size_bw'
            if helpers.allow_showing_notification(account, 'notify_on_signout'):
                self.do_popup = True
            if gajim.config.get_per('soundevents', 'contact_disconnected',
            'enabled') and helpers.allow_sound_notification(account, event):
                self.sound_event = event
                self.do_sound = True
        # Status change (not connected/disconnected or error (<1))
        elif pres_obj.new_show > 1:
            event = 'status_change'
            # FIXME: we don't always 'online.png', but we first need 48x48 for
            # all status
            show_image = 'online.png'
            suffix = '_notif_size_colored'
        else:
            return True
        transport_name = gajim.get_transport_name_from_jid(self.jid)
        img_path = None
        if transport_name:
            img_path = os.path.join(helpers.get_transport_path(
                transport_name), '48x48', show_image)
        if not img_path or not os.path.isfile(img_path):
            iconset = gajim.config.get('iconset')
            img_path = os.path.join(helpers.get_iconset_path(iconset),
                '48x48', show_image)
        self.popup_image_path = self.get_path_to_generic_or_avatar(img_path,
            jid=self.jid, suffix=suffix)
        self.popup_timeout = gajim.config.get('notification_timeout')
        nick = i18n.direction_mark + gajim.get_name_from_jid(account, self.jid)
        if event == 'status_change':
            self.popup_title = _('%(nick)s Changed Status') % \
                {'nick': nick}
            self.popup_text = _('%(nick)s is now %(status)s') % \
                {'nick': nick, 'status': helpers.get_uf_show(pres_obj.show)}
            if pres_obj.status:
                self.popup_text = self.popup_text + " : " + pres_obj.status
            self.popup_event_type = _('Contact Changed Status')
        elif event == 'contact_connected':
            self.popup_title = _('%(nickname)s Signed In') % {'nickname': nick}
            self.popup_text = ''
            if pres_obj.status:
                self.popup_text = pres_obj.status
            self.popup_event_type = _('Contact Signed In')
        elif event == 'contact_disconnected':
            self.popup_title = _('%(nickname)s Signed Out') % {'nickname': nick}
            self.popup_text = ''
            if pres_obj.status:
                self.popup_text = pres_obj.status
            self.popup_event_type = _('Contact Signed Out')
    def generate(self):
        """Reset all output fields, then dispatch to the matching handler."""
        # what's needed to compute output
        self.conn = self.base_event.conn
        self.jid = ''
        self.control = None
        self.control_focused = False
        self.first_unread = False
        # For output
        self.do_sound = False
        self.sound_file = ''
        self.sound_event = '' # gajim sound played if not sound_file is set
        self.show_popup = False
        self.do_popup = False
        self.popup_title = ''
        self.popup_text = ''
        self.popup_event_type = ''
        self.popup_msg_type = ''
        self.popup_image = ''
        self.popup_image_path = ''
        self.popup_timeout = -1
        self.do_command = False
        self.command = ''
        self.show_in_notification_area = False
        self.show_in_roster = False
        self.detect_type()
        if self.notif_type == 'msg':
            self.handle_incoming_msg_event(self.base_event)
        elif self.notif_type == 'gc-msg':
            self.handle_incoming_gc_msg_event(self.base_event)
        elif self.notif_type == 'pres':
            self.handle_incoming_pres_event(self.base_event)
        return True
class MessageOutgoingEvent(nec.NetworkOutgoingEvent):
    """Outgoing one-to-one message about to be handed to the connection."""
    name = 'message-outgoing'
    base_network_events = []
    def init(self):
        """Reset every field to its default before attribute injection."""
        # Fields whose default is simply None.
        for field in ('keyID', 'chatstate', 'msg_id', 'resource', 'user_nick',
                      'xhtml', 'label', 'session', 'forward_from', 'form_node',
                      'original_message', 'delayed', 'callback', 'control',
                      'correction_msg'):
            setattr(self, field, None)
        self.message = ''
        self.type_ = 'chat'
        self.subject = ''
        self.callback_args = []  # fresh list per event instance
        self.now = False
        self.is_loggable = True
        self.attention = False
    def generate(self):
        return True
class GcMessageOutgoingEvent(nec.NetworkOutgoingEvent):
    """Outgoing group-chat message about to be handed to the connection."""
    name = 'gc-message-outgoing'
    base_network_events = []
    def init(self):
        """Reset every field to its default before attribute injection."""
        for field in ('xhtml', 'label', 'callback', 'control',
                      'correction_msg'):
            setattr(self, field, None)
        self.message = ''
        self.callback_args = []  # fresh list per event instance
        self.is_loggable = True
    def generate(self):
        return True
class ClientCertPassphraseEvent(nec.NetworkIncomingEvent):
    """Emitted when the passphrase of the client TLS certificate is needed."""
    name = 'client-cert-passphrase'
    base_network_events = []
class InformationEvent(nec.NetworkIncomingEvent):
    """Generic informational message intended for the user."""
    name = 'information'
    base_network_events = []
    def init(self):
        # Show a popup dialog by default; emitters may disable it.
        self.popup = True
|
irl/gajim
|
src/common/connection_handlers_events.py
|
Python
|
gpl-3.0
| 94,929 | 0.00316 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin
import os
import re
from stat import *
class Xen(Plugin, RedHatPlugin):
    """Xen virtualization
    """

    plugin_name = 'xen'
    profiles = ('virt',)

    def determine_xen_host(self):
        """Classify this machine's Xen role.

        Returns one of: 'hvm' (fully-virtualised guest, Xen signature in the
        ACPI DSDT), 'dom0' (control domain, control_d capability present),
        'domU' (paravirtualised guest) or 'baremetal' (no Xen detected).
        """
        if os.access("/proc/acpi/dsdt", os.R_OK):
            result = self.call_ext_prog("grep -qi xen /proc/acpi/dsdt")
            if result['status'] == 0:
                return "hvm"

        if os.access("/proc/xen/capabilities", os.R_OK):
            result = self.call_ext_prog(
                "grep -q control_d /proc/xen/capabilities")
            if result['status'] == 0:
                return "dom0"
            else:
                return "domU"
        return "baremetal"

    def check_enabled(self):
        # NOTE(review): enables the plugin only when *no* Xen host type is
        # detected -- looks inverted for a plugin named 'xen'; confirm intent.
        return (self.determine_xen_host() == "baremetal")

    def is_running_xenstored(self):
        """Return True if a xenstored process is currently running."""
        xs_pid = self.call_ext_prog("pidof xenstored")['output']
        # Strip the trailing newline from pidof's output before checking.
        xs_pidnum = re.split('\n$', xs_pid)[0]
        return xs_pidnum.isdigit()

    def dom_collect_proc(self):
        """Collect /proc/xen entries and CPU virtualization capability info."""
        self.add_copy_spec([
            "/proc/xen/balloon",
            "/proc/xen/capabilities",
            "/proc/xen/xsd_kva",
            "/proc/xen/xsd_port"])
        # determine if CPU has PAE support
        self.add_cmd_output("grep pae /proc/cpuinfo")
        # determine if CPU has Intel-VT or AMD-V support
        self.add_cmd_output("egrep -e 'vmx|svm' /proc/cpuinfo")

    def setup(self):
        """Collect Xen diagnostics appropriate to the detected host type."""
        host_type = self.determine_xen_host()
        if host_type == "domU":
            # we should collect /proc/xen and /sys/hypervisor
            self.dom_collect_proc()
            # determine if hardware virtualization support is enabled
            # in BIOS: /sys/hypervisor/properties/capabilities
            self.add_copy_spec("/sys/hypervisor")
        elif host_type == "hvm":
            # what do we collect here???
            pass
        elif host_type == "dom0":
            # default of dom0, collect lots of system information
            self.add_copy_spec([
                "/var/log/xen",
                "/etc/xen",
                "/sys/hypervisor/version",
                "/sys/hypervisor/compilation",
                "/sys/hypervisor/properties",
                "/sys/hypervisor/type"])
            self.add_cmd_output([
                "xm dmesg",
                "xm info",
                "xm list",
                "xm list --long",
                "brctl show"
            ])
            self.dom_collect_proc()
            if self.is_running_xenstored():
                self.add_copy_spec("/sys/hypervisor/uuid")
                self.add_cmd_output("xenstore-ls")
            else:
                # we need tdb instead of xenstore-ls if cannot get it.
                self.add_copy_spec("/var/lib/xenstored/tdb")

            # FIXME: we *might* want to collect things in /sys/bus/xen*,
            # /sys/class/xen*, /sys/devices/xen*, /sys/modules/blk*,
            # /sys/modules/net*, but I've never heard of them actually being
            # useful, so I'll leave it out for now
        else:
            # for bare-metal, we don't have to do anything special
            return  # USEFUL

        self.add_custom_text("Xen hostType: "+host_type)
# vim: set et ts=4 sw=4 :
|
cnewcome/sos
|
sos/plugins/xen.py
|
Python
|
gpl-2.0
| 3,945 | 0 |
# -*- coding: utf-8 -*-
## Copyright © 2012, Matthias Urlichs <matthias@urlichs.de>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
from __future__ import division,absolute_import
from rainman.models import Model
from rainman.models.site import Site
from django.db import models as m
from django.db.models import Q
# Tables for environmental effects.
# Note that table names are different for Hysterical Raisins.
class EnvGroup(Model):
    """Group of environmental correction data points attached to a site.

    Stores (temp, wind, sun) -> factor samples (EnvItem rows) and
    interpolates between them to scale irrigation amounts.
    """
    class Meta(Model.Meta):
        unique_together = (("site", "name"),)
        # Historical table name kept for backwards compatibility.
        db_table="rainman_paramgroup"
    def __unicode__(self):
        return self.name
    name = m.CharField(max_length=200)
    comment = m.CharField(max_length=200,blank=True)
    site = m.ForeignKey(Site,related_name="envgroups")
    factor = m.FloatField(default=1.0, help_text="Base Factor")
    rain = m.BooleanField(default=True,help_text="stop when it's raining?")
    def __init__(self,*a,**k):
        super(EnvGroup,self).__init__(*a,**k)
        # Cache of item querysets keyed by the (temp?,wind?,sun?) flag tuple.
        self.env_cache = {}
    def list_valves(self):
        return u"¦".join((d.name for d in self.valves.all()))
    def refresh(self):
        super(EnvGroup,self).refresh()
        # Drop the cached item lists; the data may have changed.
        self.env_cache = {}
    def env_factor_one(self, tws, h):
        """Inverse-distance-weighted factor for history entry *h*.

        *tws* is a (use_temp, use_wind, use_sun) flag tuple selecting which
        dimensions participate. Returns None when *h* lacks a required value
        or no matching data points contribute.
        """
        p=4 # power factor, favoring nearest-neighbor
        qtemp,qwind,qsun = tws
        if qtemp and h.temp is None: return None
        if qwind and h.wind is None: return None
        if qsun and h.sun is None: return None
        # Only use data points that have exactly the selected dimensions set.
        q=Q()
        q &= Q(temp__isnull=not qtemp)
        q &= Q(wind__isnull=not qwind)
        q &= Q(sun__isnull=not qsun)
        sum_f = 0
        sum_w = 0
        try:
            ec = self.env_cache[tws]
        except KeyError:
            self.env_cache[tws] = ec = list(self.items.filter(q))
        for ef in ec:
            d=0
            if qtemp:
                d += (h.temp-ef.temp)**2
            if qwind:
                d += (h.wind-ef.wind)**2
            if qsun:
                d += (h.sun-ef.sun)**2
            d = d**(p*0.5)
            if d < 0.001: # close enough
                return ef.factor
            sum_f += ef.factor/d
            sum_w += 1/d
        if not sum_w:
            return None
        return sum_f / sum_w
    def env_factor(self, h, logger=None):
        """Calculate a weighted factor for history entry @h, based on the given environmental parameters"""
        # Weighted geometric mean over dimension subsets: the full
        # temp+wind+sun estimate dominates, two-dimension estimates count
        # less, single-dimension ones least.
        ql=(
            (6,(True,True,True)),
            (4,(False,True,True)),
            (4,(True,False,True)),
            (4,(True,True,False)),
            (1,(True,False,False)),
            (1,(False,True,False)),
            (1,(False,False,True)),
            )
        sum_f = 1 # if there are no data, return 1
        sum_w = 1
        n = 1
        for weight,tws in ql:
            f = self.env_factor_one(tws,h)
            if f is not None:
                if logger:
                    logger("Simple factor %s%s%s: %f" % ("T" if tws[0] else "-", "W" if tws[1] else "-", "S" if tws[2] else "-", f))
                sum_f *= f**weight
                sum_w += weight
                n += 1
        return sum_f ** (n/sum_w)
    @property
    def schedules(self):
        from rainman.models.schedule import Schedule
        return Schedule.objects.filter(valve__envgroup=self)
class EnvItem(Model):
    """One environmental data point: (temp, wind, sun) -> correction factor."""
    class Meta(Model.Meta):
        # Historical table name kept for backwards compatibility.
        db_table="rainman_environmenteffect"
    def __unicode__(self):
        return u"@%s %s¦%s¦%s" % (self.group.name,self.temp,self.wind,self.sun)
    group = m.ForeignKey(EnvGroup,db_column="param_group_id",related_name="items")
    factor = m.FloatField(default=1.0, help_text="Factor to use at this data point")
    # these are single- or multi-dimensional data points for finding a reasonable factor
    temp = m.FloatField(blank=True,null=True, help_text="average temperature (°C)")
    wind = m.FloatField(blank=True,null=True, help_text="wind speed (m/s or whatever)")
    sun = m.FloatField(blank=True,null=True, help_text="how much sunshine was there (0-1)") # measured value
|
smurfix/HomEvenT
|
irrigation/rainman/models/env.py
|
Python
|
gpl-3.0
| 3,941 | 0.041921 |
import csv
from dateutil.parser import parse
from adoptarbol.tree.models import Tree
def load(filename):
    """Import trees from a CSV export into the Tree model.

    The file must start with a header row naming the columns; every
    following row is converted into keyword arguments for ``Tree`` and
    saved.  Numeric fields may use "," as the decimal separator (e.g.
    "12,5"); it is normalised to "." before parsing.

    :param filename: path to the UTF-8 encoded CSV file.
    """
    with open(filename, encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)

        def pos_for(field):
            # Index of the named column in the header row.
            return header.index(field)

        def float_or_none(string):
            # Parse a float, accepting "," as decimal separator;
            # return None for empty or non-numeric values.
            try:
                return float(string.replace(',', '.'))
            except ValueError:
                return None

        for row in reader:
            print('Procesando ', row)
            tree = {'code': row[pos_for('codigo')],
                    'common_name': row[pos_for('especie')],
                    'scientific_name': row[pos_for('cientifico')],
                    'family': row[pos_for('familia')],
                    'coord_utm_e': float_or_none(row[pos_for('utm_x')]),
                    'coord_utm_n': float_or_none(row[pos_for('utm_y')]),
                    'coord_utm_zone_letter': row[pos_for('utm_zone')],
                    'coord_utm_zone_n': row[pos_for('utm_south')],
                    'coord_lat': float_or_none(row[pos_for('lat')]),
                    'coord_lon': float_or_none(row[pos_for('long')]),
                    'photo': row[pos_for('fotos')],
                    'diameter': row[pos_for('dia')],
                    'height': row[pos_for('alt')],
                    'circ': row[pos_for('circ')],
                    'base_area': float_or_none(row[pos_for('areabasal')]),
                    'size_class': row[pos_for('clasetamano')],
                    'quality': float_or_none(row[pos_for('calidad')]),
                    'relevance': row[pos_for('relevancia')],
                    'notes': row[pos_for('notas')],
                    'phenology': row[pos_for('fenologia')],
                    'observation': row[pos_for('obs')],
                    'surveyed_on': parse(row[pos_for('fechahora')]),
                    }

            t = Tree(**tree)
            t.save()
"""
if __name__ == '__main__':
app = create_app(CONFIG)
manager = Manager(app)
with app.app_context():
load()
"""
|
icarito/arbio-azucar-adoptarbol
|
loader.py
|
Python
|
gpl-3.0
| 2,243 | 0.002675 |
from .endpoint import Endpoint
from .exceptions import MissingRequiredFieldError
from .fileuploads_endpoint import Fileuploads
from .. import RequestFactory, DatasourceItem, PaginationItem, ConnectionItem
import os
import logging
import copy
import cgi
from contextlib import closing
# The maximum size of a file that can be published in a single request is 64MB
FILESIZE_LIMIT = 1024 * 1024 * 64 # 64MB
# File extensions the server accepts for datasource publishing.
ALLOWED_FILE_EXTENSIONS = ['tds', 'tdsx', 'tde']
# Module-level logger for datasource endpoint operations.
logger = logging.getLogger('tableau.endpoint.datasources')
class Datasources(Endpoint):
    """REST endpoint wrapper for site datasources (query, download, publish)."""

    @property
    def baseurl(self):
        """Base URL of the datasources endpoint for the signed-in site."""
        return "{0}/sites/{1}/datasources".format(self.parent_srv.baseurl, self.parent_srv.site_id)

    # Get all datasources
    def get(self, req_options=None):
        """Return (datasource items, pagination item) for the site."""
        logger.info('Querying all datasources on site')
        url = self.baseurl
        server_response = self.get_request(url, req_options)
        pagination_item = PaginationItem.from_response(server_response.content)
        all_datasource_items = DatasourceItem.from_response(server_response.content)
        return all_datasource_items, pagination_item

    # Get 1 datasource by id
    def get_by_id(self, datasource_id):
        """Return a single DatasourceItem; raises ValueError on empty id."""
        if not datasource_id:
            error = "Datasource ID undefined."
            raise ValueError(error)
        logger.info('Querying single datasource (ID: {0})'.format(datasource_id))
        url = "{0}/{1}".format(self.baseurl, datasource_id)
        server_response = self.get_request(url)
        return DatasourceItem.from_response(server_response.content)[0]

    # Populate datasource item's connections
    def populate_connections(self, datasource_item):
        """Fetch and attach connection info; item must come from the server."""
        if not datasource_item.id:
            error = 'Datasource item missing ID. Datasource must be retrieved from server first.'
            raise MissingRequiredFieldError(error)
        url = '{0}/{1}/connections'.format(self.baseurl, datasource_item.id)
        server_response = self.get_request(url)
        datasource_item._set_connections(ConnectionItem.from_response(server_response.content))
        logger.info('Populated connections for datasource (ID: {0})'.format(datasource_item.id))

    # Delete 1 datasource by id
    def delete(self, datasource_id):
        """Delete the datasource; raises ValueError on empty id."""
        if not datasource_id:
            error = "Datasource ID undefined."
            raise ValueError(error)
        url = "{0}/{1}".format(self.baseurl, datasource_id)
        self.delete_request(url)
        logger.info('Deleted single datasource (ID: {0})'.format(datasource_id))

    # Download 1 datasource by id
    def download(self, datasource_id, filepath=None):
        """Stream the datasource content to disk and return the absolute path.

        The file name comes from the Content-Disposition header; *filepath*
        may be a target file or an existing directory.
        """
        if not datasource_id:
            error = "Datasource ID undefined."
            raise ValueError(error)
        url = "{0}/{1}/content".format(self.baseurl, datasource_id)

        with closing(self.get_request(url, parameters={'stream': True})) as server_response:
            _, params = cgi.parse_header(server_response.headers['Content-Disposition'])
            filename = os.path.basename(params['filename'])
            if filepath is None:
                filepath = filename
            elif os.path.isdir(filepath):
                filepath = os.path.join(filepath, filename)

            with open(filepath, 'wb') as f:
                for chunk in server_response.iter_content(1024):  # 1KB
                    f.write(chunk)
        logger.info('Downloaded datasource to {0} (ID: {1})'.format(filepath, datasource_id))
        return os.path.abspath(filepath)

    # Update datasource
    def update(self, datasource_item):
        """Push item changes to the server; returns an updated copy."""
        if not datasource_item.id:
            error = 'Datasource item missing ID. Datasource must be retrieved from server first.'
            raise MissingRequiredFieldError(error)

        url = "{0}/{1}".format(self.baseurl, datasource_item.id)
        update_req = RequestFactory.Datasource.update_req(datasource_item)
        server_response = self.put_request(url, update_req)
        logger.info('Updated datasource item (ID: {0})'.format(datasource_item.id))
        updated_datasource = copy.copy(datasource_item)
        return updated_datasource._parse_common_tags(server_response.content)

    # Publish datasource
    def publish(self, datasource_item, file_path, mode, connection_credentials=None):
        """Publish a local tds/tdsx/tde file; returns the new DatasourceItem.

        Files of 64MB or more are uploaded in chunks via the fileuploads
        endpoint before the publish request is issued.
        """
        if not os.path.isfile(file_path):
            error = "File path does not lead to an existing file."
            raise IOError(error)
        if not mode or not hasattr(self.parent_srv.PublishMode, mode):
            error = 'Invalid mode defined.'
            raise ValueError(error)

        filename = os.path.basename(file_path)
        file_extension = os.path.splitext(filename)[1][1:]

        # If name is not defined, grab the name from the file to publish
        if not datasource_item.name:
            datasource_item.name = os.path.splitext(filename)[0]
        if file_extension not in ALLOWED_FILE_EXTENSIONS:
            error = "Only {} files can be published as datasources.".format(', '.join(ALLOWED_FILE_EXTENSIONS))
            raise ValueError(error)

        # Construct the url with the defined mode
        url = "{0}?datasourceType={1}".format(self.baseurl, file_extension)
        if mode == self.parent_srv.PublishMode.Overwrite or mode == self.parent_srv.PublishMode.Append:
            url += '&{0}=true'.format(mode.lower())

        # Determine if chunking is required (64MB is the limit for single upload method)
        if os.path.getsize(file_path) >= FILESIZE_LIMIT:
            logger.info('Publishing {0} to server with chunking method (datasource over 64MB)'.format(filename))
            upload_session_id = Fileuploads.upload_chunks(self.parent_srv, file_path)
            url = "{0}&uploadSessionId={1}".format(url, upload_session_id)
            xml_request, content_type = RequestFactory.Datasource.publish_req_chunked(datasource_item,
                                                                                      connection_credentials)
        else:
            logger.info('Publishing {0} to server'.format(filename))
            with open(file_path, 'rb') as f:
                file_contents = f.read()
            xml_request, content_type = RequestFactory.Datasource.publish_req(datasource_item,
                                                                              filename,
                                                                              file_contents,
                                                                              connection_credentials)
        server_response = self.post_request(url, xml_request, content_type)
        new_datasource = DatasourceItem.from_response(server_response.content)[0]
        logger.info('Published {0} (ID: {1})'.format(filename, new_datasource.id))
        return new_datasource
|
Talvalin/server-client-python
|
tableauserverclient/server/endpoint/datasources_endpoint.py
|
Python
|
mit
| 6,780 | 0.003835 |
#!/usr/bin/env python
import argparse
import ConfigParser
import sys
import os
import multiprocessing
import itertools
import copy
import subprocess
from pprint import pprint
from benchmark.benchmarker import Benchmarker
from setup.linux.unbuffered import Unbuffered
from setup.linux import setup_util
from ast import literal_eval
# Enable cross-platform colored output
from colorama import init
init()
class StoreSeqAction(argparse.Action):
    '''Helper class for parsing a sequence from the command line.

    Accepts a comma-separated list where each element is either a plain
    integer or a start:step:end range (end exclusive), e.g. "1,2,4:2:10".
    '''
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        super(StoreSeqAction, self).__init__(option_strings, dest, type=str, **kwargs)
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.parse_seq(values))
    def parse_seq(self, argument):
        '''Expand range elements and return a list of non-negative ints.'''
        result = argument.split(',')
        sequences = [x for x in result if ":" in x]
        for sequence in sequences:
            try:
                (start,step,end) = sequence.split(':')
            except ValueError:
                print "  Invalid: %s" % sequence
                print "  Requires start:step:end, e.g. 1:2:10"
                raise
            result.remove(sequence)
            # NOTE: relies on Python 2 range() returning a list here.
            result = result + range(int(start), int(end), int(step))
        return [abs(int(item)) for item in result]
###################################################################################################
# Main
###################################################################################################
def main(argv=None):
    ''' Runs the program. There are three ways to pass arguments
    1) environment variables TFB_*
    2) configuration file benchmark.cfg
    3) command line flags
    In terms of precedence, 3 > 2 > 1, so config file trumps environment variables
    but command line flags have the final say

    Returns the benchmarker's exit value in benchmark/verify mode, or None
    on the listing / install-only paths.
    '''
    # Do argv default this way, as doing it in the functional declaration sets it at compile time
    if argv is None:
        argv = sys.argv
    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout=Unbuffered(sys.stdout)
    # Update python environment
    # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')
    # 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')
    # Update environment for shell scripts
    fwroot = setup_util.get_fwroot()
    if not fwroot:
        fwroot = os.getcwd()
    setup_util.replace_environ(config='config/benchmark_profile', root=fwroot)
    print "FWROOT is %s"%setup_util.get_fwroot()
    # First pass: a minimal parser that only looks for --conf_file, so the
    # config file can seed the defaults of the real parser below.
    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument('--conf_file', default='benchmark.cfg', metavar='FILE', help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
    args, remaining_argv = conf_parser.parse_known_args()
    try:
        with open (args.conf_file):
            config = ConfigParser.SafeConfigParser()
            config.read([os.getcwd() + '/' + args.conf_file])
            defaults = dict(config.items("Defaults"))
            # Convert strings into proper python types
            for k,v in defaults.iteritems():
                try:
                    defaults[k] = literal_eval(v)
                except Exception:
                    pass
    except IOError:
        # Only warn when the user explicitly named a config file; the
        # implicit default benchmark.cfg is allowed to be absent.
        if args.conf_file != 'benchmark.cfg':
            print 'Configuration file not found!'
        defaults = { "client-host":"localhost"}
    ##########################################################
    # Set up default values
    ##########################################################
    # Lowest-precedence defaults come from TFB_* environment variables.
    serverHost = os.environ.get('TFB_SERVER_HOST')
    clientHost = os.environ.get('TFB_CLIENT_HOST')
    clientUser = os.environ.get('TFB_CLIENT_USER')
    clientIden = os.environ.get('TFB_CLIENT_IDENTITY_FILE')
    runnerUser = os.environ.get('TFB_RUNNER_USER')
    databaHost = os.getenv('TFB_DATABASE_HOST', clientHost)
    databaUser = os.getenv('TFB_DATABASE_USER', clientUser)
    dbIdenFile = os.getenv('TFB_DATABASE_IDENTITY_FILE', clientIden)
    maxThreads = 8
    try:
        maxThreads = multiprocessing.cpu_count()
    except Exception:
        pass
    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(description="Install or run the Framework Benchmarks test suite.",
                                     parents=[conf_parser],
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')
    # SSH options
    parser.add_argument('-s', '--server-host', default=serverHost, help='The application server.')
    parser.add_argument('-c', '--client-host', default=clientHost, help='The client / load generation server.')
    parser.add_argument('-u', '--client-user', default=clientUser, help='The username to use for SSH to the client instance.')
    parser.add_argument('-r', '--runner-user', default=runnerUser, help='The user to run each test as.')
    parser.add_argument('-i', '--client-identity-file', dest='client_identity_file', default=clientIden,
                        help='The key to use for SSH to the client instance.')
    parser.add_argument('-d', '--database-host', default=databaHost,
                        help='The database server.  If not provided, defaults to the value of --client-host.')
    parser.add_argument('--database-user', default=databaUser,
                        help='The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.')
    parser.add_argument('--database-identity-file', default=dbIdenFile, dest='database_identity_file',
                        help='The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.')
    parser.add_argument('-p', dest='password_prompt', action='store_true', help='Prompt for password')
    # Install options
    parser.add_argument('--install', choices=['client', 'database', 'server', 'all'], default=None,
                        help='Runs installation script(s) before continuing on to execute the tests.')
    parser.add_argument('--install-error-action', choices=['abort', 'continue'], default='continue', help='action to take in case of error during installation')
    parser.add_argument('--install-strategy', choices=['unified', 'pertest'], default='unified',
                        help='''Affects : With unified, all server software is installed into a single directory.
        With pertest each test gets its own installs directory, but installation takes longer''')
    parser.add_argument('--install-only', action='store_true', default=False, help='Do not run benchmark or verification, just install and exit')
    parser.add_argument('--clean', action='store_true', default=False, help='Removes the results directory')
    parser.add_argument('--clean-all', action='store_true', dest='clean_all', default=False, help='Removes the results and installs directories')
    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
    parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
    parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
    parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
    parser.add_argument('--list-test-metadata', action='store_true', default=False, help='writes all the test metadata as a JSON file in the results directory')
    parser.add_argument('--os', choices=['linux', 'windows'], default='linux', help='The operating system of the application/framework server (the one running' +
                        'this binary')
    parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux', help='The operating system of the database server.')
    # Benchmark options
    parser.add_argument('--concurrency-levels', default=[8, 16, 32, 64, 128, 256], help='Runs wrk benchmarker with different concurrency value (type int-sequence)', action=StoreSeqAction)
    parser.add_argument('--query-levels', default=[1, 5,10,15,20], help='Database queries requested per HTTP connection, used during query test (type int-sequence)', action=StoreSeqAction)
    parser.add_argument('--threads', default=maxThreads, help='Run wrk benchmarker with this many threads. This should probably be the number of cores for your client system', type=int)
    parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
    parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
    # Misc Options
    parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
    parser.add_argument('--clear-tmp', action='store_true', default=False, help='Clears files written to /tmp after each framework\'s tests complete.')
    parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)
    # Verify and massage options
    if args.client_user is None:
        print 'Usernames (e.g. --client-user, --runner-user, and --database-user) are required!'
        print 'The system will SSH into the client and the database for the install stage'
        print 'Aborting'
        exit(1)
    if args.runner_user is None:
        print 'Usernames (e.g. --client-user, --runner-user, and --database-user) are required!'
        print 'The system will run each test as the runner-user'
        print 'Aborting'
        exit(1)
    # Database credentials fall back to the client's when not given.
    if args.database_user is None:
        args.database_user = args.client_user
    if args.database_host is None:
        args.database_host = args.client_host
    if args.verbose:
        print 'Configuration options: '
        pprint(vars(args))
    benchmarker = Benchmarker(vars(args))
    # Run the benchmarker in the specified mode
    #   Do not use benchmarker variables for these checks,
    #   they are either str or bool based on the python version
    if args.list_tests:
        benchmarker.run_list_tests()
    elif args.list_test_metadata:
        benchmarker.run_list_test_metadata()
    elif args.parse != None:
        benchmarker.parse_timestamp()
    elif not args.install_only:
        return benchmarker.run()
# Script entry point: the process exit status is whatever main() returns
# (None — i.e. success — on the listing / install-only paths).
if __name__ == "__main__":
    sys.exit(main())
|
PermeAgility/FrameworkBenchmarks
|
toolset/run-tests.py
|
Python
|
bsd-3-clause
| 11,680 | 0.009589 |
from Cython.Compiler.ModuleNode import ModuleNode
from Cython.Compiler.Symtab import ModuleScope
from Cython.TestUtils import TransformTest
from Cython.Compiler.Visitor import MethodDispatcherTransform
from Cython.Compiler.ParseTreeTransforms import (
NormalizeTree, AnalyseDeclarationsTransform,
AnalyseExpressionsTransform, InterpretCompilerDirectives)
class TestMethodDispatcherTransform(TransformTest):
    """Check that MethodDispatcherTransform routes calls to the matching
    _handle_simple_method_<type>_<name> hook."""

    _tree = None

    def _build_tree(self):
        # Build the analysed test tree lazily; cache it on the instance so
        # repeated calls within one test reuse the same tree.
        if self._tree is not None:
            return self._tree

        context = None

        def fake_module(node):
            # Wrap the parsed statements in a ModuleNode so the analysis
            # phases have a module scope to work in.
            scope = ModuleScope('test', None, None)
            return ModuleNode(node.pos, doc=None, body=node,
                              scope=scope, full_module_name='test',
                              directive_comments={})

        pipeline = [
            fake_module,
            NormalizeTree(context),
            InterpretCompilerDirectives(context, {}),
            AnalyseDeclarationsTransform(context),
            AnalyseExpressionsTransform(context),
        ]
        self._tree = self.run_pipeline(pipeline, u"""
        cdef bytes s = b'asdfg'
        cdef dict d = {1:2}
        x = s * 3
        d.get('test')
        """)
        return self._tree

    def test_builtin_method(self):
        hits = [0]

        class Dispatcher(MethodDispatcherTransform):
            def _handle_simple_method_dict_get(self, node, func, args, unbound):
                hits[0] += 1
                return node

        Dispatcher(None)(self._build_tree())
        self.assertEqual(1, hits[0])

    def test_binop_method(self):
        hits = {'bytes': 0, 'object': 0}

        class Dispatcher(MethodDispatcherTransform):
            def _handle_simple_method_bytes___mul__(self, node, func, args, unbound):
                hits['bytes'] += 1
                return node

            def _handle_simple_method_object___mul__(self, node, func, args, unbound):
                hits['object'] += 1
                return node

        Dispatcher(None)(self._build_tree())
        self.assertEqual(1, hits['bytes'])
        self.assertEqual(0, hits['object'])
|
JulienMcJay/eclock
|
windows/Python27/Lib/site-packages/Cython/Compiler/Tests/TestVisitor.py
|
Python
|
gpl-2.0
| 2,228 | 0.002693 |
"""
Copyright (C) 2016 ECHO Wizard : Modded by TeamGREEN
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xbmcgui
import urllib
import time
from urllib import FancyURLopener
import sys
class MyOpener(FancyURLopener):
    # Custom User-Agent string sent with every request made through this
    # opener (overrides FancyURLopener's default "Python-urllib/x.y").
    version = "WhosTheDaddy?"
# Module-level aliases: downloads in this module go through MyOpener so the
# custom User-Agent above is used.
myopener = MyOpener()
urlretrieve = MyOpener().retrieve
urlopen = MyOpener().open
# Title shown in Kodi dialogs raised by this module.
AddonTitle= "[COLORgreen]OptimusGREEN Tools[/COLOR]"
dialog = xbmcgui.Dialog()
def download(url, dest, dp = None):
    # Download `url` to local path `dest`, reporting progress via _pbhook.
    # A DialogProgress is created if none is supplied, but note that the
    # create/update calls below are commented out, so no dialog is actually
    # shown — presumably a deliberate "silent" download; confirm before
    # re-enabling them.
    if not dp:
        dp = xbmcgui.DialogProgress()
    # dp.create("[COLORgold]Download In Progress[/COLOR]"' ',' ', ' ')
    # dp.update(0)
    start_time=time.time()
    urlretrieve(url, dest, lambda nb, bs, fs: _pbhook(nb, bs, fs, dp, start_time))
def auto(url, dest, dp = None):
    # Same as download() but wired to the no-op hook (_pbhookauto), i.e. a
    # fully silent download.
    # NOTE(review): the dp parameter is immediately overwritten below, so any
    # caller-supplied dialog is ignored — confirm this is intentional.
    dp = xbmcgui.DialogProgress()
    start_time=time.time()
    urlretrieve(url, dest, lambda nb, bs, fs: _pbhookauto(nb, bs, fs, dp, start_time))
def _pbhookauto(numblocks, blocksize, filesize, url, dp):
    # Intentionally a no-op: silent reporthook used by auto().
    # NOTE(review): the parameter names don't match what auto() actually
    # forwards (nb, bs, fs, dp, start_time) — harmless since nothing is used.
    none = 0
def _pbhook(numblocks, blocksize, filesize, dp, start_time):
try:
percent = min(numblocks * blocksize * 100 / filesize, 100)
currently_downloaded = float(numblocks) * blocksize / (1024 * 1024)
kbps_speed = numblocks * blocksize / (time.time() - start_time)
if kbps_speed > 0:
eta = (filesize - numblocks * blocksize) / kbps_speed
else:
eta = 0
kbps_speed = kbps_speed / 1024
mbps_speed = kbps_speed / 1024
total = float(filesize) / (1024 * 1024)
mbs = '[COLOR green]%.02f MB[/COLOR] of [COLOR white][B]%.02f MB[/B][/COLOR]' % (currently_downloaded, total)
e = '[COLOR white][B]Speed: [/B][/COLOR][COLOR green]%.02f Mb/s ' % mbps_speed + '[/COLOR]'
e += '[COLOR white][B]ETA: [/B][/COLOR][COLOR green]%02d:%02d' % divmod(eta, 60) + '[/COLOR]'
# dp.update(percent, "",mbs, e)
except:
percent = 100
# dp.update(percent)
# if dp.iscanceled():
# dialog.ok(AddonTitle, 'The download was cancelled.')
# dp.close()
quit()
|
OptimusGREEN/repo67beta
|
OGT Installer/plugin.program.ogtools/downloader.py
|
Python
|
gpl-3.0
| 2,845 | 0.011951 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Andrea Cometa.
# Email: info@andreacometa.it
# Web site: http://www.andreacometa.it
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2012 Associazione OpenERP Italia
# (<http://www.odoo-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class AccountConfigSettings(models.TransientModel):
    """Expose the company's default RiBa collection-fee service on the
    account settings wizard."""
    _inherit = 'account.config.settings'

    # Related field: reads/writes company_id.due_cost_service_id directly.
    due_cost_service_id = fields.Many2one(
        related='company_id.due_cost_service_id',
        help='Default Service for RiBa Due Cost (collection fees) on invoice',
        domain=[('type', '=', 'service')])

    def default_get(self, cr, uid, fields, context=None):
        """Pre-populate the wizard with the current user's company default
        service (old-style (cr, uid) API signature)."""
        res = super(AccountConfigSettings, self).default_get(
            cr, uid, fields, context)
        if res:
            user = self.pool['res.users'].browse(cr, uid, uid, context)
            res['due_cost_service_id'] = user.company_id.due_cost_service_id.id
        return res
class ResCompany(models.Model):
    """Store the company-wide default RiBa due-cost service."""
    _inherit = 'res.company'

    # Product used by default to invoice RiBa collection fees.
    due_cost_service_id = fields.Many2one('product.product')
|
abstract-open-solutions/l10n-italy
|
l10n_it_ricevute_bancarie/models/account_config.py
|
Python
|
agpl-3.0
| 2,038 | 0 |
from notifications_utils.clients.antivirus.antivirus_client import (
AntivirusClient,
)
from notifications_utils.clients.redis.redis_client import RedisClient
from notifications_utils.clients.zendesk.zendesk_client import ZendeskClient
# Module-level client singletons shared across the application.
# NOTE(review): presumably each is configured later (e.g. via an init_app
# call in the application factory) — confirm before using them at import time.
antivirus_client = AntivirusClient()
zendesk_client = ZendeskClient()
redis_client = RedisClient()
|
alphagov/notifications-admin
|
app/extensions.py
|
Python
|
mit
| 340 | 0 |
#!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
This program scans the log files generated by a RAMCloud recovery,
extracts performance metrics, and print a summary of interesting data
from those metrics.
"""
from __future__ import division, print_function
from glob import glob
from optparse import OptionParser
from pprint import pprint
from functools import partial
import math
import os
import random
import re
import sys
from common import *
__all__ = ['parseRecovery', 'makeReport']
### Utilities:
class AttrDict(dict):
    """A mapping with string keys that aliases x.y syntax to x['y'] syntax.

    The attribute syntax is easier to read and type than the item syntax.
    """
    def __getattr__(self, name):
        # Reading a missing attribute auto-vivifies a nested AttrDict,
        # which is what lets deep paths be read before they are written.
        if name not in self:
            self[name] = AttrDict()
        return self[name]
    def __setattr__(self, name, value):
        self[name] = value
    def __delattr__(self, name):
        del self[name]
    def assign(self, path, value):
        """
        Given a hierarchical path such as 'x.y.z' and
        a value, perform an assignment as if the statement
        self.x.y.z had been invoked.
        """
        parts = path.split('.')
        node = self
        # Walk (and create, where missing) every intermediate level.
        for part in parts[:-1]:
            node = node.setdefault(part, AttrDict())
        node[parts[-1]] = value
def parse(f):
    """
    Scan a log file containing metrics for several servers, and return
    a list of AttrDicts, one containing the metrics for each server.
    """
    list = []  # NOTE(review): shadows the builtin 'list'
    for line in f:
        match = re.match('.* Metrics: (.*)$', line)
        if not match:
            continue
        info = match.group(1)
        start = re.match('begin server (.*)', info)
        if start:
            # A "begin server" marker opens a new per-server record.
            list.append(AttrDict())
            # Compute a human-readable name for this server (ideally
            # just its short host name).
            short_name = re.search('host=([^,]*)', start.group(1))
            if short_name:
                list[-1].server = short_name.group(1)
            else:
                list[-1].server = start.group(1)
            continue;
        if len(list) == 0:
            # Python 2 raise syntax — this script predates Python 3.
            raise Exception, ('metrics data before "begin server" in %s'
                              % f.name)
        # Each metric line is "<dotted.name> <integer>"; dotted names nest
        # into the AttrDict hierarchy via assign().
        var, value = info.split(' ')
        list[-1].assign(var, int(value))
    if len(list) == 0:
        raise Exception, 'no metrics in %s' % f.name
    return list
def maxTuple(tuples):
    """Return the tuple whose first element is largest.

    Returns None for an empty input.  Note the threshold starts at 0.0, so
    tuples whose first element is not positive can never be selected.
    """
    best = None
    bestKey = 0.0
    for candidate in tuples:
        if candidate[0] > bestKey:
            bestKey = candidate[0]
            best = candidate
    return best
def minTuple(tuples):
    """Return the tuple whose first element is smallest.

    Returns None for an empty input.  The sentinel threshold is 1e100, so a
    tuple whose first element is at or above that can never be selected.
    """
    best = None
    bestKey = 1e100
    for candidate in tuples:
        if candidate[0] < bestKey:
            bestKey = candidate[0]
            best = candidate
    return best
def values(s):
    """Return a list of the second item of each pair in the sequence *s*."""
    return list(map(lambda item: item[1], s))
def scale(points, scalar):
    """Try really hard to scale 'points' by 'scalar'.

    @type  points: mixed
    @param points: either a sequence of (label, value) pairs (only the values
        are scaled), a plain list of numbers, or a bare number.
    @type  scalar: number
    @param scalar: the amount to multiply points by
    """
    if type(points) is not list:
        # A bare number (or any non-list): plain multiplication.
        return points * scalar
    try:
        # List of (label, value) pairs: scale only the values.
        return [(label, value * scalar) for label, value in points]
    except TypeError:
        # Items were not pairs: treat as a flat list of numbers.
        return [value * scalar for value in points]
def toString(x):
    """Format x right-aligned in a 7-character field: ints as plain numbers,
    floats with two decimals, anything else via string formatting."""
    formats = {int: '{0:7d}', float: '{0:7.2f}'}
    template = formats.get(type(x), '{0:>7s}')
    return template.format(x)
### Summary functions
# This is a group of functions that can be passed in to Section.line() to
# affect how the line is summarized.
# Each one takes the following arguments:
# - values, which is a list of numbers
# - unit, which is a short string specifying the units for values
# Each returns a list, possibly empty, of strings to contribute to the summary
# text. The summary strings are later displayed with a delimiter or line break
# in between.
def AVG(values, unit):
    """Summarize with the mean; the 'avg' suffix appears only when the
    values actually vary (otherwise the single common value is shown)."""
    varied = max(values) > min(values)
    if varied:
        rep = toString(sum(values) / len(values))
    else:
        rep = toString(values[0])
    if unit:
        rep += ' ' + unit
    if varied:
        rep += ' avg'
    return [rep]
def MIN(values, unit):
    """Summarize with the minimum, but only when there are at least two
    values and they actually vary; otherwise contribute nothing."""
    if len(values) >= 2 and min(values) != max(values):
        rep = toString(min(values))
        if unit:
            rep += ' ' + unit
        return [rep + ' min']
    return []
def MAX(values, unit):
    """Summarize with the maximum, but only when there are at least two
    values and they actually vary; otherwise contribute nothing."""
    if len(values) >= 2 and min(values) != max(values):
        rep = toString(max(values))
        if unit:
            rep += ' ' + unit
        return [rep + ' max']
    return []
def SUM(values, unit):
    """Summarize with the total, when there is at least one value."""
    if not values:
        return []
    rep = toString(sum(values))
    if unit:
        rep += ' ' + unit
    return [rep + ' total']
def FRAC(total):
    """Return a summary function that shows the average of the values as a
    percentage of the given total."""
    def realFrac(values, unit):
        # The percentage is its own unit, so 'unit' is ignored here.
        rep = toString(sum(values) / len(values) / total * 100) + '%'
        if max(values) > min(values):
            rep += ' avg'
        return [rep]
    return realFrac
def CUSTOM(s):
    """Return a summary function that ignores its inputs and always yields
    the given string (wrapped in a list) or list of strings.

    Useful for custom processing too specific to merit a dedicated summary
    function.
    """
    def realCustom(values, unit):
        if type(s) is not list:
            return [s]
        return s
    return realCustom
### Report structure:
class Report(object):
    """An ordered collection of Sections, serializable for dumpstr upload."""
    def __init__(self):
        self.sections = []
    def add(self, section):
        """Append 'section' to the report and return it (handy for chaining)."""
        self.sections.append(section)
        return section
    def jsonable(self):
        """Return the dumpstr-format JSON-encodable form of the report:
        the jsonable() of every non-empty section, in insertion order."""
        return [s.jsonable() for s in self.sections if s]
class Section(object):
    """A part of a Report consisting of lines with present metrics."""
    def __init__(self, key):
        """
        @type key: string
        @param key: a stable, unique string identifying the section
            This should not ever be changed, as dumpstr's labels and
            descriptions are looked up by this key.
        """
        self.key = key
        self.lines = []
    def __len__(self):
        # Empty sections are falsy, which is how Report.jsonable() filters
        # them out with a plain truth test.
        return len(self.lines)
    def jsonable(self):
        """Return a representation of the section that can be JSON-encoded in
        dumpstr format.
        """
        return {'key': self.key, 'lines': self.lines}
    def line(self, key, points, unit='',
             summaryFns=[AVG, MAX]):
        """Add a line to the Section.
        @type key: string
        @param key: a stable, unique string identifying the line
            This should not ever be changed, as dumpstr's labels and
            descriptions are looked up by this key.
        @type points: number, string, list of numbers, or
                      list of pairs of (string label, number)
        @param points: the data, in detail
        @type unit: string
        @param unit: a short string specifying the units for values
        @type summaryFns: list of summary functions
        @param summaryFns: used to create a short summary of the data
            See the big comment block under "Summary functions" above.
        """
        # NOTE(review): the mutable default summaryFns=[AVG, MAX] is shared
        # across calls, but it is never mutated here, so it is harmless.
        if unit:
            spaceUnit = ' ' + unit
        else:
            spaceUnit = ''
        # NOTE(review): spaceUnit is computed but never used below.
        if type(points) is str:
            # A string is passed through verbatim as the entire summary.
            summary = points
        else:
            # Collect the numeric values, whether points is a bare number,
            # a flat list of numbers, or a list of (label, number) pairs.
            values = []
            if type(points) is list:
                for point in points:
                    try:
                        label, point = point
                    except TypeError:
                        # Not a (label, value) pair: the item is the value.
                        pass
                    assert type(point) in [int, float]
                    values.append(point)
            else:
                assert type(points) in [int, float]
                values.append(points)
            # Each summary function contributes zero or more summary strings.
            summary = []
            for fn in summaryFns:
                summary += fn(values, unit)
        self.lines.append({'key': key,
                           'summary': summary,
                           'points': points,
                           'unit': unit})
    def ms(self, key, points, total=None):
        """A commonly used line type for printing in milliseconds.
        @param key: see line
        @param points: The points given in units of seconds (they will be
            scaled by 1000 internally).
        @param total: If the time is a fraction of some total, this is that
            total, in seconds (adds a FRAC percentage to the summary).
        """
        summaryFns = [AVG, MAX]
        if total:
            summaryFns.append(FRAC(total * 1000))
        self.line(key, scale(points, 1000), unit='ms', summaryFns=summaryFns)
def parseRecovery(recovery_dir):
    """Load the metrics logged by one recovery run from 'recovery_dir' into
    an AttrDict with servers, masters, backups, coordinator, and client
    timing fields."""
    data = AttrDict()
    data.log_dir = os.path.realpath(os.path.expanduser(recovery_dir))
    logFile = glob('%s/client*.*.log' % recovery_dir)[0]
    data.backups = []
    data.masters = []
    data.servers = parse(open(logFile))
    for server in data.servers:
        # Each iteration of this loop corresponds to one server's
        # log file. Figure out whether this server is a coordinator,
        # master, backup, or both master and backup, and put the
        # data in appropriate sub-lists.
        if server.backup.recoveryCount > 0:
            data.backups.append(server)
        if server.master.recoveryCount > 0:
            data.masters.append(server)
        if server.coordinator.recoveryCount > 0:
            data.coordinator = server
    # Calculate the total number of unique server nodes (subtract 1 for the
    # coordinator).
    data.totalNodes = len(set([server.server for server in data.servers])) - 1
    # Pull the client-observed recovery/detection times out of the client log
    # (same glob as above; the last matching line wins).
    data.client = AttrDict()
    for line in open(glob('%s/client*.*.log' % recovery_dir)[0]):
        m = re.search(
            r'\bRecovery completed in (\d+) ns, failure detected in (\d+) ns\b',
            line)
        if m:
            failureDetectionNs = int(m.group(2))
            data.client.recoveryNs = int(m.group(1)) - failureDetectionNs
            data.client.failureDetectionNs = failureDetectionNs
    return data
def rawSample(data):
    """Print a debugging sample: the client and coordinator metrics plus one
    randomly chosen master and one randomly chosen backup."""
    sections = [
        ('Client:', data.client),
        ('Coordinator:', data.coordinator),
        ('Sample Master:', random.choice(data.masters)),
        ('Sample Backup:', random.choice(data.backups)),
    ]
    for index, (title, metrics) in enumerate(sections):
        # The two random samples are preceded by a blank separator line.
        if index >= 2:
            print()
        print(title)
        pprint(metrics)
def rawFull(data):
    """Prints out all raw data for debugging"""
    # Dumps the entire metrics tree; output can be very large.
    pprint(data)
def makeReport(data):
"""Generate ASCII report"""
coord = data.coordinator
masters = data.masters
backups = data.backups
servers = data.servers
recoveryTime = data.client.recoveryNs / 1e9
failureDetectionTime = data.client.failureDetectionNs / 1e9
report = Report()
# TODO(ongaro): Size distributions of filtered segments
def make_fail_fun(fun, fail):
"""Wrap fun to return fail instead of throwing ZeroDivisionError."""
def fun2(x):
try:
return fun(x)
except ZeroDivisionError:
return fail
return fun2
def on_masters(fun, fail=0):
"""Call a function on each master,
replacing ZeroDivisionErrors with 'fail'."""
fun2 = make_fail_fun(fun, fail)
return [(master.serverId, fun2(master)) for master in masters]
def on_backups(fun, fail=0):
"""Call a function on each backup,
replacing ZeroDivisionErrors with 'fail'."""
fun2 = make_fail_fun(fun, fail)
return [(backup.serverId, fun2(backup)) for backup in backups]
summary = report.add(Section('Summary'))
summary.line('Recovery time', recoveryTime, 's')
summary.line('Failure detection time', failureDetectionTime, 's')
summary.line('Recovery + detection time',
recoveryTime + failureDetectionTime, 's')
summary.line('Masters', len(masters))
summary.line('Backups', len(backups))
summary.line('Total nodes', data.totalNodes)
summary.line('Replicas',
masters[0].master.replicas)
summary.line('Objects per master',
on_masters(lambda m: m.master.liveObjectCount))
summary.line('Object size',
on_masters(lambda m: m.master.liveObjectBytes /
m.master.liveObjectCount),
'bytes')
summary.line('Total recovery segment entries',
sum([master.master.recoverySegmentEntryCount
for master in masters]))
summary.line('Total live object space',
sum([master.master.liveObjectBytes
for master in masters]) / 1024.0 / 1024.0,
'MB')
summary.line('Total recovery segment space w/ overhead',
sum([master.master.segmentReadByteCount
for master in masters]) / 1024.0 / 1024.0,
'MB')
if backups:
storageTypes = set([backup.backup.storageType for backup in backups])
if len(storageTypes) > 1:
storageTypeStr = 'mixed'
else:
storageType = storageTypes.pop()
if storageType == 1:
storageTypeStr = 'memory'
elif storageType == 2:
storageTypeStr = 'disk'
else:
storageTypeStr = 'unknown (%s)' % storageType
summary.line('Storage type', storageTypeStr)
summary.line('Log directory', data.log_dir)
coordSection = report.add(Section('Coordinator Time'))
coordSection.ms('Total',
coord.coordinator.recoveryTicks /
coord.clockFrequency,
total=recoveryTime)
coordSection.ms('Starting recovery on backups',
coord.coordinator.recoveryConstructorTicks / coord.clockFrequency,
total=recoveryTime)
coordSection.ms('Starting recovery on masters',
coord.coordinator.recoveryStartTicks / coord.clockFrequency,
total=recoveryTime)
coordSection.ms('Tablets recovered',
coord.rpc.tabletsRecoveredTicks / coord.clockFrequency,
total=recoveryTime)
coordSection.ms('Completing recovery on backups',
coord.coordinator.recoveryCompleteTicks / coord.clockFrequency,
total=recoveryTime)
coordSection.ms('Set will',
coord.rpc.setWillTicks / coord.clockFrequency,
total=recoveryTime)
coordSection.ms('Get tablet map',
coord.rpc.getTabletMapTicks / coord.clockFrequency,
total=recoveryTime)
coordSection.ms('Other',
((coord.coordinator.recoveryTicks -
coord.coordinator.recoveryConstructorTicks -
coord.coordinator.recoveryStartTicks -
coord.rpc.setWillTicks -
coord.rpc.getTabletMapTicks -
coord.rpc.tabletsRecoveredTicks) /
coord.clockFrequency),
total=recoveryTime)
coordSection.ms('Receiving in transport',
coord.transport.receive.ticks / coord.clockFrequency,
total=recoveryTime)
masterSection = report.add(Section('Master Time'))
def master_ticks(label, field):
"""This is a shortcut for adding to the masterSection a recorded number
of ticks that are a fraction of the total recovery.
@type label: string
@param label: the key for the line
@type field: string
@param field: the field within a master's metrics that collected ticks
"""
masterSection.ms(label,
on_masters(lambda m: eval('m.' + field) /
m.clockFrequency),
total=recoveryTime)
master_ticks('Total',
'master.recoveryTicks')
master_ticks('Waiting for incoming segments',
'master.segmentReadStallTicks')
master_ticks('Inside recoverSegment',
'master.recoverSegmentTicks')
master_ticks('Backup.proceed',
'master.backupInRecoverTicks')
master_ticks('Verify checksum',
'master.verifyChecksumTicks')
master_ticks('Segment append',
'master.segmentAppendTicks')
master_ticks('Segment append copy',
'master.segmentAppendCopyTicks')
master_ticks('Segment append checksum',
'master.segmentAppendChecksumTicks')
masterSection.ms('Other recoverSegment',
on_masters(lambda m: (m.master.recoverSegmentTicks -
m.master.backupInRecoverTicks -
m.master.verifyChecksumTicks -
m.master.segmentAppendTicks) /
m.clockFrequency),
total=recoveryTime)
master_ticks('Final log sync',
'master.logSyncTicks')
master_ticks('Removing tombstones',
'master.removeTombstoneTicks')
masterSection.ms('Other',
on_masters(lambda m: (m.master.recoveryTicks -
m.master.segmentReadStallTicks -
m.master.recoverSegmentTicks -
m.master.logSyncTicks -
m.master.removeTombstoneTicks) /
m.clockFrequency),
total=recoveryTime)
master_ticks('Receiving in transport',
'transport.receive.ticks')
master_ticks('Transmitting in transport',
'transport.transmit.ticks')
master_ticks('Opening sessions',
'transport.sessionOpenTicks')
# There used to be a bunch of code here for analyzing the variance in
# session open times. We don't open sessions during recovery anymore, so
# I've deleted this code. Look in the git repo for mid-2011 if you want it
# back. -Diego
masterSection.ms('Replicating each segment',
on_masters(lambda m: (
(m.master.replicationTicks / m.clockFrequency) /
math.ceil(m.master.replicationBytes / m.segmentSize) /
m.master.replicas)))
masterSection.ms('Replicating each segment during replay',
on_masters(lambda m: (
((m.master.replicationTicks - m.master.logSyncTicks) /
m.clockFrequency) /
math.ceil((m.master.replicationBytes - m.master.logSyncBytes) /
m.segmentSize) /
m.master.replicas)))
masterSection.ms('Replicating each segment during log sync',
on_masters(lambda m: (
(m.master.logSyncTicks / m.clockFrequency) /
math.ceil(m.master.logSyncBytes / m.segmentSize) /
m.master.replicas)))
masterSection.ms('RPC latency replicating each segment',
on_masters(lambda m: (
(m.master.backupCloseTicks + m.master.logSyncCloseTicks) /
m.clockFrequency /
(m.master.backupCloseCount + m.master.logSyncCloseCount))))
masterSection.ms('RPC latency replicating each segment during replay',
on_masters(lambda m: m.master.backupCloseTicks / m.clockFrequency /
m.master.backupCloseCount))
masterSection.ms('RPC latency replicating each segment during log sync',
on_masters(lambda m: m.master.logSyncCloseTicks / m.clockFrequency /
m.master.logSyncCloseCount))
master_ticks('Replication',
'master.replicationTicks')
master_ticks('Client RPCs Active',
'transport.clientRpcsActiveTicks')
masterSection.ms('Average GRD completion time',
on_masters(lambda m: (m.master.segmentReadTicks /
m.master.segmentReadCount /
m.clockFrequency)))
backupSection = report.add(Section('Backup Time'))
def backup_ticks(label, field):
"""This is a shortcut for adding to the backupSection a recorded number
of ticks that are a fraction of the total recovery.
@type label: string
@param label: the key for the line
@type field: string
@param field: the field within a backup's metrics that collected ticks
"""
backupSection.ms(label,
on_backups(lambda b: eval('b.' + field) /
b.clockFrequency),
total=recoveryTime)
backup_ticks('RPC service time',
'backup.serviceTicks')
backup_ticks('startReadingData RPC',
'rpc.backupStartReadingDataTicks')
backup_ticks('write RPC',
'rpc.backupWriteTicks')
backup_ticks('Open segment memset',
'backup.writeClearTicks')
backup_ticks('Write copy',
'backup.writeCopyTicks')
backupSection.ms('Other write RPC',
on_backups(lambda b: (b.rpc.backupWriteTicks -
b.backup.writeClearTicks -
b.backup.writeCopyTicks) /
b.clockFrequency),
total=recoveryTime)
backup_ticks('getRecoveryData RPC',
'rpc.backupGetRecoveryDataTicks')
backupSection.ms('Other',
on_backups(lambda b: (b.backup.serviceTicks -
b.rpc.backupStartReadingDataTicks -
b.rpc.backupWriteTicks -
b.rpc.backupGetRecoveryDataTicks) /
b.clockFrequency),
total=recoveryTime)
backup_ticks('Transmitting in transport',
'transport.transmit.ticks')
backup_ticks('Filtering segments',
'backup.filterTicks')
backup_ticks('Reading segments',
'backup.readingDataTicks')
backup_ticks('Using disk',
'backup.storageReadTicks')
backupSection.line('getRecoveryData completions',
on_backups(lambda b: b.backup.readCompletionCount))
backupSection.line('getRecoveryData retry fraction',
on_backups(lambda b: (b.rpc.backupGetRecoveryDataCount -
b.backup.readCompletionCount) /
b.rpc.backupGetRecoveryDataCount))
efficiencySection = report.add(Section('Efficiency'))
efficiencySection.line('recoverSegment CPU',
(sum([m.master.recoverSegmentTicks / m.clockFrequency
for m in masters]) * 1000 /
sum([m.master.segmentReadCount
for m in masters])),
unit='ms avg')
efficiencySection.line('Writing a segment',
(sum([b.rpc.backupWriteTicks / b.clockFrequency
for b in backups]) * 1000 /
# Divide count by 2 since each segment does two writes:
# one to open the segment and one to write the data.
sum([b.rpc.backupWriteCount / 2
for b in backups])),
unit='ms avg')
efficiencySection.line('Filtering a segment',
sum([b.backup.filterTicks / b.clockFrequency * 1000
for b in backups]) /
sum([b.backup.storageReadCount
for b in backups]),
unit='ms avg')
efficiencySection.line('Memory bandwidth (backup copies)',
on_backups(lambda b: (
(b.backup.writeCopyBytes / 2**30) /
(b.backup.writeCopyTicks / b.clockFrequency))),
unit='GB/s',
summaryFns=[AVG, MIN])
networkSection = report.add(Section('Network Utilization'))
networkSection.line('Aggregate',
(sum([host.transport.transmit.byteCount
for host in [coord] + masters + backups]) *
8 / 2**30 / recoveryTime),
unit='Gb/s',
summaryFns=[AVG, FRAC(data.totalNodes*25)])
networkSection.line('Master in',
on_masters(lambda m: (m.transport.receive.byteCount * 8 / 2**30) /
recoveryTime),
unit='Gb/s',
summaryFns=[AVG, MIN, SUM])
networkSection.line('Master out',
on_masters(lambda m: (m.transport.transmit.byteCount * 8 / 2**30) /
recoveryTime),
unit='Gb/s',
summaryFns=[AVG, MIN, SUM])
networkSection.line('Master out during replication',
on_masters(lambda m: (m.master.replicationBytes * 8 / 2**30) /
(m.master.replicationTicks / m.clockFrequency)),
unit='Gb/s',
summaryFns=[AVG, MIN, SUM])
networkSection.line('Master out during log sync',
on_masters(lambda m: (m.master.logSyncBytes * 8 / 2**30) /
(m.master.logSyncTicks / m.clockFrequency)),
unit='Gb/s',
summaryFns=[AVG, MIN, SUM])
networkSection.line('Backup in',
on_backups(lambda b: (b.transport.receive.byteCount * 8 / 2**30) /
recoveryTime),
unit='Gb/s',
summaryFns=[AVG, MIN, SUM])
networkSection.line('Backup out',
on_backups(lambda b: (b.transport.transmit.byteCount * 8 / 2**30) /
recoveryTime),
unit='Gb/s',
summaryFns=[AVG, MIN, SUM])
diskSection = report.add(Section('Disk Utilization'))
diskSection.line('Effective bandwidth',
on_backups(lambda b: (b.backup.storageReadBytes +
b.backup.storageWriteBytes) /
2**20 / recoveryTime),
unit='MB/s',
summaryFns=[AVG, MIN, SUM])
def active_bandwidth(b):
totalBytes = b.backup.storageReadBytes + b.backup.storageWriteBytes
totalTicks = b.backup.storageReadTicks + b.backup.storageWriteTicks
return ((totalBytes / 2**20) /
(totalTicks / b.clockFrequency))
diskSection.line('Active bandwidth',
on_backups(active_bandwidth),
unit='MB/s',
summaryFns=[AVG, MIN, SUM])
diskSection.line('Active bandwidth reading',
on_backups(lambda b: (b.backup.storageReadBytes / 2**20) /
(b.backup.storageReadTicks / b.clockFrequency)),
unit='MB/s',
summaryFns=[AVG, MIN, SUM])
diskSection.line('Active bandwidth writing',
on_backups(lambda b: (b.backup.storageWriteBytes / 2**20) /
(b.backup.storageWriteTicks / b.clockFrequency)),
unit='MB/s',
summaryFns=[AVG, MIN, SUM])
diskSection.line('Disk active time',
on_backups(lambda b: 100 * (b.backup.storageReadTicks +
b.backup.storageWriteTicks) /
b.clockFrequency /
recoveryTime),
unit='%')
diskSection.line('Disk reading time',
on_backups(lambda b: 100 * b.backup.storageReadTicks /
b.clockFrequency /
recoveryTime),
unit='%')
diskSection.line('Disk writing time',
on_backups(lambda b: 100 * b.backup.storageWriteTicks /
b.clockFrequency /
recoveryTime),
unit='%')
backupSection = report.add(Section('Backup Events'))
backupSection.line('Segments read',
on_backups(lambda b: b.backup.storageReadCount))
backupSection.line('Primary segments loaded',
on_backups(lambda b: b.backup.primaryLoadCount))
backupSection.line('Secondary segments loaded',
on_backups(lambda b: b.backup.secondaryLoadCount))
slowSection = report.add(Section('Slowest Servers'))
slowest = maxTuple([
[1e03 * (master.master.replicaManagerTicks -
master.master.logSyncTicks) / master.clockFrequency,
master.server] for master in masters])
if slowest:
slowSection.line('Backup opens, writes',
slowest[0],
summaryFns=[CUSTOM(slowest[1]),
CUSTOM('{0:.1f} ms'.format(slowest[0]))])
slowest = maxTuple([
[1e03 * master.master.segmentReadStallTicks /
master.clockFrequency, master.server]
for master in masters])
if slowest:
slowSection.line('Stalled reading segs from backups',
slowest[0],
summaryFns=[CUSTOM(slowest[1]),
CUSTOM('{0:.1f} ms'.format(slowest[0]))])
slowest = minTuple([
[(backup.backup.storageReadBytes / 2**20) /
(backup.backup.storageReadTicks / backup.clockFrequency),
backup.server] for backup in backups
if (backup.backup.storageReadTicks > 0)])
if slowest:
slowSection.line('Reading from disk',
slowest[0],
summaryFns=[CUSTOM(slowest[1]),
CUSTOM('{0:.1f} MB/s'.format(slowest[0]))])
slowest = minTuple([
[(backup.backup.storageWriteBytes / 2**20) /
(backup.backup.storageWriteTicks / backup.clockFrequency),
backup.server] for backup in backups
if backup.backup.storageWriteTicks])
if slowest:
slowSection.line('Writing to disk',
slowest[0],
summaryFns=[CUSTOM(slowest[1]),
CUSTOM('{0:.1f} MB/s'.format(slowest[0]))])
tempSection = report.add(Section('Temporary Metrics'))
for i in range(10):
field = 'ticks{0:}'.format(i)
points = [(host.serverId, host.temp[field] / host.clockFrequency)
for host in servers]
if any(values(points)):
tempSection.ms('temp.%s' % field,
points,
total=recoveryTime)
for i in range(10):
field = 'count{0:}'.format(i)
points = [(host.serverId, host.temp[field])
for host in servers]
if any(values(points)):
tempSection.line('temp.%s' % field,
points)
return report
def main():
    """Parse command-line options, analyze a recovery log directory, and
    print the resulting report via dumpstr."""
    parser = OptionParser()
    parser.add_option('-r', '--raw',
                      dest='raw', action='store_true',
                      help='Print out raw data (helpful for debugging)')
    parser.add_option('-a', '--all',
                      dest='all', action='store_true',
                      help='Print out all raw data not just a sample')
    options, args = parser.parse_args()
    # First positional argument overrides the default log directory.
    recovery_dir = args[0] if args else 'logs/latest'
    data = parseRecovery(recovery_dir)
    if options.raw:
        dumper = rawFull if options.all else rawSample
        dumper(data)
    report = makeReport(data).jsonable()
    getDumpstr().print_report(report)

if __name__ == '__main__':
    sys.exit(main())
|
taschik/ramcloud-load-manager
|
scripts/recoverymetrics.py
|
Python
|
isc
| 32,500 | 0.003692 |
"""
Classes used to model the roles used in the courseware. Each role is responsible for checking membership,
adding users, removing users, and listing members
"""
from abc import ABCMeta, abstractmethod
from django.contrib.auth.models import User, Group
from xmodule.modulestore import Location
from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError
from xmodule.modulestore.django import loc_mapper
from xmodule.modulestore.locator import CourseLocator, Locator
class CourseContextRequired(Exception):
    """
    Raised when a course_context is required to determine permissions
    """
class AccessRole(object):
    """
    Abstract base for a role granting particular access to a resource.
    Concrete roles implement membership testing plus adding, removing,
    and listing users.
    """
    # Python 2 metaclass hook; makes the @abstractmethod declarations binding.
    __metaclass__ = ABCMeta
    @abstractmethod
    def has_user(self, user): # pylint: disable=unused-argument
        """
        Return whether the supplied django user has access to this role.
        """
        return False
    @abstractmethod
    def add_users(self, *users):
        """
        Add the role to the supplied django users.
        """
        pass
    @abstractmethod
    def remove_users(self, *users):
        """
        Remove the role from the supplied django users.
        """
        pass
    @abstractmethod
    def users_with_role(self):
        """
        Return a django QuerySet for all of the users with this role
        """
        return User.objects.none()
class GlobalStaff(AccessRole):
    """Role backed by django's built-in ``is_staff`` flag on the user."""
    def has_user(self, user):
        """Membership is simply the user's ``is_staff`` flag."""
        return user.is_staff
    def _set_staff_flag(self, users, value):
        # Shared write path for add_users/remove_users.
        for member in users:
            member.is_staff = value
            member.save()
    def add_users(self, *users):
        """Grant global staff status to each of the given users."""
        self._set_staff_flag(users, True)
    def remove_users(self, *users):
        """Revoke global staff status from each of the given users."""
        self._set_staff_flag(users, False)
    def users_with_role(self):
        raise Exception("This operation is un-indexed, and shouldn't be used")
class GroupBasedRole(AccessRole):
    """
    A role granted to a user through membership in any one of a set of
    django auth groups.
    """
    def __init__(self, group_names):
        """
        Create a GroupBasedRole from a list of group names.
        The first entry of `group_names` is the preferred group used when
        adding a user; membership in any listed group confers the role.
        """
        self._group_names = [name.lower() for name in group_names]
    def has_user(self, user):
        """
        Return whether the supplied django user has access to this role.
        """
        # pylint: disable=protected-access
        if not user.is_authenticated():
            return False
        if not hasattr(user, '_groups'):
            # Cache the lower-cased group names on the user so repeated
            # permission checks don't re-query the database.
            user._groups = set(name.lower() for name in user.groups.values_list('name', flat=True))
        return not user._groups.isdisjoint(self._group_names)
    def _preferred_group(self):
        # Writes always target the first (preferred) group name.
        group, _ = Group.objects.get_or_create(name=self._group_names[0])
        return group
    def _invalidate_cached_groups(self, users):
        # pylint: disable=protected-access
        for member in users:
            if hasattr(member, '_groups'):
                del member._groups
    def add_users(self, *users):
        """
        Add the supplied django users to this role.
        """
        self._preferred_group().user_set.add(*users)
        self._invalidate_cached_groups(users)
    def remove_users(self, *users):
        """
        Remove the supplied django users from this role.
        """
        self._preferred_group().user_set.remove(*users)
        self._invalidate_cached_groups(users)
    def users_with_role(self):
        """
        Return a django QuerySet for all of the users with this role
        """
        return User.objects.filter(groups__name__in=self._group_names)
class CourseRole(GroupBasedRole):
    """
    A named role in a particular course.

    Builds the list of candidate group names for the role (most-preferred
    first) from whichever course identifier form the caller supplies.
    """
    def __init__(self, role, location, course_context=None):
        """
        Location may be either a Location, a string, dict, or tuple which Location will accept
        in its constructor, or a CourseLocator. Handle all these giving some preference to
        the preferred naming.
        """
        # TODO: figure out how to make the group name generation lazy so it doesn't force the
        # loc mapping?
        location = Locator.to_locator_or_location(location)
        # direct copy from auth.authz.get_all_course_role_groupnames will refactor to one impl asap
        # NOTE: order matters -- GroupBasedRole treats the first name as the
        # preferred group for writes, so append most-preferred formats first.
        groupnames = []
        # pylint: disable=no-member
        if isinstance(location, Location):
            try:
                # preferred format: role_<slashified course_id>
                groupnames.append('{0}_{1}'.format(role, location.course_id))
            except InvalidLocationError: # will occur on old locations where location is not of category course
                if course_context is None:
                    raise CourseContextRequired()
                else:
                    groupnames.append('{0}_{1}'.format(role, course_context))
            try:
                locator = loc_mapper().translate_location(location.course_id, location, False, False)
                groupnames.append('{0}_{1}'.format(role, locator.package_id))
            except (InvalidLocationError, ItemNotFoundError):
                # if it's never been mapped, the auth won't be via the Locator syntax
                pass
            # least preferred legacy role_course format
            groupnames.append('{0}_{1}'.format(role, location.course))
        elif isinstance(location, CourseLocator):
            groupnames.append('{0}_{1}'.format(role, location.package_id))
            # handle old Location syntax
            old_location = loc_mapper().translate_locator_to_location(location, get_course=True)
            if old_location:
                # the slashified version of the course_id (myu/mycourse/myrun)
                groupnames.append('{0}_{1}'.format(role, old_location.course_id))
                # add the least desirable but sometimes occurring format.
                groupnames.append('{0}_{1}'.format(role, old_location.course))
        super(CourseRole, self).__init__(groupnames)
class OrgRole(GroupBasedRole):
    """Role scoped to every course under a particular organization."""
    def __init__(self, role, location):
        # pylint: disable=no-member
        org_location = Location(location)
        super(OrgRole, self).__init__(['{}_{}'.format(role, org_location.org)])
class CourseStaffRole(CourseRole):
    """A Staff member of a course; forwards to CourseRole with role name 'staff'."""
    def __init__(self, *args, **kwargs):
        super(CourseStaffRole, self).__init__('staff', *args, **kwargs)
class CourseInstructorRole(CourseRole):
    """A course Instructor; forwards to CourseRole with role name 'instructor'."""
    def __init__(self, *args, **kwargs):
        super(CourseInstructorRole, self).__init__('instructor', *args, **kwargs)
class CourseBetaTesterRole(CourseRole):
    """A course Beta Tester; forwards to CourseRole with role name 'beta_testers'."""
    def __init__(self, *args, **kwargs):
        super(CourseBetaTesterRole, self).__init__('beta_testers', *args, **kwargs)
class OrgStaffRole(OrgRole):
    """An organization staff member; forwards to OrgRole with role name 'staff'."""
    def __init__(self, *args, **kwargs):
        super(OrgStaffRole, self).__init__('staff', *args, **kwargs)
class OrgInstructorRole(OrgRole):
    """An organization instructor; forwards to OrgRole with role name 'instructor'."""
    def __init__(self, *args, **kwargs):
        super(OrgInstructorRole, self).__init__('instructor', *args, **kwargs)
|
mjg2203/edx-platform-seas
|
lms/djangoapps/courseware/roles.py
|
Python
|
agpl-3.0
| 7,324 | 0.001912 |
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
import unittest
from model import *
from example_data import expenses, payments, participations, persons, events
# Module-level Gruppenkasse populated with the example data; all tests
# below read from this shared instance.
kasse = Gruppenkasse.create_new()
kasse.fill_with(expenses, payments, participations)
class TestGruppenkasse(unittest.TestCase):
    """Smoke tests for the Gruppenkasse model against the example data set."""

    def setUp(self):
        # No per-test fixtures: all tests share the module-level `kasse`.
        ...

    def test_persons(self):
        """Person names in the model and the example data must match exactly."""
        person_names = [p.name for p in kasse.persons]
        # Check both directions so missing *and* extra persons are caught
        # (mirrors the symmetric check in test_events).
        for name in person_names:
            self.assertTrue(name in persons, msg=name)
        for name in persons:
            self.assertTrue(name in person_names, msg=name)

    def test_events(self):
        """Event names in the model and the example data must match exactly."""
        event_names = [e.name for e in kasse.events]
        for name in event_names:
            self.assertTrue(name in events, msg=name)
        for name in events:
            self.assertTrue(name in event_names, msg=name)

    def test_event(self):
        # Iterating all events must not raise.
        for event in kasse.events:
            ...

    def test_person(self):
        # Iterating persons and computing balances must not raise.
        for person in kasse.persons:
            print(person, "\t{:5.2f}".format(person.balance / 100))

    def test_payments(self):
        # Accessing the payments collection must not raise.
        print(kasse.payments)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
RincewindWizzard/gruppenkasse-gtk
|
src/tests/test_model.py
|
Python
|
lgpl-3.0
| 1,143 | 0.0035 |
import mock
import pytest
from rest_framework import exceptions
from addons.wiki.models import WikiPage
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from api.base.settings.defaults import API_BASE
from api_tests.wikis.views.test_wiki_detail import WikiCRUDTestCase
from framework.auth.core import Auth
from osf_tests.factories import (
AuthUserFactory,
ProjectFactory,
OSFGroupFactory,
RegistrationFactory,
)
from osf.utils.permissions import WRITE, READ
from tests.base import fake
@pytest.fixture()
def user():
    # A fresh authenticated user for each test.
    return AuthUserFactory()
def create_wiki_payload(name):
    """Build a minimal JSON-API request body for creating a wiki page."""
    attributes = {'name': name}
    return {'data': {'type': 'wikis', 'attributes': attributes}}
@pytest.mark.django_db
class TestNodeWikiList:
    """Read-path tests for the wiki list endpoints of nodes and registrations."""
    @pytest.fixture()
    def add_project_wiki_page(self):
        # Factory-as-fixture: returns a callable that attaches a wiki page
        # (with one version) to the supplied node.
        def add_page(node, user):
            with mock.patch('osf.models.AbstractNode.update_search'):
                wiki_page = WikiFactory(node=node, user=user)
                WikiVersionFactory(wiki_page=wiki_page)
                return wiki_page
        return add_page
    @pytest.fixture()
    def non_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def public_project(self, user):
        return ProjectFactory(is_public=True, creator=user)
    @pytest.fixture()
    def public_wiki(self, add_project_wiki_page, user, public_project):
        return add_project_wiki_page(public_project, user)
    @pytest.fixture()
    def public_url(self, public_project, public_wiki):
        return '/{}nodes/{}/wikis/'.format(API_BASE, public_project._id)
    @pytest.fixture()
    def private_project(self, user):
        return ProjectFactory(creator=user)
    @pytest.fixture()
    def private_wiki(self, add_project_wiki_page, user, private_project):
        return add_project_wiki_page(private_project, user)
    @pytest.fixture()
    def private_url(self, private_project, private_wiki):
        return '/{}nodes/{}/wikis/'.format(API_BASE, private_project._id)
    @pytest.fixture()
    def public_registration(self, user, public_project, public_wiki):
        public_registration = RegistrationFactory(
            project=public_project, user=user, is_public=True)
        return public_registration
    @pytest.fixture()
    def public_registration_url(self, public_registration):
        return '/{}registrations/{}/wikis/'.format(
            API_BASE, public_registration._id)
    @pytest.fixture()
    def private_registration(self, user, private_project, private_wiki):
        private_registration = RegistrationFactory(
            project=private_project, user=user)
        return private_registration
    @pytest.fixture()
    def private_registration_url(self, private_registration):
        return '/{}registrations/{}/wikis/'.format(
            API_BASE, private_registration._id)
    def test_return_wikis(
            self, app, user, non_contrib, private_registration, private_project,
            public_wiki, private_wiki, public_url, private_url,
            private_registration_url):
        """Visibility of wiki lists for each auth level, on nodes and registrations."""
        # test_return_public_node_wikis_logged_out_user
        res = app.get(public_url)
        assert res.status_code == 200
        wiki_ids = [wiki['id'] for wiki in res.json['data']]
        assert public_wiki._id in wiki_ids
        # test_return_public_node_wikis_logged_in_non_contributor
        res = app.get(public_url, auth=non_contrib.auth)
        assert res.status_code == 200
        wiki_ids = [wiki['id'] for wiki in res.json['data']]
        assert public_wiki._id in wiki_ids
        # test_return_public_node_wikis_logged_in_contributor
        res = app.get(public_url, auth=user.auth)
        assert res.status_code == 200
        wiki_ids = [wiki['id'] for wiki in res.json['data']]
        assert public_wiki._id in wiki_ids
        # test_return_private_node_wikis_logged_out_user
        res = app.get(private_url, expect_errors=True)
        assert res.status_code == 401
        assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
        # test_return_private_node_wikis_logged_in_osf_group_member
        group_mem = AuthUserFactory()
        group = OSFGroupFactory(creator=group_mem)
        private_project.add_osf_group(group, READ)
        res = app.get(private_url, auth=group_mem.auth)
        assert res.status_code == 200
        wiki_ids = [wiki['id'] for wiki in res.json['data']]
        assert private_wiki._id in wiki_ids
        # test_return_private_node_wikis_logged_in_non_contributor
        res = app.get(private_url, auth=non_contrib.auth, expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        # test_return_private_node_wikis_logged_in_contributor
        res = app.get(private_url, auth=user.auth)
        assert res.status_code == 200
        wiki_ids = [wiki['id'] for wiki in res.json['data']]
        assert private_wiki._id in wiki_ids
        # test_return_registration_wikis_logged_out_user
        res = app.get(private_registration_url, expect_errors=True)
        assert res.status_code == 401
        assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
        # test_return_registration_wikis_logged_in_non_contributor
        res = app.get(
            private_registration_url,
            auth=non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        # test_return_registration_wikis_logged_in_contributor
        res = app.get(private_registration_url, auth=user.auth)
        assert res.status_code == 200
        wiki_ids = [wiki['id'] for wiki in res.json['data']]
        assert WikiPage.objects.get_for_node(private_registration, 'home')._id in wiki_ids
    def test_wikis_not_returned_for_withdrawn_registration(
            self, app, user, private_registration, private_registration_url):
        """Withdrawn registrations must hide their wikis even from contributors."""
        private_registration.is_public = True
        withdrawal = private_registration.retract_registration(
            user=user, save=True)
        token = list(withdrawal.approval_state.values())[0]['approval_token']
        # TODO: Remove mocking when StoredFileNode is implemented
        with mock.patch('osf.models.AbstractNode.update_search'):
            withdrawal.approve_retraction(user, token)
            withdrawal.save()
        res = app.get(
            private_registration_url,
            auth=user.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
    def test_do_not_return_disabled_wiki(self, app, user, public_url, public_project):
        """Deleting the wiki addon makes the endpoint 404."""
        public_project.delete_addon('wiki', auth=Auth(user))
        res = app.get(public_url, expect_errors=True)
        assert res.status_code == 404
    def test_relationship_links(
            self, app, user, public_project, private_project,
            public_registration, private_registration,
            public_url, private_url, public_registration_url,
            private_registration_url):
        """Each wiki's relationship hrefs must point at its owning node/registration."""
        # test_public_node_wikis_relationship_links
        res = app.get(public_url)
        expected_nodes_relationship_url = '{}nodes/{}/'.format(
            API_BASE, public_project._id)
        expected_comments_relationship_url = '{}nodes/{}/comments/'.format(
            API_BASE, public_project._id)
        assert expected_nodes_relationship_url in res.json['data'][
            0]['relationships']['node']['links']['related']['href']
        assert expected_comments_relationship_url in res.json['data'][
            0]['relationships']['comments']['links']['related']['href']
        # test_private_node_wikis_relationship_links
        res = app.get(private_url, auth=user.auth)
        expected_nodes_relationship_url = '{}nodes/{}/'.format(
            API_BASE, private_project._id)
        expected_comments_relationship_url = '{}nodes/{}/comments/'.format(
            API_BASE, private_project._id)
        assert expected_nodes_relationship_url in res.json['data'][
            0]['relationships']['node']['links']['related']['href']
        assert expected_comments_relationship_url in res.json['data'][
            0]['relationships']['comments']['links']['related']['href']
        # test_public_registration_wikis_relationship_links
        res = app.get(public_registration_url)
        expected_nodes_relationship_url = '{}registrations/{}/'.format(
            API_BASE, public_registration._id)
        expected_comments_relationship_url = '{}registrations/{}/comments/'.format(
            API_BASE, public_registration._id)
        assert expected_nodes_relationship_url in res.json['data'][
            0]['relationships']['node']['links']['related']['href']
        assert expected_comments_relationship_url in res.json['data'][
            0]['relationships']['comments']['links']['related']['href']
        # test_private_registration_wikis_relationship_links
        res = app.get(private_registration_url, auth=user.auth)
        expected_nodes_relationship_url = '{}registrations/{}/'.format(
            API_BASE, private_registration._id)
        expected_comments_relationship_url = '{}registrations/{}/comments/'.format(
            API_BASE, private_registration._id)
        assert expected_nodes_relationship_url in res.json['data'][
            0]['relationships']['node']['links']['related']['href']
        assert expected_comments_relationship_url in res.json['data'][
            0]['relationships']['comments']['links']['related']['href']
    def test_not_returned(
            self, app, public_project, public_registration,
            public_url, public_registration_url):
        """Node and registration wiki lists must not leak each other's pages."""
        # test_registration_wikis_not_returned_from_nodes_endpoint
        res = app.get(public_url)
        node_relationships = [
            node_wiki['relationships']['node']['links']['related']['href']
            for node_wiki in res.json['data']
        ]
        assert res.status_code == 200
        assert len(node_relationships) == 1
        assert public_project._id in node_relationships[0]
        # test_node_wikis_not_returned_from_registrations_endpoint
        res = app.get(public_registration_url)
        node_relationships = [
            node_wiki['relationships']['node']['links']['related']['href']
            for node_wiki in res.json['data']
        ]
        assert res.status_code == 200
        assert len(node_relationships) == 1
        assert public_registration._id in node_relationships[0]
@pytest.mark.django_db
class TestFilterNodeWikiList:
    """Filtering behaviour (by name and date_modified) of the wiki list endpoint."""
    @pytest.fixture()
    def private_project(self, user):
        return ProjectFactory(creator=user)
    @pytest.fixture()
    def base_url(self, private_project):
        return '/{}nodes/{}/wikis/'.format(API_BASE, private_project._id)
    @pytest.fixture()
    def wiki(self, user, private_project):
        with mock.patch('osf.models.AbstractNode.update_search'):
            wiki_page = WikiFactory(node=private_project, user=user)
            WikiVersionFactory(wiki_page=wiki_page)
        return wiki_page
    @pytest.fixture()
    def date(self, wiki):
        # ISO-8601 timestamp of the wiki's last modification, used in filters.
        return wiki.modified.strftime('%Y-%m-%dT%H:%M:%S.%f')
    def test_filter_node_wiki_list(self, app, user, wiki, date, base_url):
        # test_node_wikis_with_no_filter_returns_all
        res = app.get(base_url, auth=user.auth)
        wiki_ids = [item['id'] for item in res.json['data']]
        assert wiki._id in wiki_ids
        # test_filter_wikis_by_page_name
        url = base_url + '?filter[name]=home'
        res = app.get(url, auth=user.auth)
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['attributes']['name'] == 'home'
        # test_filter_wikis_modified_on_date
        url = base_url + '?filter[date_modified][eq]={}'.format(date)
        res = app.get(url, auth=user.auth)
        assert len(res.json['data']) == 1
        # test_filter_wikis_modified_before_date
        url = base_url + '?filter[date_modified][lt]={}'.format(date)
        res = app.get(url, auth=user.auth)
        assert len(res.json['data']) == 0
        # test_filter_wikis_modified_after_date
        url = base_url + '?filter[date_modified][gt]={}'.format(date)
        res = app.get(url, auth=user.auth)
        assert len(res.json['data']) == 0
@pytest.mark.django_db
class TestNodeWikiCreate(WikiCRUDTestCase):
    """Create (POST) behaviour of node and registration wiki endpoints."""
    @pytest.fixture
    def url_node_public(self, project_public):
        return '/{}nodes/{}/wikis/'.format(API_BASE, project_public._id)
    @pytest.fixture
    def url_node_private(self, project_private):
        return '/{}nodes/{}/wikis/'.format(API_BASE, project_private._id)
    @pytest.fixture
    def url_registration_public(self, wiki_registration_public):
        return '/{}registrations/{}/wikis/'.format(API_BASE, wiki_registration_public.node._id)
    @pytest.fixture
    def url_registration_private(self, wiki_registration_private):
        return '/{}registrations/{}/wikis/'.format(API_BASE, wiki_registration_private.node._id)
    def test_create_public_wiki_page(self, app, user_write_contributor, url_node_public):
        """A write contributor can create a page on a public node."""
        page_name = fake.word()
        res = app.post_json_api(url_node_public, create_wiki_payload(page_name), auth=user_write_contributor.auth)
        assert res.status_code == 201
        assert res.json['data']['attributes']['name'] == page_name
    def test_create_public_wiki_page_with_content(self, app, user_write_contributor, url_node_public, project_public):
        """Content supplied at creation becomes the first wiki version."""
        page_name = 'using random variables in tests can sometimes expose Testmon problems!'
        payload = create_wiki_payload(page_name)
        payload['data']['attributes']['content'] = 'my first wiki page'
        res = app.post_json_api(url_node_public, payload, auth=user_write_contributor.auth)
        assert res.status_code == 201
        assert res.json['data']['attributes']['name'] == page_name
        wiki_page = WikiPage.objects.get_for_node(project_public, page_name)
        assert wiki_page.get_version().content == 'my first wiki page'
        # test_osf_group_member_write
        group_mem = AuthUserFactory()
        group = OSFGroupFactory(creator=group_mem)
        project_public.add_osf_group(group, WRITE)
        res = app.post_json_api(url_node_public, create_wiki_payload(fake.word()), auth=group_mem.auth, expect_errors=True)
        assert res.status_code == 201
    def test_create_public_wiki_page_with_empty_content(self, app, user_write_contributor, url_node_public, project_public):
        """Empty content is rejected with a 400."""
        page_name = fake.word()
        payload = create_wiki_payload(page_name)
        payload['data']['attributes']['content'] = ''
        res = app.post_json_api(url_node_public, payload, auth=user_write_contributor.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be blank.'
    def test_do_not_create_public_wiki_page(
            self, app, user_creator, user_read_contributor, user_non_contributor,
            url_node_public, wiki_public, project_public
    ):
        """Duplicate names and insufficient permissions must all be rejected."""
        # test_do_not_create_home_wiki_page
        res = app.post_json_api(url_node_public, create_wiki_payload('home'), auth=user_creator.auth, expect_errors=True)
        assert res.status_code == 409
        assert res.json['errors'][0]['detail'] == "A wiki page with the name 'home' already exists."
        # test_do_not_create_wiki_page_name_exists
        res = app.post_json_api(url_node_public, create_wiki_payload(wiki_public.page_name), auth=user_creator.auth, expect_errors=True)
        assert res.status_code == 409
        assert res.json['errors'][0]['detail'] == "A wiki page with the name '{}' already exists.".format(wiki_public.page_name)
        # test_do_not_create_public_wiki_page_as_read_contributor
        res = app.post_json_api(url_node_public, create_wiki_payload(fake.word()), auth=user_read_contributor.auth, expect_errors=True)
        assert res.status_code == 403
        # test_do_not_create_public_wiki_page_as_non_contributor
        res = app.post_json_api(url_node_public, create_wiki_payload(fake.word()), auth=user_non_contributor.auth, expect_errors=True)
        assert res.status_code == 403
        # test_do_not_create_public_wiki_page_as_read_osf_group_member
        group_mem = AuthUserFactory()
        group = OSFGroupFactory(creator=group_mem)
        project_public.add_osf_group(group, READ)
        res = app.post_json_api(url_node_public, create_wiki_payload(fake.word()), auth=group_mem.auth, expect_errors=True)
        assert res.status_code == 403
        # test_do_not_create_public_wiki_page_as_unauthenticated
        res = app.post_json_api(url_node_public, create_wiki_payload(fake.word()), expect_errors=True)
        assert res.status_code == 401
    def test_create_private_wiki_page(self, app, user_write_contributor, url_node_private):
        """A write contributor can create a page on a private node."""
        page_name = fake.word()
        res = app.post_json_api(url_node_private, create_wiki_payload(page_name), auth=user_write_contributor.auth)
        assert res.status_code == 201
        assert res.json['data']['attributes']['name'] == page_name
    def test_do_not_create_private_wiki_page(
            self, app, wiki_private, url_node_private,
            user_read_contributor, user_non_contributor
    ):
        """Read-only, non-contributor, and anonymous users cannot create on private nodes."""
        # test_do_not_create_private_wiki_page_as_read_contributor
        res = app.post_json_api(url_node_private, create_wiki_payload(fake.word()), auth=user_read_contributor.auth, expect_errors=True)
        assert res.status_code == 403
        # test_do_not_create_private_wiki_page_as_non_contributor
        res = app.post_json_api(url_node_private, create_wiki_payload(fake.word()), auth=user_non_contributor.auth, expect_errors=True)
        assert res.status_code == 403
        # test_do_not_create_private_wiki_page_as_unauthenticated
        res = app.post_json_api(url_node_private, create_wiki_payload(fake.word()), expect_errors=True)
        assert res.status_code == 401
    def test_do_not_create_registration_wiki_page(
            self, app, user_creator,
            url_registration_public, url_registration_private
    ):
        """Registrations are immutable: wiki creation is 405 for both visibilities."""
        # test_do_not_create_wiki_on_public_registration
        res = app.post_json_api(url_registration_public, create_wiki_payload(fake.word()), auth=user_creator.auth, expect_errors=True)
        assert res.status_code == 405
        # test_do_not_create_wiki_on_embargoed_registration
        res = app.post_json_api(url_registration_private, create_wiki_payload(fake.word()), auth=user_creator.auth, expect_errors=True)
        assert res.status_code == 405
    def test_do_not_create_wiki_page_if_disabled(
            self, app, user_creator,
            project_public, url_node_public, wiki_public
    ):
        """With the wiki addon removed, creation 404s."""
        project_public.delete_addon('wiki', auth=Auth(user_creator))
        page_name = fake.word()
        res = app.post_json_api(url_node_public, create_wiki_payload(page_name), auth=user_creator.auth, expect_errors=True)
        assert res.status_code == 404
    def test_do_not_create_wiki_page_if_publicly_editable_non_contrib(
            self, app, user_creator, user_non_contributor,
            project_public, url_node_public, wiki_public
    ):
        """Public editability does not grant creation rights via the API."""
        project_public.addons_wiki_node_settings.set_editing(True, auth=Auth(user_creator))
        page_name = fake.word()
        res = app.post_json_api(url_node_public, create_wiki_payload(page_name), auth=user_non_contributor.auth, expect_errors=True)
        assert res.status_code == 403
|
mfraezz/osf.io
|
api_tests/nodes/views/test_node_wiki_list.py
|
Python
|
apache-2.0
| 19,731 | 0.001977 |
from __future__ import division
# Read two integers from STDIN (Python 2: raw_input)
a = int(raw_input())
b = int(raw_input())
# Print integer division, a//b
print(a // b)
# Print the remainder, a%b
# (the previous comment claimed "float division, a/b", but the code -- and
# the "Mod Divmod" task this solves -- computes the modulo)
print(a % b)
# Print divmod of a and b
print(divmod(a, b))
|
nhquiroz/hacker-rank
|
python/introduction/mod-div-mod.py
|
Python
|
mit
| 239 | 0 |
"""Primare amplier control.
This module allows you to control your Primare I22 and I32 amplifier from the
command line using Primare's binary protocol via the RS232 port on the
amplifier.
"""
from __future__ import with_statement
import binascii
import logging
import struct
import time
# from twisted.logger import Logger
# logger = Logger()
logger = logging.getLogger(__name__)
# Primare documentation on their RS232 protocol writes this:
# == Command structure ==
# Commands are sent to the device using the following format, where each field
# is one byte sent to the device:
# <STX> <command> <variable> [<value>] <DLE> <ETX>
# The <command> can be either 'W' for write or 'R' for read. The variable
# table that follows specifies which variables supports which <command> types.
# If verbose is active, the device will send replies on the following format
# either when a command is received or when the variable in question is
# changed on the device.
# <STX> <variable> [<value>] <DLE> <ETX>
# Note that the <value> field can contain several bytes of data for certain
# commands.
# == Command special chars ==
# <STX> = 0x02
# <DLE> = 0x10
# <ETX> = 0x03
# Write = 0x57 (ASCII: W)
# Read = 0x52 (ASCII: R)
# == Escape sequence ==
# If any variable or value byte is equal to <DLE>, this byte must be sent
# twice to avoid confusing this with end of message.
# Protocol settings
# Baud rate: 4800
# Bits: 8
# Stop bits: 1
# Parity: None
# == Example ==
# The specific variables and commands will be defined later, here are
# examples on what the commands looks like in bytes.
# This is an example of a command to toggle verbose setting.
# Command is write (0x57), variable is 13 (0x0d)
# and value is 0. The footer is x10 x03
# 0x02 0x57 0x0xd 0x00 0x10 0x03
# Byte positions within a raw protocol frame (see protocol notes above).
POS_STX = slice(0, 1)          # start-of-text marker
POS_DLE_ETX = slice(-2, None)  # end-of-frame marker
POS_CMD_VAR = slice(2, 3)      # variable byte in an outgoing command
POS_REPLY_VAR = slice(1, 2)    # variable byte in a reply
POS_REPLY_DATA = slice(2, -2)  # payload bytes in a reply
# Raw framing bytes.
BYTE_STX = '\x02'
BYTE_WRITE = '\x57'
BYTE_READ = '\x52'
BYTE_DLE_ETX = '\x10\x03'
# Column indices into the PRIMARE_CMD value lists below.
INDEX_CMD = 0       # 'W' (write) or 'R' (read)
INDEX_VARIABLE = 1  # hex variable+value; 'YY' is a placeholder for a value
INDEX_REPLY = 2     # hex pattern of the expected reply
INDEX_WAIT = 3      # whether to expect/wait for a reply
PRIMARE_CMD = {
    'power_toggle': ['W', '0100', '01', True],
    'power_set': ['W', '81YY', '01YY', False],
    'input_set': ['W', '82YY', '02YY', True],
    'input_next': ['W', '0201', '02', True],
    'input_prev': ['W', '02FF', '02', True],
    'volume_set': ['W', '83YY', '03YY', True],
    'volume_get': ['W', '0300', '03', True],
    'volume_up': ['W', '0301', '03', True],
    'volume_down': ['W', '03FF', '03', True],
    'balance_adjust': ['W', '04YY', '04', True],
    'balance_set': ['W', '84YY', '04YY', True],
    'mute_toggle': ['W', '0900', '09', True],
    'mute_set': ['W', '89YY', '09YY', True],
    'dim_cycle': ['W', '0A00', '0A', True],
    'dim_set': ['W', '8AYY', '0AYY', True],
    'verbose_toggle': ['W', '0D00', '0D', True],
    'verbose_set': ['W', '8DYY', '0DYY', True],
    'menu_toggle': ['W', '0E01', '0E', True],
    'menu_set': ['W', '8EYY', '0EYY', True],
    'remote_cmd': ['W', '0FYY', 'YY', True],
    'ir_input_toggle': ['W', '1200', '12', True],
    'ir_input_set': ['W', '92YY', '12YY', True],
    'recall_factory_settings': ['R', '1300', '', False],
    'inputname_current_get': ['R', '1400', '14YY', True],
    'inputname_specific_get': ['R', '94YY', '94YY', True],
    'manufacturer_get': ['R', '1500', '15', True],
    'modelname_get': ['R', '1600', '16', True],
    'swversion_get': ['R', '1700', '17', True]
}
# Maps the hex variable byte of a reply to a human-readable name (used for
# debug logging in _decode_raw_data).
PRIMARE_REPLY = {
    '01': 'power',
    '02': 'input',
    '03': 'volume',
    '04': 'balance',
    '09': 'mute',
    '0a': 'dim',
    '0d': 'verbose',
    '0e': 'menu',
    '12': 'ir_input',
    '13': 'recall_factory_settings',
    '14': 'inputname',
    '15': 'manufacturer',
    '16': 'modelname',
    '17': 'swversion'
}
# TODO:
# FIXING Better reply handling than table?
# * Better error handling
# After suspend/resume, if volume up/down fails (or similar),
# try turning amp on
#
# LATER
# * v2: Implement as module(?), not class, for multiple writers/subscribers
# (singleton)
# Seems like a factory would be better, so 'import primare_serial' then
# primare_serial.initComs() which then creates the single Serial object.
# * v2: Add notification callback mechanism to notify users of changes on
# amp (dials or other SW)
# http://bit.ly/WGRn0g
# Better idea: websocket
# http://forums.lantronix.com/showthread.php?p=3131
# * ...
class PrimareController():
    """This class provides methods for controlling a Primare amplifier."""
    # Number of volume levels the amplifier supports.
    # Primare amplifiers have 79 levels
    VOLUME_LEVELS = 79
    def __init__(self, source=None, volume=None, writer=None):
        """Initialization.

        :param source: input number to select once the amp is powered on
        :param volume: initial volume in [0..100]; applied immediately if set
        :param writer: callable that sends raw bytes to the amplifier
        """
        self._bytes_read = bytearray()
        self._write_cb = writer
        self._boot_print = True
        self._manufacturer = ''
        self._modelname = ''
        self._swversion = ''
        self._inputname = ''
        self._source = source
        # Volume in range 0..VOLUME_LEVELS. :class:`None` before calibration.
        if volume:
            self.volume_set(volume)
        # Setup logging so that is available
        logging.basicConfig(level=logging.DEBUG)
    # Private methods
    def _set_device_to_known_state(self):
        """Enable verbose replies, power on, select input and unmute."""
        logger.debug('_set_device_to_known_state')
        self.verbose_set(True)
        self.power_on()
        time.sleep(1)
        if self._source is not None:
            self.input_set(self._source)
        self.mute_set(False)
    def _print_device_info(self):
        """Request the device identity strings; replies are logged later."""
        self.manufacturer_get()
        self.modelname_get()
        self.swversion_get()
        # We always get inputname last, this represents our initialization
        self.inputname_current_get()
    def _primare_reader(self, rawdata):
        r"""Take raw data and finds the EOL sequence \x10\x03."""
        # Bytes accumulate in self._bytes_read across calls, so replies
        # split over several reads are handled.
        eol = BYTE_DLE_ETX
        leneol = len(eol)
        for index, c in enumerate(rawdata):
            self._bytes_read += c
            # TODO: Need to do conversion of \x10\x10 before looking for EOL!
            # Doing it after is actually wrong, move code up here from
            # _decode_raw_data
            if self._bytes_read[-leneol:] == eol:
                logger.debug('_primare_reader - decoded: %s',
                             binascii.hexlify(self._bytes_read))
                variable_char, decoded_data = self._decode_raw_data(
                    self._bytes_read)
                # We found a data sequence, extract remaining data and start
                # again
                rawdata = rawdata[index + 1:]
                self._bytes_read = bytearray()
                self._parse_and_store(variable_char, decoded_data)
            else:
                # logger.debug('_primare_reader - not-eol: %s',
                # binascii.hexlify(self._bytes_read[-leneol:]))
                pass
    def _decode_raw_data(self, rawdata):
        r"""Decode raw data from the serial port.

        Replace any '\x10\x10' sequences with '\x10'.
        Returns the variable char and the data received between the STX and
        DLE+ETX markers
        """
        variable_char = ''
        data = ''
        # logger.debug('Read: "%s"', binascii.hexlify(rawdata))
        byte_string = struct.unpack('c' * len(rawdata), rawdata)
        variable_char = binascii.hexlify(''.join(byte_string[POS_REPLY_VAR]))
        byte_string = byte_string[POS_REPLY_DATA]
        # We need to replace double DLE (0x10) with single DLE
        for byte_pairs in zip(byte_string[0:None:2],
                              byte_string[1:None:2]):
            # Convert binary tuple to str to ascii
            str_pairs = binascii.hexlify(''.join(byte_pairs))
            if str_pairs == '1010':
                data += '10'
            else:
                data += str_pairs
        # Very often we have an odd amount of data which not handled by
        # the zip above, manually append that one byte
        if len(byte_string) % 2 != 0:
            data += binascii.hexlify(byte_string[-1])
        logger.debug('Read(%s) = %s (%s)', PRIMARE_REPLY[variable_char], data,
                     binascii.hexlify(rawdata))
        return variable_char, data
    def _parse_and_store(self, variable_char, data):
        """Cache state from a decoded reply.

        Only power state ('01') and the identity strings ('14'-'17') are
        stored; all other replies are ignored here.
        """
        if variable_char in ['01', '14', '15', '16', '17']:
            if variable_char in ['14', '15', '16', '17']:
                logger.debug('_parse_and_store - index: "%s" - %s',
                             variable_char,
                             binascii.unhexlify(data))
            if variable_char == '01':
                self._power_state = int(data, 16)
            elif variable_char == '14':
                self._inputname = data
                if self._boot_print is True:
                    # First inputname reply marks end of setup(); log the
                    # collected identity info exactly once.
                    self._boot_print = False
                    logger.info("""Connected to:
                    Manufacturer: %s
                    Model: %s
                    SW Version: %s
                    Current input: %s """,
                                binascii.unhexlify(self._manufacturer),
                                binascii.unhexlify(self._modelname),
                                binascii.unhexlify(self._swversion),
                                binascii.unhexlify(self._inputname))
            elif variable_char == '15':
                self._manufacturer = data
            elif variable_char == '16':
                self._modelname = data
            elif variable_char == '17':
                self._swversion = data
    def _send_command(self, variable, option=None):
        """Send the specified command to the amplifier.

        :param variable: String key for the PRIMARE_CMD dict
        :type variable: string
        :param option: String value needed for some of the commands
        :type option: string
        :rtype: :class:`True` if success, :class:`False` if failure
        """
        command = PRIMARE_CMD[variable][INDEX_CMD]
        data = PRIMARE_CMD[variable][INDEX_VARIABLE]
        if option is not None:
            # Splice the caller-supplied hex value into the 'YY' placeholder.
            data = data.replace('YY', option)
        logger.debug('_send_command(%s), data: "%s"', variable, data)
        self._write(command, data)
    def _write(self, cmd_type, data):
        r"""Write data to the serial port.

        Any occurences of '\x10' must be replaced with '\x10\x10' and add
        the STX and DLE+ETX markers
        """
        # We need to replace single DLE (0x10) with double DLE to discern it
        data_safe = ''
        for index in range(0, len(data) - 1, 2):
            pair = data[index:index + 2]
            if pair == '10':
                data_safe += '1010'
            else:
                data_safe += pair
        # Convert ascii string to binary
        binary_variable = binascii.unhexlify(data_safe)
        binary_data = BYTE_STX
        binary_data += BYTE_WRITE if cmd_type == 'W' else BYTE_READ
        binary_data += binary_variable + BYTE_DLE_ETX
        logger.debug('WriteHex: %s', binascii.hexlify(binary_data))
        self._write_cb(binary_data)
        # Things are wonky if we try to write too quickly
        time.sleep(0.06)
    # Public methods
    def setup(self):
        """Setup the amplifier.

        Set the receiver to a known state and print information about the
        amplifier
        """
        self._set_device_to_known_state()
        self._print_device_info()
    def power_on(self):
        """Power on the Primare amplifier."""
        self._send_command('power_set', '01')
    def power_off(self):
        """Power off the Primare amplifier."""
        self._send_command('power_set', '00')
    def power_toggle(self):
        """Toggle the power to the Primare amplifier.

        :rtype: :class:True if amplifier turned on as result of toggle,
        :class:False otherwise
        """
        self._send_command('power_toggle')
    def input_set(self, source):
        """Set the current input used by the Primare amplifier."""
        # Input number is wrapped modulo 8 before being sent.
        self._send_command('input_set', '{:02X}'.format(int(source) % 8))
        self.inputname_current_get()
    def input_next(self):
        """Select next input on device."""
        self._send_command('input_next')
        self.inputname_current_get()
    def input_prev(self):
        """Select previous input on device."""
        self._send_command('input_prev')
        self.inputname_current_get()
    def volume_get(self):
        """
        Get volume level of the mixer on a linear scale from 0 to 100.

        Example values:
        0:
            Silent
        100:
            Maximum volume.
        :class:`None`:
            Volume is unknown.
        :rtype: int in range [0..100] or :class:`None`
        """
        self._send_command('volume_get')
    def volume_set(self, volume):
        """
        Set volume level of the amplifier.

        :param volume: Volume in the range [0..100]
        :type volume: int
        :rtype: :class:`True` if success, :class:`False` if failure
        """
        # Map the 0..100 API scale onto the amp's 0..VOLUME_LEVELS scale.
        target_primare_volume = int(round(volume * self.VOLUME_LEVELS / 100.0))
        logger.debug("volume_set - target volume: {}".format(
            target_primare_volume))
        self._send_command('volume_set',
                           '{:02X}'.format(target_primare_volume))
        # There's a crazy bug where setting the volume to 65 and above will
        # generate a reply indicating a volume of 1 less!?
        # Hence the work-around
        # if reply and (int(reply, 16) == target_primare_volume or
        #               int(reply, 16) == target_primare_volume - 1):
        #     self._volume = volume
        #     logger.debug("LASSE - target volume SUCCESS, _volume: %d",
        #                  self._volume)
        #     return True
        # else:
        #     return False
    def volume_up(self):
        """Increase volume by one step."""
        self._send_command('volume_up')
    def volume_down(self):
        """Decrease volume by one step."""
        self._send_command('volume_down')
    def balance_adjust(self, adjustment):
        """Modify volume balance settings."""
        # TODO
        pass
    def balance_set(self, balance):
        """Set specific balance setting.

        Value 10 means centered. Lower values adjusts balance to the left.
        """
        # TODO
        pass
    def mute_toggle(self):
        """Toggle mute on device."""
        self._send_command('mute_toggle')
    def mute_get(self):
        """Get mute state of the mixer."""
        # NOTE(review): this sends the 'mute_toggle' command (0x0900), which
        # per the PRIMARE_CMD table *toggles* mute rather than reading it --
        # there is no pure read entry for mute. Looks like a bug; confirm
        # against the protocol documentation before relying on this method.
        self._send_command('mute_toggle')
    def mute_set(self, mute):
        """
        Enable or disable mute on device.

        :param mute: :class:`True` to mute, :class:`False` to unmute
        :type mute: bool
        :rtype: :class:`True` if success, :class:`False` if failure
        """
        mute_value = '01' if mute is True else '00'
        self._send_command('mute_set', mute_value)
    def dim_cycle(self):
        """Cycle through the different dim levels on device."""
        self._send_command('dim_cycle')
    def dim_set(self, level):
        """Select a specific dim level on device."""
        # Level is wrapped modulo 4 before being sent.
        if level >= 0:
            self._send_command('dim_set', '{:02X}'.format(int(level) % 4))
    def verbose_toggle(self):
        """Toggle verbose mode on device.

        When verbose is active, device will respond to commands and inform
        about changes to variables.
        """
        self._send_command('verbose_toggle')
    def verbose_set(self, verbose):
        """Enable or disables verbose mode on device."""
        verbose_value = '01' if verbose is True else '00'
        self._send_command('verbose_set', verbose_value)
    def menu_toggle(self):
        """Enter or leaves menu of device."""
        self._send_command('menu_toggle')
    def menu_set(self, menu):
        """Control menus on the amplifier.

        Allow closing of the menu or stepping into or out of a submenu if the
        menu is active.
        """
        self._send_command('menu_set', '{:02X}'.format(int(menu)))
    def remote_cmd(self, cmd):
        """Send an IR command to the device.

        The command will be treated as if the IR remote control has been used
        to send the command.
        """
        # TODO
        pass
    def ir_input_toggle(self):
        """Toggle IR input source on device between front and back."""
        self._send_command('ir_input_toggle')
    def ir_input_set(self, ir_input):
        """Select either front or back as current IR input source on device."""
        ir_value = '01' if ir_input is True else '00'
        self._send_command('ir_input_set', ir_value)
    def recall_factory_settings(self):
        """Perform a factory reset.

        Restore default values and restart the device.
        """
        self._send_command('recall_factory_settings')
    def manufacturer_get(self):
        """Read manufacturer name from the device."""
        self._send_command('manufacturer_get')
    def modelname_get(self):
        """Read model name from device."""
        self._send_command('modelname_get')
    def swversion_get(self):
        """Read current software version from device."""
        self._send_command('swversion_get')
    def inputname_current_get(self):
        """Read current input name from device."""
        self._send_command('inputname_current_get')
    def inputname_specific_get(self, input):
        """Read specified input name from device."""
        # Input number is wrapped modulo 8 before being sent.
        if input >= 0:
            self._send_command('inputname_specific_get',
                               '{:02X}'.format((int(input) % 8)))
|
ZenithDK/mopidy-primare
|
mopidy_primare/primare_serial.py
|
Python
|
apache-2.0
| 17,724 | 0 |
from monitor import monitor_qlen
from subprocess import Popen, PIPE
from time import sleep, time
from multiprocessing import Process
from argparse import ArgumentParser
import sys
import os
# Command-line interface: each run is tagged with an experiment name that
# prefixes all generated output files.
parser = ArgumentParser(description="CWND/Queue Monitor")
parser.add_argument('--exp', '-e',
                    dest="exp",
                    action="store",
                    help="Name of the Experiment",
                    required=True)
# Expt parameters
args = parser.parse_args()
def start_tcpprobe():
    "Install tcp_probe module and dump to file"
    # Reload the tcp_probe kernel module (full=1 logs every ACK, not only
    # cwnd changes) and start a background `cat` of /proc/net/tcpprobe.
    os.system("(rmmod tcp_probe >/dev/null 2>&1); modprobe tcp_probe full=1;")
    print "Monitoring TCP CWND ... will save it to ./%s_tcpprobe.txt " % args.exp
    Popen("cat /proc/net/tcpprobe > ./%s_tcpprobe.txt" %
          args.exp, shell=True)
def qmon():
    # Monitor the queue length of switch port s0-eth2 every 10 ms in a
    # child process until the operator presses Enter.
    monitor = Process(target=monitor_qlen,args=('s0-eth2', 0.01, '%s_sw0-qlen.txt' % args.exp ))
    monitor.start()
    print "Monitoring Queue Occupancy ... will save it to %s_sw0-qlen.txt " % args.exp
    raw_input('Press Enter key to stop the monitor--> ')
    monitor.terminate()
if __name__ == '__main__':
    # Start CWND logging, block on the queue monitor, then kill the
    # background `cat` started by start_tcpprobe().
    start_tcpprobe()
    qmon()
    Popen("killall -9 cat", shell=True).wait()
|
TheArbiter/Networks
|
lab4/lab4exercise2/exp_monitor.py
|
Python
|
gpl-3.0
| 1,212 | 0.008251 |
# -*- coding: utf-8 -*
from pymeasure.instruments.pyvisa_instrument import PyVisaInstrument
from pymeasure.case import ChannelRead
from pymeasure.instruments.oxford import OxfordInstrument
import time
class _QxfordILMChannel(ChannelRead):
    """Read channel exposing the helium level of an Oxford ILM level meter."""
    def __init__(self, instrument):
        ChannelRead.__init__(self)
        self._instrument = instrument
        self.unit = 'percent'
        # Expose the 'fast' sampling-rate flag through the channel config.
        self._config += ['fast']
    @ChannelRead._readmethod
    def read(self):
        """Return the helium level in percent as a one-element list.

        The meter occasionally returns truncated replies, so poll until a
        reply with the expected 4-digit payload arrives.
        """
        while True:
            helium = self._instrument.query('R')
            helium = helium[2:]
            if len(helium) == 4:
                break
        # The instrument reports tenths of a percent.
        return [float(helium)/10]
    @property
    def fast(self):
        """True while the meter is in fast sampling mode, False in slow.

        Status byte mapping taken from the code below: '4'/'C' -> slow,
        '2'/'3'/'A' -> fast; any other value means the mode is still
        changing, so wait a second and poll again.
        NOTE(review): mapping not verified against the ILM manual.
        """
        while True:
            status = self._instrument.query('X')
            status = status[5]
            if status == '4' or status == 'C':
                return False
            elif status == '2' or status == '3' or status == 'A' :
                return True
            else:
                time.sleep(1)
                pass
    @fast.setter
    def fast(self, boolean):
        # T1 selects fast sampling, S1 selects slow sampling.
        if boolean:
            self._instrument.write('T1')
        else:
            self._instrument.write('S1')
class QxfordILM(PyVisaInstrument):
    """Oxford ILM helium level meter reached through an ISOBUS adapter."""
    def __init__(self, address, name='', reset=True, defaults=True, isobus=6, **pyvisa):
        super().__init__(address, name, **pyvisa)
        self._isobus = isobus
        # Wrap the raw pyvisa instrument so each command is prefixed with
        # the ISOBUS address.
        self._instrument = OxfordInstrument(self._instrument, isobus = self._isobus)
        self._instrument.timeout = 200
        self._instrument.read_termination = '\r'
        self._instrument.write_termination = '\r'
        # NOTE(review): 'C3' is sent once at startup -- presumably the
        # Oxford "remote & unlocked" control command; confirm in the manual.
        self._instrument.write('C3')
        # Channels
        self.__setitem__('helium', _QxfordILMChannel(self._instrument))
        if defaults is True:
            self.defaults()
    #@property
    #def status(self):
    #    return self._instrument.ask('X')
    def defaults(self):
        """No instrument defaults need to be applied for this device."""
        pass
|
t--wagner/pymeasure
|
instruments/oxford_ilm.py
|
Python
|
gpl-3.0
| 1,931 | 0.004661 |
#
# The Python Imaging Library.
# $Id: MicImagePlugin.py,v 1.2 2007/06/17 14:12:15 robertoconnor Exp $
#
# Microsoft Image Composer support for PIL
#
# Notes:
# uses TiffImagePlugin.py to read the actual image streams
#
# History:
# 97-01-20 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
import string
import Image, TiffImagePlugin
from OleFileIO import *
#
# --------------------------------------------------------------------
def _accept(prefix):
    # MIC files are OLE compound documents; check the 8-byte OLE magic.
    return prefix[:8] == MAGIC
##
# Image plugin for Microsoft's Image Composer file format.
class MicImageFile(TiffImagePlugin.TiffImageFile):
    """Image plugin for Microsoft Image Composer (.MIC) files.

    A MIC file is an OLE compound document whose ".ACI" substreams hold
    TIFF images; decoding is delegated to TiffImagePlugin once the right
    stream has been opened.
    """
    format = "MIC"
    format_description = "Microsoft Image Composer"
    def _open(self):
        # read the OLE directory and see if this is a likely
        # to be a Microsoft Image Composer file
        try:
            self.ole = OleFileIO(self.fp)
        except IOError:
            raise SyntaxError, "not an MIC file; invalid OLE file"
        # find ACI subfiles with Image members (maybe not the
        # best way to identify MIC files, but what the... ;-)
        self.images = []
        for file in self.ole.listdir():
            if file[1:] and file[0][-4:] == ".ACI" and file[1] == "Image":
                self.images.append(file)
        # if we didn't find any images, this is probably not
        # an MIC file.
        if not self.images:
            raise SyntaxError, "not an MIC file; no image entries"
        self.__fp = self.fp
        self.frame = 0
        if len(self.images) > 1:
            # Several images: expose the file as a container of frames.
            self.category = Image.CONTAINER
        self.seek(0)
    def seek(self, frame):
        """Select frame number *frame* and (re)open its stream as a TIFF."""
        try:
            filename = self.images[frame]
        except IndexError:
            raise EOFError, "no such frame"
        self.fp = self.ole.openstream(filename)
        TiffImagePlugin.TiffImageFile._open(self)
        self.frame = frame
    def tell(self):
        """Return the index of the currently selected frame."""
        return self.frame
#
# --------------------------------------------------------------------
# Register the plugin so Image.open() dispatches .mic files to this codec.
Image.register_open("MIC", MicImageFile, _accept)
Image.register_extension("MIC", ".mic")
|
arpruss/plucker
|
plucker_desktop/installer/osx/application_bundle_files/Resources/parser/python/vm/PIL/MicImagePlugin.py
|
Python
|
gpl-2.0
| 2,334 | 0.002571 |
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" A manually operated or motor operated mechanical switching device used for changing the connections in a circuit, or for isolating a circuit or equipment from a source of power. It is required to open or close circuits when negligible current is broken or made.
"""
# <<< imports
# @generated
from cdpsm.iec61970.wires.switch import Switch
from google.appengine.ext import db
# >>> imports
class Disconnector(Switch):
    """ A manually operated or motor operated mechanical switching device used for changing the connections in a circuit, or for isolating a circuit or equipment from a source of power. It is required to open or close circuits when negligible current is broken or made.
    """
    # This class is produced by the CIM code generator; the paired <<< / >>>
    # markers delimit generated sections. A Disconnector adds no attributes,
    # references or operations beyond those inherited from Switch.
    # <<< disconnector.attributes
    # @generated
    # >>> disconnector.attributes
    # <<< disconnector.references
    # @generated
    # >>> disconnector.references
    # <<< disconnector.operations
    # @generated
    # >>> disconnector.operations
|
rwl/openpowersystem
|
cdpsm/iec61970/wires/disconnector.py
|
Python
|
agpl-3.0
| 1,912 | 0.004184 |
from __future__ import absolute_import, unicode_literals
import os
from appconf import AppConf
from django.conf import settings # noqa
class SessionRedisConf(AppConf):
    """AppConf holding the redis connection settings for session storage.

    Each attribute becomes a ``SESSION_REDIS_*`` Django setting.  When no
    URL is configured explicitly, :meth:`configure` falls back to the first
    populated PaaS environment variable listed in ``ENV_URLS``.
    """
    HOST = '127.0.0.1'
    PORT = 6379
    DB = 0
    PREFIX = 'django_sessions'
    PASSWORD = None
    UNIX_DOMAIN_SOCKET_PATH = None
    URL = None
    CONNECTION_POOL = None
    JSON_ENCODING = 'latin-1'
    ENV_URLS = (
        'REDISCLOUD_URL',
        'REDISTOGO_URL',
        'OPENREDIS_URL',
        'REDISGREEN_URL',
        'MYREDIS_URL',
    )
    def configure(self):
        """Fill URL from the first matching environment variable, if unset."""
        data = self.configured_data
        if data['URL'] is None:
            for env_name in data['ENV_URLS']:
                candidate = os.environ.get(env_name)
                if candidate:
                    data['URL'] = candidate
                    break
        return data
    class Meta:
        prefix = 'session_redis'
|
hellysmile/django-redis-sessions-fork
|
redis_sessions_fork/conf.py
|
Python
|
bsd-3-clause
| 932 | 0 |
"""
Compute class
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import pylab as pl
#TODO: Take care of the looks of these plots
class Compute(object):
    """Base class for computes over a simulated system.

    Concrete subclasses override :meth:`compute`; this base class supplies
    running-average tallying plus generic logging and plotting helpers.
    """
    def __init__(self):
        """Start with an empty tally."""
        self.value = 0     # running average of the values tallied so far
        self.idx = 0       # number of samples tallied so far
        self.header = []   # column labels used by log() and plot()
    def compute(self, system):
        """Compute routine; base implementation is a no-op for subclasses."""
        pass
    def tally(self, value):
        """Fold *value* into the running average.

        Subclasses whose results are not plain averages override this.
        The in-place operators are kept so array-valued tallies mutate the
        existing buffer instead of rebinding it.
        """
        self.idx += 1
        self.value *= (self.idx - 1)/self.idx
        self.value += value/self.idx
    def zero(self):
        """Reset the running average and the sample counter."""
        self.value = 0
        self.idx = 0
    def plot(self, filename):
        """Plot columns [1:] of self.value against column 0 as a PDF.

        Labels and the x-axis name come from self.header.
        """
        fig, axes = pl.subplots()
        for column, series in enumerate(self.value.T[1:]):
            axes.plot(self.value[:, 0], series, label=self.header[column])
        axes.set_xlabel(self.header[0])
        fig.savefig('{0}.pdf'.format(filename))
        pl.close()
    def log(self, filename):
        """Write self.value to *filename*, with self.header as the header."""
        np.savetxt(filename, self.value, header='; '.join(self.header))
|
pabloalcain/lammps-python
|
pylammps/Computes/Compute.py
|
Python
|
gpl-3.0
| 1,563 | 0.007038 |
from django.core.files.storage import default_storage
from django.forms import widgets
from django.urls import reverse
from repanier.const import EMPTY_STRING
from repanier.picture.const import SIZE_M
from repanier.tools import get_repanier_template_name
class RepanierPictureWidget(widgets.TextInput):
    """Text input rendered as an AJAX picture-upload widget.

    The field value is the picture's storage path; the template shows either
    the current picture or an upload control, never both.
    """
    template_name = get_repanier_template_name("widgets/picture.html")
    def __init__(self, *args, **kwargs):
        # Pop widget-specific options before TextInput sees the kwargs.
        self.upload_to = kwargs.pop("upload_to", "pictures")
        self.size = kwargs.pop("size", SIZE_M)
        self.bootstrap = kwargs.pop("bootstrap", False)
        super().__init__(*args, **kwargs)
    def get_context(self, name, value, attrs):
        """Extend the template context with the upload URL and display state."""
        context = super().get_context(name, value, attrs)
        context["upload_url"] = reverse(
            "repanier:ajax_picture", args=(self.upload_to, self.size)
        )
        if value:
            # A picture exists: show the preview, hide the upload control.
            context["repanier_file_path"] = file_path = str(value)
            context["repanier_display_picture"] = "inline"
            context["repanier_display_upload"] = "none"
            context["repanier_file_url"] = default_storage.url(file_path)
        else:
            # No picture yet: hide the preview, show the upload control.
            context["repanier_file_path"] = EMPTY_STRING
            context["repanier_display_picture"] = "none"
            context["repanier_display_upload"] = "inline"
            context["repanier_file_url"] = EMPTY_STRING
        context["repanier_height"] = context["repanier_width"] = self.size
        context["bootstrap"] = self.bootstrap
        return context
    class Media:
        js = ("admin/js/jquery.init.js",)
|
pcolmant/repanier
|
repanier/widget/picture.py
|
Python
|
gpl-3.0
| 1,573 | 0 |
"""This will perform basic enrichment on a given IP."""
import csv
import json
import mmap
import os
import socket
import urllib
import dns.resolver
import dns.reversename
from geoip import geolite2
from IPy import IP
from joblib import Parallel, delayed
from netaddr import AddrFormatError, IPSet
# Tor exit-node list: local cache file and the upstream source URL.
torcsv = 'Tor_ip_list_ALL.csv'
sfile = 'http://torstatus.blutmagie.de/ip_list_all.php/Tor_ip_list_ALL.csv'
SUBNET = 0       # last ASN prefix seen; lets mainlookup() skip repeat lookups
INPUTDICT = {}   # result dict of the most recent lookup (module-global cache)
SECTOR_CSV = 'sector.csv'   # substring -> sector classification table
OUTFILE = 'IPLookup-output.csv'
CSVCOLS = '"ip-address","asn","as-name","isp","abuse-1","abuse-2","abuse-3","domain","reverse-dns","type","country","lat","long","tor-node"'
def identify(var):
    """Return the sector mapped to the first column substring found in *var*.

    Scans SECTOR_CSV rows (substring, sector); like the original, the LAST
    matching row wins. Returns "" when nothing matches.
    """
    matched = ""
    with open(SECTOR_CSV) as handle:
        for row in csv.reader(handle):
            if row[0] in var:
                matched = row[1]
    return matched
def lookup(value):
    """Perform a dns request on the given value."""
    # Queries the TXT record for *value* and splits the "a | b | c" payload
    # into a list. NOTE(review): if the answer holds several records or
    # strings, each iteration overwrites `value`, so only the last one is
    # returned -- presumably fine for shadowserver/abusix replies; confirm.
    try:
        answers = dns.resolver.query(value, 'TXT')
        for rdata in answers:
            for txt_string in rdata.strings:
                value = txt_string.replace(" | ", "|")
                value = value.replace(" |", "|").split("|")
    except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
        # Unknown name or no reachable nameserver: report "no data".
        value = []
    return value
def flookup(value, fname, sfile):
    """Return 'true' if *value* occurs in file *fname*, else 'false'.

    If *fname* does not exist it is first downloaded from the URL *sfile*
    (used for the Tor exit-node list).

    Fixes: the original leaked both the file handle and the mmap (neither
    was ever closed); both are now released deterministically.  mmap.find()
    also requires bytes on Python 3, so str input is ASCII-encoded (a no-op
    on Python 2, where str already is bytes).
    """
    try:
        fhandle = open(fname)
    except IOError:
        # Cache miss: fetch the source file once, then retry.
        source = urllib.URLopener()
        source.retrieve(sfile, fname)
        fhandle = open(fname)
    try:
        if isinstance(value, str):
            value = value.encode()
        search = mmap.mmap(fhandle.fileno(), 0, access=mmap.ACCESS_READ)
        try:
            return 'true' if search.find(value) != -1 else 'false'
        finally:
            search.close()
    finally:
        fhandle.close()
def iprange(sample, sub):
    """Return True if IP *sample* lies inside subnet *sub*, else False.

    :param sample: dotted-quad IP address string
    :param sub: CIDR prefix string, or 0 when no previous subnet is known

    Fixes: the original tested ``sub is not 0`` -- an *identity* comparison
    with an int literal that only works through CPython's small-int caching;
    use ``!=``.  The fall-through path (valid subnet, sample not inside)
    also returned None implicitly; it now returns False explicitly, which
    is backward compatible with the ``is True`` check in mainlookup().
    """
    if sub != 0:
        try:
            ipset = IPSet([sub])
            if sample in ipset:
                return True
        except AddrFormatError:
            return False
    return False
def mainlookup(var):
    """Wrap the main lookup and generated the dictionary."""
    # Enriches the IP (or hostname) `var`, caches the result in module
    # globals (INPUTDICT for de-duplication, SUBNET for same-prefix
    # skipping), appends a CSV row via csvout() and returns the record
    # as pretty-printed JSON.
    global SUBNET
    global INPUTDICT
    var = ''.join(var.split())  # strip all whitespace
    if IP(var).iptype() != 'PRIVATE' and IP(var).version() == 4:
        if iprange(var, SUBNET) is True:
            # Same subnet as the previous lookup: skip re-enrichment.
            print
        elif INPUTDICT.get("ip-address") == var:
            # Identical to the previous address: skip re-enrichment.
            print
        else:
            try:
                socket.inet_aton(var)
            except socket.error:
                # Not a dotted quad -- resolve the hostname first.
                var = socket.gethostbyname(var)
            contactlist = []
            # Octets reversed for the DNS-based lookup zones.
            rvar = '.'.join(reversed(str(var).split(".")))
            origin = lookup(rvar + '.origin.asn.shadowserver.org')
            # NOTE(review): assumes lookup() returned at least two fields;
            # an empty reply would raise IndexError here.
            SUBNET = origin[1]
            try:
                contact = lookup(rvar + '.abuse-contacts.abusix.org')
                contactlist = str(contact[0]).split(",")
            except IndexError:
                contactlist = []
            # Pad so contactlist[0..2] below can always be indexed.
            contactlist.extend(["-"] * (4 - len(contactlist)))
            try:
                addr = dns.reversename.from_address(var)
                rdns = str(dns.resolver.query(addr, "PTR")[0])
            except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
                rdns = ""
            match = geolite2.lookup(var)
            if match is None or match.location is None:
                country = ''
                location = ["", ""]
            else:
                country = match.country
                location = match.location
            tor = flookup(var, torcsv, sfile)
            # Sector classification: try the domain first, then fall back
            # to the first abuse contact.
            category = identify(origin[4])
            if category == "":
                category = identify(contactlist[0])
            # Pad so origin[0..5] can always be indexed.
            origin.extend(["-"] * (6 - len(origin)))
            INPUTDICT = {
                'abuse-1': contactlist[0],
                'abuse-2': contactlist[1],
                'abuse-3': contactlist[2],
                'as-name': origin[2],
                'asn': origin[0],
                'country': country,
                'descr': origin[5],
                'domain': origin[4],
                'ip-address': var,
                'lat': location[0],
                'long': location[1],
                'reverse-dns': rdns,
                'tor-node': tor,
                'sector': category,
            }
    else:
        # Private or non-IPv4 input: emit an empty record.
        INPUTDICT = {
            'abuse-1': "", 'abuse-2': "", 'abuse-3': "", 'as-name': "",
            'asn': "", 'country': "", 'descr': "", 'domain': "",
            'domain-count': "", 'ip-address': var, 'lat': "", 'long': "",
            'reverse-dns': "", 'tor-node': "", 'sector': "",
        }
    INPUTDICT['ip-address'] = var
    out = json.dumps(
        INPUTDICT,
        indent=4,
        sort_keys=True,
        ensure_ascii=False)
    csvout(INPUTDICT)
    return out
def batch(inputfile):
    """Handle batch lookups using file based input."""
    # Recreate OUTFILE with the CSV header row, then fan the per-line
    # lookups out over 100 joblib workers (the work is DNS/network bound).
    if os.path.isfile(OUTFILE):
        os.remove(OUTFILE)
    fhandle = open(OUTFILE, "a")
    header = 0
    if header == 0:
        fhandle.write(str(CSVCOLS) + "\n")
        header = 1
    fhandle.close()
    with open(inputfile) as fhandle:
        Parallel(n_jobs=100, verbose=51)(delayed(mainlookup)(i.rstrip('\n'))
                                         for i in fhandle)
def single(lookupvar):
    """Do a single IP lookup and return its JSON record."""
    return mainlookup(lookupvar)
def csvout(inputdict):
    """Append one enrichment record to OUTFILE as a fully quoted CSV row."""
    with open(OUTFILE, "a") as handle:
        writer = csv.writer(handle, quoting=csv.QUOTE_ALL)
        writer.writerow((
            inputdict['ip-address'],
            inputdict['asn'],
            inputdict['as-name'],
            inputdict['descr'],
            inputdict['abuse-1'],
            inputdict['abuse-2'],
            inputdict['abuse-3'],
            inputdict['domain'],
            inputdict['reverse-dns'],
            inputdict['sector'],
            inputdict['country'],
            inputdict['lat'],
            inputdict['long'],
            inputdict['tor-node']))
def main():
    """Command-line entry point: run a single or a batch lookup.

    Fixes: ``required`` was given the *string* "false", which is truthy,
    so argparse treated both optional flags as mandatory and the help
    fallback in the else branch was unreachable.  Pass the boolean False.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-t",
                        choices=('single', 'batch'),
                        required=False,
                        metavar="request-type",
                        help="Either single or batch request")
    parser.add_argument("-v",
                        required=False,
                        metavar="value",
                        help="The value of the request")
    args = parser.parse_args()
    if args.t == "single":
        print(single(args.v))
    elif args.t == "batch":
        batch(args.v)
    else:
        # No/unknown request type: show usage instead of erroring out.
        parser.print_help()
# Run the CLI only when executed as a script.
if __name__ == "__main__":
    main()
|
zebde/RobIP
|
iplookup.py
|
Python
|
gpl-3.0
| 6,811 | 0.000147 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Module for the downloading, checking, and unpacking of necessary files into the source tree.
"""
import argparse
import configparser
import enum
import hashlib
import shutil
import subprocess
import sys
import urllib.request
from pathlib import Path
from _common import ENCODING, USE_REGISTRY, ExtractorEnum, get_logger, \
get_chromium_version, add_common_params
from _extraction import extract_tar_file, extract_with_7z, extract_with_winrar
sys.path.insert(0, str(Path(__file__).parent / 'third_party'))
import schema #pylint: disable=wrong-import-position
sys.path.pop(0)
# Constants
class HashesURLEnum(str, enum.Enum):
    """Enum for supported hash URL schemes"""
    # str mixin lets members compare equal to plain strings from downloads.ini
    chromium = 'chromium'
class HashMismatchError(BaseException):
    """Exception for computed hashes not matching expected hashes"""
    # NOTE(review): derives from BaseException rather than Exception, so a
    # broad `except Exception` will NOT catch it -- presumably intentional
    # to make hash failures fatal; confirm before changing the base class.
class DownloadInfo: #pylint: disable=too-few-public-methods
    """Representation of an downloads.ini file for downloading files.

    Parses one or more downloads.ini files, validates them against a schema,
    and exposes each section as an attribute-style properties object.
    """
    # Hash algorithm keys a section may carry directly.
    _hashes = ('md5', 'sha1', 'sha256', 'sha512')
    # Separator for 'hash_url' values: processor|filename|url
    hash_url_delimiter = '|'
    _nonempty_keys = ('url', 'download_filename')
    _optional_keys = (
        'version',
        'strip_leading_dirs',
    )
    # Keys exposed unchanged through _DownloadsProperties.__getattr__.
    _passthrough_properties = (*_nonempty_keys, *_optional_keys, 'extractor', 'output_path')
    # Interpolation defaults injected into every section; filtered back out
    # before schema validation by _parse_data's _section_generator.
    _ini_vars = {
        '_chromium_version': get_chromium_version(),
    }

    @staticmethod
    def _is_hash_url(value):
        # A hash URL is exactly three '|'-separated fields whose first field
        # names a supported processor (see HashesURLEnum).
        return value.count(DownloadInfo.hash_url_delimiter) == 2 and value.split(
            DownloadInfo.hash_url_delimiter)[0] in iter(HashesURLEnum)

    # Schema for a single downloads.ini: each section maps to a dict of
    # required keys, optional keys, an optional extractor, and hash entries.
    _schema = schema.Schema({
        schema.Optional(schema.And(str, len)): {
            **{x: schema.And(str, len)
               for x in _nonempty_keys},
            'output_path': (lambda x: str(Path(x).relative_to(''))),
            **{schema.Optional(x): schema.And(str, len)
               for x in _optional_keys},
            schema.Optional('extractor'): schema.Or(ExtractorEnum.TAR, ExtractorEnum.SEVENZIP,
                                                    ExtractorEnum.WINRAR),
            schema.Optional(schema.Or(*_hashes)): schema.And(str, len),
            schema.Optional('hash_url'): lambda x: DownloadInfo._is_hash_url(x), #pylint: disable=unnecessary-lambda
        }
    })

    class _DownloadsProperties: #pylint: disable=too-few-public-methods
        """Attribute-style view over one parsed downloads.ini section."""
        def __init__(self, section_dict, passthrough_properties, hashes):
            self._section_dict = section_dict
            self._passthrough_properties = passthrough_properties
            self._hashes = hashes

        def has_hash_url(self):
            """
            Returns a boolean indicating whether the current
            download has a hash URL"""
            return 'hash_url' in self._section_dict

        def __getattr__(self, name):
            # Passthrough keys come straight from the INI section (None if absent).
            if name in self._passthrough_properties:
                return self._section_dict.get(name, fallback=None)
            if name == 'hashes':
                # Collect all present hash entries; 'hash_url' values are split
                # into their [processor, filename, url] components.
                hashes_dict = {}
                for hash_name in (*self._hashes, 'hash_url'):
                    value = self._section_dict.get(hash_name, fallback=None)
                    if value:
                        if hash_name == 'hash_url':
                            value = value.split(DownloadInfo.hash_url_delimiter)
                        hashes_dict[hash_name] = value
                return hashes_dict
            raise AttributeError('"{}" has no attribute "{}"'.format(type(self).__name__, name))

    def _parse_data(self, path):
        """
        Parses an INI file located at path

        Raises schema.SchemaError if validation fails
        """
        def _section_generator(data):
            # Yield each real section with the injected _ini_vars removed,
            # so only user-authored keys reach schema validation.
            for section in data:
                if section == configparser.DEFAULTSECT:
                    continue
                yield section, dict(
                    filter(lambda x: x[0] not in self._ini_vars, data.items(section)))
        new_data = configparser.ConfigParser(defaults=self._ini_vars)
        with path.open(encoding=ENCODING) as ini_file:
            new_data.read_file(ini_file, source=str(path))
        try:
            self._schema.validate(dict(_section_generator(new_data)))
        except schema.SchemaError as exc:
            get_logger().error('downloads.ini failed schema validation (located in %s)', path)
            raise exc
        return new_data

    def __init__(self, ini_paths):
        """Reads an iterable of pathlib.Path to download.ini files"""
        self._data = configparser.ConfigParser()
        for path in ini_paths:
            self._data.read_dict(self._parse_data(path))

    def __getitem__(self, section):
        """
        Returns an object with keys as attributes and
        values already pre-processed strings
        """
        return self._DownloadsProperties(self._data[section], self._passthrough_properties,
                                         self._hashes)

    def __contains__(self, item):
        """
        Returns True if item is a name of a section; False otherwise.
        """
        return self._data.has_section(item)

    def __iter__(self):
        """Returns an iterator over the section names"""
        return iter(self._data.sections())

    def properties_iter(self):
        """Iterator for the download properties sorted by output path"""
        return sorted(
            map(lambda x: (x, self[x]), self), key=(lambda x: str(Path(x[1].output_path))))
class _UrlRetrieveReportHook: #pylint: disable=too-few-public-methods
    """Hook for urllib.request.urlretrieve to log progress information to console"""

    def __init__(self):
        # Length of the last status line printed, so it can be blanked out.
        self._max_len_printed = 0
        # Last percentage rendered; used to suppress redundant redraws.
        self._last_percentage = None

    def __call__(self, block_count, block_size, total_size):
        """Called by urlretrieve with (blocks transferred, block size, total bytes)."""
        # Use total_blocks to handle case total_size < block_size
        # total_blocks is ceiling of total_size / block_size
        # Ceiling division from: https://stackoverflow.com/a/17511341
        total_blocks = -(-total_size // block_size)
        if total_blocks > 0:
            # Do not needlessly update the console. Since the console is
            # updated synchronously, we don't want updating the console to
            # bottleneck downloading. Thus, only refresh the output when the
            # displayed value should change.
            percentage = round(block_count / total_blocks, ndigits=3)
            if percentage == self._last_percentage:
                return
            self._last_percentage = percentage
            # Overwrite the previous line with spaces before printing the new one.
            print('\r' + ' ' * self._max_len_printed, end='')
            status_line = 'Progress: {:.1%} of {:,d} B'.format(percentage, total_size)
        else:
            # Server did not report a Content-Length; show a running byte count.
            downloaded_estimate = block_count * block_size
            status_line = 'Progress: {:,d} B of unknown size'.format(downloaded_estimate)
        self._max_len_printed = len(status_line)
        print('\r' + status_line, end='')
def _download_via_urllib(url, file_path, show_progress, disable_ssl_verification):
    """Download url to file_path with urllib, optionally showing progress.

    When disable_ssl_verification is True, the default HTTPS context is
    temporarily swapped for an unverified one and restored afterwards.
    """
    reporthook = None
    if show_progress:
        reporthook = _UrlRetrieveReportHook()
    if disable_ssl_verification:
        import ssl
        # TODO: Remove this or properly implement disabling SSL certificate verification
        orig_https_context = ssl._create_default_https_context #pylint: disable=protected-access
        ssl._create_default_https_context = ssl._create_unverified_context #pylint: disable=protected-access
    try:
        urllib.request.urlretrieve(url, str(file_path), reporthook=reporthook)
    finally:
        # Try to reduce damage of hack by reverting original HTTPS context ASAP
        if disable_ssl_verification:
            ssl._create_default_https_context = orig_https_context #pylint: disable=protected-access
    if show_progress:
        # Terminate the \r-based progress line with a newline.
        print()
def _download_if_needed(file_path, url, show_progress, disable_ssl_verification):
    """
    Downloads a file from url to the specified path file_path if necessary.

    If show_progress is True, download progress is printed to the console.

    Prefers the 'curl' CLI (which supports resuming via -C -) and falls back
    to urllib when curl is not on PATH.
    """
    if file_path.exists():
        get_logger().info('%s already exists. Skipping download.', file_path)
        return
    # File name for a partially downloaded file; renamed only on completion so
    # an interrupted download is never mistaken for a finished one.
    tmp_file_path = file_path.with_name(file_path.name + '.partial')
    if tmp_file_path.exists():
        get_logger().debug('Resuming downloading URL %s ...', url)
    else:
        get_logger().debug('Downloading URL %s ...', url)
    # Perform download
    if shutil.which('curl'):
        get_logger().debug('Using curl')
        try:
            subprocess.run(['curl', '-L', '-o', str(tmp_file_path), '-C', '-', url], check=True)
        except subprocess.CalledProcessError as exc:
            get_logger().error('curl failed. Re-run the download command to resume downloading.')
            raise exc
    else:
        get_logger().debug('Using urllib')
        _download_via_urllib(url, tmp_file_path, show_progress, disable_ssl_verification)
    # Download complete; rename file
    tmp_file_path.rename(file_path)
def _chromium_hashes_generator(hashes_path):
    """Yield (hash_name, hash_hex) pairs parsed from a Chromium-style hashes file.

    Lines whose algorithm is not available in this Python's hashlib are
    skipped with a warning instead of being yielded.
    """
    with hashes_path.open(encoding=ENCODING) as hashes_file:
        raw_lines = hashes_file.read().splitlines()
    for hash_name, hash_hex, _ in (line.lower().split(' ') for line in raw_lines):
        if hash_name not in hashlib.algorithms_available:
            get_logger().warning('Skipping unknown hash algorithm: %s', hash_name)
            continue
        yield hash_name, hash_hex
def _get_hash_pairs(download_properties, cache_dir):
"""Generator of (hash_name, hash_hex) for the given download"""
for entry_type, entry_value in download_properties.hashes.items():
if entry_type == 'hash_url':
hash_processor, hash_filename, _ = entry_value
if hash_processor == 'chromium':
yield from _chromium_hashes_generator(cache_dir / hash_filename)
else:
raise ValueError('Unknown hash_url processor: %s' % hash_processor)
else:
yield entry_type, entry_value
def retrieve_downloads(download_info, cache_dir, show_progress, disable_ssl_verification=False):
    """
    Retrieve downloads into the downloads cache.

    download_info is the DownloadInfo of downloads to retrieve.
    cache_dir is the pathlib.Path to the downloads cache.
    show_progress is a boolean indicating if download progress is printed to the console.
    disable_ssl_verification is a boolean indicating if certificate verification
    should be disabled for downloads using HTTPS.

    Raises FileNotFoundError if the downloads path does not exist.
    Raises NotADirectoryError if the downloads path is not a directory.
    """
    if not cache_dir.exists():
        raise FileNotFoundError(cache_dir)
    if not cache_dir.is_dir():
        raise NotADirectoryError(cache_dir)
    for download_name, download_properties in download_info.properties_iter():
        get_logger().info('Downloading "%s" to "%s" ...', download_name,
                          download_properties.download_filename)
        download_path = cache_dir / download_properties.download_filename
        _download_if_needed(download_path, download_properties.url, show_progress,
                            disable_ssl_verification)
        # A download may declare its expected hashes via a separate hashes
        # file; fetch that file too so check_downloads can verify later.
        if download_properties.has_hash_url():
            get_logger().info('Downloading hashes for "%s"', download_name)
            _, hash_filename, hash_url = download_properties.hashes['hash_url']
            _download_if_needed(cache_dir / hash_filename, hash_url, show_progress,
                                disable_ssl_verification)
def check_downloads(download_info, cache_dir):
    """
    Check integrity of the downloads cache.

    download_info is the DownloadInfo of downloads to unpack.
    cache_dir is the pathlib.Path to the downloads cache.

    Raises HashMismatchError when the computed and expected hashes do not match.
    """
    for download_name, download_properties in download_info.properties_iter():
        get_logger().info('Verifying hashes for "%s" ...', download_name)
        download_path = cache_dir / download_properties.download_filename
        # Collect the expected hashes first, then feed the archive through all
        # hashers in fixed-size chunks. The previous implementation read the
        # entire archive into memory at once, which is wasteful for the
        # multi-gigabyte source tarballs this tool handles.
        expected_pairs = list(_get_hash_pairs(download_properties, cache_dir))
        hashers = {hash_name: hashlib.new(hash_name) for hash_name, _ in expected_pairs}
        with download_path.open('rb') as file_obj:
            while True:
                chunk = file_obj.read(1024 * 1024)
                if not chunk:
                    break
                for hasher in hashers.values():
                    hasher.update(chunk)
        for hash_name, hash_hex in expected_pairs:
            get_logger().debug('Verifying %s hash...', hash_name)
            if hashers[hash_name].hexdigest().lower() != hash_hex.lower():
                raise HashMismatchError(download_path)
def unpack_downloads(download_info, cache_dir, output_dir, extractors=None):
    """
    Unpack downloads in the downloads cache to output_dir. Assumes all downloads are retrieved.

    download_info is the DownloadInfo of downloads to unpack.
    cache_dir is the pathlib.Path directory containing the download cache
    output_dir is the pathlib.Path directory to unpack the downloads to.
    extractors is a dictionary of PlatformEnum to a command or path to the
    extractor binary. Defaults to 'tar' for tar, and '_use_registry' for 7-Zip and WinRAR.

    May raise undetermined exceptions during archive unpacking.
    """
    for download_name, download_properties in download_info.properties_iter():
        download_path = cache_dir / download_properties.download_filename
        get_logger().info('Unpacking "%s" to %s ...', download_name,
                          download_properties.output_path)
        # Sections without an explicit 'extractor' key default to tar.
        extractor_name = download_properties.extractor or ExtractorEnum.TAR
        if extractor_name == ExtractorEnum.SEVENZIP:
            extractor_func = extract_with_7z
        elif extractor_name == ExtractorEnum.WINRAR:
            extractor_func = extract_with_winrar
        elif extractor_name == ExtractorEnum.TAR:
            extractor_func = extract_tar_file
        else:
            raise NotImplementedError(extractor_name)
        # Optional leading path components to strip while extracting.
        if download_properties.strip_leading_dirs is None:
            strip_leading_dirs_path = None
        else:
            strip_leading_dirs_path = Path(download_properties.strip_leading_dirs)
        extractor_func(
            archive_path=download_path,
            output_dir=output_dir / Path(download_properties.output_path),
            relative_to=strip_leading_dirs_path,
            extractors=extractors)
def _add_common_args(parser):
parser.add_argument(
'-i',
'--ini',
type=Path,
nargs='+',
help='The downloads INI to parse for downloads. Can be specified multiple times.')
parser.add_argument(
'-c', '--cache', type=Path, required=True, help='Path to the directory to cache downloads.')
def _retrieve_callback(args):
    """Handler for the 'retrieve' subcommand: download everything, then verify hashes."""
    retrieve_downloads(
        DownloadInfo(args.ini), args.cache, args.show_progress, args.disable_ssl_verification)
    try:
        check_downloads(DownloadInfo(args.ini), args.cache)
    except HashMismatchError as exc:
        # A corrupted or tampered download is fatal; exit non-zero for scripts.
        get_logger().error('File checksum does not match: %s', exc)
        sys.exit(1)
def _unpack_callback(args):
    """Handler for the 'unpack' subcommand: map extractor types to user-supplied binaries."""
    extractors = {
        ExtractorEnum.SEVENZIP: args.sevenz_path,
        ExtractorEnum.WINRAR: args.winrar_path,
        ExtractorEnum.TAR: args.tar_path,
    }
    unpack_downloads(DownloadInfo(args.ini), args.cache, args.output, extractors)
def main():
    """CLI Entrypoint.

    Builds the 'retrieve' and 'unpack' subcommands and dispatches to the
    callback registered by the chosen subparser.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    add_common_params(parser)
    subparsers = parser.add_subparsers(title='Download actions', dest='action')
    # retrieve
    retrieve_parser = subparsers.add_parser(
        'retrieve',
        help='Retrieve and check download files',
        description=('Retrieves and checks downloads without unpacking. '
                     'The downloader will attempt to use CLI command "curl". '
                     'If it is not present, Python\'s urllib will be used. However, only '
                     'the CLI-based downloaders can be resumed if the download is aborted.'))
    _add_common_args(retrieve_parser)
    retrieve_parser.add_argument(
        '--hide-progress-bar',
        action='store_false',
        dest='show_progress',
        help='Hide the download progress.')
    retrieve_parser.add_argument(
        '--disable-ssl-verification',
        action='store_true',
        help='Disables certification verification for downloads using HTTPS.')
    retrieve_parser.set_defaults(callback=_retrieve_callback)
    # unpack
    unpack_parser = subparsers.add_parser(
        'unpack',
        help='Unpack download files',
        description='Verifies hashes of and unpacks download files into the specified directory.')
    _add_common_args(unpack_parser)
    unpack_parser.add_argument(
        '--tar-path',
        default='tar',
        help=('(Linux and macOS only) Command or path to the BSD or GNU tar '
              'binary for extraction. Default: %(default)s'))
    unpack_parser.add_argument(
        '--7z-path',
        dest='sevenz_path',
        default=USE_REGISTRY,
        help=('Command or path to 7-Zip\'s "7z" binary. If "_use_registry" is '
              'specified, determine the path from the registry. Default: %(default)s'))
    unpack_parser.add_argument(
        '--winrar-path',
        dest='winrar_path',
        default=USE_REGISTRY,
        help=('Command or path to WinRAR\'s "winrar" binary. If "_use_registry" is '
              'specified, determine the path from the registry. Default: %(default)s'))
    unpack_parser.add_argument('output', type=Path, help='The directory to unpack to.')
    unpack_parser.set_defaults(callback=_unpack_callback)
    args = parser.parse_args()
    # argparse does not require a subcommand by default, so without this guard
    # invoking the script with no action crashed with AttributeError on
    # args.callback instead of printing usage information.
    if not hasattr(args, 'callback'):
        parser.error('A download action is required (retrieve or unpack)')
    args.callback(args)
if __name__ == '__main__':
    main()
|
Eloston/ungoogled-chromium
|
utils/downloads.py
|
Python
|
bsd-3-clause
| 17,917 | 0.003628 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for Recognize
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-speech
# [START speech_v1p1beta1_generated_Speech_Recognize_async]
from google.cloud import speech_v1p1beta1
async def sample_recognize():
    """Send a Recognize request with inline audio content and print the response.

    NOTE(review): "language_code_value" and b'content_blob' are generated
    placeholders — replace them with a real BCP-47 language code and actual
    audio bytes before running (the file header states modifications may be
    required).
    """
    # Create a client
    client = speech_v1p1beta1.SpeechAsyncClient()
    # Initialize request argument(s)
    config = speech_v1p1beta1.RecognitionConfig()
    config.language_code = "language_code_value"
    audio = speech_v1p1beta1.RecognitionAudio()
    audio.content = b'content_blob'
    request = speech_v1p1beta1.RecognizeRequest(
        config=config,
        audio=audio,
    )
    # Make the request
    response = await client.recognize(request=request)
    # Handle the response
    print(response)
# [END speech_v1p1beta1_generated_Speech_Recognize_async]
|
googleapis/python-speech
|
samples/generated_samples/speech_v1p1beta1_generated_speech_recognize_async.py
|
Python
|
apache-2.0
| 1,643 | 0.000609 |
#!/usr/bin/env python2
#
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
###########################################################################
# #
# ESPResSo++ Python script for tabulated GROMACS simulation #
# #
###########################################################################
import sys
import time
import espressopp
import mpi4py.MPI as MPI
import logging
import copy
import math
from espressopp import Real3D, Int3D
from espressopp.tools import gromacs
from espressopp.tools import decomp
from espressopp.tools import timers
import pathintegral
def genTabPotentials(tabfilesnb):
    """Convert GROMACS .xvg tables to ESPResSo++ .tab files and build potentials.

    Returns a dict mapping "<type1>_<type2>" (single characters taken from
    fixed positions 6 and 8 of the file name, e.g. "table_O_O.xvg") to a
    Tabulated potential with cutoff rc.

    NOTE(review): relies on module-level sigma/epsilon/c6/c12/rc and on the
    exact "table_X_Y.xvg" naming convention.
    """
    potentials = {}
    for fg in tabfilesnb:
        fe = fg.split(".")[0]+".tab" # name of espressopp file
        gromacs.convertTable(fg, fe, sigma, epsilon, c6, c12)
        pot = espressopp.interaction.Tabulated(itype=3, filename=fe, cutoff=rc)
        t1, t2 = fg[6], fg[8] # type 1, type 2
        potentials.update({t1+"_"+t2: pot})
        print "created", t1, t2, fe
    return potentials
# This example reads in a gromacs water system (SPC/Fw) treated with reaction field. See the corresponding gromacs grompp.mdp parameter file.
# Output of gromacs energies and esp energies should be the same
# simulation parameters (nvt = False is nve)
steps = 1 #100
check = 1 #steps/10
rc = 0.9 # Verlet list cutoff
skin = 0.14
timestep = 0.0002
# parameters to convert GROMACS tabulated potential file
sigma = 1.0
epsilon = 1.0
c6 = 1.0
c12 = 1.0
# GROMACS setup files
grofile = "conf.gro"
topfile = "topol.top"
# this calls the gromacs parser for processing the top file (and included files) and the conf file
# The variables at the beginning defaults, types, etc... can be found by calling
# gromacs.read(grofile,topfile) without return values. It then prints out the variables to be unpacked
defaults, types, atomtypes, masses, charges, atomtypeparameters, bondtypes, bondtypeparams, angletypes, angletypeparams, exclusions, x, y, z, resname, resid, Lx, Ly, Lz= gromacs.read(grofile,topfile)
######################################################################
## IT SHOULD BE UNNECESSARY TO MAKE MODIFICATIONS BELOW THIS LINE ##
######################################################################
#types, bonds, angles, dihedrals, x, y, z, vx, vy, vz, Lx, Ly, Lz = gromacs.read(grofile,topfile)
num_particles = len(x)
density = num_particles / (Lx * Ly * Lz)
size = (Lx, Ly, Lz)
# Build the ESPResSo++ system: box, RNG, domain decomposition over MPI ranks.
sys.stdout.write('Setting up simulation ...\n')
system = espressopp.System()
system.rng = espressopp.esutil.RNG()
system.bc = espressopp.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
comm = MPI.COMM_WORLD
nodeGrid = decomp.nodeGrid(comm.size,size,rc,skin)
cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
# setting up GROMACS interaction stuff
# create a force capped Lennard-Jones interaction that uses a verlet list
verletlist = espressopp.VerletList(system, rc)
#interaction = espressopp.interaction.VerletListLennardJonesGromacs(verletlist)
# add particles to the system and then decompose
props = ['id', 'pos', 'v', 'type', 'mass', 'q']
allParticles = []
for pid in range(num_particles):
    part = [pid + 1, Real3D(x[pid], y[pid], z[pid]),
            Real3D(0, 0, 0), types[pid], masses[pid], charges[pid]]
    allParticles.append(part)
system.storage.addParticles(allParticles, *props)
#system.storage.decompose()
# set up LJ interaction according to the parameters read from the .top file
#ljinteraction=gromacs.setLennardJonesInteractions(system, defaults, atomtypeparameters, verletlist,rc)
########## tabulated nb interactions ############
# Non-bonded interactions come from tabulated O-O / H-O / H-H potentials.
tabfilesnb = ["table_O_O.xvg", "table_H_O.xvg", "table_H_H.xvg"]
potentials = genTabPotentials(tabfilesnb)
tabulatedinteraction = espressopp.interaction.VerletListTabulated(verletlist)
tabulatedinteraction.setPotential(0, 0, potentials["O_O"])
tabulatedinteraction.setPotential(0, 1, potentials["H_O"])
tabulatedinteraction.setPotential(1, 1, potentials["H_H"])
system.addInteraction(tabulatedinteraction)
# set up angle interactions according to the parameters read from the .top file
angleinteractions=gromacs.setAngleInteractions(system, angletypes, angletypeparams)
# set up bonded interactions according to the parameters read from the .top file
bondedinteractions=gromacs.setBondedInteractions(system, bondtypes, bondtypeparams)
# exlusions, i.e. pairs of atoms not considered for the non-bonded part. Those are defined either by bonds which automatically generate an exclusion. Or by the nregxcl variable
verletlist.exclude(exclusions)
# langevin thermostat
langevin = espressopp.integrator.LangevinThermostat(system)
langevin.gamma = 10
langevin.temperature = 2.4942 # kT in gromacs units
integrator = espressopp.integrator.VelocityVerlet(system)
integrator.addExtension(langevin)
integrator.dt = timestep
print "POT", potentials
# Replace each classical particle by a P-bead ring polymer (path integral).
pathintegral.createPathintegralSystem(allParticles, props, types, system, langevin, potentials, P=16)
system.storage.decompose()
num_particles = int(espressopp.analysis.NPart(system).compute())
# print simulation parameters
print ''
print 'number of particles =', num_particles
print 'density = %.4f' % (density)
print 'rc =', rc
print 'dt =', integrator.dt
print 'skin =', system.skin
print 'steps =', steps
print 'NodeGrid = %s' % (nodeGrid,)
print 'CellGrid = %s' % (cellGrid,)
print ''
# analysis
configurations = espressopp.analysis.Configurations(system)
configurations.gather()
temperature = espressopp.analysis.Temperature(system)
pressure = espressopp.analysis.Pressure(system)
pressureTensor = espressopp.analysis.PressureTensor(system)
print "i*timestep,Eb, EAng, ETab, Ek, Etotal T"
fmt='%5.5f %15.8g %15.8g %15.8g %15.8g %15.8f %15.8f\n'
outfile = open("esp.dat", "w")
start_time = time.clock()
espressopp.tools.psfwrite("system.psf", system)
#espressopp.tools.decomp.tuneSkin(system, integrator)
#espressopp.tools.analyse.info(system, integrator)
espressopp.tools.fastwritexyz("traj.xyz", system, append=False, scale=10)
# Main loop: report energies every steps/check integration steps.
for i in range(check):
  T = temperature.compute()
  P = pressure.compute()
  Eb = 0
  EAng = 0
  ETab=0
  #for bd in bondedinteractions.values(): Eb+=bd.computeEnergy()
  #for ang in angleinteractions.values(): EAng+=ang.computeEnergy()
  #ELj= ljinteraction.computeEnergy()
  #EQQ= qq_interactions.computeEnergy()
  ETab= tabulatedinteraction.computeEnergy()
  T = temperature.compute()
  Ek = 0.5 * T * (3 * num_particles)
  Etotal = Ek+Eb+EAng+ETab
  sys.stdout.write(fmt%(i*timestep,Eb, EAng, ETab, Ek, Etotal, T))
  outfile.write(fmt%(i*timestep,Eb, EAng, ETab, Ek, Etotal, T))
  #espressopp.tools.pdb.pdbfastwrite("traj.pdb", system, append=True)
  espressopp.tools.fastwritexyz("traj.xyz", system, append=True, scale=10)
  integrator.run(steps/check) # print out every steps/check steps
  #espressopp.tools.vmd.imd_positions(system, sock)
# print timings and neighbor list information
end_time = time.clock()
timers.show(integrator.getTimers(), precision=2)
espressopp.tools.analyse.final_info(system, integrator, verletlist, start_time, end_time)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPU time = %.1f\n' % (end_time - start_time))
|
niktre/espressopp
|
testsuite/pi_water/water.py
|
Python
|
gpl-3.0
| 8,192 | 0.007935 |
# encoding: utf-8
from __future__ import absolute_import, division, print_function
import numpy as np
import tables
from liam2.data import merge_arrays, get_fields, index_table_light, merge_array_records
from liam2.utils import timed, loop_wh_progress, merge_items
__version__ = "0.4"
def get_group_fields(node):
    """Map each child table's name to its field list; empty dict if node is None."""
    if node is None:
        return {}
    # noinspection PyProtectedMember
    return dict((table._v_name, get_fields(table)) for table in node._f_iter_nodes())
def merge_group(parent1, parent2, name, output_file, index_col):
    """Merge the HDF5 group `name` from two input files into output_file.

    For every entity table present in either input, rows are merged
    period-by-period (periods indexed via index_col). Prints progress to
    stdout; silently skips the group when it exists in neither input.
    """
    print()
    print(name)
    print('=' * len(name))
    group1 = getattr(parent1, name, None)
    group2 = getattr(parent2, name, None)
    if group1 is None and group2 is None:
        print("node not found in either input files, skipped")
        return
    output_group = output_file.create_group("/", name)
    fields1 = get_group_fields(group1)
    fields2 = get_group_fields(group2)
    ent_names1 = set(fields1.keys())
    ent_names2 = set(fields2.keys())
    for ent_name in sorted(ent_names1 | ent_names2):
        print()
        print(ent_name)
        ent_fields1 = fields1.get(ent_name, [])
        ent_fields2 = fields2.get(ent_name, [])
        # Output schema is the union of both inputs' fields.
        output_fields = merge_items(ent_fields1, ent_fields2)
        output_table = output_file.create_table(output_group, ent_name,
                                                np.dtype(output_fields))
        if ent_name in ent_names1:
            table1 = getattr(group1, ent_name)
            # noinspection PyProtectedMember
            print(" * indexing table from %s ..." % group1._v_file.filename,
                  end=' ')
            input1_rows = index_table_light(table1, index_col)
            print("done.")
        else:
            table1 = None
            input1_rows = {}
        if ent_name in ent_names2:
            table2 = getattr(group2, ent_name)
            # noinspection PyProtectedMember
            print(" * indexing table from %s ..." % group2._v_file.filename,
                  end=' ')
            input2_rows = index_table_light(table2, index_col)
            print("done.")
        else:
            table2 = None
            input2_rows = {}
        print(" * merging: ", end=' ')
        input1_periods = set(input1_rows.keys())
        input2_periods = set(input2_rows.keys())
        output_periods = sorted(input1_periods | input2_periods)
        # noinspection PyUnusedLocal
        def merge_period(period_idx, period):
            # Read each input's row slice for this period ((0, 0) => no rows).
            if ent_name in ent_names1:
                start, stop = input1_rows.get(period, (0, 0))
                input1_array = table1.read(start, stop)
            else:
                input1_array = None
            if ent_name in ent_names2:
                start, stop = input2_rows.get(period, (0, 0))
                input2_array = table2.read(start, stop)
            else:
                input2_array = None
            if ent_name in ent_names1 and ent_name in ent_names2:
                # Tables with an 'id' column are merged row-by-id; otherwise
                # records are merged positionally.
                if 'id' in input1_array.dtype.names:
                    assert 'id' in input2_array.dtype.names
                    output_array, _ = merge_arrays(input1_array, input2_array)
                else:
                    output_array = merge_array_records(input1_array,
                                                       input2_array)
            elif ent_name in ent_names1:
                output_array = input1_array
            elif ent_name in ent_names2:
                output_array = input2_array
            else:
                raise Exception("this shouldn't have happened")
            output_table.append(output_array)
            output_table.flush()
        loop_wh_progress(merge_period, output_periods)
        print(" done.")
def merge_h5(input1_path, input2_path, output_path):
    """Merge two LIAM2 HDF5 files into a new file at output_path.

    Merges the 'globals' group (indexed by PERIOD) and the 'entities' group
    (indexed by period). output_path is opened in "w" mode and overwritten.
    """
    input1_file = tables.open_file(input1_path)
    input2_file = tables.open_file(input2_path)
    output_file = tables.open_file(output_path, mode="w")
    input1root = input1_file.root
    input2root = input2_file.root
    merge_group(input1root, input2root, 'globals', output_file, 'PERIOD')
    merge_group(input1root, input2root, 'entities', output_file, 'period')
    input1_file.close()
    input2_file.close()
    output_file.close()
if __name__ == '__main__':
    import sys
    import platform
    print("LIAM HDF5 merge %s using Python %s (%s)\n" %
          (__version__, platform.python_version(), platform.architecture()[0]))
    # Expect exactly three positional arguments: input1, input2, output.
    args = sys.argv
    if len(args) < 4:
        print("Usage: %s inputpath1 inputpath2 outputpath" % args[0])
        sys.exit()
    # timed() runs the merge and reports its wall-clock duration.
    timed(merge_h5, args[1], args[2], args[3])
|
liam2/liam2
|
liam2/merge_h5.py
|
Python
|
gpl-3.0
| 4,745 | 0.000211 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
from datetime import datetime
import time
import unittest
from babel import __version__ as VERSION
from babel.core import Locale, UnknownLocaleError
from babel.dates import format_datetime
from babel.messages import checkers
from babel.messages.plurals import PLURALS
from babel.messages.pofile import read_po
from babel.util import LOCALTZ
from babel._compat import BytesIO
class CheckersTestCase(unittest.TestCase):
# the last msgstr[idx] is always missing except for singular plural forms
    def test_1_num_plurals_checkers(self):
        """num_plurals accepts a singular-only catalog for 1-plural locales."""
        for _locale in [p for p in PLURALS if PLURALS[p][0] == 1]:
            try:
                locale = Locale.parse(_locale)
            except UnknownLocaleError:
                # Just an alias? Not what we're testing here, let's continue
                continue
            # NOTE(review): plural_expr is filled from PLURALS[_locale][0] (the
            # plural *count*) rather than [1] (the expression) as test_2 does;
            # the checker under test appears to only consume nplurals, so
            # confirm before changing. Also, the Language-Team header line ends
            # in "\n" (real newline) while all other header lines use the
            # escaped "\\n" — confirm intentional.
            po_file = (u"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\\n"
"PO-Revision-Date: %(date)s\\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
"Language-Team: %(locale)s <LL@li.org>\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=utf-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
"Generated-By: Babel %(version)s\\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
""" % dict(locale=_locale,
           english_name=locale.english_name,
           version=VERSION,
           year=time.strftime('%Y'),
           date=format_datetime(datetime.now(LOCALTZ),
                                'yyyy-MM-dd HH:mmZ',
                                tzinfo=LOCALTZ, locale=_locale),
           num_plurals=PLURALS[_locale][0],
           plural_expr=PLURALS[_locale][0])).encode('utf-8')
            # This test will fail for revisions <= 406 because so far
            # catalog.num_plurals was neglected
            catalog = read_po(BytesIO(po_file), _locale)
            message = catalog['foobar']
            checkers.num_plurals(catalog, message)
    def test_2_num_plurals_checkers(self):
        """num_plurals tolerates an extra msgstr[idx] for 2-plural locales."""
        # in this testcase we add an extra msgstr[idx], we should be
        # disregarding it
        for _locale in [p for p in PLURALS if PLURALS[p][0] == 2]:
            # 'nn' and 'no' are normalized to 'nn_NO'; plural data is then
            # looked up under the bare language code.
            if _locale in ['nn', 'no']:
                _locale = 'nn_NO'
                num_plurals = PLURALS[_locale.split('_')[0]][0]
                plural_expr = PLURALS[_locale.split('_')[0]][1]
            else:
                num_plurals = PLURALS[_locale][0]
                plural_expr = PLURALS[_locale][1]
            try:
                locale = Locale(_locale)
                date = format_datetime(datetime.now(LOCALTZ),
                                       'yyyy-MM-dd HH:mmZ',
                                       tzinfo=LOCALTZ, locale=_locale)
            except UnknownLocaleError:
                # Just an alias? Not what we're testing here, let's continue
                continue
            po_file = (u"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\\n"
"PO-Revision-Date: %(date)s\\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
"Language-Team: %(locale)s <LL@li.org>\\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=utf-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
"Generated-By: Babel %(version)s\\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
msgstr[2] ""
""" % dict(locale=_locale,
           english_name=locale.english_name,
           version=VERSION,
           year=time.strftime('%Y'),
           date=date,
           num_plurals=num_plurals,
           plural_expr=plural_expr)).encode('utf-8')
            # we should be adding the missing msgstr[0]
            # This test will fail for revisions <= 406 because so far
            # catalog.num_plurals was neglected
            catalog = read_po(BytesIO(po_file), _locale)
            message = catalog['foobar']
            checkers.num_plurals(catalog, message)
def test_3_num_plurals_checkers(self):
for _locale in [p for p in PLURALS if PLURALS[p][0] == 3]:
po_file = (r"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\n"
"PO-Revision-Date: %(date)s\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: %(locale)s <LL@li.org>\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel %(version)s\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
""" % dict(locale=_locale,
english_name=Locale.parse(_locale).english_name,
version=VERSION,
year=time.strftime('%Y'),
date=format_datetime(datetime.now(LOCALTZ),
'yyyy-MM-dd HH:mmZ',
tzinfo=LOCALTZ, locale=_locale),
num_plurals=PLURALS[_locale][0],
plural_expr=PLURALS[_locale][0])).encode('utf-8')
# This test will fail for revisions <= 406 because so far
# catalog.num_plurals was neglected
catalog = read_po(BytesIO(po_file), _locale)
message = catalog['foobar']
checkers.num_plurals(catalog, message)
def test_4_num_plurals_checkers(self):
for _locale in [p for p in PLURALS if PLURALS[p][0] == 4]:
po_file = (r"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\n"
"PO-Revision-Date: %(date)s\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: %(locale)s <LL@li.org>\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel %(version)s\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
msgstr[2] ""
""" % dict(locale=_locale,
english_name=Locale.parse(_locale).english_name,
version=VERSION,
year=time.strftime('%Y'),
date=format_datetime(datetime.now(LOCALTZ),
'yyyy-MM-dd HH:mmZ',
tzinfo=LOCALTZ, locale=_locale),
num_plurals=PLURALS[_locale][0],
plural_expr=PLURALS[_locale][0])).encode('utf-8')
# This test will fail for revisions <= 406 because so far
# catalog.num_plurals was neglected
catalog = read_po(BytesIO(po_file), _locale)
message = catalog['foobar']
checkers.num_plurals(catalog, message)
def test_5_num_plurals_checkers(self):
for _locale in [p for p in PLURALS if PLURALS[p][0] == 5]:
po_file = (r"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\n"
"PO-Revision-Date: %(date)s\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: %(locale)s <LL@li.org>\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel %(version)s\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
msgstr[2] ""
msgstr[3] ""
""" % dict(locale=_locale,
english_name=Locale.parse(_locale).english_name,
version=VERSION,
year=time.strftime('%Y'),
date=format_datetime(datetime.now(LOCALTZ),
'yyyy-MM-dd HH:mmZ',
tzinfo=LOCALTZ, locale=_locale),
num_plurals=PLURALS[_locale][0],
plural_expr=PLURALS[_locale][0])).encode('utf-8')
# This test will fail for revisions <= 406 because so far
# catalog.num_plurals was neglected
catalog = read_po(BytesIO(po_file), _locale)
message = catalog['foobar']
checkers.num_plurals(catalog, message)
def test_6_num_plurals_checkers(self):
for _locale in [p for p in PLURALS if PLURALS[p][0] == 6]:
po_file = (r"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\n"
"PO-Revision-Date: %(date)s\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: %(locale)s <LL@li.org>\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel %(version)s\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
msgstr[2] ""
msgstr[3] ""
msgstr[4] ""
""" % dict(locale=_locale,
english_name=Locale.parse(_locale).english_name,
version=VERSION,
year=time.strftime('%Y'),
date=format_datetime(datetime.now(LOCALTZ),
'yyyy-MM-dd HH:mmZ',
tzinfo=LOCALTZ, locale=_locale),
num_plurals=PLURALS[_locale][0],
plural_expr=PLURALS[_locale][0])).encode('utf-8')
# This test will fail for revisions <= 406 because so far
# catalog.num_plurals was neglected
catalog = read_po(BytesIO(po_file), _locale)
message = catalog['foobar']
checkers.num_plurals(catalog, message)
|
cloudera/hue
|
desktop/core/ext-py/Babel-2.5.1/tests/messages/test_checkers.py
|
Python
|
apache-2.0
| 12,538 | 0 |
# -*- coding: utf-8 -*-
from Components.ActionMap import ActionMap, HelpableActionMap, NumberActionMap
from Components.Harddisk import harddiskmanager, findMountPoint
from Components.Input import Input
from Components.Label import Label
from Components.MovieList import AUDIO_EXTENSIONS
from Components.PluginComponent import plugins
from Components.ServiceEventTracker import ServiceEventTracker
from Components.Sources.Boolean import Boolean
from Components.Sources.List import List
from Components.config import config, configfile, ConfigBoolean, ConfigClock
from Components.SystemInfo import SystemInfo
from Components.UsageConfig import preferredInstantRecordPath, defaultMoviePath, preferredTimerPath, ConfigSelection
# from Components.Task import Task, Job, job_manager as JobManager
from Components.Pixmap import MovingPixmap, MultiPixmap
from Components.Sources.StaticText import StaticText
from Components.ScrollLabel import ScrollLabel
from Plugins.Plugin import PluginDescriptor
from Components.Timeshift import InfoBarTimeshift
from Screens.Screen import Screen
from Screens import ScreenSaver
from Screens.ChannelSelection import ChannelSelection, BouquetSelector, SilentBouquetSelector, EpgBouquetSelector
from Screens.ChoiceBox import ChoiceBox
from Screens.Dish import Dish
from Screens.EventView import EventViewEPGSelect, EventViewSimple
from Screens.EpgSelection import EPGSelection
from Screens.InputBox import InputBox
from Screens.MessageBox import MessageBox
from Screens.MinuteInput import MinuteInput
from Screens.TimerSelection import TimerSelection
from Screens.PictureInPicture import PictureInPicture
from Screens.PVRState import PVRState, TimeshiftState
from Screens.SubtitleDisplay import SubtitleDisplay
from Screens.RdsDisplay import RdsInfoDisplay, RassInteractive
from Screens.Standby import Standby, TryQuitMainloop
from Screens.TimeDateInput import TimeDateInput
from Screens.TimerEdit import TimerEditList
from Screens.UnhandledKey import UnhandledKey
from ServiceReference import ServiceReference, isPlayableForCur
from RecordTimer import RecordTimer, RecordTimerEntry, parseEvent, AFTEREVENT, findSafeRecordPath
from Screens.TimerEntry import TimerEntry as TimerEntry
from Tools import Directories, Notifications
from Tools.Directories import pathExists, fileExists, getRecordingFilename, copyfile, moveFiles, resolveFilename, SCOPE_TIMESHIFT, SCOPE_CURRENT_SKIN
from Tools.KeyBindings import getKeyDescription
from enigma import eTimer, eServiceCenter, eDVBServicePMTHandler, iServiceInformation, iPlayableService, eServiceReference, eEPGCache, eActionMap
from boxbranding import getBoxType
from time import time, localtime, strftime
from bisect import insort
from sys import maxint
import os, cPickle
# hack alert!
from Screens.Menu import MainMenu, Menu, mdom
from Screens.Setup import Setup
import Screens.Standby
AUDIO = False
# True when the compiled CoolTVGuide plugin is present on this image.
COOLTVGUIDE = bool(fileExists("/usr/lib/enigma2/python/Plugins/Extensions/CoolTVGuide/plugin.pyo"))
def isStandardInfoBar(self):
	"""Return True when *self* is the plain zap InfoBar screen (compared by
	exact class name, so subclasses do not match)."""
	name = self.__class__.__name__
	return name == "InfoBar"
def isMoviePlayerInfoBar(self):
	"""Return True when *self* is the MoviePlayer infobar (compared by exact
	class name, so subclasses do not match)."""
	name = self.__class__.__name__
	return name == "MoviePlayer"
def setResumePoint(session):
	"""Store the current playback position of the running service in the
	module-level resume-point cache and prune stale entries.

	Cache entry format: key = service reference string,
	value = [lru_timestamp, play_position, length_or_None].
	"""
	global resumePointCache, resumePointCacheLast
	service = session.nav.getCurrentService()
	ref = session.nav.getCurrentlyPlayingServiceOrGroup()
	if (service is not None) and (ref is not None): # and (ref.type != 1):
		# ref type 1 has its own memory...
		seek = service.seek()
		if seek:
			pos = seek.getPlayPosition()
			# pos[0] is an error/validity flag; pos[1] the actual position.
			if not pos[0]:
				key = ref.toString()
				lru = int(time())
				l = seek.getLength()
				if l:
					l = l[1]
				else:
					l = None
				resumePointCache[key] = [lru, pos[1], l]
				# Prune entries older than the one just written whose backing
				# file has vanished from a still-mounted filesystem.
				# (Py2 dict.items() returns a list, so deleting while
				# iterating is safe here.)
				for k, v in resumePointCache.items():
					if v[0] < lru:
						candidate = k
						# Last ':'-separated field is presumably the file
						# path of the recording -- TODO confirm.
						filepath = os.path.realpath(candidate.split(':')[-1])
						mountpoint = findMountPoint(filepath)
						if os.path.ismount(mountpoint) and not os.path.exists(filepath):
							del resumePointCache[candidate]
				saveResumePoints()
def delResumePoint(ref):
	"""Drop the cached resume point for *ref* (if any) and persist the cache."""
	global resumePointCache, resumePointCacheLast
	# pop() with a default swallows the missing-key case exactly like the
	# old try/del/except KeyError.
	resumePointCache.pop(ref.toString(), None)
	saveResumePoints()
def getResumePoint(session):
	"""Return the stored resume position for the currently playing service,
	or None when there is no usable service or no cached entry.

	Also refreshes the entry's LRU timestamp on a successful lookup.
	"""
	global resumePointCache
	ref = session.nav.getCurrentlyPlayingServiceOrGroup()
	# ref type 1 keeps its own position; nothing to look up then.
	if ref is None or ref.type == 1:
		return None
	try:
		entry = resumePointCache[ref.toString()]
	except KeyError:
		return None
	entry[0] = int(time()) # update LRU timestamp
	return entry[1]
def saveResumePoints():
global resumePointCache, resumePointCacheLast
try:
f = open('/etc/enigma2/resumepoints.pkl', 'wb')
cPickle.dump(resumePointCache, f, cPickle.HIGHEST_PROTOCOL)
f.close()
except Exception, ex:
print "[InfoBar] Failed to write resumepoints:", ex
resumePointCacheLast = int(time())
def loadResumePoints():
try:
file = open('/etc/enigma2/resumepoints.pkl', 'rb')
PickleFile = cPickle.load(file)
file.close()
return PickleFile
except Exception, ex:
print "[InfoBar] Failed to load resumepoints:", ex
return {}
def updateresumePointCache():
	"""Replace the in-memory resume-point cache with the on-disk state."""
	global resumePointCache
	fresh = loadResumePoints()
	resumePointCache = fresh
def ToggleVideo():
mode = open("/proc/stb/video/policy").read()[:-1]
print mode
if mode == "letterbox":
f = open("/proc/stb/video/policy", "w")
f.write("panscan")
f.close()
elif mode == "panscan":
f = open("/proc/stb/video/policy", "w")
f.write("letterbox")
f.close()
else:
# if current policy is not panscan or letterbox, set to panscan
f = open("/proc/stb/video/policy", "w")
f.write("panscan")
f.close()
# Module-level resume-point cache, loaded once at import time, plus the
# timestamp of the last successful save.
resumePointCache = loadResumePoints()
resumePointCacheLast = int(time())
class InfoBarDish:
	"""Infobar mixin owning the dish-movement dialog.

	Expects the class it is mixed into to provide ``self.session``.
	"""
	def __init__(self):
		self.dishDialog = self.session.instantiateDialog(Dish)
class InfoBarUnhandledKey:
	"""Infobar mixin that flashes the UnhandledKey symbol when a keypress
	is consumed by nobody.

	actionA is bound at the highest priority (sees every key, records the
	key-event flags seen); actionB at the lowest (only reached when no other
	action handled the key, records the same flags). After a key-up, if both
	flag sets match, the key was entirely unhandled and the symbol is shown.
	"""
	def __init__(self):
		self.unhandledKeyDialog = self.session.instantiateDialog(UnhandledKey)
		self.hideUnhandledKeySymbolTimer = eTimer()
		self.hideUnhandledKeySymbolTimer.callback.append(self.unhandledKeyDialog.hide)
		self.checkUnusedTimer = eTimer()
		self.checkUnusedTimer.callback.append(self.checkUnused)
		self.onLayoutFinish.append(self.unhandledKeyDialog.hide)
		eActionMap.getInstance().bindAction('', -maxint -1, self.actionA) #highest prio
		eActionMap.getInstance().bindAction('', maxint, self.actionB) #lowest prio
		# Bit 1 set initially so the first real keypress resets both flag sets.
		self.flags = (1<<1)
		self.uflags = 0
	#this function is called on every keypress!
	def actionA(self, key, flag):
		try:
			print 'KEY: %s %s' % (key,getKeyDescription(key)[0])
		except:
			print 'KEY: %s' % key
		self.unhandledKeyDialog.hide()
		# Most keys close the second infobar if it is currently shown.
		if self.closeSIB(key) and self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
			self.secondInfoBarScreen.hide()
			self.secondInfoBarWasShown = False
		if flag != 4:
			if self.flags & (1<<1):
				self.flags = self.uflags = 0
			self.flags |= (1<<flag)
			if flag == 1: # break
				# Defer the unused-check until the event queue has settled.
				self.checkUnusedTimer.start(0, True)
		return 0
	def closeSIB(self, key):
		# Keys below 12 and the listed special key codes do NOT close the
		# second infobar.
		if key >= 12 and key != 352 and key != 103 and key != 108 and key != 402 and key != 403 and key != 407 and key != 412 :
			return True
		else:
			return False
	#this function is only called when no other action has handled this key
	def actionB(self, key, flag):
		if flag != 4:
			self.uflags |= (1<<flag)
	def checkUnused(self):
		# Identical flag sets mean every key event went unhandled: show the
		# symbol for two seconds.
		if self.flags == self.uflags:
			self.unhandledKeyDialog.show()
			self.hideUnhandledKeySymbolTimer.start(2000, True)
class InfoBarScreenSaver:
	"""Infobar mixin driving the idle screensaver.

	Arms a timer while the screen is executing; on timeout (outside standby)
	it hides the infobar, shows the screensaver and grabs all keys at top
	priority so the first keypress only dismisses the saver.
	"""
	def __init__(self):
		self.onExecBegin.append(self.__onExecBegin)
		self.onExecEnd.append(self.__onExecEnd)
		self.screenSaverTimer = eTimer()
		self.screenSaverTimer.callback.append(self.screensaverTimeout)
		self.screensaver = self.session.instantiateDialog(ScreenSaver.Screensaver)
		self.onLayoutFinish.append(self.__layoutFinished)
	def __layoutFinished(self):
		self.screensaver.hide()
	def __onExecBegin(self):
		self.ScreenSaverTimerStart()
	def __onExecEnd(self):
		# Leaving the screen: tear down the saver and release the key grab.
		if self.screensaver.shown:
			self.screensaver.hide()
			eActionMap.getInstance().unbindAction('', self.keypressScreenSaver)
		self.screenSaverTimer.stop()
	def ScreenSaverTimerStart(self):
		# NOTE: local 'time' (timeout in seconds from config) shadows the
		# imported time() function within this method.
		time = int(config.usage.screen_saver.value)
		flag = self.seekstate[0]
		if not flag:
			ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			if ref:
				ref = ref.toString().split(":")
				# Enable for radio services (field 2 == "2") or audio files
				# (extension of the path field matches AUDIO_EXTENSIONS).
				flag = ref[2] == "2" or os.path.splitext(ref[10])[1].lower() in AUDIO_EXTENSIONS
		if time and flag:
			self.screenSaverTimer.startLongTimer(time)
		else:
			self.screenSaverTimer.stop()
	def screensaverTimeout(self):
		if self.execing and not Screens.Standby.inStandby and not Screens.Standby.inTryQuitMainloop:
			self.hide()
			if hasattr(self, "pvrStateDialog"):
				try:
					self.pvrStateDialog.hide()
				except:
					pass
			self.screensaver.show()
			# Grab every key at highest priority while the saver is visible.
			eActionMap.getInstance().bindAction('', -maxint - 1, self.keypressScreenSaver)
	def keypressScreenSaver(self, key, flag):
		if flag:
			self.screensaver.hide()
			self.show()
			self.ScreenSaverTimerStart()
			eActionMap.getInstance().unbindAction('', self.keypressScreenSaver)
class SecondInfoBar(Screen):
	"""The extended ("second") infobar screen.

	Shows the current event's EPG description and channel name, supports
	now/next paging, adding/removing a record timer for the shown event and
	opening the similar-events list.
	"""
	# States for key_green_choice (what the green button currently does).
	ADD_TIMER = 0
	REMOVE_TIMER = 1
	def __init__(self, session):
		Screen.__init__(self, session)
		# The DMConcinnity-HD skin gets a dedicated variant in mode "3".
		if config.usage.show_second_infobar.getValue() == "3" and config.skin.primary_skin.getValue() == "DMConcinnity-HD/skin.xml":
			self.skinName = "SecondInfoBarECM"
		else:
			self.skinName = "SecondInfoBar"
		self["epg_description"] = ScrollLabel()
		self["channel"] = Label()
		self["key_red"] = Label()
		self["key_green"] = Label()
		self["key_yellow"] = Label()
		self["key_blue"] = Label()
		self["SecondInfoBar"] = ActionMap(["2ndInfobarActions"],
			{
				"prevPage": self.pageUp,
				"nextPage": self.pageDown,
				"prevEvent": self.prevEvent,
				"nextEvent": self.nextEvent,
				"timerAdd": self.timerAdd,
				"openSimilarList": self.openSimilarList,
			}, -1)
		# Refresh the displayed event whenever the service reports new info.
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evUpdatedEventInfo: self.getEvent
			})
		self.onShow.append(self.__Show)
		self.onHide.append(self.__Hide)
	def pageUp(self):
		self["epg_description"].pageUp()
	def pageDown(self):
		self["epg_description"].pageDown()
	def __Show(self):
		# Bind our actions only while visible; populate the event display.
		if config.plisettings.ColouredButtons.getValue():
			self["key_yellow"].setText(_("Search"))
		self["key_red"].setText(_("Similar"))
		self["key_blue"].setText(_("Extensions"))
		self["SecondInfoBar"].doBind()
		self.getEvent()
	def __Hide(self):
		if self["SecondInfoBar"].bound:
			self["SecondInfoBar"].doUnbind()
	def getEvent(self):
		"""Fetch now/next events for the playing service and show the first."""
		self["epg_description"].setText("")
		self["channel"].setText("")
		ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		self.getNowNext()
		epglist = self.epglist
		if not epglist:
			# Service info had nothing; fall back to the EPG cache.
			self.is_now_next = False
			epg = eEPGCache.getInstance()
			ptr = ref and ref.valid() and epg.lookupEventTime(ref, -1)
			if ptr:
				epglist.append(ptr)
				ptr = epg.lookupEventTime(ref, ptr.getBeginTime(), +1)
				if ptr:
					epglist.append(ptr)
		else:
			self.is_now_next = True
		if epglist:
			Event = self.epglist[0]
			Ref = ServiceReference(ref)
			callback = self.eventViewCallback
			self.cbFunc = callback
			self.currentService = Ref
			# A plain service (not a group) with a path set is a recording.
			self.isRecording = (not Ref.ref.flags & eServiceReference.isGroup) and Ref.ref.getPath()
			self.event = Event
			self.key_green_choice = self.ADD_TIMER
			if self.isRecording:
				# No timer handling for recordings being played back.
				self["key_green"].setText("")
			else:
				self["key_green"].setText(_("Add timer"))
			self.setEvent(self.event)
	def getNowNext(self):
		"""Fill self.epglist with the now and next events from service info."""
		epglist = [ ]
		service = self.session.nav.getCurrentService()
		info = service and service.info()
		ptr = info and info.getEvent(0)
		if ptr:
			epglist.append(ptr)
		ptr = info and info.getEvent(1)
		if ptr:
			epglist.append(ptr)
		self.epglist = epglist
	def eventViewCallback(self, setEvent, setService, val): #used for now/next displaying
		# Swap now and next, then display the (new) first entry.
		epglist = self.epglist
		if len(epglist) > 1:
			tmp = epglist[0]
			epglist[0] = epglist[1]
			epglist[1] = tmp
			setEvent(epglist[0])
	def prevEvent(self):
		if self.cbFunc is not None:
			self.cbFunc(self.setEvent, self.setService, -1)
	def nextEvent(self):
		if self.cbFunc is not None:
			self.cbFunc(self.setEvent, self.setService, +1)
	def removeTimer(self, timer):
		# Disable after-event action before removing, then flip green button.
		timer.afterEvent = AFTEREVENT.NONE
		self.session.nav.RecordTimer.removeEntry(timer)
		self["key_green"].setText(_("Add timer"))
		self.key_green_choice = self.ADD_TIMER
	def timerAdd(self):
		"""Add a timer for the shown event, or offer to delete a matching one."""
		self.hide()
		self.secondInfoBarWasShown = False
		if self.isRecording:
			return
		event = self.event
		serviceref = self.currentService
		if event is None:
			return
		eventid = event.getEventId()
		refstr = serviceref.ref.toString()
		for timer in self.session.nav.RecordTimer.timer_list:
			if timer.eit == eventid and timer.service_ref.ref.toString() == refstr:
				# A timer for this event already exists: confirm deletion.
				cb_func = lambda ret : not ret or self.removeTimer(timer)
				self.session.openWithCallback(cb_func, MessageBox, _("Do you really want to delete %s?") % event.getEventName())
				break
		else:
			newEntry = RecordTimerEntry(self.currentService, checkOldTimers = True, dirname = preferredTimerPath(), *parseEvent(self.event))
			self.session.openWithCallback(self.finishedAdd, TimerEntry, newEntry)
	def finishedAdd(self, answer):
		# print "finished add"
		if answer[0]:
			entry = answer[1]
			simulTimerList = self.session.nav.RecordTimer.record(entry)
			if simulTimerList is not None:
				# Conflict: try auto-extending conflicting timers first,
				# then re-check; remaining conflicts go to the sanity dialog.
				for x in simulTimerList:
					if x.setAutoincreaseEnd(entry):
						self.session.nav.RecordTimer.timeChanged(x)
				simulTimerList = self.session.nav.RecordTimer.record(entry)
				if simulTimerList is not None:
					self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
			self["key_green"].setText(_("Remove timer"))
			self.key_green_choice = self.REMOVE_TIMER
		else:
			self["key_green"].setText(_("Add timer"))
			self.key_green_choice = self.ADD_TIMER
			# print "Timeredit aborted"
	def finishSanityCorrection(self, answer):
		self.finishedAdd(answer)
	def setService(self, service):
		self.currentService=service
		if self.isRecording:
			self["channel"].setText(_("Recording"))
		else:
			name = self.currentService.getServiceName()
			if name is not None:
				self["channel"].setText(name)
			else:
				self["channel"].setText(_("unknown service"))
	def sort_func(self,x,y):
		# cmp-style comparison on the second tuple element.
		if x[1] < y[1]:
			return -1
		elif x[1] == y[1]:
			return 0
		else:
			return 1
	def setEvent(self, event):
		"""Render *event* (title + description) and sync the green button
		with any existing timer for it."""
		if event is None:
			return
		self.event = event
		try:
			name = event.getEventName()
			self["channel"].setText(name)
		except:
			pass
		description = event.getShortDescription()
		extended = event.getExtendedDescription()
		if description and extended:
			description += '\n'
		text = description + extended
		self.setTitle(event.getEventName())
		self["epg_description"].setText(text)
		serviceref = self.currentService
		eventid = self.event.getEventId()
		refstr = serviceref.ref.toString()
		isRecordEvent = False
		for timer in self.session.nav.RecordTimer.timer_list:
			if timer.eit == eventid and timer.service_ref.ref.toString() == refstr:
				isRecordEvent = True
				break
		if isRecordEvent and self.key_green_choice != self.REMOVE_TIMER:
			self["key_green"].setText(_("Remove timer"))
			self.key_green_choice = self.REMOVE_TIMER
		elif not isRecordEvent and self.key_green_choice != self.ADD_TIMER:
			self["key_green"].setText(_("Add timer"))
			self.key_green_choice = self.ADD_TIMER
	def openSimilarList(self):
		id = self.event and self.event.getEventId()
		refstr = str(self.currentService)
		if id is not None:
			self.hide()
			self.secondInfoBarWasShown = False
			self.session.open(EPGSelection, refstr, None, id)
class InfoBarShowHide(InfoBarScreenSaver):
	""" InfoBar show/hide control, accepts toggleShow and hide actions, might start
	fancy animations.

	State machine over STATE_* below; also manages the optional second
	infobar screen, the event view fallback and the auto-hide timers.
	"""
	STATE_HIDDEN = 0
	STATE_HIDING = 1
	STATE_SHOWING = 2
	STATE_SHOWN = 3
	def __init__(self):
		self["ShowHideActions"] = ActionMap( ["InfobarShowHideActions"] ,
			{
				"toggleShow": self.OkPressed,
				"LongOKPressed": self.LongOKPressed,
				"hide": self.keyHide,
			}, 1) # lower prio to make it possible to override ok and cancel..
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evStart: self.serviceStarted,
			})
		InfoBarScreenSaver.__init__(self)
		self.__state = self.STATE_SHOWN
		self.__locked = 0
		self.hideTimer = eTimer()
		self.hideTimer.callback.append(self.doTimerHide)
		self.hideTimer.start(5000, True)
		self.onShow.append(self.__onShow)
		self.onHide.append(self.__onHide)
		self.onShowHideNotifiers = []
		self.standardInfoBar = False
		self.lastSecondInfoBar = 0
		self.secondInfoBarScreen = ""
		# Only the plain zap InfoBar gets a second infobar screen.
		if isStandardInfoBar(self):
			self.SwitchSecondInfoBarScreen()
		self.onLayoutFinish.append(self.__layoutFinished)
	def __layoutFinished(self):
		if self.secondInfoBarScreen:
			self.secondInfoBarScreen.hide()
			self.standardInfoBar = True
		self.secondInfoBarWasShown = False
		self.EventViewIsShown = False
		# Ensure the attribute exists even when no PVR state dialog was set
		# up by another mixin.
		try:
			if self.pvrStateDialog:
				pass
		except:
			self.pvrStateDialog = None
	def OkPressed(self):
		# OK either toggles the infobar or opens the service list, per config.
		if config.usage.okbutton_mode.getValue() == "0":
			self.toggleShow()
		elif config.usage.okbutton_mode.getValue() == "1":
			try:
				self.openServiceList()
			except:
				self.toggleShow()
	def SwitchSecondInfoBarScreen(self):
		# (Re)create the second infobar dialog when the configured mode changed.
		if self.lastSecondInfoBar == config.usage.show_second_infobar.getValue():
			return
		self.secondInfoBarScreen = self.session.instantiateDialog(SecondInfoBar)
		self.lastSecondInfoBar = config.usage.show_second_infobar.getValue()
	def LongOKPressed(self):
		if isinstance(self, InfoBarEPG):
			if config.plisettings.InfoBarEpg_mode.getValue() == "1":
				self.openInfoBarEPG()
	def __onShow(self):
		self.__state = self.STATE_SHOWN
		for x in self.onShowHideNotifiers:
			x(True)
		self.startHideTimer()
	def __onHide(self):
		self.__state = self.STATE_HIDDEN
#		if self.secondInfoBarScreen:
#			self.secondInfoBarScreen.hide()
		for x in self.onShowHideNotifiers:
			x(False)
	def keyHide(self):
		"""EXIT key: hide whatever infobar layer is on top, or offer to
		close PiP when configured to do so."""
		if self.__state == self.STATE_HIDDEN:
			ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			if ref:
				ref = ref.toString()
			else:
				ref = " "
			if config.plisettings.InfoBarEpg_mode.getValue() == "2" and not ref[1:].startswith(":0:0:0:0:0:0:0:0:0:"):
				self.openInfoBarEPG()
			else:
				self.hide()
				if self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
					self.secondInfoBarScreen.hide()
					self.secondInfoBarWasShown = False
			if self.session.pipshown and "popup" in config.usage.pip_hideOnExit.getValue():
				if config.usage.pip_hideOnExit.getValue() == "popup":
					self.session.openWithCallback(self.hidePipOnExitCallback, MessageBox, _("Disable Picture in Picture"), simple=True)
				else:
					self.hidePipOnExitCallback(True)
		else:
			self.hide()
			if hasattr(self, "pvrStateDialog"):
				self.pvrStateDialog.hide()
	def hidePipOnExitCallback(self, answer):
		if answer:
			self.showPiP()
	def connectShowHideNotifier(self, fnc):
		if not fnc in self.onShowHideNotifiers:
			self.onShowHideNotifiers.append(fnc)
	def disconnectShowHideNotifier(self, fnc):
		if fnc in self.onShowHideNotifiers:
			self.onShowHideNotifiers.remove(fnc)
	def serviceStarted(self):
		if self.execing:
			if config.usage.show_infobar_on_zap.getValue():
				self.doShow()
	def startHideTimer(self):
		"""(Re)arm the auto-hide timer for whichever layer is visible;
		a timeout index of 0 means 'never hide'."""
		if self.__state == self.STATE_SHOWN and not self.__locked:
			self.hideTimer.stop()
			idx = config.usage.infobar_timeout.index
			if idx:
				self.hideTimer.start(idx*1000, True)
		elif (self.secondInfoBarScreen and self.secondInfoBarScreen.shown) or ((not config.usage.show_second_infobar.getValue() or isMoviePlayerInfoBar(self)) and self.EventViewIsShown):
			self.hideTimer.stop()
			idx = config.usage.second_infobar_timeout.index
			if idx:
				self.hideTimer.start(idx*1000, True)
		elif hasattr(self, "pvrStateDialog"):
			self.hideTimer.stop()
			idx = config.usage.infobar_timeout.index
			if idx:
				self.hideTimer.start(idx*1000, True)
	def doShow(self):
		self.show()
		self.startHideTimer()
	def doTimerHide(self):
		# Auto-hide fired: dismiss the layer that is currently visible.
		self.hideTimer.stop()
		if self.__state == self.STATE_SHOWN:
			self.hide()
			if hasattr(self, "pvrStateDialog"):
				try:
					self.pvrStateDialog.hide()
				except:
					pass
		elif self.__state == self.STATE_HIDDEN and self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
			self.secondInfoBarScreen.hide()
			self.secondInfoBarWasShown = False
		elif self.__state == self.STATE_HIDDEN and self.EventViewIsShown:
			try:
				self.eventView.close()
			except:
				pass
			self.EventViewIsShown = False
		elif hasattr(self, "pvrStateDialog"):
			try:
				self.pvrStateDialog.hide()
			except:
				pass
	def toggleShow(self):
		"""OK key: cycle infobar -> second infobar / event view -> hidden,
		depending on the show_second_infobar mode."""
		if self.__state == self.STATE_HIDDEN:
			if not self.secondInfoBarWasShown or (config.usage.show_second_infobar.getValue() == "1" and not self.EventViewIsShown):
				self.show()
			if self.secondInfoBarScreen:
				self.secondInfoBarScreen.hide()
			self.secondInfoBarWasShown = False
			self.EventViewIsShown = False
		elif self.secondInfoBarScreen and (config.usage.show_second_infobar.getValue() == "2" or config.usage.show_second_infobar.getValue() == "3") and not self.secondInfoBarScreen.shown:
			self.SwitchSecondInfoBarScreen()
			self.hide()
			self.secondInfoBarScreen.show()
			self.secondInfoBarWasShown = True
			self.startHideTimer()
		elif (config.usage.show_second_infobar.getValue() == "1" or isMoviePlayerInfoBar(self)) and not self.EventViewIsShown:
			self.hide()
			try:
				self.openEventView()
			except:
				pass
			self.EventViewIsShown = True
			self.hideTimer.stop()
		else:
			self.hide()
			if self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
				self.secondInfoBarScreen.hide()
			elif self.EventViewIsShown:
				try:
					self.eventView.close()
				except:
					pass
				self.EventViewIsShown = False
	def lockShow(self):
		# Keep the infobar visible until a matching unlockShow(); counted so
		# nested lock/unlock pairs work.
		try:
			self.__locked += 1
		except:
			self.__locked = 0
		if self.execing:
			self.show()
			self.hideTimer.stop()
	def unlockShow(self):
		try:
			self.__locked -= 1
		except:
			self.__locked = 0
		if self.__locked <0:
			self.__locked = 0
		if self.execing:
			self.startHideTimer()
	def openEventView(self, simple=False):
		"""Open the event view for the current event; falls back to the
		simple variant when no service list is available."""
		try:
			if self.servicelist is None:
				return
		except:
			simple = True
		ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		self.getNowNext()
		epglist = self.epglist
		if not epglist:
			self.is_now_next = False
			epg = eEPGCache.getInstance()
			ptr = ref and ref.valid() and epg.lookupEventTime(ref, -1)
			if ptr:
				epglist.append(ptr)
				ptr = epg.lookupEventTime(ref, ptr.getBeginTime(), +1)
				if ptr:
					epglist.append(ptr)
		else:
			self.is_now_next = True
		if epglist:
			if not simple:
				self.eventView = self.session.openWithCallback(self.closed, EventViewEPGSelect, epglist[0], ServiceReference(ref), self.eventViewCallback, self.openSingleServiceEPG, self.openMultiServiceEPG, self.openSimilarList)
				self.dlg_stack.append(self.eventView)
			else:
				self.eventView = self.session.openWithCallback(self.closed, EventViewSimple, epglist[0], ServiceReference(ref))
				self.dlg_stack = None
	def getNowNext(self):
		"""Fill self.epglist with the now and next events from service info."""
		epglist = [ ]
		service = self.session.nav.getCurrentService()
		info = service and service.info()
		ptr = info and info.getEvent(0)
		if ptr:
			epglist.append(ptr)
		ptr = info and info.getEvent(1)
		if ptr:
			epglist.append(ptr)
		self.epglist = epglist
	def closed(self, ret=False):
		# Callback when the event view closes; unwind the dialog stack.
		if not self.dlg_stack:
			return
		closedScreen = self.dlg_stack.pop()
		if self.eventView and closedScreen == self.eventView:
			self.eventView = None
		if ret == True or ret == 'close':
			dlgs=len(self.dlg_stack)
			if dlgs > 0:
				self.dlg_stack[dlgs-1].close(dlgs > 1)
		self.reopen(ret)
	def eventViewCallback(self, setEvent, setService, val): #used for now/next displaying
		# Swap now and next, then display the (new) first entry.
		epglist = self.epglist
		if len(epglist) > 1:
			tmp = epglist[0]
			epglist[0] = epglist[1]
			epglist[1] = tmp
			setEvent(epglist[0])
class NumberZap(Screen):
	"""Screen that collects a typed channel number and resolves it to a
	service via the supplied search function.

	Closes automatically (zapping) after a short inactivity timeout or as
	soon as four digits have been entered.
	"""
	def quit(self):
		self.Timer.stop()
		self.close()
	def keyOK(self):
		# Hand the resolved service and its bouquet back to the caller.
		self.Timer.stop()
		self.close(self.service, self.bouquet)
	def handleServiceName(self):
		# Resolve the digits typed so far and preview the service name.
		if self.searchNumber:
			self.service, self.bouquet = self.searchNumber(int(self["number"].getText()))
			self ["servicename"].text = ServiceReference(self.service).getServiceName()
			if not self.startBouquet:
				self.startBouquet = self.bouquet
	def keyBlue(self):
		# Blue re-runs the search; restricted to the first bouquet when we
		# are still inside the bouquet the search started in.
		self.Timer.start(3000, True)
		if self.searchNumber:
			if self.startBouquet == self.bouquet:
				self.service, self.bouquet = self.searchNumber(int(self["number"].getText()), firstBouquetOnly = True)
			else:
				self.service, self.bouquet = self.searchNumber(int(self["number"].getText()))
			self ["servicename"].text = ServiceReference(self.service).getServiceName()
	def keyNumberGlobal(self, number):
		# Each digit restarts a shorter timeout; four digits zap immediately.
		self.Timer.start(1000, True)
		self.field += str(number)
		self["number"].setText(self.field)
		self["number_summary"].setText(self.field)
		self.handleServiceName()
		if len(self.field) >= 4:
			self.keyOK()
	def __init__(self, session, number, searchNumberFunction = None):
		Screen.__init__(self, session)
		self.onChangedEntry = [ ]
		self.field = str(number)
		self.searchNumber = searchNumberFunction
		self.startBouquet = None
		self["channel"] = Label(_("Channel:"))
		self["channel_summary"] = StaticText(_("Channel:"))
		self["number"] = Label(self.field)
		self["number_summary"] = StaticText(self.field)
		self["servicename"] = Label()
		self.handleServiceName()
		self["actions"] = NumberActionMap( [ "SetupActions", "ShortcutActions" ],
			{
				"cancel": self.quit,
				"ok": self.keyOK,
				"blue": self.keyBlue,
				"1": self.keyNumberGlobal,
				"2": self.keyNumberGlobal,
				"3": self.keyNumberGlobal,
				"4": self.keyNumberGlobal,
				"5": self.keyNumberGlobal,
				"6": self.keyNumberGlobal,
				"7": self.keyNumberGlobal,
				"8": self.keyNumberGlobal,
				"9": self.keyNumberGlobal,
				"0": self.keyNumberGlobal
			})
		# Initial grace period before the first (or only) digit zaps.
		self.Timer = eTimer()
		self.Timer.callback.append(self.keyOK)
		self.Timer.start(3000, True)
class InfoBarNumberZap:
""" Handles an initial number for NumberZapping """
	def __init__(self):
		"""Bind the digit keys 0-9 so typing a number starts zap-by-number."""
		self["NumberActions"] = NumberActionMap( [ "NumberActions"],
			{
				"1": self.keyNumberGlobal,
				"2": self.keyNumberGlobal,
				"3": self.keyNumberGlobal,
				"4": self.keyNumberGlobal,
				"5": self.keyNumberGlobal,
				"6": self.keyNumberGlobal,
				"7": self.keyNumberGlobal,
				"8": self.keyNumberGlobal,
				"9": self.keyNumberGlobal,
				"0": self.keyNumberGlobal,
			})
def keyNumberGlobal(self, number):
if self.pvrStateDialog.has_key("PTSSeekPointer") and self.timeshiftEnabled() and self.isSeekable():
InfoBarTimeshiftState._mayShow(self)
self.pvrStateDialog["PTSSeekPointer"].setPosition((self.pvrStateDialog["PTSSeekBack"].instance.size().width()-4)/2, self.pvrStateDialog["PTSSeekPointer"].position[1])
if self.seekstate != self.SEEK_STATE_PLAY:
self.setSeekState(self.SEEK_STATE_PLAY)
self.ptsSeekPointerOK()
return
if self.pts_blockZap_timer.isActive():
return
# if self.save_current_timeshift and self.timeshiftEnabled():
# InfoBarTimeshift.saveTimeshiftActions(self)
# return
if number == 0:
if isinstance(self, InfoBarPiP) and self.pipHandles0Action():
self.pipDoHandle0Action()
else:
if config.usage.panicbutton.getValue():
self.servicelist.history_tv = []
self.servicelist.history_radio = []
self.servicelist.history = self.servicelist.history_tv
self.servicelist.history_pos = 0
if config.usage.multibouquet.getValue():
bqrootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'
else:
bqrootstr = '%s FROM BOUQUET "userbouquet.favourites.tv" ORDER BY bouquet'% self.service_types
serviceHandler = eServiceCenter.getInstance()
rootbouquet = eServiceReference(bqrootstr)
bouquet = eServiceReference(bqrootstr)
bouquetlist = serviceHandler.list(bouquet)
if not bouquetlist is None:
while True:
bouquet = bouquetlist.getNext()
if bouquet.flags & eServiceReference.isDirectory:
self.servicelist.clearPath()
self.servicelist.setRoot(bouquet)
servicelist = serviceHandler.list(bouquet)
if not servicelist is None:
serviceIterator = servicelist.getNext()
while serviceIterator.valid():
service, bouquet2 = self.searchNumber(1)
if service == serviceIterator: break
serviceIterator = servicelist.getNext()
if serviceIterator.valid() and service == serviceIterator: break
self.servicelist.enterPath(rootbouquet)
self.servicelist.enterPath(bouquet)
self.servicelist.saveRoot()
self.selectAndStartService(service, bouquet)
else:
self.servicelist.recallPrevService()
else:
if self.has_key("TimeshiftActions") and self.timeshiftEnabled():
ts = self.getTimeshift()
if ts and ts.isTimeshiftActive():
return
self.session.openWithCallback(self.numberEntered, NumberZap, number, self.searchNumber)
def numberEntered(self, service = None, bouquet = None):
if service:
self.selectAndStartService(service, bouquet)
def searchNumberHelper(self, serviceHandler, num, bouquet):
servicelist = serviceHandler.list(bouquet)
if servicelist:
serviceIterator = servicelist.getNext()
while serviceIterator.valid():
if num == serviceIterator.getChannelNum():
return serviceIterator
serviceIterator = servicelist.getNext()
return None
def searchNumber(self, number, firstBouquetOnly = False):
bouquet = self.servicelist.getRoot()
service = None
serviceHandler = eServiceCenter.getInstance()
if not firstBouquetOnly:
service = self.searchNumberHelper(serviceHandler, number, bouquet)
if config.usage.multibouquet.value and not service:
bouquet = self.servicelist.bouquet_root
bouquetlist = serviceHandler.list(bouquet)
if bouquetlist:
bouquet = bouquetlist.getNext()
while bouquet.valid():
if bouquet.flags & eServiceReference.isDirectory:
service = self.searchNumberHelper(serviceHandler, number, bouquet)
if service:
playable = not (service.flags & (eServiceReference.isMarker|eServiceReference.isDirectory)) or (service.flags & eServiceReference.isNumberedMarker)
if not playable:
service = None
break
if config.usage.alternative_number_mode.getValue() or firstBouquetOnly:
break
bouquet = bouquetlist.getNext()
return service, bouquet
def selectAndStartService(self, service, bouquet):
if service:
if self.servicelist.getRoot() != bouquet: #already in correct bouquet?
self.servicelist.clearPath()
if self.servicelist.bouquet_root != bouquet:
self.servicelist.enterPath(self.servicelist.bouquet_root)
self.servicelist.enterPath(bouquet)
self.servicelist.setCurrentSelection(service) #select the service in servicelist
self.servicelist.zap(enable_pipzap = True)
self.servicelist.correctChannelNumber()
self.servicelist.startRoot = None
def zapToNumber(self, number):
service, bouquet = self.searchNumber(number)
self.selectAndStartService(service, bouquet)
# True until the service list has been shown once after the very first
# boot; cleared by InfoBarChannelSelection.firstRun().
config.misc.initialchannelselection = ConfigBoolean(default = True)
class InfoBarChannelSelection:
	""" ChannelSelection - handles the channelSelection dialog and the initial
	channelChange actions which open the channelSelection dialog """
	def __init__(self):
		#instantiate forever
		self.servicelist = self.session.instantiateDialog(ChannelSelection)
		self.tscallback = None
		# On the very first boot, show the service list once (see firstRun).
		if config.misc.initialchannelselection.value:
			self.onShown.append(self.firstRun)
		self["ChannelSelectActions"] = HelpableActionMap(self, "InfobarChannelSelection",
			{
				"switchChannelUp": (self.UpPressed, _("Open service list and select previous channel")),
				"switchChannelDown": (self.DownPressed, _("Open service list and select next channel")),
				"zapUp": (self.zapUp, _("Switch to previous channel")),
				"zapDown": (self.zapDown, _("Switch next channel")),
				"historyBack": (self.historyBack, _("Switch to previous channel in history")),
				"historyNext": (self.historyNext, _("Switch to next channel in history")),
				"openServiceList": (self.openServiceList, _("Open service list")),
				"openSatellites": (self.openSatellites, _("Open satellites list")),
				"LeftPressed": self.LeftPressed,
				"RightPressed": self.RightPressed,
				"ChannelPlusPressed": self.ChannelPlusPressed,
				"ChannelMinusPressed": self.ChannelMinusPressed,
			})

	def firstRun(self):
		# One-shot: open the service list once and clear the first-boot flag.
		self.onShown.remove(self.firstRun)
		config.misc.initialchannelselection.value = False
		config.misc.initialchannelselection.save()
		self.openServiceList()

	def LeftPressed(self):
		# InfoBarEpg mode "3" repurposes left/right for the infobar EPG.
		if config.plisettings.InfoBarEpg_mode.getValue() == "3":
			self.openInfoBarEPG()
		else:
			self.zapUp()

	def RightPressed(self):
		if config.plisettings.InfoBarEpg_mode.getValue() == "3":
			self.openInfoBarEPG()
		else:
			self.zapDown()

	def UpPressed(self):
		# updownbutton_mode "0": up key zaps (note: mapped to zapDown),
		# mode "1": up key opens the service list on the previous channel.
		if config.usage.updownbutton_mode.getValue() == "0":
			self.zapDown()
		elif config.usage.updownbutton_mode.getValue() == "1":
			self.switchChannelUp()

	def DownPressed(self):
		if config.usage.updownbutton_mode.getValue() == "0":
			self.zapUp()
		elif config.usage.updownbutton_mode.getValue() == "1":
			self.switchChannelDown()

	def ChannelPlusPressed(self):
		# channelbutton_mode: "0" zap, "1" open service list, "2" open the
		# favourites view as a dialog.
		if config.usage.channelbutton_mode.getValue() == "0":
			self.zapDown()
		elif config.usage.channelbutton_mode.getValue() == "1":
			self.openServiceList()
		elif config.usage.channelbutton_mode.getValue() == "2":
			self.serviceListType = "Norm"
			self.servicelist.showFavourites()
			self.session.execDialog(self.servicelist)

	def ChannelMinusPressed(self):
		if config.usage.channelbutton_mode.getValue() == "0":
			self.zapUp()
		elif config.usage.channelbutton_mode.getValue() == "1":
			self.openServiceList()
		elif config.usage.channelbutton_mode.getValue() == "2":
			self.serviceListType = "Norm"
			self.servicelist.showFavourites()
			self.session.execDialog(self.servicelist)

	def showTvChannelList(self, zap=False):
		"""Switch the service list to TV mode; optionally zap and show it."""
		self.servicelist.setModeTv()
		if zap:
			self.servicelist.zap()
		if config.usage.show_servicelist.getValue():
			self.session.execDialog(self.servicelist)

	def showRadioChannelList(self, zap=False):
		"""Switch the service list to radio mode; optionally zap and show it."""
		self.servicelist.setModeRadio()
		if zap:
			self.servicelist.zap()
		if config.usage.show_servicelist.getValue():
			self.session.execDialog(self.servicelist)

	def historyBack(self):
		# historymode "0": browse history entries; otherwise zap directly.
		if config.usage.historymode.getValue() == "0":
			self.servicelist.historyBack()
		else:
			self.servicelist.historyZap(-1)

	def historyNext(self):
		if config.usage.historymode.getValue() == "0":
			self.servicelist.historyNext()
		else:
			self.servicelist.historyZap(+1)

	def switchChannelUp(self):
		# Open the service list; unless the cursor should be kept, pre-move
		# it one entry up. show_bouquetalways forces the favourites view.
		if not config.usage.show_bouquetalways.getValue():
			if "keep" not in config.usage.servicelist_cursor_behavior.getValue():
				self.servicelist.moveUp()
			self.session.execDialog(self.servicelist)
		else:
			self.servicelist.showFavourites()
			self.session.execDialog(self.servicelist)

	def switchChannelDown(self):
		if not config.usage.show_bouquetalways.getValue():
			if "keep" not in config.usage.servicelist_cursor_behavior.getValue():
				self.servicelist.moveDown()
			self.session.execDialog(self.servicelist)
		else:
			self.servicelist.showFavourites()
			self.session.execDialog(self.servicelist)

	def openServiceList(self):
		self.session.execDialog(self.servicelist)

	def openSatellites(self):
		self.servicelist.showSatellites()
		self.session.execDialog(self.servicelist)

	def zapUp(self):
		"""Zap to the previous service, skipping unplayable entries."""
		# Blocked right after a PTS zap.
		if self.pts_blockZap_timer.isActive():
			return
		if self.servicelist.inBouquet():
			prev = self.servicelist.getCurrentSelection()
			if prev:
				prev = prev.toString()
				# Move up until a playable service (or the start entry again,
				# i.e. a full wrap-around) is under the cursor.
				while True:
					if config.usage.quickzap_bouquet_change.getValue():
						if self.servicelist.atBegin():
							self.servicelist.prevBouquet()
					self.servicelist.moveUp()
					cur = self.servicelist.getCurrentSelection()
					if cur:
						if self.servicelist.dopipzap:
							isPlayable = self.session.pip.isPlayableForPipService(cur)
						else:
							isPlayable = isPlayableForCur(cur)
					if cur and (cur.toString() == prev or isPlayable):
						break
		else:
			self.servicelist.moveUp()
		self.servicelist.zap(enable_pipzap = True)

	def zapDown(self):
		"""Zap to the next service, skipping unplayable entries."""
		if self.servicelist.inBouquet():
			prev = self.servicelist.getCurrentSelection()
			if prev:
				prev = prev.toString()
				while True:
					if config.usage.quickzap_bouquet_change.value and self.servicelist.atEnd():
						self.servicelist.nextBouquet()
					else:
						self.servicelist.moveDown()
					cur = self.servicelist.getCurrentSelection()
					if cur:
						if self.servicelist.dopipzap:
							isPlayable = self.session.pip.isPlayableForPipService(cur)
						else:
							isPlayable = isPlayableForCur(cur)
					if cur and (cur.toString() == prev or isPlayable):
						break
		else:
			self.servicelist.moveDown()
		self.servicelist.zap(enable_pipzap = True)
class InfoBarMenu:
	""" Handles a menu action, to open the (main) menu """
	def __init__(self):
		self["MenuActions"] = HelpableActionMap(self, "InfobarMenuActions",
			{
				"mainMenu": (self.mainMenu, _("Enter main menu...")),
				"showNetworkSetup": (self.showNetworkMounts, _("Show network mounts ...")),
				# Fixed copy-paste: this entry previously reused the
				# network-mounts help text.
				"showSystemSetup": (self.showSystemMenu, _("Show system setup ...")),
				"showRFmod": (self.showRFSetup, _("Show RFmod setup...")),
				"toggleAspectRatio": (self.toggleAspectRatio, _("Toggle aspect ratio...")),
			})
		self.session.infobar = None

	def mainMenu(self):
		"""Open the main menu built from the menu XML."""
		menu = mdom.getroot()
		assert menu.tag == "menu", "root element in menu must be 'menu'!"
		# Publish the active infobar so screens opened from within the main
		# menu (e.g. subservice selection) can reach it.
		self.session.infobar = self
		self.session.openWithCallback(self.mainMenuClosed, MainMenu, menu)

	def mainMenuClosed(self, *val):
		# NOTE: this method was defined twice with identical bodies; the
		# redundant duplicate has been removed.
		self.session.infobar = None

	def toggleAspectRatio(self):
		"""Cycle the AV aspect setting auto -> 16:9 -> 4:3 and announce it."""
		ASPECT = ["auto", "16_9", "4_3"]
		ASPECT_MSG = {"auto": "Auto", "16_9": "16:9", "4_3": "4:3"}
		if config.av.aspect.getValue() in ASPECT:
			index = ASPECT.index(config.av.aspect.getValue())
			config.av.aspect.value = ASPECT[(index + 1) % 3]
		else:
			config.av.aspect.value = "auto"
		config.av.aspect.save()
		# Translate the literal message FIRST, then format: translating an
		# already-formatted string can never match a gettext catalog entry.
		self.session.open(MessageBox, _("AV aspect is %s.") % ASPECT_MSG[config.av.aspect.getValue()], MessageBox.TYPE_INFO, timeout=5)

	def _findMenu(self, path):
		"""Walk the menu XML along the entryID chain *path*; None if absent."""
		node = mdom.getroot()
		for entry_id in path:
			found = None
			for item in node.findall('menu'):
				if item.attrib['entryID'] == entry_id:
					found = item
					break
			if found is None:
				return None
			node = found
		return node

	def showSystemMenu(self):
		"""Open the system setup sub-menu from the menu XML."""
		# Previously raised UnboundLocalError when the entryID was missing;
		# now a missing entry is a silent no-op.
		menu = self._findMenu(('setup_selection', 'system_selection'))
		if menu is not None:
			self.session.openWithCallback(self.mainMenuClosed, Menu, menu)

	def showNetworkMounts(self):
		"""Open the network sub-menu from the menu XML."""
		menu = self._findMenu(('setup_selection', 'system_selection', 'network_menu'))
		if menu is not None:
			self.session.openWithCallback(self.mainMenuClosed, Menu, menu)

	def showRFSetup(self):
		# Only offered on hardware that actually has an RF modulator.
		if SystemInfo["RfModulator"]:
			self.session.openWithCallback(self.mainMenuClosed, Setup, 'RFmod')
class InfoBarSimpleEventView:
	""" Opens the Eventview for now/next """
	# NOTE(review): expects the host class to provide self.servicelist,
	# self.epglist, self.dlg_stack, getNowNext() etc. (mixin style).
	def __init__(self):
		self["EPGActions"] = HelpableActionMap(self, "InfobarEPGActions",
			{
				"showEventInfo": (self.openEventView, _("show event details")),
				"InfoPressed": (self.openEventView, _("show event details")),
				"showInfobarOrEpgWhenInfobarAlreadyVisible": self.showEventInfoWhenNotVisible,
			})

	def openEventView(self, simple=False):
		"""Open event details for the running service.

		Prefers the now/next events delivered with the service info; falls
		back to an EPG cache lookup when the service carries no events.
		"""
		if self.servicelist is None:
			return
		ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		self.getNowNext()
		epglist = self.epglist
		if not epglist:
			# No now/next in the service info: ask the EPG cache for the
			# current event (-1) and the one following it (+1).
			self.is_now_next = False
			epg = eEPGCache.getInstance()
			ptr = ref and ref.valid() and epg.lookupEventTime(ref, -1)
			if ptr:
				epglist.append(ptr)
				ptr = epg.lookupEventTime(ref, ptr.getBeginTime(), +1)
				if ptr:
					epglist.append(ptr)
		else:
			self.is_now_next = True
		if epglist:
			if not simple:
				self.eventView = self.session.openWithCallback(self.closed, EventViewEPGSelect, epglist[0], ServiceReference(ref), self.eventViewCallback, self.openSingleServiceEPG, self.openMultiServiceEPG, self.openSimilarList)
			else:
				self.eventView = self.session.openWithCallback(self.closed, EventViewSimple, epglist[0], ServiceReference(ref))
			self.dlg_stack.append(self.eventView)

	def eventViewCallback(self, setEvent, setService, val): #used for now/next displaying
		# Swap now/next so repeated presses toggle the displayed event.
		epglist = self.epglist
		if len(epglist) > 1:
			tmp = epglist[0]
			epglist[0] = epglist[1]
			epglist[1] = tmp
			setEvent(epglist[0])

	def showEventInfoWhenNotVisible(self):
		# While the infobar is hidden the key first brings up the infobar;
		# only when already shown does it open the event view.
		if self.shown:
			self.openEventView()
		else:
			self.toggleShow()
			return 1
class SimpleServicelist:
	"""Minimal cursor over a flat list of services.

	Keeps an index (``current``) into ``services`` and steps through it
	with wrap-around -- the small subset of the real service-list API the
	single-service EPG needs.
	"""
	def __init__(self, services):
		self.services = services
		self.length = len(services)
		self.current = 0

	def selectService(self, service):
		"""Move the cursor to *service*; return False when it is not present."""
		if not self.length:
			self.current = -1
			return False
		for index, entry in enumerate(self.services):
			if entry.ref == service:
				self.current = index
				return True
		# Not found: park the cursor one past the end (currentService -> None).
		self.current = self.length
		return False

	def nextService(self):
		"""Advance the cursor one entry, wrapping to the start."""
		if not self.length:
			return
		nxt = self.current + 1
		self.current = nxt if nxt < self.length else 0

	def prevService(self):
		"""Move the cursor back one entry, wrapping to the end."""
		if not self.length:
			return
		prv = self.current - 1
		self.current = prv if prv > -1 else self.length - 1

	def currentService(self):
		"""Return the entry under the cursor, or None when out of range."""
		if not self.length or self.current >= self.length:
			return None
		return self.services[self.current]
class InfoBarEPG:
	""" EPG - Opens an EPG list when the showEPGList action fires """
	def __init__(self):
		self.is_now_next = False
		# Stack of currently open EPG-related dialogs (see closed()).
		self.dlg_stack = []
		self.bouquetSel = None
		self.eventView = None
		self.isInfo = None
		self.epglist = []
		# Resolve the user's preferred EPG/guide callables once at startup.
		self.defaultEPGType = self.getDefaultEPGtype()
		self.defaultGuideType = self.getDefaultGuidetype()
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evUpdatedEventInfo: self.__evEventInfoChanged,
			})
		self["EPGActions"] = HelpableActionMap(self, "InfobarEPGActions",
			{
				"RedPressed": (self.RedPressed, _("Show epg")),
				"IPressed": (self.IPressed, _("show program information...")),
				"InfoPressed": (self.InfoPressed, _("show program information...")),
				"showEventInfoPlugin": (self.showEventInfoPlugins, _("List EPG functions...")),
				"EPGPressed": (self.showDefaultEPG, _("show EPG...")),
				"showEventGuidePlugin": (self.showEventGuidePlugins, _("List EPG functions...")),
				"showInfobarOrEpgWhenInfobarAlreadyVisible": self.showEventInfoWhenNotVisible,
			})

	def getEPGPluginList(self):
		"""Return (name, callable) pairs: installed event-info plugins plus
		the built-in EPG views. Empty when no plugins are installed."""
		pluginlist = [(p.name, boundFunction(self.runPlugin, p)) for p in plugins.getPlugins(where = PluginDescriptor.WHERE_EVENTINFO)]
		if pluginlist:
			pluginlist.append((_("Event Info"), self.openEventView))
			pluginlist.append((_("Graphical EPG"), self.openGraphEPG))
			pluginlist.append((_("Infobar EPG"), self.openInfoBarEPG))
			pluginlist.append((_("Multi EPG"), self.openMultiServiceEPG))
			pluginlist.append((_("Show EPG for current channel..."), self.openSingleServiceEPG))
		return pluginlist

	def getDefaultEPGtype(self):
		# Return the callable matching config.usage.defaultEPGType, or None.
		# Also (re)declares the config entry with the current choice list.
		pluginlist = self.getEPGPluginList()
		config.usage.defaultEPGType=ConfigSelection(default = "None", choices = pluginlist)
		for plugin in pluginlist:
			if plugin[0] == config.usage.defaultEPGType.getValue():
				return plugin[1]
		return None

	def showEventInfoPlugins(self):
		# In the movie player there is no plugin choice -- open event info.
		if isMoviePlayerInfoBar(self):
			self.openEventView()
		else:
			pluginlist = self.getEPGPluginList()
			if pluginlist:
				pluginlist.append((_("Select default EPG type..."), self.SelectDefaultInfoPlugin))
				self.session.openWithCallback(self.EventInfoPluginChosen, ChoiceBox, title=_("Please choose an extension..."), list = pluginlist, skin_name = "EPGExtensionsList")
			else:
				self.openSingleServiceEPG()

	def SelectDefaultInfoPlugin(self):
		self.session.openWithCallback(self.DefaultInfoPluginChosen, ChoiceBox, title=_("Please select a default EPG type..."), list = self.getEPGPluginList(), skin_name = "EPGExtensionsList")

	def DefaultInfoPluginChosen(self, answer):
		# answer is (name, callable) from the ChoiceBox, or None on cancel.
		if answer is not None:
			self.defaultEPGType = answer[1]
			config.usage.defaultEPGType.value = answer[0]
			config.usage.defaultEPGType.save()
			configfile.save()

	def getDefaultGuidetype(self):
		# Same as getDefaultEPGtype, but for the guide key.
		pluginlist = self.getEPGPluginList()
		config.usage.defaultGuideType=ConfigSelection(default = "None", choices = pluginlist)
		for plugin in pluginlist:
			if plugin[0] == config.usage.defaultGuideType.value:
				return plugin[1]
		return None

	def showEventGuidePlugins(self):
		if isMoviePlayerInfoBar(self):
			self.openEventView()
		else:
			pluginlist = self.getEPGPluginList()
			if pluginlist:
				pluginlist.append((_("Select default EPG type..."), self.SelectDefaultGuidePlugin))
				self.session.openWithCallback(self.EventGuidePluginChosen, ChoiceBox, title=_("Please choose an extension..."), list = pluginlist, skin_name = "EPGExtensionsList")
			else:
				self.openSingleServiceEPG()

	def SelectDefaultGuidePlugin(self):
		self.session.openWithCallback(self.DefaultGuidePluginChosen, ChoiceBox, title=_("Please select a default EPG type..."), list = self.getEPGPluginList(), skin_name = "EPGExtensionsList")

	def DefaultGuidePluginChosen(self, answer):
		if answer is not None:
			self.defaultGuideType = answer[1]
			config.usage.defaultGuideType.value = answer[0]
			config.usage.defaultGuideType.save()

	def EventGuidePluginChosen(self, answer):
		if answer is not None:
			answer[1]()

	def runPlugin(self, plugin):
		plugin(session = self.session, servicelist=self.servicelist)

	def EventInfoPluginChosen(self, answer):
		if answer is not None:
			answer[1]()

	def RedPressed(self):
		# Red key: open the "other" EPG relative to the configured default.
		if isStandardInfoBar(self) or isMoviePlayerInfoBar(self):
			if config.usage.defaultEPGType.getValue() != _("Graphical EPG") and config.usage.defaultEPGType.getValue() != _("None"):
				self.openGraphEPG()
			else:
				self.openSingleServiceEPG()

	def InfoPressed(self):
		# Info key behaviour is configurable (PLIINFO_mode); Vu boxes always
		# get the default EPG.
		if isStandardInfoBar(self) or isMoviePlayerInfoBar(self):
			if getBoxType().startswith('vu'):
				self.showDefaultEPG()
			elif config.plisettings.PLIINFO_mode.getValue() == "eventview":
				self.openEventView()
			elif config.plisettings.PLIINFO_mode.getValue() == "epgpress":
				self.showDefaultEPG()
			elif config.plisettings.PLIINFO_mode.getValue() == "single":
				self.openSingleServiceEPG()
			elif config.plisettings.PLIINFO_mode.getValue() == "coolinfoguide" and COOLTVGUIDE:
				self.showCoolInfoGuide()
			elif config.plisettings.PLIINFO_mode.getValue() == "coolsingleguide" and COOLTVGUIDE:
				self.showCoolSingleGuide()
			elif config.plisettings.PLIINFO_mode.getValue() == "cooltvguide" and COOLTVGUIDE:
				# isInfo is only set once event info arrived (see
				# __evEventInfoChanged); Cool TV Guide needs it.
				if self.isInfo:
					self.showCoolTVGuide()
			else:
				self.showDefaultEPG()

	def IPressed(self):
		if isStandardInfoBar(self) or isMoviePlayerInfoBar(self):
			self.openEventView()

	def EPGPressed(self):
		# EPG key behaviour is configurable (PLIEPG_mode).
		if isStandardInfoBar(self) or isMoviePlayerInfoBar(self):
			if config.plisettings.PLIEPG_mode.getValue() == "pliepg":
				self.openGraphEPG()
			elif config.plisettings.PLIEPG_mode.getValue() == "multi":
				self.openMultiServiceEPG()
			elif config.plisettings.PLIEPG_mode.getValue() == "single":
				self.openSingleServiceEPG()
			elif config.plisettings.PLIEPG_mode.getValue() == "merlinepgcenter":
				self.openMerlinEPGCenter()
			elif config.plisettings.PLIEPG_mode.getValue() == "cooltvguide" and COOLTVGUIDE:
				if self.isInfo:
					self.showCoolTVGuide()
			elif config.plisettings.PLIEPG_mode.getValue() == "eventview":
				self.openEventView()
			else:
				self.openSingleServiceEPG()

	def showEventInfoWhenNotVisible(self):
		# Hidden infobar: first show it; otherwise open the event view.
		if self.shown:
			self.openEventView()
		else:
			self.toggleShow()
			return 1

	def zapToService(self, service, bouquet = None, preview = False, zapback = False):
		"""Zap callback handed to the EPG screens.

		preview zaps without leaving the EPG; zapback returns to the
		service that was playing when the EPG was opened.
		"""
		if self.servicelist.startServiceRef is None:
			self.servicelist.startServiceRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		self.servicelist.currentServiceRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		if service is not None:
			if self.servicelist.getRoot() != bouquet: #already in correct bouquet?
				self.servicelist.clearPath()
				if self.servicelist.bouquet_root != bouquet:
					self.servicelist.enterPath(self.servicelist.bouquet_root)
				self.servicelist.enterPath(bouquet)
			self.servicelist.setCurrentSelection(service) #select the service in servicelist
		if not zapback or preview:
			self.servicelist.zap(preview_zap = preview)
		if (self.servicelist.dopipzap or zapback) and not preview:
			self.servicelist.zapBack()
		if not preview:
			self.servicelist.startServiceRef = None
			self.servicelist.startRoot = None

	def getBouquetServices(self, bouquet):
		"""Return the playable services of *bouquet* as ServiceReferences."""
		services = []
		servicelist = eServiceCenter.getInstance().list(bouquet)
		if not servicelist is None:
			while True:
				service = servicelist.getNext()
				if not service.valid(): #check if end of list
					break
				if service.flags & (eServiceReference.isDirectory | eServiceReference.isMarker): #ignore non playable services
					continue
				services.append(ServiceReference(service))
		return services

	def openBouquetEPG(self, bouquet = None, bouquets = None):
		# Open the multi/graph EPG for one bouquet (callback of the
		# bouquet selector, or called directly when there is no choice).
		if bouquet:
			self.StartBouquet = bouquet
		self.dlg_stack.append(self.session.openWithCallback(self.closed, EPGSelection, zapFunc=self.zapToService, EPGtype=self.EPGtype, StartBouquet=self.StartBouquet, StartRef=self.StartRef, bouquets = bouquets))

	def closed(self, ret=False):
		"""Common close callback: pop the dialog stack and, on True/'close',
		propagate the close down the remaining stack, then reopen()."""
		if not self.dlg_stack:
			return
		closedScreen = self.dlg_stack.pop()
		if self.bouquetSel and closedScreen == self.bouquetSel:
			self.bouquetSel = None
		elif self.eventView and closedScreen == self.eventView:
			self.eventView = None
		if ret == True or ret == 'close':
			dlgs=len(self.dlg_stack)
			if dlgs > 0:
				self.dlg_stack[dlgs-1].close(dlgs > 1)
		self.reopen(ret)

	def MultiServiceEPG(self):
		# Show a bouquet selector first when configured and there is a
		# choice; otherwise open the EPG on the start bouquet directly.
		bouquets = self.servicelist.getBouquetList()
		if bouquets is None:
			cnt = 0
		else:
			cnt = len(bouquets)
		if (self.EPGtype == "multi" and config.epgselection.multi_showbouquet.getValue()) or (self.EPGtype == "graph" and config.epgselection.graph_showbouquet.getValue()):
			if cnt > 1: # show bouquet list
				self.bouquetSel = self.session.openWithCallback(self.closed, EpgBouquetSelector, bouquets, self.openBouquetEPG, enableWrapAround=True)
				self.dlg_stack.append(self.bouquetSel)
			elif cnt == 1:
				self.openBouquetEPG(bouquets=bouquets)
		else:
			self.openBouquetEPG(bouquets=bouquets)

	def openMultiServiceEPG(self):
		if self.servicelist is None:
			return
		self.EPGtype = "multi"
		self.StartBouquet = self.servicelist.getRoot()
		# In the movie player, "back to start" means the last live service.
		if isMoviePlayerInfoBar(self):
			self.StartRef = self.lastservice
		else:
			self.StartRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		self.MultiServiceEPG()

	def openGraphEPG(self, reopen=False):
		# reopen=True re-enters the EPG after a zap, keeping Start* intact.
		if self.servicelist is None:
			return
		self.EPGtype = "graph"
		if not reopen:
			self.StartBouquet = self.servicelist.getRoot()
			self.StartRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		self.MultiServiceEPG()

	def openSingleServiceEPG(self, reopen=False):
		if self.servicelist is None:
			return
		self.EPGtype = "enhanced"
		self.SingleServiceEPG()

	def openInfoBarEPG(self, reopen=False):
		# Infobar EPG comes in a single-service and a graphical flavour.
		if self.servicelist is None:
			return
		if not reopen:
			self.StartBouquet = self.servicelist.getRoot()
			self.StartRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		if config.epgselection.infobar_type_mode.getValue() == 'single':
			self.EPGtype = "infobar"
			self.SingleServiceEPG()
		else:
			self.EPGtype = "infobargraph"
			self.MultiServiceEPG()

	def showCoolTVGuide(self):
		if self.servicelist is None:
			return
		if COOLTVGUIDE:
			for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
				if plugin.name == _("Cool TV Guide"):
					self.runPlugin(plugin)
					break
		else:
			self.session.open(MessageBox, _("The Cool TV Guide plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )

	def SingleServiceEPG(self):
		# Build a simple cursor over the current bouquet so the EPG can step
		# from service to service (see changeServiceCB).
		self.StartBouquet = self.servicelist.getRoot()
		self.StartRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		if isMoviePlayerInfoBar(self):
			ref = self.lastservice
		else:
			ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		if ref:
			services = self.getBouquetServices(self.StartBouquet)
			self.serviceSel = SimpleServicelist(services)
			if self.serviceSel.selectService(ref):
				self.session.openWithCallback(self.SingleServiceEPGClosed,EPGSelection, self.servicelist, zapFunc=self.zapToService, serviceChangeCB = self.changeServiceCB, EPGtype=self.EPGtype, StartBouquet=self.StartBouquet, StartRef=self.StartRef)
			else:
				# Current service not in the bouquet: plain EPG on that ref.
				self.session.openWithCallback(self.SingleServiceEPGClosed, EPGSelection, ref)

	def changeServiceCB(self, direction, epg):
		# Called by the EPG screen when the user steps to the next/previous
		# service; updates the EPG in place.
		if self.serviceSel:
			if direction > 0:
				self.serviceSel.nextService()
			else:
				self.serviceSel.prevService()
			epg.setService(self.serviceSel.currentService())

	def SingleServiceEPGClosed(self, ret=False):
		self.serviceSel = None
		self.reopen(ret)

	def reopen(self, answer):
		"""Handle the EPG screens' reopen protocol after a zap-and-return."""
		if answer == 'reopengraph':
			self.openGraphEPG(True)
		elif answer == 'reopeninfobargraph' or answer == 'reopeninfobar':
			self.openInfoBarEPG(True)
		elif answer == 'close' and isMoviePlayerInfoBar(self):
			# Leaving the movie player via the EPG: remember the new live
			# service and close the player.
			self.lastservice = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			self.close()

	def openMerlinEPGCenter(self):
		if self.servicelist is None:
			return
		if fileExists("/usr/lib/enigma2/python/Plugins/Extensions/MerlinEPGCenter/plugin.pyo"):
			for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
				if plugin.name == _("Merlin EPG Center"):
					self.runPlugin(plugin)
					break
		else:
			self.session.open(MessageBox, _("The Merlin EPG Center plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )

	def showCoolInfoGuide(self):
		if self.servicelist is None:
			return
		if COOLTVGUIDE:
			for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
				if plugin.name == _("Cool Info Guide"):
					self.runPlugin(plugin)
					break
		else:
			self.session.open(MessageBox, _("The Cool TV Guide plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )

	def showCoolSingleGuide(self):
		if self.servicelist is None:
			return
		if COOLTVGUIDE:
			for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
				if plugin.name == _("Cool Single Guide"):
					self.runPlugin(plugin)
					break
		else:
			self.session.open(MessageBox, _("The Cool TV Guide plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )

	def openSimilarList(self, eventid, refstr):
		self.session.open(EPGSelection, refstr, eventid=eventid)

	def getNowNext(self):
		"""Fill self.epglist with the now (0) and next (1) events from the
		currently playing service's info, when available."""
		epglist = [ ]
		service = self.session.nav.getCurrentService()
		info = service and service.info()
		ptr = info and info.getEvent(0)
		if ptr:
			epglist.append(ptr)
		ptr = info and info.getEvent(1)
		if ptr:
			epglist.append(ptr)
		self.epglist = epglist

	def __evEventInfoChanged(self):
		# New event info arrived: refresh an open event view that is showing
		# now/next data (only while it is the single dialog on the stack).
		self.isInfo = True
		if self.is_now_next and len(self.dlg_stack) == 1:
			self.getNowNext()
			if self.eventView and self.epglist:
				self.eventView.setEvent(self.epglist[0])

	def showDefaultEPG(self):
		# Open the configured default EPG, falling back to the EPG-key logic.
		if self.defaultEPGType is not None:
			self.defaultEPGType()
			return
		self.EPGPressed()

	def openEventView(self, simple=False):
		"""Open event details for the running service.

		Prefers the now/next events from the service info; falls back to an
		EPG cache lookup for the current (-1) and following (+1) event.
		"""
		if self.servicelist is None:
			return
		ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		self.getNowNext()
		epglist = self.epglist
		if not epglist:
			self.is_now_next = False
			epg = eEPGCache.getInstance()
			ptr = ref and ref.valid() and epg.lookupEventTime(ref, -1)
			if ptr:
				epglist.append(ptr)
				ptr = epg.lookupEventTime(ref, ptr.getBeginTime(), +1)
				if ptr:
					epglist.append(ptr)
		else:
			self.is_now_next = True
		if epglist:
			if not simple:
				self.eventView = self.session.openWithCallback(self.closed, EventViewEPGSelect, epglist[0], ServiceReference(ref), self.eventViewCallback, self.openSingleServiceEPG, self.openMultiServiceEPG, self.openSimilarList)
			else:
				self.eventView = self.session.openWithCallback(self.closed, EventViewSimple, epglist[0], ServiceReference(ref))
			self.dlg_stack.append(self.eventView)

	def eventViewCallback(self, setEvent, setService, val): #used for now/next displaying
		# Swap now/next so repeated presses toggle the displayed event.
		epglist = self.epglist
		if len(epglist) > 1:
			tmp = epglist[0]
			epglist[0]=epglist[1]
			epglist[1]=tmp
			setEvent(epglist[0])
class InfoBarRdsDecoder:
	"""provides RDS and Rass support/display"""
	def __init__(self):
		self.rds_display = self.session.instantiateDialog(RdsInfoDisplay)
		self.session.instantiateSummaryDialog(self.rds_display)
		self.rass_interactive = None
		# React to service end (close the interactive viewer) and to newly
		# decoded Rass slide pictures.
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evEnd: self.__serviceStopped,
				iPlayableService.evUpdatedRassSlidePic: self.RassSlidePicChanged
			})
		self["RdsActions"] = ActionMap(["InfobarRdsActions"],
			{
				"startRassInteractive": self.startRassInteractive
			},-1)
		# The key stays disabled until the decoder reports that interactive
		# Rass is available (see RassInteractivePossibilityChanged).
		self["RdsActions"].setEnabled(False)
		self.onLayoutFinish.append(self.rds_display.show)
		self.rds_display.onRassInteractivePossibilityChanged.append(self.RassInteractivePossibilityChanged)

	def RassInteractivePossibilityChanged(self, state):
		# Enable/disable the "start interactive Rass" key as availability changes.
		self["RdsActions"].setEnabled(state)

	def RassSlidePicChanged(self):
		# Show the new slide picture -- but only while the interactive viewer
		# is closed (it renders slides itself).
		if not self.rass_interactive:
			service = self.session.nav.getCurrentService()
			decoder = service and service.rdsDecoder()
			if decoder:
				decoder.showRassSlidePicture()

	def __serviceStopped(self):
		# Service ended: close the interactive viewer. Clear the reference
		# first so RassInteractiveClosed sees it as already handled.
		if self.rass_interactive is not None:
			rass_interactive = self.rass_interactive
			self.rass_interactive = None
			rass_interactive.close()

	def startRassInteractive(self):
		self.rds_display.hide()
		self.rass_interactive = self.session.openWithCallback(self.RassInteractiveClosed, RassInteractive)

	def RassInteractiveClosed(self, *val):
		# Viewer closed by the user: refresh the slide and show the RDS
		# overlay again.
		if self.rass_interactive is not None:
			self.rass_interactive = None
			self.RassSlidePicChanged()
		self.rds_display.show()
class Seekbar(Screen):
	"""Slider dialog for seeking within the current service by percent."""
	def __init__(self, session, fwd):
		Screen.__init__(self, session)
		self.setTitle(_("Seek"))
		self.session = session
		# fwd: direction flag passed by the opener; it is stored but not
		# read anywhere in this class's visible code.
		self.fwd = fwd
		self.percent = 0.0
		self.length = None
		service = session.nav.getCurrentService()
		if service:
			self.seek = service.seek()
			if self.seek:
				# length/position are (error, pts) pairs; derive the initial
				# percent only when both are valid and length is positive.
				self.length = self.seek.getLength()
				position = self.seek.getPlayPosition()
				if self.length and position and int(self.length[1]) > 0:
					if int(position[1]) > 0:
						self.percent = float(position[1]) * 100.0 / float(self.length[1])
			else:
				# Service is not seekable -- nothing to do here.
				self.close()
		self["cursor"] = MovingPixmap()
		self["time"] = Label()
		self["actions"] = ActionMap(["WizardActions", "DirectionActions"], {"back": self.exit, "ok": self.keyOK, "left": self.keyLeft, "right": self.keyRight}, -1)
		# Repaint the cursor five times a second while the dialog is open.
		self.cursorTimer = eTimer()
		self.cursorTimer.callback.append(self.updateCursor)
		self.cursorTimer.start(200, False)

	def updateCursor(self):
		# Map percent onto the skin's pixel range (x = 145 .. 415) and show
		# the target time as minutes:seconds (PTS runs at 90 kHz).
		if self.length:
			x = 145 + int(2.7 * self.percent)
			self["cursor"].moveTo(x, 15, 1)
			self["cursor"].startMoving()
			pts = int(float(self.length[1]) / 100.0 * self.percent)
			self["time"].setText("%d:%02d" % ((pts/60/90000), ((pts/90000)%60)))

	def exit(self):
		self.cursorTimer.stop()
		self.close()

	def keyOK(self):
		# Convert the chosen percent back to PTS and perform the seek.
		if self.length:
			self.seek.seekTo(int(float(self.length[1]) / 100.0 * self.percent))
			self.exit()

	def keyLeft(self):
		self.percent -= float(config.seek.sensibility.getValue()) / 10.0
		if self.percent < 0.0:
			self.percent = 0.0

	def keyRight(self):
		self.percent += float(config.seek.sensibility.getValue()) / 10.0
		if self.percent > 100.0:
			self.percent = 100.0

	def keyNumberGlobal(self, number):
		# NOTE(review): references self["config"] and self.positionEntry,
		# neither of which is created in this class, and no digit keys are
		# bound in the action map above -- looks copied from a
		# ConfigListScreen-based sibling; confirm whether it is reachable.
		sel = self["config"].getCurrent()[1]
		if sel == self.positionEntry:
			self.percent = float(number) * 10.0
		else:
			ConfigListScreen.keyNumberGlobal(self, number)
class InfoBarSeek:
	"""handles actions like seeking, pause

	Seek states are tuples of
	(paused, fast-winding ratio, slow-motion ratio, display string).
	"""
	SEEK_STATE_PLAY = (0, 0, 0, ">")
	SEEK_STATE_PAUSE = (1, 0, 0, "||")
	SEEK_STATE_EOF = (1, 0, 0, "END")
	def __init__(self, actionmap = "InfobarSeekActions"):
		# Follow service events so the seek state tracks the running service.
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evSeekableStatusChanged: self.__seekableStatusChanged,
				iPlayableService.evStart: self.__serviceStarted,
				iPlayableService.evEOF: self.__evEOF,
				iPlayableService.evSOF: self.__evSOF,
			})
		self.fast_winding_hint_message_showed = False

		class InfoBarSeekActionMap(HelpableActionMap):
			"""ActionMap that additionally resolves "seek:<seconds>" and
			"seekdef:<key>" pseudo-actions to relative seeks."""
			def __init__(self, screen, *args, **kwargs):
				HelpableActionMap.__init__(self, screen, *args, **kwargs)
				self.screen = screen
			def action(self, contexts, action):
				if action[:5] == "seek:":
					time = int(action[5:])
					self.screen.doSeekRelative(time * 90000)
					return 1
				elif action[:8] == "seekdef:":
					key = int(action[8:])
					# Keys 1..9 map to (back, unused, forward) triples for the
					# three user-defined skip lengths.
					time = (-config.seek.selfdefined_13.getValue(), False, config.seek.selfdefined_13.getValue(),
						-config.seek.selfdefined_46.getValue(), False, config.seek.selfdefined_46.getValue(),
						-config.seek.selfdefined_79.getValue(), False, config.seek.selfdefined_79.getValue())[key-1]
					self.screen.doSeekRelative(time * 90000)
					return 1
				else:
					return HelpableActionMap.action(self, contexts, action)

		self["SeekActions"] = InfoBarSeekActionMap(self, actionmap,
			{
				"playpauseService": self.playpauseService,
				"pauseService": (self.pauseService, _("Pause playback")),
				"pauseServiceYellow": (self.pauseServiceYellow, _("Pause playback")),
				"unPauseService": (self.unPauseService, _("Continue playback")),
				"seekFwd": (self.seekFwd, _("Seek forward")),
				"seekFwdManual": (self.seekFwdManual, _("Seek forward (enter time)")),
				"seekBack": (self.seekBack, _("Seek backward")),
				"seekBackManual": (self.seekBackManual, _("Seek backward (enter time)")),
				"SeekbarFwd": self.seekFwdSeekbar,
				"SeekbarBack": self.seekBackSeekbar
			}, prio=-1) # give them a little more priority to win over color buttons
		self["SeekActions"].setEnabled(False)

		self["SeekActionsPTS"] = InfoBarSeekActionMap(self, "InfobarSeekActionsPTS",
			{
				"playpauseService": self.playpauseService,
				"pauseService": (self.pauseService, _("Pause playback")),
				"pauseServiceYellow": (self.pauseServiceYellow, _("Pause playback")),
				"unPauseService": (self.unPauseService, _("Continue playback")),
				"seekFwd": (self.seekFwd, _("skip forward")),
				"seekFwdManual": (self.seekFwdManual, _("skip forward (enter time)")),
				"seekBack": (self.seekBack, _("skip backward")),
				"seekBackManual": (self.seekBackManual, _("skip backward (enter time)")),
			}, prio=-1) # give them a little more priority to win over color buttons
		self["SeekActionsPTS"].setEnabled(False)

		self.activity = 0
		self.activityTimer = eTimer()
		self.activityTimer.callback.append(self.doActivityTimer)
		self.seekstate = self.SEEK_STATE_PLAY
		self.lastseekstate = self.SEEK_STATE_PLAY
		self.onPlayStateChanged = [ ]
		self.lockedBecauseOfSkipping = False
		self.__seekableStatusChanged()
	def makeStateForward(self, n):
		"""Seek state tuple for fast forward at n x speed."""
		return 0, n, 0, ">> %dx" % n
	def makeStateBackward(self, n):
		"""Seek state tuple for rewind at n x speed."""
		return 0, -n, 0, "<< %dx" % n
	def makeStateSlowMotion(self, n):
		"""Seek state tuple for slow motion at 1/n speed."""
		return 0, 0, n, "/%d" % n
	def isStateForward(self, state):
		return state[1] > 1
	def isStateBackward(self, state):
		return state[1] < 0
	def isStateSlowMotion(self, state):
		return state[1] == 0 and state[2] > 1
	def getHigher(self, n, lst):
		"""Smallest element of lst greater than n, or False if none."""
		for x in lst:
			if x > n:
				return x
		return False
	def getLower(self, n, lst):
		"""Largest element of lst smaller than n, or False if none."""
		lst = lst[:]
		lst.reverse()
		for x in lst:
			if x < n:
				return x
		return False
	def showAfterSeek(self):
		"""Pop up the infobar after a seek, if this screen supports show/hide."""
		if isinstance(self, InfoBarShowHide):
			self.doShow()
	def up(self):
		pass
	def down(self):
		pass
	def getSeek(self):
		"""Return the service's seek interface, or None if not currently seekable."""
		service = self.session.nav.getCurrentService()
		if service is None:
			return None
		seek = service.seek()
		if seek is None or not seek.isCurrentlySeekable():
			return None
		return seek
	def isSeekable(self):
		if self.getSeek() is None or (isStandardInfoBar(self) and not self.timeshiftEnabled()):
			return False
		return True
	def _writeProcValue(self, path, value):
		"""Best-effort write of a string value to a /proc file, if it exists."""
		if os.path.exists(path):
			f = open(path, "w")
			f.write(value)
			f.close()
	def __seekableStatusChanged(self):
		"""Enable/disable the seek actions when the service's seekability changes."""
		if isStandardInfoBar(self) and self.timeshiftEnabled():
			pass
		elif not self.isSeekable():
			SystemInfo["SeekStatePlay"] = False
			# Clear the front-panel HDD activity symbols.
			self._writeProcValue("/proc/stb/lcd/symbol_hdd", "0")
			self._writeProcValue("/proc/stb/lcd/symbol_hddprogress", "0")
			self["SeekActions"].setEnabled(False)
			self.setSeekState(self.SEEK_STATE_PLAY)
		else:
			self["SeekActions"].setEnabled(True)
			self.activityTimer.start(200, False)
		for c in self.onPlayStateChanged:
			c(self.seekstate)
	def doActivityTimer(self):
		"""Periodic tick driving the front-panel HDD activity indicator."""
		if self.isSeekable():
			self.activity += 16
			hdd = 1
			if self.activity >= 100:
				self.activity = 0
			if SystemInfo["FrontpanelDisplay"] and SystemInfo["Display"]:
				if config.lcd.hdd.getValue() == "1":
					self._writeProcValue("/proc/stb/lcd/symbol_hdd", '%d' % int(hdd))
					self._writeProcValue("/proc/stb/lcd/symbol_hddprogress", '%d' % int(self.activity))
		else:
			self.activityTimer.stop()
			self.activity = 0
			hdd = 0
			self._writeProcValue("/proc/stb/lcd/symbol_hdd", '%d' % int(hdd))
			self._writeProcValue("/proc/stb/lcd/symbol_hddprogress", '%d' % int(self.activity))
	def __serviceStarted(self):
		self.fast_winding_hint_message_showed = False
		self.setSeekState(self.SEEK_STATE_PLAY)
		self.__seekableStatusChanged()
	def setSeekState(self, state):
		"""Apply a seek state tuple to the running service; True on success."""
		service = self.session.nav.getCurrentService()
		if service is None:
			return False
		if not self.isSeekable():
			if state not in (self.SEEK_STATE_PLAY, self.SEEK_STATE_PAUSE):
				state = self.SEEK_STATE_PLAY
		pauseable = service.pause()
		if pauseable is None:
			# Service cannot pause: force plain play.
			state = self.SEEK_STATE_PLAY
		self.seekstate = state
		if pauseable is not None:
			if self.seekstate[0] and self.seekstate[3] == '||':
				self.activityTimer.stop()
				pauseable.pause()
			elif self.seekstate[0] and self.seekstate[3] == 'END':
				self.activityTimer.stop()
				service.stop()
			elif self.seekstate[1]:
				if not pauseable.setFastForward(self.seekstate[1]):
					pass
				else:
					# Fast forward not possible: fall back to plain play.
					self.seekstate = self.SEEK_STATE_PLAY
			elif self.seekstate[2]:
				if not pauseable.setSlowMotion(self.seekstate[2]):
					pass
				else:
					# Slow motion not possible: fall back to pause.
					self.seekstate = self.SEEK_STATE_PAUSE
			else:
				self.activityTimer.start(200, False)
				pauseable.unpause()
			for c in self.onPlayStateChanged:
				c(self.seekstate)
		self.checkSkipShowHideLock()
		if hasattr(self, "ScreenSaverTimerStart"):
			self.ScreenSaverTimerStart()
		return True
	def playpauseService(self):
		"""Toggle play/pause, honouring the configured behaviour while paused."""
		if self.seekstate == self.SEEK_STATE_PLAY:
			self.pauseService()
		else:
			if self.seekstate == self.SEEK_STATE_PAUSE:
				if config.seek.on_pause.getValue() == "play":
					self.unPauseService()
				elif config.seek.on_pause.getValue() == "step":
					self.doSeekRelative(1)
				elif config.seek.on_pause.getValue() == "last":
					self.setSeekState(self.lastseekstate)
					self.lastseekstate = self.SEEK_STATE_PLAY
			else:
				self.unPauseService()
	def pauseService(self):
		if self.seekstate != self.SEEK_STATE_EOF:
			self.lastseekstate = self.seekstate
		self.setSeekState(self.SEEK_STATE_PAUSE)
	def pauseServiceYellow(self):
		"""Yellow key: audio selection, video toggle or pause, per configuration."""
		if config.plugins.infopanel_yellowkey.list.getValue() == '0':
			self.audioSelection()
		elif config.plugins.infopanel_yellowkey.list.getValue() == '2':
			ToggleVideo()
		else:
			if self.seekstate != self.SEEK_STATE_EOF:
				self.lastseekstate = self.seekstate
			self.setSeekState(self.SEEK_STATE_PAUSE)
	def unPauseService(self):
		if self.seekstate == self.SEEK_STATE_PLAY:
			return 0
		self.setSeekState(self.SEEK_STATE_PLAY)
	def doSeek(self, pts):
		"""Seek to an absolute position given in 90 kHz pts."""
		seekable = self.getSeek()
		if seekable is None:
			return
		seekable.seekTo(pts)
	def doSeekRelative(self, pts):
		"""Seek relative to the current position; pts in 90 kHz ticks (negative = back)."""
		seekable = self.getSeek()
		if seekable is None:
			# BUGFIX: the previous guard additionally evaluated
			# "int(self.seek.getLength()[1]) < 1"; InfoBarSeek never sets a
			# "seek" attribute, so a non-seekable service raised
			# AttributeError here instead of simply ignoring the request.
			return
		prevstate = self.seekstate
		if self.seekstate == self.SEEK_STATE_EOF:
			if prevstate == self.SEEK_STATE_PAUSE:
				self.setSeekState(self.SEEK_STATE_PAUSE)
			else:
				self.setSeekState(self.SEEK_STATE_PLAY)
		seekable.seekRelative(pts<0 and -1 or 1, abs(pts))
		if abs(pts) > 100 and config.usage.show_infobar_on_skip.getValue():
			self.showAfterSeek()
	def seekFwd(self):
		"""Step to the next faster forward speed (or slow motion when paused)."""
		seek = self.getSeek()
		if seek and not (seek.isCurrentlySeekable() & 2):
			if not self.fast_winding_hint_message_showed and (seek.isCurrentlySeekable() & 1):
				self.session.open(MessageBox, _("No fast winding possible yet.. but you can use the number buttons to skip forward/backward!"), MessageBox.TYPE_INFO, timeout=10)
				self.fast_winding_hint_message_showed = True
				return
			return 0 # trade as unhandled action
		if self.seekstate == self.SEEK_STATE_PLAY:
			self.setSeekState(self.makeStateForward(int(config.seek.enter_forward.getValue())))
		elif self.seekstate == self.SEEK_STATE_PAUSE:
			if len(config.seek.speeds_slowmotion.getValue()):
				self.setSeekState(self.makeStateSlowMotion(config.seek.speeds_slowmotion.getValue()[-1]))
			else:
				self.setSeekState(self.makeStateForward(int(config.seek.enter_forward.getValue())))
		elif self.seekstate == self.SEEK_STATE_EOF:
			pass
		elif self.isStateForward(self.seekstate):
			speed = self.seekstate[1]
			if self.seekstate[2]:
				speed /= self.seekstate[2]
			speed = self.getHigher(speed, config.seek.speeds_forward.getValue()) or config.seek.speeds_forward.getValue()[-1]
			self.setSeekState(self.makeStateForward(speed))
		elif self.isStateBackward(self.seekstate):
			speed = -self.seekstate[1]
			if self.seekstate[2]:
				speed /= self.seekstate[2]
			speed = self.getLower(speed, config.seek.speeds_backward.getValue())
			if speed:
				self.setSeekState(self.makeStateBackward(speed))
			else:
				self.setSeekState(self.SEEK_STATE_PLAY)
		elif self.isStateSlowMotion(self.seekstate):
			speed = self.getLower(self.seekstate[2], config.seek.speeds_slowmotion.getValue()) or config.seek.speeds_slowmotion.getValue()[0]
			self.setSeekState(self.makeStateSlowMotion(speed))
	def seekBack(self):
		"""Step to the next faster rewind speed (or frame-step back when paused)."""
		seek = self.getSeek()
		if seek and not (seek.isCurrentlySeekable() & 2):
			if not self.fast_winding_hint_message_showed and (seek.isCurrentlySeekable() & 1):
				self.session.open(MessageBox, _("No fast winding possible yet.. but you can use the number buttons to skip forward/backward!"), MessageBox.TYPE_INFO, timeout=10)
				self.fast_winding_hint_message_showed = True
				return
			return 0 # trade as unhandled action
		seekstate = self.seekstate
		if seekstate == self.SEEK_STATE_PLAY:
			self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.getValue())))
		elif seekstate == self.SEEK_STATE_EOF:
			self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.getValue())))
			self.doSeekRelative(-6)
		elif seekstate == self.SEEK_STATE_PAUSE:
			self.doSeekRelative(-1)
		elif self.isStateForward(seekstate):
			speed = seekstate[1]
			if seekstate[2]:
				speed /= seekstate[2]
			speed = self.getLower(speed, config.seek.speeds_forward.getValue())
			if speed:
				self.setSeekState(self.makeStateForward(speed))
			else:
				self.setSeekState(self.SEEK_STATE_PLAY)
		elif self.isStateBackward(seekstate):
			speed = -seekstate[1]
			if seekstate[2]:
				speed /= seekstate[2]
			speed = self.getHigher(speed, config.seek.speeds_backward.getValue()) or config.seek.speeds_backward.getValue()[-1]
			self.setSeekState(self.makeStateBackward(speed))
		elif self.isStateSlowMotion(seekstate):
			speed = self.getHigher(seekstate[2], config.seek.speeds_slowmotion.getValue())
			if speed:
				self.setSeekState(self.makeStateSlowMotion(speed))
			else:
				self.setSeekState(self.SEEK_STATE_PAUSE)
		self.pts_lastseekspeed = self.seekstate[1]
	def seekFwdManual(self, fwd=True):
		if config.seek.baractivation.getValue() == "leftright":
			self.session.open(Seekbar, fwd)
		else:
			self.session.openWithCallback(self.fwdSeekTo, MinuteInput)
	def seekBackManual(self, fwd=False):
		if config.seek.baractivation.getValue() == "leftright":
			self.session.open(Seekbar, fwd)
		else:
			self.session.openWithCallback(self.rwdSeekTo, MinuteInput)
	def seekFwdSeekbar(self, fwd=True):
		if not config.seek.baractivation.getValue() == "leftright":
			self.session.open(Seekbar, fwd)
		else:
			self.session.openWithCallback(self.fwdSeekTo, MinuteInput)
	def fwdSeekTo(self, minutes):
		self.doSeekRelative(minutes * 60 * 90000)
	def seekBackSeekbar(self, fwd=False):
		if not config.seek.baractivation.getValue() == "leftright":
			self.session.open(Seekbar, fwd)
		else:
			self.session.openWithCallback(self.rwdSeekTo, MinuteInput)
	def rwdSeekTo(self, minutes):
		self.doSeekRelative(-minutes * 60 * 90000)
	def checkSkipShowHideLock(self):
		"""Keep the infobar locked visible while winding, if so configured."""
		if self.seekstate == self.SEEK_STATE_PLAY or self.seekstate == self.SEEK_STATE_EOF:
			self.lockedBecauseOfSkipping = False
			self.unlockShow()
		else:
			wantlock = self.seekstate != self.SEEK_STATE_PLAY
			if config.usage.show_infobar_on_skip.getValue():
				if self.lockedBecauseOfSkipping and not wantlock:
					self.unlockShow()
					self.lockedBecauseOfSkipping = False
				if wantlock and not self.lockedBecauseOfSkipping:
					self.lockShow()
					self.lockedBecauseOfSkipping = True
	def calcRemainingTime(self):
		"""Remaining time at the current speed (pts/90 units, i.e. ms),
		0 at/after the end, or False when it cannot be determined."""
		seekable = self.getSeek()
		if seekable is not None:
			length = seekable.getLength()
			try:
				# Prefer the cut-list end marker if one is set.
				tmp = self.cueGetEndCutPosition()
				if tmp:
					length = (False, tmp)
			except:
				pass
			pos = seekable.getPlayPosition()
			speednom = self.seekstate[1] or 1
			speedden = self.seekstate[2] or 1
			if not length[0] and not pos[0]:
				if length[1] <= pos[1]:
					return 0
				time = (length[1] - pos[1])*speedden/(90*speednom)
				return time
		return False
	def __evEOF(self):
		if self.seekstate == self.SEEK_STATE_EOF:
			return
		# if we are seeking forward, we try to end up ~1s before the end, and pause there.
		seekstate = self.seekstate
		if self.seekstate != self.SEEK_STATE_PAUSE:
			self.setSeekState(self.SEEK_STATE_EOF)
		if seekstate not in (self.SEEK_STATE_PLAY, self.SEEK_STATE_PAUSE): # if we are seeking
			seekable = self.getSeek()
			if seekable is not None:
				seekable.seekTo(-1)
				self.doEofInternal(True)
		# NOTE(review): when EOF is hit while winding, doEofInternal fires
		# twice (True above, then False below) — preserved as-is.
		if seekstate == self.SEEK_STATE_PLAY: # regular EOF
			self.doEofInternal(True)
		else:
			self.doEofInternal(False)
	def doEofInternal(self, playing):
		pass # Defined in subclasses
	def __evSOF(self):
		"""Start-of-file: resume normal play from the beginning."""
		self.setSeekState(self.SEEK_STATE_PLAY)
		self.doSeek(0)
class InfoBarPVRState:
	"""Shows a small playback-state dialog (play/pause/ff/rew/slow icon plus
	speed text) and mirrors it into the infobar's own widgets if enabled."""
	def __init__(self, screen=PVRState, force_show = False):
		self.onChangedEntry = [ ]
		self.onPlayStateChanged.append(self.__playStateChanged)
		self.pvrStateDialog = self.session.instantiateDialog(screen)
		self.onShow.append(self._mayShow)
		self.onHide.append(self.pvrStateDialog.hide)
		self.force_show = force_show
	def createSummary(self):
		return InfoBarMoviePlayerSummary
	def _mayShow(self):
		# When the infobar pvr-state display is disabled, blank those widgets
		# and show the separate dialog instead (unless at EOF).
		if self.has_key("state") and not config.usage.movieplayer_pvrstate.getValue():
			self["state"].setText("")
			self["statusicon"].setPixmapNum(6)
			self["speed"].setText("")
		if self.shown and self.seekstate != self.SEEK_STATE_EOF and not config.usage.movieplayer_pvrstate.getValue():
			self.pvrStateDialog.show()
			self.startHideTimer()
	def __playStateChanged(self, state):
		"""Update dialog/infobar widgets for the new seek state tuple.

		state is (paused, ff-ratio, slow-ratio, display string); the display
		string selects the status icon and the speed text.
		"""
		playstateString = state[3]
		state_summary = playstateString
		self.pvrStateDialog["state"].setText(playstateString)
		# Map the state string onto (status icon index, speed text).
		if playstateString == '>':
			statusicon, speedtext = 0, ""
		elif playstateString == '||':
			statusicon, speedtext = 1, ""
		elif playstateString == 'END':
			statusicon, speedtext = 2, ""
		elif playstateString.startswith('>>'):
			statusicon, speedtext = 3, playstateString.split()[1]
		elif playstateString.startswith('<<'):
			statusicon, speedtext = 4, playstateString.split()[1]
		elif playstateString.startswith('/'):
			statusicon, speedtext = 5, playstateString
		else:
			# BUGFIX: an unrecognized state string previously left the
			# summary variables unbound and raised NameError below.
			statusicon, speedtext = None, None
		if statusicon is not None:
			self.pvrStateDialog["statusicon"].setPixmapNum(statusicon)
			self.pvrStateDialog["speed"].setText(speedtext)
			if self.has_key("state") and config.usage.movieplayer_pvrstate.getValue():
				self["state"].setText(playstateString)
				self["statusicon"].setPixmapNum(statusicon)
				self["speed"].setText(speedtext)
		speed_summary = self.pvrStateDialog["speed"].text
		statusicon_summary = statusicon
		for cb in self.onChangedEntry:
			cb(state_summary, speed_summary, statusicon_summary)
		# if we return into "PLAY" state, ensure that the dialog gets hidden if there will be no infobar displayed
		if not config.usage.show_infobar_on_skip.getValue() and self.seekstate == self.SEEK_STATE_PLAY and not self.force_show:
			self.pvrStateDialog.hide()
		else:
			self._mayShow()
class InfoBarTimeshiftState(InfoBarPVRState):
	"""PVR-state dialog specialised for timeshift playback: always forces the
	dialog visible and shows the buffered event's name from the meta file."""
	def __init__(self):
		InfoBarPVRState.__init__(self, screen=TimeshiftState, force_show = True)
		self.onPlayStateChanged.append(self.__timeshiftEventName)
		self.onHide.append(self.__hideTimeshiftState)
	def _mayShow(self):
		# Only show while timeshift is active and the service is seekable.
		if self.shown and self.timeshiftEnabled() and self.isSeekable():
			InfoBarTimeshift.ptsSeekPointerSetCurrentPos(self)
			if config.timeshift.showinfobar.getValue():
				self["TimeshiftSeekPointerActions"].setEnabled(True)
			self.pvrStateDialog.show()
			self.startHideTimer()
	def __hideTimeshiftState(self):
		self["TimeshiftSeekPointerActions"].setEnabled(False)
		self.pvrStateDialog.hide()
	def __timeshiftEventName(self,state):
		# Read the event name for the live buffer currently being played from
		# its .meta side file; clear the label when no meta file exists.
		if os.path.exists("%spts_livebuffer_%s.meta" % (config.usage.timeshift_path.getValue(),self.pts_currplaying)):
			readmetafile = open("%spts_livebuffer_%s.meta" % (config.usage.timeshift_path.getValue(),self.pts_currplaying), "r")
			servicerefname = readmetafile.readline()[0:-1]
			eventname = readmetafile.readline()[0:-1]
			readmetafile.close()
			self.pvrStateDialog["eventname"].setText(eventname)
		else:
			self.pvrStateDialog["eventname"].setText("")
class InfoBarShowMovies:
	"""Binds the movie-list keys to self.showMovies/up/down, which must be
	provided by the class this mixin is combined with."""
	# i don't really like this class.
	# it calls a not further specified "movie list" on up/down/movieList,
	# so this is not more than an action map
	def __init__(self):
		self["MovieListActions"] = HelpableActionMap(self, "InfobarMovieListActions",
			{
				"movieList": (self.showMovies, _("Open the movie list")),
				"up": (self.up, _("Open the movie list")),
				"down": (self.down, _("Open the movie list"))
			})
from Screens.PiPSetup import PiPSetup
class InfoBarExtensions:
	"""Manages the "extensions" menu: a list of entries (optionally bound to
	number/colour hotkeys) contributed by other InfoBar mixins and plugins."""
	EXTENSION_SINGLE = 0
	EXTENSION_LIST = 1
	def __init__(self):
		self.list = []
		if config.plisettings.ColouredButtons.getValue():
			self["InstantExtensionsActions"] = HelpableActionMap(self, "InfobarExtensions",
				{
					"extensions": (self.bluekey_ex, _("Show extensions...")),
					"showPluginBrowser": (self.showPluginBrowser, _("Show the plugin browser..")),
					"showEventInfo": (self.SelectopenEventView, _("Show the infomation on current event.")),
					"openTimerList": (self.showTimerList, _("Show the list of timers.")),
					"openAutoTimerList": (self.showAutoTimerList, _("Show the list of AutoTimers.")),
					"openEPGSearch": (self.showEPGSearch, _("Search the epg for current event.")),
					"openIMDB": (self.showIMDB, _("Search IMDb for information about current event.")),
					"showMediaPlayer": (self.showMediaPlayer, _("Show the media player...")),
				}, 1) # lower priority
		else:
			self["InstantExtensionsActions"] = HelpableActionMap(self, "InfobarExtensions",
				{
					"extensions": (self.bluekey_ex, _("view extensions...")),
					"showPluginBrowser": (self.showPluginBrowser, _("Show the plugin browser..")),
					"showEventInfo": (self.SelectopenEventView, _("Show the infomation on current event.")),
					"showMediaPlayer": (self.showMediaPlayer, _("Show the media player...")),
				}, 1) # lower priority
		self.addExtension(extension = self.getLogManager, type = InfoBarExtensions.EXTENSION_LIST)
		self.addExtension(extension = self.getOsd3DSetup, type = InfoBarExtensions.EXTENSION_LIST)
		self.addExtension(extension = self.getCCcamInfo, type = InfoBarExtensions.EXTENSION_LIST)
		self.addExtension(extension = self.getOScamInfo, type = InfoBarExtensions.EXTENSION_LIST)
		self.addExtension(extension = self.getSoftcamPanel, type = InfoBarExtensions.EXTENSION_LIST)
		self.addExtension(extension = self.getRestartNetwork, type = InfoBarExtensions.EXTENSION_LIST)
	def bluekey_ex(self):
		"""Blue key: open either the Quick Menu or the extensions list."""
		if config.workaround.blueswitch.getValue() == "1":
			self.quickmenuStart()
		else:
			self.showExtensionSelection()
	def quickmenuStart(self):
		try:
			if not self.session.pipshown:
				from Plugins.Extensions.Infopanel.QuickMenu import QuickMenu
				self.session.open(QuickMenu)
			else:
				self.showExtensionSelection()
		except:
			# Best effort: session may not have a pipshown attribute yet.
			print("[INFOBARGENERICS] QuickMenu: error pipshow, starting Quick Menu")
			from Plugins.Extensions.Infopanel.QuickMenu import QuickMenu
			self.session.open(QuickMenu)
	def SelectopenEventView(self):
		"""Open the event view if the mixed-in infobar provides it."""
		try:
			self.openEventView()
		except:
			pass
	def getLMname(self):
		return _("Log Manager")
	def getLogManager(self):
		if config.logmanager.showinextensions.getValue():
			return [((boundFunction(self.getLMname), boundFunction(self.openLogManager), lambda: True), None)]
		else:
			return []
	def getSoftcamPanelname(self):
		return _("Softcam-Panel")
	def getSoftcamPanel(self):
		if config.plugins.showinfopanelextensions.getValue():
			return [((boundFunction(self.getSoftcamPanelname), boundFunction(self.openSoftcamPanel), lambda: True), None)]
		else:
			return []
	def getRestartNetworkname(self):
		return _("Restart Network")
	def getRestartNetwork(self):
		return [((boundFunction(self.getRestartNetworkname), boundFunction(self.openRestartNetwork), lambda: True), None)]
	def get3DSetupname(self):
		return _("OSD 3D Setup")
	def getOsd3DSetup(self):
		if config.osd.show3dextensions.getValue():
			return [((boundFunction(self.get3DSetupname), boundFunction(self.open3DSetup), lambda: True), None)]
		else:
			return []
	def getCCname(self):
		return _("CCcam Info")
	def getCCcamInfo(self):
		"""Extension entry for CCcam info when a cccam binary is installed.

		BUGFIX: the previous version iterated over softcams outside the
		pathExists() guard, raising NameError when /usr/bin was missing.
		"""
		if pathExists('/usr/bin/'):
			for softcam in os.listdir('/usr/bin/'):
				if softcam.lower().startswith('cccam') and config.cccaminfo.showInExtensions.getValue():
					return [((boundFunction(self.getCCname), boundFunction(self.openCCcamInfo), lambda: True), None)]
		return []
	def getOSname(self):
		return _("OScam Info")
	def getOScamInfo(self):
		"""Extension entry for OScam info when an oscam binary is installed.

		BUGFIX: same NameError as getCCcamInfo when /usr/bin was missing.
		"""
		if pathExists('/usr/bin/'):
			for softcam in os.listdir('/usr/bin/'):
				if softcam.lower().startswith('oscam') and config.oscaminfo.showInExtensions.getValue():
					return [((boundFunction(self.getOSname), boundFunction(self.openOScamInfo), lambda: True), None)]
		return []
	def addExtension(self, extension, key = None, type = EXTENSION_SINGLE):
		"""Register an extension entry (or an entry-list provider)."""
		self.list.append((type, extension, key))
		if config.usage.sort_extensionslist.getValue():
			self.list.sort()
	def updateExtension(self, extension, key = None):
		"""Add one extension entry, assigning a free hotkey if possible."""
		self.extensionsList.append(extension)
		if key is not None:
			if key in self.extensionKeys:
				key = None
		if key is None:
			# Fall back to the first unassigned hotkey.
			for x in self.availableKeys:
				if x not in self.extensionKeys:
					key = x
					break
		if key is not None:
			self.extensionKeys[key] = len(self.extensionsList) - 1
	def updateExtensions(self):
		"""Rebuild extensionsList/extensionKeys from the registered providers."""
		self.extensionsList = []
		self.availableKeys = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "red", "green", "yellow", "blue" ]
		self.extensionKeys = {}
		for x in self.list:
			if x[0] == self.EXTENSION_SINGLE:
				self.updateExtension(x[1], x[2])
			else:
				for y in x[1]():
					self.updateExtension(y[0], y[1])
	def showExtensionSelection(self):
		"""Open a ChoiceBox with all currently available extension entries."""
		self.updateExtensions()
		extensionsList = self.extensionsList[:]
		keys = []
		entries = []
		colorlist = []
		for x in self.availableKeys:
			if x in self.extensionKeys:
				entry = self.extensionKeys[x]
				extension = self.extensionsList[entry]
				if extension[2]():
					# Number-keyed entries come first, colour-keyed ones after.
					if self.availableKeys.index(x) < 10:
						entries.append((extension[0](), extension))
					else:
						colorlist.append((extension[0](), extension))
					keys.append(x)
					extensionsList.remove(extension)
				else:
					extensionsList.remove(extension)
		if config.usage.sort_extensionslist.getValue():
			entries.sort()
		for x in colorlist:
			entries.append(x)
		entries.extend([(x[0](), x) for x in extensionsList])
		keys += [""] * len(extensionsList)
		self.session.openWithCallback(self.extensionCallback, ChoiceBox, title=_("Please choose an extension..."), list = entries, keys = keys, skin_name = "ExtensionsList")
	def extensionCallback(self, answer):
		if answer is not None:
			answer[1][1]()
	def showPluginBrowser(self):
		from Screens.PluginBrowser import PluginBrowser
		self.session.open(PluginBrowser)
	def openCCcamInfo(self):
		from Screens.CCcamInfo import CCcamInfoMain
		self.session.open(CCcamInfoMain)
	def openOScamInfo(self):
		from Screens.OScamInfo import OscamInfoMenu
		self.session.open(OscamInfoMenu)
	def showTimerList(self):
		self.session.open(TimerEditList)
	def openLogManager(self):
		from Screens.LogManager import LogManager
		self.session.open(LogManager)
	def open3DSetup(self):
		from Screens.UserInterfacePositioner import OSD3DSetupScreen
		self.session.open(OSD3DSetupScreen)
	def openSoftcamPanel(self):
		from Plugins.Extensions.Infopanel.SoftcamPanel import SoftcamPanel
		self.session.open(SoftcamPanel)
	def openRestartNetwork(self):
		try:
			from Plugins.Extensions.Infopanel.RestartNetwork import RestartNetwork
			self.session.open(RestartNetwork)
		except:
			print('[INFOBARGENERICS] failed to restart network')
	def showAutoTimerList(self):
		"""Open the AutoTimer overview, pausing the background poller while editing."""
		if os.path.exists("/usr/lib/enigma2/python/Plugins/Extensions/AutoTimer/plugin.pyo"):
			from Plugins.Extensions.AutoTimer.plugin import main, autostart
			from Plugins.Extensions.AutoTimer.AutoTimer import AutoTimer
			from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller
			self.autopoller = AutoPoller()
			self.autotimer = AutoTimer()
			try:
				self.autotimer.readXml()
			except SyntaxError as se:
				self.session.open(
					MessageBox,
					_("Your config file is not well-formed:\n%s") % (str(se)),
					type = MessageBox.TYPE_ERROR,
					timeout = 10
				)
				return
			# Do not run in background while editing, this might screw things up
			if self.autopoller is not None:
				self.autopoller.stop()
			from Plugins.Extensions.AutoTimer.AutoTimerOverview import AutoTimerOverview
			self.session.openWithCallback(
				self.editCallback,
				AutoTimerOverview,
				self.autotimer
			)
		else:
			self.session.open(MessageBox, _("The AutoTimer plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
	def editCallback(self, session):
		# XXX: canceling of GUI (Overview) won't affect config values which might have been changed - is this intended?
		# Don't parse EPG if editing was canceled
		if session is not None:
			# Save xml
			self.autotimer.writeXml()
			# Poll EPGCache
			self.autotimer.parseEPG()
		# Start autopoller again if wanted
		if config.plugins.autotimer.autopoll.getValue():
			if self.autopoller is None:
				from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller
				self.autopoller = AutoPoller()
			self.autopoller.start()
		# Remove instance if not running in background
		else:
			self.autopoller = None
		self.autotimer = None
	def showEPGSearch(self):
		"""Open EPGSearch pre-filled with the current event or service name."""
		from Plugins.Extensions.EPGSearch.EPGSearch import EPGSearch
		s = self.session.nav.getCurrentService()
		if s:
			info = s.info()
			event = info.getEvent(0) # 0 = now, 1 = next
			if event:
				name = event and event.getEventName() or ''
			else:
				# No EPG event: derive a search term from the service reference.
				name = self.session.nav.getCurrentlyPlayingServiceOrGroup().toString()
				name = name.split('/')
				name = name[-1]
				name = name.replace('.',' ')
				name = name.split('-')
				name = name[0]
				if name.endswith(' '):
					name = name[:-1]
			if name:
				self.session.open(EPGSearch, name, False)
			else:
				self.session.open(EPGSearch)
		else:
			self.session.open(EPGSearch)
	def showIMDB(self):
		"""Look up the current event in the IMDb plugin, if installed."""
		if os.path.exists("/usr/lib/enigma2/python/Plugins/Extensions/IMDb/plugin.pyo"):
			from Plugins.Extensions.IMDb.plugin import IMDB
			s = self.session.nav.getCurrentService()
			if s:
				info = s.info()
				event = info.getEvent(0) # 0 = now, 1 = next
				name = event and event.getEventName() or ''
				self.session.open(IMDB, name)
		else:
			self.session.open(MessageBox, _("The IMDb plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
	def showMediaPlayer(self):
		if isinstance(self, InfoBarExtensions):
			if isinstance(self, InfoBar):
				try: # the plugin may not be installed
					from Plugins.Extensions.MediaPlayer.plugin import MediaPlayer
					self.session.open(MediaPlayer)
				except Exception:
					self.session.open(MessageBox, _("The MediaPlayer plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
from Tools.BoundFunction import boundFunction
import inspect
# depends on InfoBarExtensions
class InfoBarPlugins:
	"""Contributes all EXTENSIONSMENU plugins as extension-list entries."""
	def __init__(self):
		self.addExtension(extension = self.getPluginList, type = InfoBarExtensions.EXTENSION_LIST)

	def getPluginName(self, name):
		"""Identity helper used as a bound name getter for an entry."""
		return name

	def getPluginList(self):
		"""Build the (name-getter, runner, availability) entries, sorted by plugin name."""
		entries = []
		for plugin in plugins.getPlugins(where = PluginDescriptor.WHERE_EXTENSIONSMENU):
			argcount = len(inspect.getargspec(plugin.__call__)[0])
			# One argument: session only. Two: session + servicelist, which is
			# only available on screens with channel selection.
			if argcount == 1 or (argcount == 2 and isinstance(self, InfoBarChannelSelection)):
				entries.append(((boundFunction(self.getPluginName, plugin.name), boundFunction(self.runPlugin, plugin), lambda: True), None, plugin.name))
		entries.sort(key = lambda item: item[2])
		return entries

	def runPlugin(self, plugin):
		"""Invoke a plugin, passing the service list when this screen has one."""
		if isinstance(self, InfoBarChannelSelection):
			plugin(session = self.session, servicelist = self.servicelist)
		else:
			plugin(session = self.session)
from Components.Task import job_manager
class InfoBarJobman:
	"""Exposes pending background jobs (Task framework) as extension entries."""
	def __init__(self):
		self.addExtension(extension = self.getJobList, type = InfoBarExtensions.EXTENSION_LIST)

	def getJobList(self):
		"""One extension entry per pending job, or none when disabled in setup."""
		if not config.usage.jobtaksextensions.getValue():
			return []
		return [((boundFunction(self.getJobName, job), boundFunction(self.showJobView, job), lambda: True), None) for job in job_manager.getPendingJobs()]

	def getJobName(self, job):
		"""Menu label: "<status>: <name> (<percent>%)"."""
		percent = int(100*job.progress/float(job.end))
		return "%s: %s (%d%%)" % (job.getStatustext(), job.name, percent)

	def showJobView(self, job):
		"""Bring a job to the foreground in the task view."""
		from Screens.TaskView import JobView
		job_manager.in_background = False
		self.session.openWithCallback(self.JobViewCB, JobView, job)

	def JobViewCB(self, in_background):
		# Restore the background flag chosen in the job view.
		job_manager.in_background = in_background
# depends on InfoBarExtensions
class InfoBarPiP:
	"""Picture-in-Picture handling: show/hide, move, swap and "pipzap"
	(channel list focus follows the PiP window). Depends on
	InfoBarExtensions for the colour-key extension entries."""
	def __init__(self):
		# session.pipshown is shared session-wide state; create it lazily.
		try:
			self.session.pipshown
		except:
			self.session.pipshown = False
		# PiP needs a second video decoder; only offered on such hardware.
		if SystemInfo.get("NumVideoDecoders", 1) > 1 and isinstance(self, InfoBarEPG):
			self["PiPActions"] = HelpableActionMap(self, "InfobarPiPActions",
				{
					"activatePiP": (self.showPiP, _("Activate PiP")),
				})
			if self.allowPiP:
				self.addExtension((self.getShowHideName, self.showPiP, lambda: True), "blue")
				self.addExtension((self.getMoveName, self.movePiP, self.pipShown), "green")
				self.addExtension((self.getSwapName, self.swapPiP, self.pipShown), "yellow")
				self.addExtension((self.getTogglePipzapName, self.togglePipzap, self.pipShown), "red")
			else:
				# Restricted screens only get show/hide and move.
				self.addExtension((self.getShowHideName, self.showPiP, self.pipShown), "blue")
				self.addExtension((self.getMoveName, self.movePiP, self.pipShown), "green")
	def pipShown(self):
		# Visibility predicate used by the extension entries above.
		return self.session.pipshown
	def pipHandles0Action(self):
		return self.pipShown() and config.usage.pip_zero_button.getValue() != "standard"
	def getShowHideName(self):
		if self.session.pipshown:
			return _("Disable Picture in Picture")
		else:
			return _("Activate Picture in Picture")
	def getSwapName(self):
		return _("Swap services")
	def getMoveName(self):
		return _("Move Picture in Picture")
	def getTogglePipzapName(self):
		slist = self.servicelist
		if slist and slist.dopipzap:
			return _("Zap focus to main screen")
		return _("Zap focus to Picture in Picture")
	def togglePipzap(self):
		# Opens PiP first if it is not up yet, then moves the channel-list
		# focus between the main screen and the PiP window.
		if not self.session.pipshown:
			self.showPiP()
		slist = self.servicelist
		if slist and self.session.pipshown:
			slist.togglePipzap()
			if slist.dopipzap:
				# Swap the remembered service paths so each window keeps
				# its own bouquet position.
				currentServicePath = self.servicelist.getCurrentServicePath()
				self.servicelist.setCurrentServicePath(self.session.pip.servicePath, doZap=False)
				self.session.pip.servicePath = currentServicePath
	def showPiP(self):
		# Toggle: tears PiP down when shown, creates it otherwise.
		if self.session.pipshown:
			slist = self.servicelist
			if slist and slist.dopipzap:
				# Give the focus back to the main screen before closing.
				self.togglePipzap()
			if self.session.pipshown:
				del self.session.pip
				if SystemInfo["LCDMiniTV"]:
					# NOTE: string comparison against "1" — config value is a string.
					if config.lcd.modepip.value >= "1":
						f = open("/proc/stb/lcd/mode", "w")
						f.write(config.lcd.modeminitv.value)
						f.close()
				self.session.pipshown = False
		else:
			self.session.pip = self.session.instantiateDialog(PictureInPicture)
			self.session.pip.show()
			newservice = self.session.nav.getCurrentlyPlayingServiceReference() or self.servicelist.servicelist.getCurrent()
			if self.session.pip.playService(newservice):
				self.session.pipshown = True
				self.session.pip.servicePath = self.servicelist.getCurrentServicePath()
				if SystemInfo["LCDMiniTV"]:
					if config.lcd.modepip.value >= "1":
						# Route PiP to the LCD and reset the scaler window.
						f = open("/proc/stb/lcd/mode", "w")
						f.write(config.lcd.modepip.value)
						f.close()
						f = open("/proc/stb/vmpeg/1/dst_width", "w")
						f.write("0")
						f.close()
						f = open("/proc/stb/vmpeg/1/dst_height", "w")
						f.write("0")
						f.close()
						f = open("/proc/stb/vmpeg/1/dst_apply", "w")
						f.write("1")
						f.close()
			else:
				# Service could not be played: roll the dialog back.
				self.session.pipshown = False
				del self.session.pip
	def swapPiP(self):
		# Exchange the service in the PiP window with the main service.
		swapservice = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		pipref = self.session.pip.getCurrentService()
		if swapservice and pipref and pipref.toString() != swapservice.toString():
			currentServicePath = self.servicelist.getCurrentServicePath()
			self.servicelist.setCurrentServicePath(self.session.pip.servicePath, doZap=False)
			self.session.pip.playService(swapservice)
			self.session.nav.stopService() # stop portal
			self.session.nav.playService(pipref, checkParentalControl=False, adjust=False)
			self.session.pip.servicePath = currentServicePath
			if self.servicelist.dopipzap:
				# This unfortunately won't work with subservices
				self.servicelist.setCurrentSelection(self.session.pip.getCurrentService())
	def movePiP(self):
		self.session.open(PiPSetup, pip = self.session.pip)
	def pipDoHandle0Action(self):
		# Behaviour of the "0" key while PiP is active, per user config.
		use = config.usage.pip_zero_button.getValue()
		if "swap" == use:
			self.swapPiP()
		elif "swapstop" == use:
			self.swapPiP()
			self.showPiP()
		elif "stop" == use:
			self.showPiP()
class InfoBarINFOpanel:
	"""INFO-Panel - handles the infoPanel action"""
	def __init__(self):
		self["INFOpanelActions"] = HelpableActionMap(self, "InfoBarINFOpanel",
			{
				"infoPanel": (self.selectRedKeytask, _("INFO-Panel...")),
				"softcamPanel": (self.softcamPanel, _("Softcam-Panel...")),
			})
		self.onHBBTVActivation = [ ]
		self.onRedButtonActivation = [ ]
	def selectRedKeytask(self):
		"""Red/INFO key: launch an HbbTV application when the current
		service signals one and a browser plugin is installed; in every
		other case fall back to the Infopanel plugin / instant record."""
		isWEBBROWSER = None
		isHBBTV = None
		if os.path.isfile("/usr/lib/enigma2/python/Plugins/Extensions/WebBrowser/browser.pyo"):
			isWEBBROWSER = True
		if os.path.isfile("/usr/lib/enigma2/python/Plugins/Extensions/HbbTV/plugin.pyo"):
			isHBBTV = True
		if os.path.isfile("/usr/lib/enigma2/python/Plugins/Extensions/E3Opera/plugin.pyo"):
			isHBBTV = True
		if isWEBBROWSER or isHBBTV:
			service = self.session.nav.getCurrentService()
			info = service and service.info()
			if info and info.getInfoString(iServiceInformation.sHBBTVUrl) != "":
				# Current service carries an HbbTV autostart URL -> run callbacks.
				for x in self.onHBBTVActivation:
					x()
			else:
				self._openRedPanel()
		else:
			self._openRedPanel()
	def _openRedPanel(self):
		# Shared fallback (previously duplicated inline in both branches of
		# selectRedKeytask): open the Infopanel plugin when enabled in the
		# configuration, otherwise start an instant recording.
		if config.plugins.infopanel_redpanel.enabled.getValue() == True:
			try:
				from Plugins.Extensions.Infopanel.plugin import Infopanel
				self.session.open(Infopanel, services = self.servicelist)
			except:
				# Plugin not installed/broken -> silently ignore, as before.
				pass
		else:
			self.instantRecord()
	def softcamPanel(self):
		"""Open the Softcam panel if enabled in config; no-op otherwise."""
		if config.plugins.infopanel_redpanel.enabledlong.getValue() == True:
			try:
				from Plugins.Extensions.Infopanel.SoftcamPanel import SoftcamPanel
				self.session.open(SoftcamPanel)
			except:
				pass
		else:
			pass
class InfoBarQuickMenu:
	# Quick Menu key handling: opens the Infopanel QuickMenu screen, or the
	# extension selection when config.workaround.blueswitch swaps the keys.
	def __init__(self):
		self["QuickMenuActions"] = HelpableActionMap(self, "InfoBarQuickMenu",
			{
				"quickmenu": (self.bluekey_qm, _("Quick Menu...")),
			})
	def bluekey_qm(self):
		# Workaround option "1" swaps Quick Menu and extension list.
		if config.workaround.blueswitch.getValue() == "1":
			self.showExtensionSelection()
		else:
			self.quickmenuStart()
	def quickmenuStart(self):
		# While PiP is shown the extension list is opened instead; any error
		# probing session.pipshown falls through to opening the QuickMenu.
		try:
			if not self.session.pipshown:
				from Plugins.Extensions.Infopanel.QuickMenu import QuickMenu
				self.session.open(QuickMenu)
			else:
				self.showExtensionSelection()
		except:
			print "[INFOBARGENERICS] QuickMenu: error pipshow, starting Quick Menu"
			from Plugins.Extensions.Infopanel.QuickMenu import QuickMenu
			self.session.open(QuickMenu)
class InfoBarInstantRecord:
	"""Instant Record - handles the instantRecord action in order to
	start/stop instant records"""
	def __init__(self):
		self["InstantRecordActions"] = HelpableActionMap(self, "InfobarInstantRecord",
			{
				"instantRecord": (self.instantRecord, _("Instant recording...")),
			})
		if isStandardInfoBar(self):
			self.recording = []
		else:
			# Secondary infobars share the main InfoBar's recording list so
			# every screen sees the same instant-record state.
			from Screens.InfoBar import InfoBar
			InfoBarInstance = InfoBar.instance
			if InfoBarInstance:
				self.recording = InfoBarInstance.recording
	def stopCurrentRecording(self, entry = -1):
		# entry: index into self.recording selected via TimerSelection.
		if entry is not None and entry != -1:
			self.session.nav.RecordTimer.removeEntry(self.recording[entry])
			self.recording.remove(self.recording[entry])
	def getProgramInfoAndEvent(self, info, name):
		# Fills dict 'info' with serviceref/event/name/description/eventid
		# (plus "end" if EPG data exists) for the running service.
		info["serviceref"] = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		# try to get event info
		event = None
		try:
			service = self.session.nav.getCurrentService()
			epg = eEPGCache.getInstance()
			event = epg.lookupEventTime(info["serviceref"], -1, 0)
			if event is None:
				event = service.info().getEvent(0)
		except:
			pass
		info["event"] = event
		info["name"] = name
		info["description"] = ""
		info["eventid"] = None
		if event is not None:
			curEvent = parseEvent(event)
			info["name"] = curEvent[2]
			info["description"] = curEvent[3]
			info["eventid"] = curEvent[4]
			info["end"] = curEvent[1]
	def startInstantRecording(self, limitEvent = False):
		# limitEvent: stop at the end of the current EPG event instead of
		# recording with the dummy one-hour end time.
		begin = int(time())
		end = begin + 3600	# dummy
		name = "instant record"
		info = { }
		self.getProgramInfoAndEvent(info, name)
		serviceref = info["serviceref"]
		event = info["event"]
		if event is not None:
			if limitEvent:
				end = info["end"]
		else:
			if limitEvent:
				self.session.open(MessageBox, _("No event info found, recording indefinitely."), MessageBox.TYPE_INFO)
		if isinstance(serviceref, eServiceReference):
			serviceref = ServiceReference(serviceref)
		recording = RecordTimerEntry(serviceref, begin, end, info["name"], info["description"], info["eventid"], dirname = preferredInstantRecordPath())
		recording.dontSave = True
		if event is None or limitEvent == False:
			# Open-ended recording: let the timer auto-extend itself.
			recording.autoincrease = True
			recording.setAutoincreaseEnd()
		simulTimerList = self.session.nav.RecordTimer.record(recording)
		if simulTimerList is None:	# no conflict
			recording.autoincrease = False
			self.recording.append(recording)
		else:
			if len(simulTimerList) > 1: # with other recording
				name = simulTimerList[1].name
				name_date = ' '.join((name, strftime('%F %T', localtime(simulTimerList[1].begin))))
				# print "[TIMER] conflicts with", name_date
				recording.autoincrease = True	# start with max available length, then increment
				if recording.setAutoincreaseEnd():
					self.session.nav.RecordTimer.record(recording)
					self.recording.append(recording)
					self.session.open(MessageBox, _("Record time limited due to conflicting timer %s") % name_date, MessageBox.TYPE_INFO)
				else:
					self.session.open(MessageBox, _("Could not record due to conflicting timer %s") % name, MessageBox.TYPE_INFO)
			else:
				self.session.open(MessageBox, _("Could not record due to invalid service %s") % serviceref, MessageBox.TYPE_INFO)
				recording.autoincrease = False
	def isInstantRecordRunning(self):
		# True if any entry of our instant-record list is currently running.
		# print "self.recording:", self.recording
		if self.recording:
			for x in self.recording:
				if x.isRunning():
					return True
		return False
	def recordQuestionCallback(self, answer):
		# ChoiceBox result handler; answer is (label, key) or None.
		# print 'recordQuestionCallback'
		# print "pre:\n", self.recording
		# print 'test1'
		if answer is None or answer[1] == "no":
			# print 'test2'
			return
		list = []
		recording = self.recording[:]
		for x in recording:
			# Drop entries the RecordTimer no longer knows about; offer the
			# running unsaved ones for selection.
			if not x in self.session.nav.RecordTimer.timer_list:
				self.recording.remove(x)
			elif x.dontSave and x.isRunning():
				list.append((x, False))
		if answer[1] == "changeduration":
			if len(self.recording) == 1:
				self.changeDuration(0)
			else:
				self.session.openWithCallback(self.changeDuration, TimerSelection, list)
		elif answer[1] == "changeendtime":
			if len(self.recording) == 1:
				self.setEndtime(0)
			else:
				self.session.openWithCallback(self.setEndtime, TimerSelection, list)
		elif answer[1] == "timer":
			import TimerEdit
			self.session.open(TimerEdit.TimerEditList)
		elif answer[1] == "stop":
			self.session.openWithCallback(self.stopCurrentRecording, TimerSelection, list)
		elif answer[1] in ( "indefinitely" , "manualduration", "manualendtime", "event"):
			self.startInstantRecording(limitEvent = answer[1] in ("event", "manualendtime") or False)
			if answer[1] == "manualduration":
				self.changeDuration(len(self.recording)-1)
			elif answer[1] == "manualendtime":
				self.setEndtime(len(self.recording)-1)
		elif answer[1] == "savetimeshift":
			# print 'test1'
			if self.isSeekable() and self.pts_eventcount != self.pts_currplaying:
				# print 'test2'
				InfoBarTimeshift.SaveTimeshift(self, timeshiftfile="pts_livebuffer_%s" % self.pts_currplaying)
			else:
				# print 'test3'
				Notifications.AddNotification(MessageBox,_("Timeshift will get saved at end of event!"), MessageBox.TYPE_INFO, timeout=5)
				self.save_current_timeshift = True
				config.timeshift.isRecording.value = True
		elif answer[1] == "savetimeshiftEvent":
			# print 'test4'
			InfoBarTimeshift.saveTimeshiftEventPopup(self)
		elif answer[1].startswith("pts_livebuffer") is True:
			# print 'test2'
			InfoBarTimeshift.SaveTimeshift(self, timeshiftfile=answer[1])
	def setEndtime(self, entry):
		# Ask for a new end time for recording list entry 'entry'.
		if entry is not None and entry >= 0:
			self.selectedEntry = entry
			self.endtime=ConfigClock(default = self.recording[self.selectedEntry].end)
			dlg = self.session.openWithCallback(self.TimeDateInputClosed, TimeDateInput, self.endtime)
			dlg.setTitle(_("Please change recording endtime"))
	def TimeDateInputClosed(self, ret):
		# ret: (confirmed, new_end_timestamp) from TimeDateInput.
		if len(ret) > 1:
			if ret[0]:
				# print "stopping recording at", strftime("%F %T", localtime(ret[1]))
				if self.recording[self.selectedEntry].end != ret[1]:
					self.recording[self.selectedEntry].autoincrease = False
				self.recording[self.selectedEntry].end = ret[1]
			else:
				# Cancelled: stop the recording now.
				if self.recording[self.selectedEntry].end != int(time()):
					self.recording[self.selectedEntry].autoincrease = False
				self.recording[self.selectedEntry].end = int(time())
			self.session.nav.RecordTimer.timeChanged(self.recording[self.selectedEntry])
	def changeDuration(self, entry):
		# Ask for a recording duration (minutes) for list entry 'entry'.
		if entry is not None and entry >= 0:
			self.selectedEntry = entry
			self.session.openWithCallback(self.inputCallback, InputBox, title=_("How many minutes do you want to record?"), text="5", maxSize=False, type=Input.NUMBER)
	def inputCallback(self, value):
		# print "stopping recording after", int(value), "minutes."
		entry = self.recording[self.selectedEntry]
		if value is not None:
			if int(value) != 0:
				entry.autoincrease = False
				entry.end = int(time()) + 60 * int(value)
			else:
				# 0 minutes -> stop immediately.
				if entry.end != int(time()):
					entry.autoincrease = False
				entry.end = int(time())
			self.session.nav.RecordTimer.timeChanged(entry)
	def isTimerRecordRunning(self):
		# True if a scheduled (non-instant) timer recording is running.
		identical = timers = 0
		for timer in self.session.nav.RecordTimer.timer_list:
			if timer.isRunning() and not timer.justplay:
				timers += 1
				if self.recording:
					for x in self.recording:
						if x.isRunning() and x == timer:
							identical += 1
		return timers > identical
	def instantRecord(self):
		"""Entry point for the record key: builds and opens the ChoiceBox
		with the record/stop/timeshift options appropriate to the state."""
		pirr = preferredInstantRecordPath()
		if not findSafeRecordPath(pirr) and not findSafeRecordPath(defaultMoviePath()):
			if not pirr:
				pirr = ""
			self.session.open(MessageBox, _("Missing ") + "\n" + pirr +
						 "\n" + _("No HDD found or HDD not initialized!"), MessageBox.TYPE_ERROR)
			return
		if isStandardInfoBar(self):
			common = ((_("Add recording (stop after current event)"), "event"),
				(_("Add recording (indefinitely)"), "indefinitely"),
				(_("Add recording (enter recording duration)"), "manualduration"),
				(_("Add recording (enter recording endtime)"), "manualendtime"),)
			timeshiftcommon = ((_("Timeshift save recording (stop after current event)"), "savetimeshift"),
				(_("Timeshift save recording (Select event)"), "savetimeshiftEvent"),)
		else:
			common = ()
			timeshiftcommon = ()
		if self.isInstantRecordRunning():
			title =_("A recording is currently running.\nWhat do you want to do?")
			list = ((_("Stop recording"), "stop"),) + common + \
				((_("Change recording (duration)"), "changeduration"),
				(_("Change recording (endtime)"), "changeendtime"),)
			if self.isTimerRecordRunning():
				list += ((_("Stop timer recording"), "timer"),)
		else:
			title=_("Start recording?")
			list = common
			if self.isTimerRecordRunning():
				list += ((_("Stop timer recording"), "timer"),)
		if isStandardInfoBar(self) and self.timeshiftEnabled():
			list = list + timeshiftcommon
		if isStandardInfoBar(self):
			list = list + ((_("Do not record"), "no"),)
		if list:
			self.session.openWithCallback(self.recordQuestionCallback, ChoiceBox,title=title,list=list)
		else:
			return 0
class InfoBarAudioSelection:
	# Audio key handling. The behaviour of the yellow key is configurable:
	# '0' = audio selection, '2' = video toggle, anything else = timeshift.
	def __init__(self):
		self["AudioSelectionAction"] = HelpableActionMap(self, "InfobarAudioSelectionActions",
			{
				"audioSelection": (self.audioSelection, _("Audio options...")),
				"audio_key": (self.audio_key, _("Audio options...")),
			})
	def audioSelection(self):
		if config.plugins.infopanel_yellowkey.list.getValue() == '0':
			from Screens.AudioSelection import AudioSelection
			self.session.openWithCallback(self.audioSelected, AudioSelection, infobar=self)
		elif config.plugins.infopanel_yellowkey.list.getValue() == '2':
			global AUDIO
			AUDIO = True
			ToggleVideo()
		else:
			# Fallback: yellow key starts timeshift; ignore screens
			# without timeshift support.
			try:
				self.startTimeshift()
			except:
				pass
	def audio_key(self):
		# Dedicated audio key always opens the audio selection dialog.
		from Screens.AudioSelection import AudioSelection
		self.session.openWithCallback(self.audioSelected, AudioSelection, infobar=self)
	def audioSelected(self, ret=None):
		print "[infobar::audioSelected]", ret
class InfoBarSubserviceSelection:
	"""Sub-service (e.g. regional/event variants) selection and quickzap,
	plus adding sub-services to bouquets/favourites."""
	def __init__(self):
		self["SubserviceSelectionAction"] = HelpableActionMap(self, "InfobarSubserviceSelectionActions",
			{
				"GreenPressed": (self.GreenPressed),
				"subserviceSelection": (self.subserviceSelection),
			})
		self["SubserviceQuickzapAction"] = HelpableActionMap(self, "InfobarSubserviceQuickzapActions",
			{
				"nextSubservice": (self.nextSubservice, _("Switch to next sub service")),
				"prevSubservice": (self.prevSubservice, _("Switch to previous sub service"))
			}, -1)
		# Quickzap keys stay disabled until a sub-service is selected.
		self["SubserviceQuickzapAction"].setEnabled(False)
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evUpdatedEventInfo: self.checkSubservicesAvail
			})
		self.onClose.append(self.__removeNotifications)
		self.bsel = None
	def GreenPressed(self):
		# Green key: timer list by default, sub-service handling when the
		# PLi "Subservice" setting is enabled.
		if not config.plisettings.Subservice.getValue():
			self.openTimerList()
		else:
			service = self.session.nav.getCurrentService()
			subservices = service and service.subServices()
			if not subservices or subservices.getNumberOfSubservices() == 0:
				# No native sub-services: try the CustomSubservices plugin.
				if fileExists("/usr/lib/enigma2/python/Plugins/Extensions/CustomSubservices/plugin.pyo"):
					serviceRef = self.session.nav.getCurrentlyPlayingServiceReference()
					subservices = self.getAvailableSubservices(serviceRef)
					if not subservices or len(subservices) == 0:
						self.openPluginBrowser()
					else:
						self.subserviceSelection()
				else:
					self.openPluginBrowser()
			else:
				self.subserviceSelection()
	def openPluginBrowser(self):
		try:
			from Screens.PluginBrowser import PluginBrowser
			self.session.open(PluginBrowser)
		except:
			pass
	def __removeNotifications(self):
		self.session.nav.event.remove(self.checkSubservicesAvail)
	def checkSubservicesAvail(self):
		# Disable quickzap when the new service has no sub-services.
		service = self.session.nav.getCurrentService()
		subservices = service and service.subServices()
		if not subservices or subservices.getNumberOfSubservices() == 0:
			self["SubserviceQuickzapAction"].setEnabled(False)
	def nextSubservice(self):
		self.changeSubservice(+1)
	def prevSubservice(self):
		self.changeSubservice(-1)
	def changeSubservice(self, direction):
		# Zap to the next/previous sub-service relative to the one playing,
		# wrapping around at both ends.
		service = self.session.nav.getCurrentService()
		subservices = service and service.subServices()
		n = subservices and subservices.getNumberOfSubservices()
		if n and n > 0:
			selection = -1
			ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			idx = 0
			while idx < n:
				if subservices.getSubservice(idx).toString() == ref.toString():
					selection = idx
					break
				idx += 1
			if selection != -1:
				selection += direction
				if selection >= n:
					selection=0
				elif selection < 0:
					selection=n-1
				newservice = subservices.getSubservice(selection)
				if newservice.valid():
					del subservices
					del service
					self.session.nav.playService(newservice, False)
	def subserviceSelection(self):
		# Build the ChoiceBox of sub-services, with quickzap and
		# add-to-bouquet entries prepended; 'selection' tracks the index of
		# the currently playing sub-service (shifted by the extra entries).
		service = self.session.nav.getCurrentService()
		subservices = service and service.subServices()
		self.bouquets = self.servicelist.getBouquetList()
		n = subservices and subservices.getNumberOfSubservices()
		selection = 0
		if n and n > 0:
			ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			tlist = []
			idx = 0
			while idx < n:
				i = subservices.getSubservice(idx)
				if i.toString() == ref.toString():
					selection = idx
				tlist.append((i.getName(), i))
				idx += 1
			if self.bouquets and len(self.bouquets):
				keys = ["red", "blue", "",  "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ] + [""] * n
				if config.usage.multibouquet.getValue():
					tlist = [(_("Quick zap"), "quickzap", service.subServices()), (_("Add to bouquet"), "CALLFUNC", self.addSubserviceToBouquetCallback), ("--", "")] + tlist
				else:
					tlist = [(_("Quick zap"), "quickzap", service.subServices()), (_("Add to favourites"), "CALLFUNC", self.addSubserviceToBouquetCallback), ("--", "")] + tlist
				selection += 3
			else:
				tlist = [(_("Quick zap"), "quickzap", service.subServices()), ("--", "")] + tlist
				keys = ["red", "",  "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ] + [""] * n
				selection += 2
			self.session.openWithCallback(self.subserviceSelected, ChoiceBox, title=_("Please select a sub service..."), list = tlist, selection = selection, keys = keys, skin_name = "SubserviceSelection")
	def subserviceSelected(self, service):
		# ChoiceBox callback: either start quickzap or play the selection.
		del self.bouquets
		if not service is None:
			if isinstance(service[1], str):
				if service[1] == "quickzap":
					from Screens.SubservicesQuickzap import SubservicesQuickzap
					self.session.open(SubservicesQuickzap, service[2])
			else:
				self["SubserviceQuickzapAction"].setEnabled(True)
				self.session.nav.playService(service[1], False)
	def addSubserviceToBouquetCallback(self, service):
		# CALLFUNC callback: remember the chosen sub-service, then ask for
		# the target bouquet when there is more than one.
		if len(service) > 1 and isinstance(service[1], eServiceReference):
			self.selectedSubservice = service
			if self.bouquets is None:
				cnt = 0
			else:
				cnt = len(self.bouquets)
			if cnt > 1: # show bouquet list
				self.bsel = self.session.openWithCallback(self.bouquetSelClosed, BouquetSelector, self.bouquets, self.addSubserviceToBouquet)
			elif cnt == 1: # add to only one existing bouquet
				self.addSubserviceToBouquet(self.bouquets[0][1])
				self.session.open(MessageBox, _("Service has been added to the favourites."), MessageBox.TYPE_INFO)
	def bouquetSelClosed(self, confirmed):
		self.bsel = None
		del self.selectedSubservice
		if confirmed:
			self.session.open(MessageBox, _("Service has been added to the selected bouquet."), MessageBox.TYPE_INFO)
	def addSubserviceToBouquet(self, dest):
		self.servicelist.addServiceToBouquet(dest, self.selectedSubservice[1])
		if self.bsel:
			# Close the bouquet selector; cleanup happens in bouquetSelClosed.
			self.bsel.close(True)
		else:
			del self.selectedSubservice
	def openTimerList(self):
		self.session.open(TimerEditList)
class InfoBarRedButton:
	"""Red button handling: runs the registered HbbTV activation callbacks
	when the current service advertises an HbbTV application."""
	def __init__(self):
		self["RedButtonActions"] = HelpableActionMap(self, "InfobarRedButtonActions",
			{
				"activateRedButton": (self.activateRedButton, _("Red button...")),
			})
		self.onHBBTVActivation = [ ]
		self.onRedButtonActivation = [ ]
	def activateRedButton(self):
		cur_service = self.session.nav.getCurrentService()
		cur_info = cur_service and cur_service.info()
		if cur_info and cur_info.getInfoString(iServiceInformation.sHBBTVUrl) != "":
			# Service signals an HbbTV autostart URL: notify listeners.
			for callback in self.onHBBTVActivation:
				callback()
		elif False: # TODO: other red button services
			for callback in self.onRedButtonActivation:
				callback()
class InfoBarTimerButton:
	"""Timer key handling: opens the timer edit list screen."""
	def __init__(self):
		self["TimerButtonActions"] = HelpableActionMap(self, "InfobarTimerButtonActions",
			{
				"timerSelection": (self.timerSelection, _("Timer selection...")),
			})
	def timerSelection(self):
		# Imported lazily to avoid a circular import at module load time.
		from Screens.TimerEdit import TimerEditList
		self.session.open(TimerEditList)
class InfoBarAspectSelection:
	# Green-key driven three-state toggle:
	# HIDDEN -> aspect chooser -> resolution chooser -> HIDDEN.
	STATE_HIDDEN = 0
	STATE_ASPECT = 1
	STATE_RESOLUTION = 2
	def __init__(self):
		self["AspectSelectionAction"] = HelpableActionMap(self, "InfobarAspectSelectionActions",
			{
				"aspectSelection": (self.ExGreen_toggleGreen, _("Aspect list...")),
			})
		self.__ExGreen_state = self.STATE_HIDDEN
	def ExGreen_doAspect(self):
		print "do self.STATE_ASPECT"
		self.__ExGreen_state = self.STATE_ASPECT
		self.aspectSelection()
	def ExGreen_doResolution(self):
		print "do self.STATE_RESOLUTION"
		self.__ExGreen_state = self.STATE_RESOLUTION
		self.resolutionSelection()
	def ExGreen_doHide(self):
		print "do self.STATE_HIDDEN"
		self.__ExGreen_state = self.STATE_HIDDEN
	def ExGreen_toggleGreen(self, arg=""):
		# Advance the state machine one step.
		print self.__ExGreen_state
		if self.__ExGreen_state == self.STATE_HIDDEN:
			print "self.STATE_HIDDEN"
			self.ExGreen_doAspect()
		elif self.__ExGreen_state == self.STATE_ASPECT:
			print "self.STATE_ASPECT"
			self.ExGreen_doResolution()
		elif self.__ExGreen_state == self.STATE_RESOLUTION:
			print "self.STATE_RESOLUTION"
			self.ExGreen_doHide()
	def aspectSelection(self):
		# Build a ChoiceBox from the policy modes the STB driver reports,
		# preselecting the currently active mode.
		selection = 0
		tlist = []
		tlist.append((_("Resolution"), "resolution"))
		tlist.append(("--", ""))
		try:
			policy = open("/proc/stb/video/policy_choices").read()[:-1]
		except IOError:
			print "couldn't read available policymodes."
			policy_available = [ ]
			return
		policy_available = policy.split(' ')
		for x in policy_available:
			tlist.append((x[0].upper() + x[1:], _(x)))
		mode = open("/proc/stb/video/policy").read()[:-1]
		print mode
		for x in range(len(tlist)):
			if tlist[x][1] == mode:
				selection = x
		keys = ["green", "", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ]
		self.session.openWithCallback(self.aspectSelected, ChoiceBox, title=_("Please select an aspect ratio..."), list = tlist, selection = selection, keys = keys)
	def aspectSelected(self, aspect):
		# ChoiceBox callback: write the chosen policy to the driver.
		if not aspect is None:
			if isinstance(aspect[1], str):
				if aspect[1] == "":
					self.ExGreen_doHide()
				elif aspect[1] == "resolution":
					self.ExGreen_toggleGreen()
				else:
					# NOTE(review): "letterbox" writes "panscan" and vice
					# versa — looks deliberate (toggle between the two?)
					# but worth confirming against the driver semantics.
					if aspect[1] == "letterbox":
						f = open("/proc/stb/video/policy", "w")
						f.write("panscan")
						f.close()
					elif aspect[1] == "panscan":
						f = open("/proc/stb/video/policy", "w")
						f.write("letterbox")
						f.close()
					else:
						f = open("/proc/stb/video/policy", "w")
						f.write(aspect[1])
						f.close()
					self.ExGreen_doHide()
		else:
			self.ExGreen_doHide()
		return
class InfoBarResolutionSelection:
	# Resolution chooser used by InfoBarAspectSelection's green-key cycle.
	def __init__(self):
		return
	def resolutionSelection(self):
		# Read the current decoder resolution/framerate from /proc (values
		# are hex-encoded) to show them in the chooser header.
		f = open("/proc/stb/vmpeg/0/xres", "r")
		xresString = f.read()
		f.close()
		f = open("/proc/stb/vmpeg/0/yres", "r")
		yresString = f.read()
		f.close()
		if getBoxType().startswith('azbox'):
			# azbox has no framerate proc entry; assume 50Hz.
			fpsString = '50000'
		else:
			try:
				f = open("/proc/stb/vmpeg/0/framerate", "r")
				fpsString = f.read()
				f.close()
			except:
				print"[InfoBarResolutionSelection] Error open /proc/stb/vmpeg/0/framerate !!"
				fpsString = '50000'
		xres = int(xresString, 16)
		yres = int(yresString, 16)
		fps = int(fpsString)
		fpsFloat = float(fps)
		fpsFloat = fpsFloat/1000
		selection = 0
		tlist = []
		tlist.append((_("Exit"), "exit"))
		tlist.append((_("Auto(not available)"), "auto"))
		tlist.append(("Video: " + str(xres) + "x" + str(yres) + "@" + str(fpsFloat) + "hz", ""))
		tlist.append(("--", ""))
		tlist.append(("576i", "576i50"))
		tlist.append(("576p", "576p50"))
		tlist.append(("720p@50hz", "720p50"))
		tlist.append(("720p@60hz", "720p60"))
		tlist.append(("1080i@50hz", "1080i50"))
		tlist.append(("1080i@60hz", "1080i60"))
		tlist.append(("1080p@23.976hz", "1080p23"))
		tlist.append(("1080p@24hz", "1080p24"))
		tlist.append(("1080p@25hz", "1080p25"))
		tlist.append(("1080p@29hz", "1080p29"))
		tlist.append(("1080p@30hz", "1080p30"))
		keys = ["green", "yellow", "blue", "", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ]
		# Preselect the currently configured video mode.
		mode = open("/proc/stb/video/videomode").read()[:-1]
		print mode
		for x in range(len(tlist)):
			if tlist[x][1] == mode:
				selection = x
		self.session.openWithCallback(self.ResolutionSelected, ChoiceBox, title=_("Please select a resolution..."), list = tlist, selection = selection, keys = keys)
	def ResolutionSelected(self, Resolution):
		# ChoiceBox callback: write the chosen mode to the driver.
		if not Resolution is None:
			if isinstance(Resolution[1], str):
				if Resolution[1] == "exit" or Resolution[1] == "" or Resolution[1] == "auto":
					self.ExGreen_toggleGreen()
				if Resolution[1] != "auto":
					f = open("/proc/stb/video/videomode", "w")
					f.write(Resolution[1])
					f.close()
					#from enigma import gMainDC
					#gMainDC.getInstance().setResolution(-1, -1)
					self.ExGreen_doHide()
		else:
			self.ExGreen_doHide()
		return
class InfoBarVmodeButton:
	"""Vmode key handling: opens the VideoMode (letterbox zoom) OSD."""
	def __init__(self):
		self["VmodeButtonActions"] = HelpableActionMap(self, "InfobarVmodeButtonActions",
			{
				"vmodeSelection": (self.vmodeSelection, _("Letterbox zoom")),
			})
	def vmodeSelection(self):
		self.session.open(VideoMode)
class VideoMode(Screen):
	"""Small OSD screen that cycles the AV policy (letterbox zoom) every
	time the vmode key is pressed, and closes itself after one second."""
	def __init__(self,session):
		Screen.__init__(self, session)
		self["videomode"] = Label()
		self["actions"] = NumberActionMap( [ "InfobarVmodeButtonActions" ],
			{
				"vmodeSelection": self.selectVMode
			})
		self.Timer = eTimer()
		self.Timer.callback.append(self.quit)
		# Cycle once immediately so opening the screen already changes mode.
		self.selectVMode()
	def selectVMode(self):
		# Advance to the next policy choice of the applicable set (4:3 vs
		# 16:9 depends on the running service's aspect), show it, and
		# restart the single-shot auto-close timer.
		policy = config.av.policy_43
		if self.isWideScreen():
			policy = config.av.policy_169
		idx = policy.choices.index(policy.value)
		idx = (idx + 1) % len(policy.choices)
		policy.value = policy.choices[idx]
		self["videomode"].setText(policy.value)
		self.Timer.start(1000, True)
	def isWideScreen(self):
		from Components.Converter.ServiceInfo import WIDESCREEN
		service = self.session.nav.getCurrentService()
		info = service and service.info()
		return info.getInfo(iServiceInformation.sAspect) in WIDESCREEN
	def quit(self):
		self.Timer.stop()
		self.close()
class InfoBarAdditionalInfo:
	"""Registers fixed Boolean sources that skins use to decide which
	infobar widgets (record/timeshift/extension hints) to display."""
	def __init__(self):
		hdd_present = harddiskmanager.HDDCount() > 0
		self["RecordingPossible"] = Boolean(fixed=hdd_present)
		self["TimeshiftPossible"] = self["RecordingPossible"]
		self["ExtensionsAvailable"] = Boolean(fixed=1)
		# TODO: these properties should be queried from the input device keymap
		self["ShowTimeshiftOnYellow"] = Boolean(fixed=0)
		self["ShowAudioOnYellow"] = Boolean(fixed=0)
		self["ShowRecordOnRed"] = Boolean(fixed=0)
class InfoBarNotifications:
	"""Pops queued Notifications while this screen is executing; ZapError
	notifications are shown as key-dismissable overlay dialogs."""
	def __init__(self):
		self.onExecBegin.append(self.checkNotifications)
		Notifications.notificationAdded.append(self.checkNotificationsIfExecing)
		self.onClose.append(self.__removeNotification)
	def __removeNotification(self):
		Notifications.notificationAdded.remove(self.checkNotificationsIfExecing)
	def checkNotificationsIfExecing(self):
		# Only pop notifications while this screen is actually on screen.
		if self.execing:
			self.checkNotifications()
	def checkNotifications(self):
		# Pop the oldest queued notification, which is a tuple of
		# (callback, screen class, args, kwargs, id).
		notifications = Notifications.notifications
		if notifications:
			n = notifications[0]
			del notifications[0]
			cb = n[0]
			# "in" instead of dict.has_key() (removed in Python 3; identical
			# semantics in Python 2).
			if "onSessionOpenCallback" in n[3]:
				n[3]["onSessionOpenCallback"]()
				del n[3]["onSessionOpenCallback"]
			if cb:
				dlg = self.session.openWithCallback(cb, n[1], *n[2], **n[3])
			elif not Notifications.current_notifications and n[4] == "ZapError":
				# Zap errors are shown as a passive overlay that any
				# keypress dismisses (see keypressNotification).
				if "timeout" in n[3]:
					del n[3]["timeout"]
				n[3]["enable_input"] = False
				dlg = self.session.instantiateDialog(n[1], *n[2], **n[3])
				self.hide()
				dlg.show()
				self.notificationDialog = dlg
				eActionMap.getInstance().bindAction('', -maxint - 1, self.keypressNotification)
			else:
				dlg = self.session.open(n[1], *n[2], **n[3])
			# remember that this notification is currently active
			d = (n[4], dlg)
			Notifications.current_notifications.append(d)
			dlg.onClose.append(boundFunction(self.__notificationClosed, d))
	def closeNotificationInstantiateDialog(self):
		if hasattr(self, "notificationDialog"):
			self.session.deleteDialog(self.notificationDialog)
			del self.notificationDialog
			eActionMap.getInstance().unbindAction('', self.keypressNotification)
	def keypressNotification(self, key, flag):
		# Any key event with a non-zero flag dismisses the overlay dialog.
		if flag:
			self.closeNotificationInstantiateDialog()
	def __notificationClosed(self, d):
		Notifications.current_notifications.remove(d)
class InfoBarServiceNotifications:
	"""Watches for service end events and resets the seek state so the
	next service starts in normal playback."""
	def __init__(self):
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evEnd: self.serviceHasEnded
			})
	def serviceHasEnded(self):
		# print "service end!"
		# Screens without seek support simply ignore the event.
		try:
			self.setSeekState(self.SEEK_STATE_PLAY)
		except:
			pass
class InfoBarCueSheetSupport:
CUT_TYPE_IN = 0
CUT_TYPE_OUT = 1
CUT_TYPE_MARK = 2
CUT_TYPE_LAST = 3
ENABLE_RESUME_SUPPORT = False
def __init__(self, actionmap = "InfobarCueSheetActions"):
self["CueSheetActions"] = HelpableActionMap(self, actionmap,
{
"jumpPreviousMark": (self.jumpPreviousMark, _("Jump to previous marked position")),
"jumpNextMark": (self.jumpNextMark, _("Jump to next marked position")),
"toggleMark": (self.toggleMark, _("Toggle a cut mark at the current position"))
}, prio=1)
self.cut_list = [ ]
self.is_closing = False
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evStart: self.__serviceStarted,
iPlayableService.evCuesheetChanged: self.downloadCuesheet,
})
def __serviceStarted(self):
if self.is_closing:
return
# print "new service started! trying to download cuts!"
self.downloadCuesheet()
self.resume_point = None
if self.ENABLE_RESUME_SUPPORT:
for (pts, what) in self.cut_list:
if what == self.CUT_TYPE_LAST:
last = pts
break
else:
last = getResumePoint(self.session)
if last is None:
return
# only resume if at least 10 seconds ahead, or <10 seconds before the end.
seekable = self.__getSeekable()
if seekable is None:
return # Should not happen?
length = seekable.getLength() or (None,0)
# print "seekable.getLength() returns:", length
# Hmm, this implies we don't resume if the length is unknown...
if (last > 900000) and (not length[1] or (last < length[1] - 900000)):
self.resume_point = last
l = last / 90000
if config.usage.on_movie_start.getValue() == "ask" or not length[1]:
Notifications.AddNotificationWithCallback(self.playLastCB, MessageBox, _("Do you want to resume this playback?") + "\n" + (_("Resume position at %s") % ("%d:%02d:%02d" % (l/3600, l%3600/60, l%60))), timeout=10)
elif config.usage.on_movie_start.getValue() == "resume":
Notifications.AddNotificationWithCallback(self.playLastCB, MessageBox, _("Resuming playback"), timeout=2, type=MessageBox.TYPE_INFO)
def playLastCB(self, answer):
if answer == True and self.resume_point:
self.doSeek(self.resume_point)
self.hideAfterResume()
def hideAfterResume(self):
if isinstance(self, InfoBarShowHide):
self.hide()
def __getSeekable(self):
service = self.session.nav.getCurrentService()
if service is None:
return None
return service.seek()
def cueGetCurrentPosition(self):
seek = self.__getSeekable()
if seek is None:
return None
r = seek.getPlayPosition()
if r[0]:
return None
return long(r[1])
def cueGetEndCutPosition(self):
ret = False
isin = True
for cp in self.cut_list:
if cp[1] == self.CUT_TYPE_OUT:
if isin:
isin = False
ret = cp[0]
elif cp[1] == self.CUT_TYPE_IN:
isin = True
return ret
def jumpPreviousNextMark(self, cmp, start=False):
current_pos = self.cueGetCurrentPosition()
if current_pos is None:
return False
mark = self.getNearestCutPoint(current_pos, cmp=cmp, start=start)
if mark is not None:
pts = mark[0]
else:
return False
self.doSeek(pts)
return True
def jumpPreviousMark(self):
# we add 5 seconds, so if the play position is <5s after
# the mark, the mark before will be used
self.jumpPreviousNextMark(lambda x: -x-5*90000, start=True)
def jumpNextMark(self):
if not self.jumpPreviousNextMark(lambda x: x-90000):
self.doSeek(-1)
def getNearestCutPoint(self, pts, cmp=abs, start=False):
# can be optimized
beforecut = True
nearest = None
bestdiff = -1
instate = True
if start:
bestdiff = cmp(0 - pts)
if bestdiff >= 0:
nearest = [0, False]
for cp in self.cut_list:
if beforecut and cp[1] in (self.CUT_TYPE_IN, self.CUT_TYPE_OUT):
beforecut = False
if cp[1] == self.CUT_TYPE_IN: # Start is here, disregard previous marks
diff = cmp(cp[0] - pts)
if start and diff >= 0:
nearest = cp
bestdiff = diff
else:
nearest = None
bestdiff = -1
if cp[1] == self.CUT_TYPE_IN:
instate = True
elif cp[1] == self.CUT_TYPE_OUT:
instate = False
elif cp[1] in (self.CUT_TYPE_MARK, self.CUT_TYPE_LAST):
diff = cmp(cp[0] - pts)
if instate and diff >= 0 and (nearest is None or bestdiff > diff):
nearest = cp
bestdiff = diff
return nearest
	def toggleMark(self, onlyremove=False, onlyadd=False, tolerance=5*90000, onlyreturn=False):
		"""Toggle a mark at the current play position.

		If an existing cut point lies within `tolerance` PTS ticks (default 5 s)
		it is removed (unless onlyadd); otherwise a new mark is added (unless
		onlyremove). With onlyreturn=True nothing is modified and the nearby
		cut point (or None) is returned instead.
		"""
		current_pos = self.cueGetCurrentPosition()
		if current_pos is None:
			# print "not seekable"
			return
		nearest_cutpoint = self.getNearestCutPoint(current_pos)
		if nearest_cutpoint is not None and abs(nearest_cutpoint[0] - current_pos) < tolerance:
			if onlyreturn:
				return nearest_cutpoint
			if not onlyadd:
				self.removeMark(nearest_cutpoint)
		elif not onlyremove and not onlyreturn:
			self.addMark((current_pos, self.CUT_TYPE_MARK))
		if onlyreturn:
			return None
	def addMark(self, point):
		"""Insert a (pts, type) cut point keeping the cut list sorted, then push it to the service."""
		insort(self.cut_list, point)
		self.uploadCuesheet()
		self.showAfterCuesheetOperation()
	def removeMark(self, point):
		"""Remove an existing cut point and push the updated cut list to the service."""
		self.cut_list.remove(point)
		self.uploadCuesheet()
		self.showAfterCuesheetOperation()
	def showAfterCuesheetOperation(self):
		"""Briefly show the infobar as feedback after a cut-list change, if possible."""
		if isinstance(self, InfoBarShowHide):
			self.doShow()
	def __getCuesheet(self):
		"""Return the cue-sheet interface of the current service, or None if no service is active."""
		service = self.session.nav.getCurrentService()
		if service is None:
			return None
		return service.cueSheet()
	def uploadCuesheet(self):
		"""Write self.cut_list to the running service; silently no-op when the service has no cue-sheet interface."""
		cue = self.__getCuesheet()
		if cue is None:
			# print "upload failed, no cuesheet interface"
			return
		cue.setCutList(self.cut_list)
	def downloadCuesheet(self):
		"""Replace self.cut_list with the service's cut list (empty when the service has no cue-sheet interface)."""
		cue = self.__getCuesheet()
		if cue is None:
			# print "download failed, no cuesheet interface"
			self.cut_list = [ ]
		else:
			self.cut_list = cue.getCutList()
class InfoBarSummary(Screen):
	"""Front-display (LCD) summary for the TV infobar: clock (blinking while
	recording), current service name and progress bar of the running event."""
	skin = """
	<screen position="0,0" size="132,64">
		<widget source="global.CurrentTime" render="Label" position="62,46" size="82,18" font="Regular;16" >
			<convert type="ClockToText">WithSeconds</convert>
		</widget>
		<widget source="session.RecordState" render="FixedLabel" text=" " position="62,46" size="82,18" zPosition="1" >
			<convert type="ConfigEntryTest">config.usage.blinking_display_clock_during_recording,True,CheckSourceBoolean</convert>
			<convert type="ConditionalShowHide">Blink</convert>
		</widget>
		<widget source="session.CurrentService" render="Label" position="6,4" size="120,42" font="Regular;18" >
			<convert type="ServiceName">Name</convert>
		</widget>
		<widget source="session.Event_Now" render="Progress" position="6,46" size="46,18" borderWidth="1" >
			<convert type="EventTime">Progress</convert>
		</widget>
	</screen>"""
# for picon: (path="piconlcd" will use LCD picons)
# <widget source="session.CurrentService" render="Picon" position="6,0" size="120,64" path="piconlcd" >
# <convert type="ServiceName">Reference</convert>
# </widget>
class InfoBarSummarySupport:
	"""Mixin telling the Screen framework which summary screen to show on the front display."""
	def __init__(self):
		pass
	def createSummary(self):
		"""Return the summary screen class used for the LCD."""
		return InfoBarSummary
class InfoBarMoviePlayerSummary(Screen):
	"""Front-display summary for the movie player: clock, service name,
	playback position bar plus textual state/speed and a status icon that
	mirror the parent screen via its onChangedEntry callbacks."""
	skin = """
	<screen position="0,0" size="132,64">
		<widget source="global.CurrentTime" render="Label" position="62,46" size="64,18" font="Regular;16" halign="right" >
			<convert type="ClockToText">WithSeconds</convert>
		</widget>
		<widget source="session.RecordState" render="FixedLabel" text=" " position="62,46" size="64,18" zPosition="1" >
			<convert type="ConfigEntryTest">config.usage.blinking_display_clock_during_recording,True,CheckSourceBoolean</convert>
			<convert type="ConditionalShowHide">Blink</convert>
		</widget>
		<widget source="session.CurrentService" render="Label" position="6,4" size="120,42" font="Regular;18" >
			<convert type="ServiceName">Name</convert>
		</widget>
		<widget source="session.CurrentService" render="Progress" position="6,46" size="56,18" borderWidth="1" >
			<convert type="ServicePosition">Position</convert>
		</widget>
	</screen>"""
	def __init__(self, session, parent):
		Screen.__init__(self, session, parent = parent)
		self["state_summary"] = StaticText("")
		self["speed_summary"] = StaticText("")
		self["statusicon_summary"] = MultiPixmap()
		# only observe the parent while actually visible on the display
		self.onShow.append(self.addWatcher)
		self.onHide.append(self.removeWatcher)
	def addWatcher(self):
		"""Start mirroring the parent's state while the summary is shown."""
		self.parent.onChangedEntry.append(self.selectionChanged)
	def removeWatcher(self):
		"""Stop mirroring when the summary is hidden."""
		self.parent.onChangedEntry.remove(self.selectionChanged)
	def selectionChanged(self, state_summary, speed_summary, statusicon_summary):
		"""Push the parent's playback state, speed text and status icon index to the display."""
		self["state_summary"].setText(state_summary)
		self["speed_summary"].setText(speed_summary)
		self["statusicon_summary"].setPixmapNum(int(statusicon_summary))
class InfoBarMoviePlayerSummarySupport:
	"""Mixin selecting the movie-player summary screen for the front display."""
	def __init__(self):
		pass
	def createSummary(self):
		"""Return the summary screen class used for the LCD."""
		return InfoBarMoviePlayerSummary
class InfoBarTeletextPlugin:
	"""Mixin wiring the (last registered) teletext plugin to the teletext key.
	When no plugin registered for WHERE_TELETEXT, no action map is installed."""
	def __init__(self):
		self.teletext_plugin = None
		# if several plugins are registered, the last one wins
		for p in plugins.getPlugins(PluginDescriptor.WHERE_TELETEXT):
			self.teletext_plugin = p
		if self.teletext_plugin is not None:
			self["TeletextActions"] = HelpableActionMap(self, "InfobarTeletextActions",
				{
					"startTeletext": (self.startTeletext, _("View teletext..."))
				})
		else:
			print "no teletext plugin found!"
	def startTeletext(self):
		"""Invoke the teletext plugin for the currently playing service."""
		self.teletext_plugin(session=self.session, service=self.session.nav.getCurrentService())
class InfoBarSubtitleSupport(object):
	"""Mixin adding subtitle selection/display to an infobar screen.

	Tracks the selected subtitle, shows/hides the shared subtitle window and
	re-applies cached subtitles when service info updates arrive.
	"""
	def __init__(self):
		object.__init__(self)
		self["SubtitleSelectionAction"] = HelpableActionMap(self, "InfobarSubtitleSelectionActions",
			{
				"subtitleSelection": (self.subtitleSelection, _("Subtitle selection...")),
			})
		self.selected_subtitle = None
		if isStandardInfoBar(self):
			self.subtitle_window = self.session.instantiateDialog(SubtitleDisplay)
		else:
			# secondary infobars (e.g. movie player) share the main InfoBar's window
			from Screens.InfoBar import InfoBar
			self.subtitle_window = InfoBar.instance.subtitle_window
		self.subtitle_window.hide()
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evStart: self.__serviceChanged,
				iPlayableService.evEnd: self.__serviceChanged,
				iPlayableService.evUpdatedInfo: self.__updatedInfo
			})
	def getCurrentServiceSubtitle(self):
		"""Return the subtitle interface of the current service, or a falsy value when unavailable."""
		service = self.session.nav.getCurrentService()
		return service and service.subtitle()
	def subtitleSelection(self):
		"""Open the subtitle selection screen if subtitles exist or one is already selected."""
		service = self.session.nav.getCurrentService()
		subtitle = service and service.subtitle()
		subtitlelist = subtitle and subtitle.getSubtitleList()
		if self.selected_subtitle or subtitlelist and len(subtitlelist)>0:
			from Screens.AudioSelection import SubtitleSelection
			self.session.open(SubtitleSelection, self)
		else:
			return 0
	def __serviceChanged(self):
		"""Drop the subtitle selection when the service starts or ends."""
		if self.selected_subtitle:
			self.selected_subtitle = None
			self.subtitle_window.hide()
	def __updatedInfo(self):
		"""Re-enable a cached subtitle (if any) once the service publishes its info."""
		if not self.selected_subtitle:
			subtitle = self.getCurrentServiceSubtitle()
			# guard: the service may be gone or expose no subtitle interface,
			# in which case 'subtitle' is falsy and must not be dereferenced
			cachedsubtitle = subtitle and subtitle.getCachedSubtitle()
			if cachedsubtitle:
				self.enableSubtitle(cachedsubtitle)
	def enableSubtitle(self, selectedSubtitle):
		"""Select and show `selectedSubtitle`; passing a falsy value disables subtitles."""
		subtitle = self.getCurrentServiceSubtitle()
		self.selected_subtitle = selectedSubtitle
		if subtitle and self.selected_subtitle:
			subtitle.enableSubtitles(self.subtitle_window.instance, self.selected_subtitle)
			self.subtitle_window.show()
		else:
			if subtitle:
				subtitle.disableSubtitles(self.subtitle_window.instance)
			self.subtitle_window.hide()
	def restartSubtitle(self):
		"""Re-apply the currently selected subtitle (e.g. after a seek)."""
		if self.selected_subtitle:
			self.enableSubtitle(self.selected_subtitle)
class InfoBarServiceErrorPopupSupport:
	"""Mixin that pops up a message box when tuning the current service fails,
	and removes it again once a service starts or tunes successfully."""
	def __init__(self):
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evTuneFailed: self.__tuneFailed,
				iPlayableService.evTunedIn: self.__serviceStarted,
				iPlayableService.evStart: self.__serviceStarted
			})
		self.__serviceStarted()
	def __serviceStarted(self):
		"""Clear any pending error state and dismiss an open zap-error popup."""
		self.closeNotificationInstantiateDialog()
		self.last_error = None
		Notifications.RemovePopup(id = "ZapError")
	def __tuneFailed(self):
		"""Translate the service's DVB error state into a user popup (deduplicated per error)."""
		if not config.usage.hide_zap_errors.getValue():
			service = self.session.nav.getCurrentService()
			info = service and service.info()
			error = info and info.getInfo(iServiceInformation.sDVBState)
			if error == self.last_error:
				# same error as last time: do not re-announce it
				error = None
			else:
				self.last_error = error
			error = {
				eDVBServicePMTHandler.eventNoResources: _("No free tuner!"),
				eDVBServicePMTHandler.eventTuneFailed: _("Tune failed!"),
				eDVBServicePMTHandler.eventNoPAT: _("No data on transponder!\n(Timeout reading PAT)"),
				eDVBServicePMTHandler.eventNoPATEntry: _("Service not found!\n(SID not found in PAT)"),
				eDVBServicePMTHandler.eventNoPMT: _("Service invalid!\n(Timeout reading PMT)"),
				eDVBServicePMTHandler.eventNewProgramInfo: None,
				eDVBServicePMTHandler.eventTuned: None,
				eDVBServicePMTHandler.eventSOF: None,
				eDVBServicePMTHandler.eventEOF: None,
				eDVBServicePMTHandler.eventMisconfiguration: _("Service unavailable!\nCheck tuner configuration!"),
			}.get(error) #this returns None when the key not exist in the dict
			if error:
				self.closeNotificationInstantiateDialog()
				if hasattr(self, "dishDialog") and not self.dishDialog.dishState():
					Notifications.AddPopup(text = error, type = MessageBox.TYPE_ERROR, timeout = 5, id = "ZapError")
class InfoBarZoom:
	"""Mixin controlling picture zoom through /proc/stb/vmpeg/0/zoomrate.

	Zoom steps oscillate between an upper (>3) and lower (<-9) bound;
	negative rates are written to the driver encoded as abs(rate) + 10.
	"""
	def __init__(self):
		self.zoomrate = 0
		self.zoomin = 1
		self["ZoomActions"] = HelpableActionMap(self, "InfobarZoomActions",
			{
				"ZoomInOut":(self.ZoomInOut, _("Zoom In/Out TV...")),
				"ZoomOff":(self.ZoomOff, _("Zoom Off...")),
			}, prio=2)
	def ZoomInOut(self):
		"""Advance the zoom rate one step (reversing at the limits) and push it to the driver."""
		if self.zoomrate > 3:
			self.zoomin = 0
		elif self.zoomrate < -9:
			self.zoomin = 1
		if self.zoomin == 1:
			self.zoomrate += 1
		else:
			self.zoomrate -= 1
		if self.zoomrate < 0:
			# negative rates are encoded as abs(rate) + 10 for the driver
			zoomval = abs(self.zoomrate) + 10
		else:
			zoomval = self.zoomrate
		# print "zoomRate:", self.zoomrate
		# print "zoomval:", zoomval
		# context manager guarantees the proc handle is closed even on a
		# write error; also avoids shadowing the builtin 'file'
		with open("/proc/stb/vmpeg/0/zoomrate", "w") as f:
			f.write('%d' % int(zoomval))
	def ZoomOff(self):
		"""Reset the zoom to normal (rate 0, direction 'in')."""
		self.zoomrate = 0
		self.zoomin = 1
		with open("/proc/stb/vmpeg/0/zoomrate", "w") as f:
			f.write(str(0))
|
Mariusz1970/enigma2
|
lib/python/Screens/InfoBarGenerics.py
|
Python
|
gpl-2.0
| 140,459 | 0.031426 |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from collections import namedtuple
from typing import List, Union
import numpy as np
from astropy.io import fits
TimeChunk = namedtuple('TimeChunk', 'start_time num_of_samples')
def split_time_range(time_length: float,
                     num_of_chunks: int,
                     sampfreq: float,
                     time0=0.0) -> List[TimeChunk]:
    '''Split a time interval in a number of chunks.
    Return a list of objects of kind :class:`stripeline.timetools.TimeChunk`.
    '''
    delta_time = time_length / num_of_chunks
    chunks = []
    for idx in range(num_of_chunks):
        # Offset of this chunk from the beginning of the interval
        cur_time = idx * delta_time
        # Nudge the chunk start forward so it coincides with a sample time
        shift = np.ceil(cur_time * sampfreq) / sampfreq - cur_time
        chunks.append(TimeChunk(start_time=time0 + cur_time + shift,
                                num_of_samples=int(delta_time * sampfreq)))
    return chunks
# Map from detector name to its index in FitsTableLayout.signal_cols.
DET_NAMES = {'Q1': 0,
             'Q2': 1,
             'U1': 2,
             'U2': 3
             }
class ToiProvider:
    '''Load a TOI and split it evenly among MPI processes.
    .. note:: This is an abstract base class, and it should not be instantiated.
              Consider using any of its derived classes, like
              :class:`stripeline.timetools.FitsToiProvider`.
    In the case of a run split among many MPI processes, this class balances the
    load of a long TOI. If every MPI process creates a
    :class:`stripeline.timetools.ToiProvider` object, every object will take
    responsibility of reading one section of the TOI. The methods
    :func:`stripeline.timetools.ToiProvider.get_signal`,
    :func:`stripeline.timetools.ToiProvider.get_pointings`, and
    :func:`stripeline.timetools.ToiProvider.get_pixel_index` can be used by
    processes to read the chunk of data which belongs to each.
    '''
    def __init__(self, rank: int, num_of_processes: int):
        '''Create a new object.
        Parameters:
        * "rank" is the rank of the running MPI process
        * "num_of_processes" is the number of MPI processes
        '''
        self.rank = rank
        self.num_of_processes = num_of_processes
        self.total_num_of_samples = 0
    def get_time(self):
        '''Return a vector containing the time of each sample in the TOI.
        Only the part of the TOI that belongs to the rank of this process
        is returned.'''
        return None
    def get_signal(self, det_idx: Union[int, str]):
        # Base-class stub: the parameter is intentionally unused.
        del det_idx
        return None
    def get_pixel_index(self, nside: int, nest=False, lonlat=False):
        '''Return a vector containing the pixel index for each sample in the
        TOI.
        Only the part of the TOI that belongs to the rank of this process is
        returned.'''
        # get_pointings() returns exactly two vectors (colatitude, longitude);
        # the previous 3-value unpacking raised ValueError for every subclass.
        theta, phi = self.get_pointings()
        # NOTE(review): 'healpy' is used here but never imported in this
        # module, so calling this method raises NameError as-is. An
        # 'import healpy' is missing at module level -- confirm and add it
        # (healpy is a third-party dependency, deliberately not added here).
        return healpy.ang2pix(nside, theta, phi, nest=nest, lonlat=lonlat)
    def get_pointings(self):
        '''Return two vectors containing the colatitude and longitude for each
        sample in the TOI.
        Only the part of the TOI that belongs to the rank of this process is
        returned.'''
        return None, None
    def get_polarization_angle(self):
        '''Return a vector containing the polarization angle for each sample
        in the TOI.
        Only the part of the TOI that belongs to the rank of this process is
        returned.'''
        return None
# A FITS file participating in the TOI, together with its row count.
ToiFile = namedtuple('ToiFile', ['file_name', 'num_of_samples'])
def read_fits_file_information(file_name: str, hdu=1) -> ToiFile:
    '''Read the number of rows in the first tabular HDU of a FITS file
    Return a :class:`stripeline.timetools.ToiFile` object.
    '''
    with fits.open(file_name) as fin:
        # NAXIS2 holds the row count of a table HDU
        row_count = fin[hdu].header['NAXIS2']
    return ToiFile(file_name=file_name, num_of_samples=row_count)
def split_into_n(length: int, num_of_segments: int) -> List[int]:
    '''Split a set of `length` elements into `num_of_segments` subsets.
    Return an array with the size of each subset; the sizes differ by at
    most one element.
    Example::
        >>> split_into_n(10, 4)
        array([2, 3, 2, 3])
        >>> split_into_n(201, 2)
        array([100, 101])
    '''
    assert num_of_segments > 0
    # '>=' (was '>') so that the valid degenerate case
    # length == num_of_segments (one element per subset) is accepted.
    assert length >= num_of_segments
    # Cumulative boundaries of the segments; consecutive differences give
    # the individual segment lengths.
    start_points = np.array([int(i * length / num_of_segments)
                             for i in range(num_of_segments + 1)])
    return start_points[1:] - start_points[:-1]
def assign_toi_files_to_processes(samples_per_processes: List[int],
                                  tod_files: List[ToiFile]):
    '''Determine how to balance the load of TOI files among processes.
    Given a list of samples to be processed by each MPI process, decide which
    TOD and samples must be loaded by each process, using the principle that
    all the processes should read the same number of TODs, when possible.
    Return a list (one entry per process) of lists of
    :class:`stripeline.timetools.ToiFileSegment` objects.
    '''
    # The per-process sample counts must exactly cover all samples in the files
    assert (sum(samples_per_processes) ==
            sum([x.num_of_samples for x in tod_files]))
    result = []  # Type: List[List[ToiFile]]
    # Cursor into tod_files / current file, shared across processes so that
    # consecutive processes continue where the previous one stopped
    file_idx = 0
    element_idx = 0
    # Iterate over the MPI processes
    for samples_in_this_proc in samples_per_processes:
        # This is the list of FITS segments that the current MPI process is
        # going to load
        segments = []  # Type: List[ToiFileSegment]
        elements_in_this_segment = 0
        # Iterate over the files to be read by the current MPI process
        while elements_in_this_segment < samples_in_this_proc:
            if elements_in_this_segment + (tod_files[file_idx].num_of_samples - element_idx) <= samples_in_this_proc:
                # The whole FITS file is going to be read by the current MPI
                # process
                num = tod_files[file_idx].num_of_samples - element_idx
                segments.append(ToiFileSegment(file_name=tod_files[file_idx].file_name,
                                               first_element=element_idx,
                                               num_of_elements=num))
                elements_in_this_segment += num
                file_idx += 1
                element_idx = 0
            else:
                # This is the size of the segment we're going to append to "segments"
                num = samples_in_this_proc - elements_in_this_segment
                # Only a subset of this FITS file will be read by the current MPI process
                segments.append(ToiFileSegment(file_name=tod_files[file_idx].file_name,
                                               first_element=element_idx,
                                               num_of_elements=num))
                elements_in_this_segment += num
                element_idx += num
        result.append(segments)
    return result
# A contiguous run of samples inside one FITS file.
ToiFileSegment = namedtuple(
    'ToiFileSegment', ['file_name', 'first_element', 'num_of_elements'])
# Location of one column inside a FITS file: HDU (number or name) + column.
FitsColumn = namedtuple(
    'FitsColumn', ['hdu', 'column']
)
# Where to find the time, pointing and per-detector signal columns
# (each entry is a FitsColumn; signal_cols is a sequence of FitsColumn).
FitsTableLayout = namedtuple(
    'FitsTableLayout', ['time_col', 'theta_col',
                        'phi_col', 'psi_col', 'signal_cols']
)
def _load_array_from_fits(segments: List[ToiFileSegment], cols_to_read: List[FitsColumn]):
    '''Read a set of columns from a list of FITS files.
    The chunks to read from each FITS file are specified in the parameter `segments`,
    while the columns to read are in `cols_to_read`. The function returns a tuple
    containing all the data from the columns (each in a NumPy array) in the same
    order as in `cols_to_read`.'''
    # One accumulator per requested column; segments are appended in order
    arrays = [np.array([], dtype=np.float64) for i in range(len(cols_to_read))]
    for cur_segment in segments:
        start = cur_segment.first_element
        end = cur_segment.first_element + cur_segment.num_of_elements
        with fits.open(cur_segment.file_name) as f:
            # TODO: maybe this is not the most efficient way to load
            # chunks of data from a FITS column
            cur_chunk_arr = [f[x.hdu].data.field(x.column)[start:end]
                             for x in cols_to_read]
            for col_idx in range(len(cols_to_read)):
                arrays[col_idx] = np.concatenate(
                    [arrays[col_idx], cur_chunk_arr[col_idx]])
    return tuple(arrays)
class FitsToiProvider(ToiProvider):
    '''Distribute a TOI saved in FITS files among MPI processes.
    This class specializes :class:`stripeline.timetools.ToiProvider` in order to
    load the TOI from a set of FITS files.'''
    def __init__(self,
                 rank: int,
                 num_of_processes: int,
                 file_names: List[str],
                 file_layout: FitsTableLayout,
                 comm=None):
        ToiProvider.__init__(self, rank, num_of_processes)
        self.file_layout = file_layout
        self.fits_files = []  # Type: List[ToiFile]
        # Only rank 0 touches the filesystem; the result is broadcast so
        # every process sees the same file list and sample counts.
        if rank == 0 or comm is None:
            for cur_file in file_names:
                self.fits_files.append(read_fits_file_information(cur_file))
        if comm:
            self.fits_files = comm.bcast(self.fits_files, root=0)
        self.total_num_of_samples = sum(
            [x.num_of_samples for x in self.fits_files])
        self.samples_per_process = split_into_n(self.total_num_of_samples,
                                                num_of_processes)
        self.segments_per_process = assign_toi_files_to_processes(self.samples_per_process,
                                                                  self.fits_files)
    def get_time(self):
        '''Return a vector containing the time of each sample in the TOI.
        Only the part of the TOI that belongs to the rank of this process
        is returned.'''
        result = _load_array_from_fits(segments=self.segments_per_process[self.rank],
                                       cols_to_read=[self.file_layout.time_col])
        return result[0]
    def get_signal(self, det_idx: Union[int, str]):
        '''Return a vector containing the signal from the TOI.
        Parameters:
        * det_idx is either the number of the detector or its name, with the following
          associations:
            - 0: ``Q1``
            - 1: ``Q2``
            - 2: ``U1``
            - 3: ``U2``
        Only the part of the TOI that belongs to the rank of this process is
        returned.'''
        if type(det_idx) is str:
            # Translate a detector name into its signal_cols index
            det_idx = DET_NAMES[det_idx]
        result = _load_array_from_fits(segments=self.segments_per_process[self.rank],
                                       cols_to_read=[self.file_layout.signal_cols[det_idx]])
        return result[0]
    def get_pointings(self):
        '''Return two vectors containing the colatitude and longitude for each
        sample in the TOI.
        Only the part of the TOI that belongs to the rank of this process is
        returned.'''
        theta, phi = _load_array_from_fits(segments=self.segments_per_process[self.rank],
                                           cols_to_read=[self.file_layout.theta_col,
                                                         self.file_layout.phi_col])
        return theta, phi
    def get_polarization_angle(self):
        '''Return a vector containing the polarization angle for each
        sample in the TOI.
        Only the part of the TOI that belongs to the rank of this process is
        returned.'''
        psi = _load_array_from_fits(segments=self.segments_per_process[self.rank],
                                    cols_to_read=[self.file_layout.psi_col])
        return psi[0]
|
ziotom78/stripeline
|
stripeline/timetools.py
|
Python
|
mit
| 11,654 | 0.001974 |
class C:
''' <caret>
'''
|
asedunov/intellij-community
|
python/testData/editing/spaceDocStringStubInClass.after.py
|
Python
|
apache-2.0
| 28 | 0.071429 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationListResult(Model):
    """The operation list response that contains all operations for Azure
    Container Instance service.
    :param value: The list of operations.
    :type value: list of :class:`Operation
     <azure.mgmt.containerinstance.models.Operation>`
    """
    # Maps Python attribute names to their wire (JSON) keys and types for
    # msrest (de)serialization.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
    }
    def __init__(self, value=None):
        """Create the result wrapper; `value` defaults to None (no operations)."""
        self.value = value
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/operation_list_result.py
|
Python
|
mit
| 955 | 0 |
import autograd.numpy as np
from autograd.scipy.misc import logsumexp
from features import num_atom_features, num_bond_features
from util import memoize, WeightsParser
from mol_graph import graph_from_smiles_tuple, degrees
from build_vanilla_net import build_fingerprint_deep_net, relu, batch_normalize
def fast_array_from_list(xs):
    """Stack a list of equal-shaped arrays along a new leading axis."""
    expanded = [np.expand_dims(item, axis=0) for item in xs]
    return np.concatenate(expanded, axis=0)
def sum_and_stack(features, idxs_list_of_lists):
    """For each index list, sum the selected feature rows; stack the sums
    into one array with a new leading axis (one row per index list)."""
    per_group = [np.expand_dims(np.sum(features[idx_list], axis=0), axis=0)
                 for idx_list in idxs_list_of_lists]
    return np.concatenate(per_group, axis=0)
def softmax(X, axis=0):
    """Numerically stable softmax of X along `axis` (rows sum to 1 on that axis)."""
    log_normalizer = logsumexp(X, axis=axis, keepdims=True)
    return np.exp(X - log_normalizer)
def matmult_neighbors(array_rep, atom_features, bond_features, get_weights):
    """For every atom, sum its neighbours' concatenated (atom, bond) features
    and multiply by the degree-specific filter returned by get_weights(degree).
    Returns one row per atom, concatenated in degree order (atoms must be
    sorted by degree -- see the note at the end)."""
    activations_by_degree = []
    for degree in degrees:
        # index arrays: for each atom of this degree, its neighbour atoms/bonds
        atom_neighbors_list = array_rep[('atom_neighbors', degree)]
        bond_neighbors_list = array_rep[('bond_neighbors', degree)]
        if len(atom_neighbors_list) > 0:
            neighbor_features = [atom_features[atom_neighbors_list],
                                 bond_features[bond_neighbors_list]]
            # dims of stacked_neighbors are [atoms, neighbors, atom and bond features]
            stacked_neighbors = np.concatenate(neighbor_features, axis=2)
            summed_neighbors = np.sum(stacked_neighbors, axis=1)
            activations = np.dot(summed_neighbors, get_weights(degree))
            activations_by_degree.append(activations)
    # This operation relies on atoms being sorted by degree,
    # in Node.graph_from_smiles_tuple()
    return np.concatenate(activations_by_degree, axis=0)
def weights_name(layer, degree):
    """Parser key for the neighbour-filter weight matrix of (layer, degree)."""
    return "layer %s degree %s filter" % (layer, degree)
def build_convnet_fingerprint_fun(num_hidden_features=[100, 100], fp_length=512,
                                  normalize=True, activation_function=relu,
                                  return_atom_activations=False):
    """Sets up functions to compute convnets over all molecules in a minibatch together.

    Returns (fingerprint_fun, weights_parser) or, with
    return_atom_activations=True, additionally a function that exposes the
    per-atom softmax activations for visualisation.
    NOTE(review): num_hidden_features is a mutable default argument -- safe
    only because it is never mutated here; confirm callers do not mutate it.
    """
    # Specify weight shapes.
    parser = WeightsParser()
    all_layer_sizes = [num_atom_features()] + num_hidden_features
    for layer in range(len(all_layer_sizes)):
        # every layer (including the input) writes into the fingerprint
        parser.add_weights(('layer output weights', layer), (all_layer_sizes[layer], fp_length))
        parser.add_weights(('layer output bias', layer), (1, fp_length))
    in_and_out_sizes = zip(all_layer_sizes[:-1], all_layer_sizes[1:])
    for layer, (N_prev, N_cur) in enumerate(in_and_out_sizes):
        parser.add_weights(("layer", layer, "biases"), (1, N_cur))
        parser.add_weights(("layer", layer, "self filter"), (N_prev, N_cur))
        for degree in degrees:
            parser.add_weights(weights_name(layer, degree), (N_prev + num_bond_features(), N_cur))
    def update_layer(weights, layer, atom_features, bond_features, array_rep, normalize=False):
        """One graph-convolution step: self filter + degree-specific neighbour filters."""
        def get_weights_func(degree):
            return parser.get(weights, weights_name(layer, degree))
        layer_bias = parser.get(weights, ("layer", layer, "biases"))
        layer_self_weights = parser.get(weights, ("layer", layer, "self filter"))
        self_activations = np.dot(atom_features, layer_self_weights)
        neighbour_activations = matmult_neighbors(
            array_rep, atom_features, bond_features, get_weights_func)
        total_activations = neighbour_activations + self_activations + layer_bias
        if normalize:
            total_activations = batch_normalize(total_activations)
        return activation_function(total_activations)
    def output_layer_fun_and_atom_activations(weights, smiles):
        """Computes layer-wise convolution, and returns a fixed-size output."""
        array_rep = array_rep_from_smiles(tuple(smiles))
        atom_features = array_rep['atom_features']
        bond_features = array_rep['bond_features']
        all_layer_fps = []
        atom_activations = []
        def write_to_fingerprint(atom_features, layer):
            # project every atom's features into fingerprint space (softmax),
            # then pool per molecule and accumulate across layers
            cur_out_weights = parser.get(weights, ('layer output weights', layer))
            cur_out_bias = parser.get(weights, ('layer output bias', layer))
            atom_outputs = softmax(cur_out_bias + np.dot(atom_features, cur_out_weights), axis=1)
            atom_activations.append(atom_outputs)
            # Sum over all atoms within a moleclue:
            layer_output = sum_and_stack(atom_outputs, array_rep['atom_list'])
            all_layer_fps.append(layer_output)
        num_layers = len(num_hidden_features)
        for layer in xrange(num_layers):  # xrange: this module targets Python 2
            write_to_fingerprint(atom_features, layer)
            atom_features = update_layer(weights, layer, atom_features, bond_features, array_rep,
                                         normalize=normalize)
        write_to_fingerprint(atom_features, num_layers)
        return sum(all_layer_fps), atom_activations, array_rep
    def output_layer_fun(weights, smiles):
        """Return only the molecule fingerprints."""
        output, _, _ = output_layer_fun_and_atom_activations(weights, smiles)
        return output
    def compute_atom_activations(weights, smiles):
        """Return per-atom activations and the array representation (for plots)."""
        _, atom_activations, array_rep = output_layer_fun_and_atom_activations(weights, smiles)
        return atom_activations, array_rep
    if return_atom_activations:
        return output_layer_fun, parser, compute_atom_activations
    else:
        return output_layer_fun, parser
@memoize
def array_rep_from_smiles(smiles):
    """Precompute everything we need from MolGraph so that we can free the memory asap."""
    # memoized on the (hashable) tuple of SMILES strings, so repeated
    # minibatches reuse the parsed graph representation
    molgraph = graph_from_smiles_tuple(smiles)
    arrayrep = {'atom_features' : molgraph.feature_array('atom'),
                'bond_features' : molgraph.feature_array('bond'),
                'atom_list'     : molgraph.neighbor_list('molecule', 'atom'), # List of lists.
                'rdkit_ix'      : molgraph.rdkit_ix_array()}  # For plotting only.
    for degree in degrees:
        # per-degree neighbour index arrays used by matmult_neighbors
        arrayrep[('atom_neighbors', degree)] = \
            np.array(molgraph.neighbor_list(('atom', degree), 'atom'), dtype=int)
        arrayrep[('bond_neighbors', degree)] = \
            np.array(molgraph.neighbor_list(('atom', degree), 'bond'), dtype=int)
    return arrayrep
def build_conv_deep_net(conv_params, net_params, fp_l2_penalty=0.0):
    """Returns loss_fun(all_weights, smiles, targets), pred_fun, combined_parser.

    conv_params are forwarded to build_convnet_fingerprint_fun; net_params and
    fp_l2_penalty configure the dense network stacked on the fingerprints."""
    conv_fp_func, conv_parser = build_convnet_fingerprint_fun(**conv_params)
    return build_fingerprint_deep_net(net_params, conv_fp_func, conv_parser, fp_l2_penalty)
|
HIPS/neural-fingerprint
|
neuralfingerprint/build_convnet.py
|
Python
|
mit
| 6,508 | 0.005532 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: adds DamageScenario.customlandusegeoimage, a nullable
    FK to GeoImage. The `models` dict below is South's frozen ORM snapshot
    (auto-generated; do not edit by hand)."""
    def forwards(self, orm):
        # Adding field 'DamageScenario.customlandusegeoimage'
        db.add_column('lizard_damage_damagescenario', 'customlandusegeoimage', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_damage.GeoImage'], null=True, blank=True), keep_default=False)
    def backwards(self, orm):
        # Deleting field 'DamageScenario.customlandusegeoimage'
        db.delete_column('lizard_damage_damagescenario', 'customlandusegeoimage_id')
    # Frozen model definitions as of this migration (South introspection output).
    models = {
        'lizard_damage.benefitscenario': {
            'Meta': {'object_name': 'BenefitScenario'},
            'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '128'}),
            'expiration_date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'zip_result': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'zip_risk_a': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'zip_risk_b': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
        },
        'lizard_damage.benefitscenarioresult': {
            'Meta': {'object_name': 'BenefitScenarioResult'},
            'benefit_scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.BenefitScenario']"}),
            'east': ('django.db.models.fields.FloatField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'north': ('django.db.models.fields.FloatField', [], {}),
            'south': ('django.db.models.fields.FloatField', [], {}),
            'west': ('django.db.models.fields.FloatField', [], {})
        },
        'lizard_damage.damageevent': {
            'Meta': {'object_name': 'DamageEvent'},
            'depth_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'floodmonth': ('django.db.models.fields.IntegerField', [], {'default': '9'}),
            'floodtime': ('django.db.models.fields.FloatField', [], {}),
            'height_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'landuse_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'max_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'min_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'repairtime_buildings': ('django.db.models.fields.FloatField', [], {'default': '432000'}),
            'repairtime_roads': ('django.db.models.fields.FloatField', [], {'default': '432000'}),
            'repetition_time': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'result': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageScenario']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'table': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'lizard_damage.damageeventresult': {
            'Meta': {'object_name': 'DamageEventResult'},
            'damage_event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageEvent']"}),
            'east': ('django.db.models.fields.FloatField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'north': ('django.db.models.fields.FloatField', [], {}),
            'south': ('django.db.models.fields.FloatField', [], {}),
            'west': ('django.db.models.fields.FloatField', [], {})
        },
        'lizard_damage.damageeventwaterlevel': {
            'Meta': {'ordering': "(u'index',)", 'object_name': 'DamageEventWaterlevel'},
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageEvent']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
            'waterlevel': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
        },
        'lizard_damage.damagescenario': {
            'Meta': {'object_name': 'DamageScenario'},
            'calc_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'customheights': ('django.db.models.fields.FilePathField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'customlanduse': ('django.db.models.fields.FilePathField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'customlandusegeoimage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.GeoImage']", 'null': 'True', 'blank': 'True'}),
            'damagetable': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '128'}),
            'expiration_date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'scenario_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'lizard_damage.geoimage': {
            'Meta': {'object_name': 'GeoImage'},
            'east': ('django.db.models.fields.FloatField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'north': ('django.db.models.fields.FloatField', [], {}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'south': ('django.db.models.fields.FloatField', [], {}),
            'west': ('django.db.models.fields.FloatField', [], {})
        },
        'lizard_damage.riskresult': {
            'Meta': {'object_name': 'RiskResult'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageScenario']"}),
            'zip_risk': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
        },
        'lizard_damage.roads': {
            'Meta': {'object_name': 'Roads', 'db_table': "u'data_roads'"},
            'gid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'gridcode': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'the_geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '28992', 'null': 'True', 'blank': 'True'}),
            'typeinfr_1': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
            'typeweg': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'})
        },
        'lizard_damage.unit': {
            'Meta': {'object_name': 'Unit'},
            'factor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['lizard_damage']
|
lizardsystem/lizard-damage
|
lizard_damage/migrations/0012_auto__add_field_damagescenario_customlandusegeoimage.py
|
Python
|
gpl-3.0
| 9,306 | 0.007629 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tully.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
evonove/urt-tully
|
django-tully/manage.py
|
Python
|
mit
| 803 | 0 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Wed May 25 13:43:28 2016
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 API v1: QString exists, so use Qt's own UTF-8 decoder.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt4 API v2 (no QString): Python str is already unicode, pass through.
    def _fromUtf8(s):
        return s
try:
    # Older PyQt4: QApplication.translate() requires an explicit encoding.
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer API: the encoding argument was removed from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
    """pyuic4-generated layout for the purchase-order main window.

    NOTE(review): this file is generated from mainwindow.ui ("All changes
    made in this file will be lost!") -- edit the .ui file, not this class.
    """
    def setupUi(self, MainWindow):
        """Create and position every widget, menu and action of MainWindow."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(800, 752)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/podbicon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        # Status line rendered as a sunken label along the bottom edge.
        self.statusBarLabel = QtGui.QLabel(self.centralwidget)
        self.statusBarLabel.setGeometry(QtCore.QRect(0, 690, 801, 20))
        self.statusBarLabel.setFrameShape(QtGui.QFrame.StyledPanel)
        self.statusBarLabel.setFrameShadow(QtGui.QFrame.Sunken)
        self.statusBarLabel.setText(_fromUtf8(""))
        self.statusBarLabel.setObjectName(_fromUtf8("statusBarLabel"))
        # Toolbar frame across the top: new/open/save/clear on the left,
        # export/print on the right.
        self.frame = QtGui.QFrame(self.centralwidget)
        self.frame.setGeometry(QtCore.QRect(0, 0, 801, 31))
        self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtGui.QFrame.Raised)
        self.frame.setObjectName(_fromUtf8("frame"))
        self.clearToolButton = QtGui.QToolButton(self.frame)
        self.clearToolButton.setGeometry(QtCore.QRect(90, 0, 32, 32))
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/clear.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.clearToolButton.setIcon(icon1)
        self.clearToolButton.setIconSize(QtCore.QSize(32, 32))
        self.clearToolButton.setObjectName(_fromUtf8("clearToolButton"))
        self.saveToolButton = QtGui.QToolButton(self.frame)
        self.saveToolButton.setGeometry(QtCore.QRect(60, 0, 32, 32))
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/save.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.saveToolButton.setIcon(icon2)
        self.saveToolButton.setIconSize(QtCore.QSize(32, 32))
        self.saveToolButton.setObjectName(_fromUtf8("saveToolButton"))
        self.openToolButton = QtGui.QToolButton(self.frame)
        self.openToolButton.setGeometry(QtCore.QRect(30, 0, 32, 32))
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/open.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.openToolButton.setIcon(icon3)
        self.openToolButton.setIconSize(QtCore.QSize(32, 32))
        self.openToolButton.setObjectName(_fromUtf8("openToolButton"))
        self.newToolButton = QtGui.QToolButton(self.frame)
        self.newToolButton.setGeometry(QtCore.QRect(0, 0, 32, 32))
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/new.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.newToolButton.setIcon(icon4)
        self.newToolButton.setIconSize(QtCore.QSize(32, 32))
        self.newToolButton.setObjectName(_fromUtf8("newToolButton"))
        self.printToolButton = QtGui.QToolButton(self.frame)
        self.printToolButton.setGeometry(QtCore.QRect(770, 0, 32, 32))
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/print.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.printToolButton.setIcon(icon5)
        self.printToolButton.setIconSize(QtCore.QSize(32, 32))
        self.printToolButton.setObjectName(_fromUtf8("printToolButton"))
        self.exportToolButton = QtGui.QToolButton(self.frame)
        self.exportToolButton.setGeometry(QtCore.QRect(740, 0, 32, 32))
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap(_fromUtf8(":/exportpdf.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.exportToolButton.setIcon(icon6)
        self.exportToolButton.setIconSize(QtCore.QSize(32, 32))
        self.exportToolButton.setObjectName(_fromUtf8("exportToolButton"))
        # "Order Details" group: number, date, payment terms, project,
        # status and tax rate arranged in a 2x6 grid.
        self.orderDetailsGroupBox = QtGui.QGroupBox(self.centralwidget)
        self.orderDetailsGroupBox.setGeometry(QtCore.QRect(0, 40, 801, 71))
        self.orderDetailsGroupBox.setObjectName(_fromUtf8("orderDetailsGroupBox"))
        self.layoutWidget = QtGui.QWidget(self.orderDetailsGroupBox)
        self.layoutWidget.setGeometry(QtCore.QRect(10, 20, 781, 48))
        self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
        self.gridLayout = QtGui.QGridLayout(self.layoutWidget)
        self.gridLayout.setMargin(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.label_2 = QtGui.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setBold(True)
        font.setWeight(75)
        self.label_2.setFont(font)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
        self.orderNumberLabel = QtGui.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setBold(True)
        font.setWeight(75)
        self.orderNumberLabel.setFont(font)
        self.orderNumberLabel.setText(_fromUtf8(""))
        self.orderNumberLabel.setObjectName(_fromUtf8("orderNumberLabel"))
        self.gridLayout.addWidget(self.orderNumberLabel, 0, 1, 1, 1)
        self.label_3 = QtGui.QLabel(self.layoutWidget)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout.addWidget(self.label_3, 0, 2, 1, 1)
        self.orderDateEdit = QtGui.QDateEdit(self.layoutWidget)
        self.orderDateEdit.setObjectName(_fromUtf8("orderDateEdit"))
        self.gridLayout.addWidget(self.orderDateEdit, 0, 3, 1, 1)
        self.label_5 = QtGui.QLabel(self.layoutWidget)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.gridLayout.addWidget(self.label_5, 0, 4, 1, 1)
        self.paymentTermsComboBox = QtGui.QComboBox(self.layoutWidget)
        self.paymentTermsComboBox.setObjectName(_fromUtf8("paymentTermsComboBox"))
        self.gridLayout.addWidget(self.paymentTermsComboBox, 0, 5, 1, 1)
        self.label_18 = QtGui.QLabel(self.layoutWidget)
        self.label_18.setObjectName(_fromUtf8("label_18"))
        self.gridLayout.addWidget(self.label_18, 1, 0, 1, 1)
        self.projectComboBox = QtGui.QComboBox(self.layoutWidget)
        self.projectComboBox.setObjectName(_fromUtf8("projectComboBox"))
        self.gridLayout.addWidget(self.projectComboBox, 1, 1, 1, 1)
        self.label_4 = QtGui.QLabel(self.layoutWidget)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout.addWidget(self.label_4, 1, 2, 1, 1)
        self.orderStatusComboBox = QtGui.QComboBox(self.layoutWidget)
        self.orderStatusComboBox.setObjectName(_fromUtf8("orderStatusComboBox"))
        self.gridLayout.addWidget(self.orderStatusComboBox, 1, 3, 1, 1)
        self.taxRateLabel = QtGui.QLabel(self.layoutWidget)
        self.taxRateLabel.setObjectName(_fromUtf8("taxRateLabel"))
        self.gridLayout.addWidget(self.taxRateLabel, 1, 4, 1, 1)
        self.taxRateValueLabel = QtGui.QLabel(self.layoutWidget)
        self.taxRateValueLabel.setText(_fromUtf8(""))
        self.taxRateValueLabel.setObjectName(_fromUtf8("taxRateValueLabel"))
        self.gridLayout.addWidget(self.taxRateValueLabel, 1, 5, 1, 1)
        # "Supplier" group: selector combo on the left, read-only contact
        # details (address/phone/fax/contact/email) in a grid on the right.
        self.supplierGroupBox = QtGui.QGroupBox(self.centralwidget)
        self.supplierGroupBox.setGeometry(QtCore.QRect(0, 120, 801, 80))
        self.supplierGroupBox.setObjectName(_fromUtf8("supplierGroupBox"))
        self.layoutWidget1 = QtGui.QWidget(self.supplierGroupBox)
        self.layoutWidget1.setGeometry(QtCore.QRect(280, 12, 512, 62))
        self.layoutWidget1.setObjectName(_fromUtf8("layoutWidget1"))
        self.gridLayout_2 = QtGui.QGridLayout(self.layoutWidget1)
        self.gridLayout_2.setMargin(0)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.label_11 = QtGui.QLabel(self.layoutWidget1)
        self.label_11.setObjectName(_fromUtf8("label_11"))
        self.gridLayout_2.addWidget(self.label_11, 0, 0, 1, 1)
        self.label_8 = QtGui.QLabel(self.layoutWidget1)
        self.label_8.setObjectName(_fromUtf8("label_8"))
        self.gridLayout_2.addWidget(self.label_8, 0, 2, 1, 1)
        self.supplierPhoneLabel = QtGui.QLabel(self.layoutWidget1)
        self.supplierPhoneLabel.setText(_fromUtf8(""))
        self.supplierPhoneLabel.setObjectName(_fromUtf8("supplierPhoneLabel"))
        self.gridLayout_2.addWidget(self.supplierPhoneLabel, 0, 3, 1, 1)
        self.label_9 = QtGui.QLabel(self.layoutWidget1)
        self.label_9.setObjectName(_fromUtf8("label_9"))
        self.gridLayout_2.addWidget(self.label_9, 1, 2, 1, 1)
        self.supplierFaxLabel = QtGui.QLabel(self.layoutWidget1)
        self.supplierFaxLabel.setText(_fromUtf8(""))
        self.supplierFaxLabel.setObjectName(_fromUtf8("supplierFaxLabel"))
        self.gridLayout_2.addWidget(self.supplierFaxLabel, 1, 3, 1, 1)
        self.label_7 = QtGui.QLabel(self.layoutWidget1)
        self.label_7.setObjectName(_fromUtf8("label_7"))
        self.gridLayout_2.addWidget(self.label_7, 2, 0, 1, 1)
        self.supplierContactLabel = QtGui.QLabel(self.layoutWidget1)
        self.supplierContactLabel.setText(_fromUtf8(""))
        self.supplierContactLabel.setObjectName(_fromUtf8("supplierContactLabel"))
        self.gridLayout_2.addWidget(self.supplierContactLabel, 2, 1, 1, 1)
        self.label_10 = QtGui.QLabel(self.layoutWidget1)
        self.label_10.setObjectName(_fromUtf8("label_10"))
        self.gridLayout_2.addWidget(self.label_10, 2, 2, 1, 1)
        self.supplierEmailLabel = QtGui.QLabel(self.layoutWidget1)
        self.supplierEmailLabel.setText(_fromUtf8(""))
        self.supplierEmailLabel.setObjectName(_fromUtf8("supplierEmailLabel"))
        self.gridLayout_2.addWidget(self.supplierEmailLabel, 2, 3, 1, 1)
        self.supplierAddressLabel = QtGui.QLabel(self.layoutWidget1)
        self.supplierAddressLabel.setText(_fromUtf8(""))
        self.supplierAddressLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
        self.supplierAddressLabel.setWordWrap(True)
        self.supplierAddressLabel.setObjectName(_fromUtf8("supplierAddressLabel"))
        self.gridLayout_2.addWidget(self.supplierAddressLabel, 0, 1, 2, 1)
        self.gridLayout_2.setColumnMinimumWidth(0, 80)
        self.gridLayout_2.setColumnMinimumWidth(1, 166)
        self.gridLayout_2.setColumnMinimumWidth(2, 80)
        self.gridLayout_2.setColumnMinimumWidth(3, 166)
        self.gridLayout_2.setRowMinimumHeight(0, 16)
        self.gridLayout_2.setRowMinimumHeight(1, 16)
        self.gridLayout_2.setRowMinimumHeight(2, 16)
        self.supplierComboBox = QtGui.QComboBox(self.supplierGroupBox)
        self.supplierComboBox.setGeometry(QtCore.QRect(11, 18, 256, 20))
        self.supplierComboBox.setObjectName(_fromUtf8("supplierComboBox"))
        # "Products" group: line-item table plus the excl-tax/tax/total summary.
        self.productsGroupBox = QtGui.QGroupBox(self.centralwidget)
        self.productsGroupBox.setGeometry(QtCore.QRect(0, 210, 801, 331))
        self.productsGroupBox.setObjectName(_fromUtf8("productsGroupBox"))
        self.productsTableView = QtGui.QTableView(self.productsGroupBox)
        self.productsTableView.setGeometry(QtCore.QRect(10, 20, 781, 241))
        self.productsTableView.setObjectName(_fromUtf8("productsTableView"))
        self.layoutWidget2 = QtGui.QWidget(self.productsGroupBox)
        self.layoutWidget2.setGeometry(QtCore.QRect(590, 270, 201, 53))
        self.layoutWidget2.setObjectName(_fromUtf8("layoutWidget2"))
        self.gridLayout_3 = QtGui.QGridLayout(self.layoutWidget2)
        self.gridLayout_3.setMargin(0)
        self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
        self.totalExcludingTaxLabel = QtGui.QLabel(self.layoutWidget2)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.totalExcludingTaxLabel.setFont(font)
        self.totalExcludingTaxLabel.setObjectName(_fromUtf8("totalExcludingTaxLabel"))
        self.gridLayout_3.addWidget(self.totalExcludingTaxLabel, 0, 0, 1, 1)
        self.totalExcludingTaxResultLabel = QtGui.QLabel(self.layoutWidget2)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.totalExcludingTaxResultLabel.setFont(font)
        self.totalExcludingTaxResultLabel.setText(_fromUtf8(""))
        self.totalExcludingTaxResultLabel.setObjectName(_fromUtf8("totalExcludingTaxResultLabel"))
        self.gridLayout_3.addWidget(self.totalExcludingTaxResultLabel, 0, 1, 1, 1)
        self.totalTaxLabel = QtGui.QLabel(self.layoutWidget2)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.totalTaxLabel.setFont(font)
        self.totalTaxLabel.setObjectName(_fromUtf8("totalTaxLabel"))
        self.gridLayout_3.addWidget(self.totalTaxLabel, 1, 0, 1, 1)
        self.totalTaxResultLabel = QtGui.QLabel(self.layoutWidget2)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.totalTaxResultLabel.setFont(font)
        self.totalTaxResultLabel.setText(_fromUtf8(""))
        self.totalTaxResultLabel.setObjectName(_fromUtf8("totalTaxResultLabel"))
        self.gridLayout_3.addWidget(self.totalTaxResultLabel, 1, 1, 1, 1)
        self.totalLabel = QtGui.QLabel(self.layoutWidget2)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.totalLabel.setFont(font)
        self.totalLabel.setObjectName(_fromUtf8("totalLabel"))
        self.gridLayout_3.addWidget(self.totalLabel, 2, 0, 1, 1)
        self.totalResultLabel = QtGui.QLabel(self.layoutWidget2)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.totalResultLabel.setFont(font)
        self.totalResultLabel.setText(_fromUtf8(""))
        self.totalResultLabel.setObjectName(_fromUtf8("totalResultLabel"))
        self.gridLayout_3.addWidget(self.totalResultLabel, 2, 1, 1, 1)
        # "Delivery" group: date, address, GPS coordinates and free-form notes.
        self.deliveryGroupBox = QtGui.QGroupBox(self.centralwidget)
        self.deliveryGroupBox.setGeometry(QtCore.QRect(0, 550, 801, 131))
        self.deliveryGroupBox.setObjectName(_fromUtf8("deliveryGroupBox"))
        self.layoutWidget3 = QtGui.QWidget(self.deliveryGroupBox)
        self.layoutWidget3.setGeometry(QtCore.QRect(10, 20, 781, 99))
        self.layoutWidget3.setObjectName(_fromUtf8("layoutWidget3"))
        self.gridLayout_4 = QtGui.QGridLayout(self.layoutWidget3)
        self.gridLayout_4.setMargin(0)
        self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
        self.label_16 = QtGui.QLabel(self.layoutWidget3)
        self.label_16.setObjectName(_fromUtf8("label_16"))
        self.gridLayout_4.addWidget(self.label_16, 0, 3, 1, 1)
        self.label_14 = QtGui.QLabel(self.layoutWidget3)
        self.label_14.setObjectName(_fromUtf8("label_14"))
        self.gridLayout_4.addWidget(self.label_14, 0, 1, 1, 1)
        self.gpsCoordinatesLineEdit = QtGui.QLineEdit(self.layoutWidget3)
        self.gpsCoordinatesLineEdit.setObjectName(_fromUtf8("gpsCoordinatesLineEdit"))
        self.gridLayout_4.addWidget(self.gpsCoordinatesLineEdit, 3, 2, 1, 1)
        self.notesPlainTextEdit = QtGui.QPlainTextEdit(self.layoutWidget3)
        self.notesPlainTextEdit.setPlainText(_fromUtf8(""))
        self.notesPlainTextEdit.setObjectName(_fromUtf8("notesPlainTextEdit"))
        self.gridLayout_4.addWidget(self.notesPlainTextEdit, 0, 4, 4, 1)
        self.deliveryAddressPlainTextEdit = QtGui.QPlainTextEdit(self.layoutWidget3)
        self.deliveryAddressPlainTextEdit.setObjectName(_fromUtf8("deliveryAddressPlainTextEdit"))
        self.gridLayout_4.addWidget(self.deliveryAddressPlainTextEdit, 0, 2, 3, 1)
        self.label_17 = QtGui.QLabel(self.layoutWidget3)
        self.label_17.setObjectName(_fromUtf8("label_17"))
        self.gridLayout_4.addWidget(self.label_17, 3, 1, 1, 1)
        self.label_15 = QtGui.QLabel(self.layoutWidget3)
        self.label_15.setObjectName(_fromUtf8("label_15"))
        self.gridLayout_4.addWidget(self.label_15, 0, 0, 1, 1)
        self.deliveryDateEdit = QtGui.QDateEdit(self.layoutWidget3)
        self.deliveryDateEdit.setObjectName(_fromUtf8("deliveryDateEdit"))
        self.gridLayout_4.addWidget(self.deliveryDateEdit, 1, 0, 1, 1)
        self.gridLayout_4.setColumnMinimumWidth(0, 125)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar, menus and status bar.
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menuFile = QtGui.QMenu(self.menubar)
        self.menuFile.setObjectName(_fromUtf8("menuFile"))
        self.menuView = QtGui.QMenu(self.menubar)
        self.menuView.setObjectName(_fromUtf8("menuView"))
        self.menuHelp = QtGui.QMenu(self.menubar)
        self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
        self.menuEdit = QtGui.QMenu(self.menubar)
        self.menuEdit.setObjectName(_fromUtf8("menuEdit"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        # Actions that are wired into the menus below.
        self.actionNewPurchaseOrder = QtGui.QAction(MainWindow)
        self.actionNewPurchaseOrder.setObjectName(_fromUtf8("actionNewPurchaseOrder"))
        self.actionView_Purchase_Order = QtGui.QAction(MainWindow)
        self.actionView_Purchase_Order.setObjectName(_fromUtf8("actionView_Purchase_Order"))
        self.actionExit = QtGui.QAction(MainWindow)
        self.actionExit.setObjectName(_fromUtf8("actionExit"))
        self.actionExit_2 = QtGui.QAction(MainWindow)
        self.actionExit_2.setObjectName(_fromUtf8("actionExit_2"))
        self.actionPurchase_Order = QtGui.QAction(MainWindow)
        self.actionPurchase_Order.setObjectName(_fromUtf8("actionPurchase_Order"))
        self.actionViewReports = QtGui.QAction(MainWindow)
        self.actionViewReports.setObjectName(_fromUtf8("actionViewReports"))
        self.actionAbout = QtGui.QAction(MainWindow)
        self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
        self.actionOpenPurchaseOrder = QtGui.QAction(MainWindow)
        self.actionOpenPurchaseOrder.setObjectName(_fromUtf8("actionOpenPurchaseOrder"))
        self.actionCopyPurchaseOrder = QtGui.QAction(MainWindow)
        self.actionCopyPurchaseOrder.setObjectName(_fromUtf8("actionCopyPurchaseOrder"))
        self.actionClearPurchaseOrder = QtGui.QAction(MainWindow)
        self.actionClearPurchaseOrder.setObjectName(_fromUtf8("actionClearPurchaseOrder"))
        self.actionPrintPurchaseOrder = QtGui.QAction(MainWindow)
        self.actionPrintPurchaseOrder.setObjectName(_fromUtf8("actionPrintPurchaseOrder"))
        self.actionEditProjects = QtGui.QAction(MainWindow)
        self.actionEditProjects.setObjectName(_fromUtf8("actionEditProjects"))
        self.actionEditSuppliers = QtGui.QAction(MainWindow)
        self.actionEditSuppliers.setObjectName(_fromUtf8("actionEditSuppliers"))
        self.actionEditProducts = QtGui.QAction(MainWindow)
        self.actionEditProducts.setObjectName(_fromUtf8("actionEditProducts"))
        self.actionSavePurchaseOrder = QtGui.QAction(MainWindow)
        self.actionSavePurchaseOrder.setObjectName(_fromUtf8("actionSavePurchaseOrder"))
        self.actionExportPurchaseOrder = QtGui.QAction(MainWindow)
        self.actionExportPurchaseOrder.setObjectName(_fromUtf8("actionExportPurchaseOrder"))
        self.actionEditConfiguration = QtGui.QAction(MainWindow)
        self.actionEditConfiguration.setObjectName(_fromUtf8("actionEditConfiguration"))
        self.menuFile.addAction(self.actionNewPurchaseOrder)
        self.menuFile.addAction(self.actionOpenPurchaseOrder)
        self.menuFile.addAction(self.actionSavePurchaseOrder)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionExportPurchaseOrder)
        self.menuFile.addAction(self.actionPrintPurchaseOrder)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionExit_2)
        self.menuView.addAction(self.actionViewReports)
        self.menuHelp.addAction(self.actionAbout)
        self.menuEdit.addAction(self.actionClearPurchaseOrder)
        self.menuEdit.addSeparator()
        self.menuEdit.addAction(self.actionEditProjects)
        self.menuEdit.addAction(self.actionEditSuppliers)
        self.menuEdit.addAction(self.actionEditProducts)
        self.menuEdit.addSeparator()
        self.menuEdit.addAction(self.actionEditConfiguration)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuEdit.menuAction())
        self.menubar.addAction(self.menuView.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        # Buddy links so a label's keyboard accelerator focuses its widget.
        self.label_3.setBuddy(self.orderDateEdit)
        self.label_5.setBuddy(self.paymentTermsComboBox)
        self.label_18.setBuddy(self.orderStatusComboBox)
        self.label_4.setBuddy(self.orderStatusComboBox)
        self.label_16.setBuddy(self.notesPlainTextEdit)
        self.label_14.setBuddy(self.deliveryAddressPlainTextEdit)
        self.label_17.setBuddy(self.gpsCoordinatesLineEdit)
        self.label_15.setBuddy(self.deliveryDateEdit)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Explicit keyboard tab order through the form.
        MainWindow.setTabOrder(self.newToolButton, self.projectComboBox)
        MainWindow.setTabOrder(self.projectComboBox, self.orderDateEdit)
        MainWindow.setTabOrder(self.orderDateEdit, self.orderStatusComboBox)
        MainWindow.setTabOrder(self.orderStatusComboBox, self.paymentTermsComboBox)
        MainWindow.setTabOrder(self.paymentTermsComboBox, self.supplierComboBox)
        MainWindow.setTabOrder(self.supplierComboBox, self.productsTableView)
        MainWindow.setTabOrder(self.productsTableView, self.deliveryDateEdit)
        MainWindow.setTabOrder(self.deliveryDateEdit, self.deliveryAddressPlainTextEdit)
        MainWindow.setTabOrder(self.deliveryAddressPlainTextEdit, self.gpsCoordinatesLineEdit)
        MainWindow.setTabOrder(self.gpsCoordinatesLineEdit, self.notesPlainTextEdit)
        MainWindow.setTabOrder(self.notesPlainTextEdit, self.saveToolButton)
        MainWindow.setTabOrder(self.saveToolButton, self.printToolButton)
        MainWindow.setTabOrder(self.printToolButton, self.openToolButton)
        MainWindow.setTabOrder(self.openToolButton, self.clearToolButton)
    def retranslateUi(self, MainWindow):
        """Apply all user-visible strings (run at setup and on locale change)."""
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
        self.clearToolButton.setToolTip(_translate("MainWindow", "Clear data", None))
        self.clearToolButton.setText(_translate("MainWindow", "...", None))
        self.saveToolButton.setToolTip(_translate("MainWindow", "Save purchase order", None))
        self.saveToolButton.setText(_translate("MainWindow", "...", None))
        self.openToolButton.setToolTip(_translate("MainWindow", "Open an existing purchase order", None))
        self.openToolButton.setText(_translate("MainWindow", "...", None))
        self.newToolButton.setToolTip(_translate("MainWindow", "Create a new purchase order", None))
        self.newToolButton.setText(_translate("MainWindow", "...", None))
        self.printToolButton.setToolTip(_translate("MainWindow", "Print purchase order", None))
        self.printToolButton.setText(_translate("MainWindow", "...", None))
        self.exportToolButton.setToolTip(_translate("MainWindow", "Export purchase order to PDF file", None))
        self.exportToolButton.setText(_translate("MainWindow", "...", None))
        self.orderDetailsGroupBox.setTitle(_translate("MainWindow", "Order Details", None))
        self.label_2.setText(_translate("MainWindow", "Order Number:", None))
        self.label_3.setText(_translate("MainWindow", "Order Date:", None))
        self.label_5.setText(_translate("MainWindow", "Payment Terms:", None))
        self.label_18.setText(_translate("MainWindow", "Project:", None))
        self.label_4.setText(_translate("MainWindow", "Order Status:", None))
        self.taxRateLabel.setText(_translate("MainWindow", "Tax Rate:", None))
        self.supplierGroupBox.setTitle(_translate("MainWindow", "Supplier", None))
        self.label_11.setText(_translate("MainWindow", "Address:", None))
        self.label_8.setText(_translate("MainWindow", "Phone Number:", None))
        self.label_9.setText(_translate("MainWindow", "Fax Number:", None))
        self.label_7.setText(_translate("MainWindow", "Contact Person:", None))
        self.label_10.setText(_translate("MainWindow", "Email:", None))
        self.productsGroupBox.setTitle(_translate("MainWindow", "Products", None))
        self.totalExcludingTaxLabel.setText(_translate("MainWindow", "Total Excluding Tax:", None))
        self.totalTaxLabel.setText(_translate("MainWindow", "Total Tax:", None))
        self.totalLabel.setText(_translate("MainWindow", "Total:", None))
        self.deliveryGroupBox.setTitle(_translate("MainWindow", "Delivery", None))
        self.label_16.setText(_translate("MainWindow", "Notes:", None))
        self.label_14.setText(_translate("MainWindow", "Delivery Address:", None))
        self.label_17.setText(_translate("MainWindow", "GPS Coordinates:", None))
        self.label_15.setText(_translate("MainWindow", "Delivery Date:", None))
        self.menuFile.setTitle(_translate("MainWindow", "&File", None))
        self.menuView.setTitle(_translate("MainWindow", "&View", None))
        self.menuHelp.setTitle(_translate("MainWindow", "&Help", None))
        self.menuEdit.setTitle(_translate("MainWindow", "&Edit", None))
        self.actionNewPurchaseOrder.setText(_translate("MainWindow", "Create &New Purchase Order", None))
        self.actionView_Purchase_Order.setText(_translate("MainWindow", "View Purchase Order...", None))
        self.actionExit.setText(_translate("MainWindow", "Exit", None))
        self.actionExit_2.setText(_translate("MainWindow", "E&xit", None))
        self.actionPurchase_Order.setText(_translate("MainWindow", "Purchase Order...", None))
        self.actionViewReports.setText(_translate("MainWindow", "View &Reports...", None))
        self.actionAbout.setText(_translate("MainWindow", "&About", None))
        self.actionOpenPurchaseOrder.setText(_translate("MainWindow", "&Open Purchase Order...", None))
        self.actionCopyPurchaseOrder.setText(_translate("MainWindow", "&Copy Purchase Order", None))
        self.actionClearPurchaseOrder.setText(_translate("MainWindow", "C&lear Purchase Order", None))
        self.actionPrintPurchaseOrder.setText(_translate("MainWindow", "&Print Purchase Order...", None))
        self.actionEditProjects.setText(_translate("MainWindow", "Edit Projects...", None))
        self.actionEditSuppliers.setText(_translate("MainWindow", "Edit Suppliers...", None))
        self.actionEditProducts.setText(_translate("MainWindow", "Edit Products...", None))
        self.actionSavePurchaseOrder.setText(_translate("MainWindow", "Save Purchase Order", None))
        self.actionExportPurchaseOrder.setText(_translate("MainWindow", "Export Purchase Order...", None))
        self.actionEditConfiguration.setText(_translate("MainWindow", "Configuration Wizard...", None))
import resources_rc
|
psvnl/podb
|
ui_mainwindow.py
|
Python
|
gpl-3.0
| 28,313 | 0.002508 |
# Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from getting_started import main
def test_main(cloud_config, capsys):
    """Run the getting-started sample and verify the Shakespeare query output.

    Uses the pytest fixtures cloud_config (project id) and capsys
    (captured stdout).
    """
    main(cloud_config.project)
    captured_out, _ = capsys.readouterr()
    pattern = re.compile(r'Query Results:.hamlet', re.DOTALL)
    assert pattern.search(captured_out)
|
clarko1/Cramd
|
bigquery/api/getting_started_test.py
|
Python
|
apache-2.0
| 808 | 0 |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2common.models.db.rule_enforcement import rule_enforcement_access
from st2common.persistence.base import Access
class RuleEnforcement(Access):
    """Persistence layer for rule-enforcement records.

    Thin wrapper that points the generic ``Access`` machinery at the
    ``rule_enforcement_access`` DB accessor.
    """
    impl = rule_enforcement_access
    @classmethod
    def _get_impl(cls):
        # Hook required by the Access base class: return the concrete
        # database accessor this persistence class delegates to.
        return cls.impl
|
StackStorm/st2
|
st2common/st2common/persistence/rule_enforcement.py
|
Python
|
apache-2.0
| 920 | 0 |
"""
This module contains some assorted functions used in tests
"""
from __future__ import absolute_import
import os
from importlib import import_module
from twisted.trial.unittest import SkipTest
from scrapy.exceptions import NotConfigured
from scrapy.utils.boto import is_botocore
def assert_aws_environ():
    """Assert the current environment is suitable for running AWS tests.

    Raises SkipTest with the reason when boto is unavailable or AWS
    credentials are not configured.
    """
    skip_if_no_boto()
    if os.environ.get('AWS_ACCESS_KEY_ID') is None:
        raise SkipTest("AWS keys not found")
def assert_gcs_environ():
    """Raise SkipTest unless a Google Cloud Storage project is configured."""
    if os.environ.get('GCS_PROJECT_ID') is None:
        raise SkipTest("GCS_PROJECT_ID not found")
def skip_if_no_boto():
    """Raise SkipTest when neither botocore nor boto is usable."""
    try:
        is_botocore()
    except NotConfigured as exc:
        raise SkipTest(exc)
def get_s3_content_and_delete(bucket, path, with_key=False):
    """ Get content from s3 key, and delete key afterwards.

    bucket -- S3 bucket name
    path -- key within the bucket
    with_key -- when True, return (content, key) instead of content only;
                note the key object type differs between the two branches
                (dict for botocore, boto Key otherwise)
    """
    if is_botocore():
        import botocore.session
        session = botocore.session.get_session()
        client = session.create_client('s3')
        key = client.get_object(Bucket=bucket, Key=path)
        content = key['Body'].read()
        client.delete_object(Bucket=bucket, Key=path)
    else:
        import boto
        # assuming boto=2.2.2
        # validate=False skips the extra HEAD request on bucket lookup
        bucket = boto.connect_s3().get_bucket(bucket, validate=False)
        key = bucket.get_key(path)
        content = key.get_contents_as_string()
        bucket.delete_key(path)
    return (content, key) if with_key else content
def get_gcs_content_and_delete(bucket, path):
    """Download a blob's bytes from Google Cloud Storage, then delete it.

    Returns (content, blob) where blob is the (now deleted) blob object.
    """
    from google.cloud import storage
    project_id = os.environ.get('GCS_PROJECT_ID')
    client = storage.Client(project=project_id)
    gcs_bucket = client.get_bucket(bucket)
    blob = gcs_bucket.get_blob(path)
    content = blob.download_as_string()
    gcs_bucket.delete_blob(path)
    return content, blob
def get_crawler(spidercls=None, settings_dict=None):
    """Return an unconfigured Crawler object.

    If settings_dict is given, it populates the crawler settings with
    project-level priority. Falls back to the base Spider class when no
    spider class is supplied.
    """
    from scrapy.crawler import CrawlerRunner
    from scrapy.spiders import Spider
    runner = CrawlerRunner(settings_dict)
    target_cls = spidercls or Spider
    return runner.create_crawler(target_cls)
def get_pythonpath():
    """Return a PYTHONPATH that makes subprocesses find this Scrapy install."""
    scrapy_dir = import_module('scrapy').__path__[0]
    existing = os.environ.get('PYTHONPATH', '')
    return os.pathsep.join([os.path.dirname(scrapy_dir), existing])
def get_testenv():
    """Return an OS environment dict for forking processes that must import
    this installation of Scrapy rather than a system-wide one.
    """
    env = dict(os.environ)
    env['PYTHONPATH'] = get_pythonpath()
    return env
def assert_samelines(testcase, text1, text2, msg=None):
    """Assert text1 and text2 contain the same lines, ignoring differences
    in platform line endings (\\n vs \\r\\n).
    """
    lines1 = text1.splitlines()
    lines2 = text2.splitlines()
    testcase.assertEqual(lines1, lines2, msg)
|
umrashrf/scrapy
|
scrapy/utils/test.py
|
Python
|
bsd-3-clause
| 3,020 | 0.002649 |
## @package csnStandardModuleProject
# Definition of the methods used for project configuration.
# This should be the only CSnake import in a project configuration.
import csnUtility
import csnProject
import csnBuild
import os.path
import inspect
from csnProject import GenericProject
class StandardModuleProject(GenericProject):
""" GenericProject with applications and modules in specific folders. """
    def __init__(self, _name, _type, _sourceRootFolder = None, _categories = None):
        """Initialize the project.

        When no _sourceRootFolder is given, it is derived from the file of
        the direct caller (stack depth 1), so project config files can omit
        it. Do not wrap this call in a helper: FindFilename(1) is sensitive
        to the call-stack depth.
        """
        if _sourceRootFolder is None:
            filename = csnProject.FindFilename(1)
            dirname = os.path.dirname(filename)
            _sourceRootFolder = csnUtility.NormalizePath(dirname, _correctCase = False)
        GenericProject.__init__(self, _name=_name, _type=_type, _sourceRootFolder=_sourceRootFolder, _categories=_categories, _context=csnProject.globalCurrentContext)
        # Lazily created container project for applications; see AddApplications.
        self.applicationsProject = None
    def AddLibraryModules(self, _libModules):
        """
        Adds source files (anything matching *.c??) and public include folders to self, using a set of libmodules.
        It is assumed that the root folder of self has a subfolder called libmodules. The subfolders of libmodules should
        contain a subfolder called src (e.g. for mymodule, this would be libmodules/mymodule/src).
        If the src folder has a subfolder called 'stub', it is also added to the source tree.
        _libModules - a list of subfolders of the libmodules folder that should be 'added' to self.
        """
        # add sources
        sourceRootFolder = self.GetSourceRootFolder()
        includeFileExtensions = csnUtility.GetIncludeFileExtensions()
        sourceFileExtensions = csnUtility.GetSourceFileExtensions()
        # First pass: register src (and src/stub) folders of every module,
        # adding both source and header globs from those folders.
        for libModule in _libModules:
            for stub in ("/stub", ""):
                srcFolder = "libmodules/%s/src%s" % (libModule, stub)
                srcFolderAbs = "%s/%s" % (sourceRootFolder, srcFolder)
                if( os.path.exists(srcFolderAbs) ):
                    self.AddIncludeFolders([srcFolder])
                    for extension in sourceFileExtensions:
                        # _checkExists = 0: glob may legitimately match nothing
                        self.AddSources(["%s/*.%s" % (srcFolder, extension)], _checkExists = 0)
                    for extension in includeFileExtensions:
                        self.AddSources(["%s/*.%s" % (srcFolder, extension)], _checkExists = 0)
        # Second pass: register include (and include/stub) folders, which
        # only contribute headers.
        for libModule in _libModules:
            for stub in ("/stub", ""):
                includeFolder = "libmodules/%s/include%s" % (libModule, stub)
                includeFolderAbs = "%s/%s" % (sourceRootFolder, includeFolder)
                if( os.path.exists(includeFolderAbs) ):
                    self.AddIncludeFolders([includeFolder])
                    for extension in includeFileExtensions:
                        self.AddSources(["%s/*.%s" % (includeFolder, extension)], _checkExists = 0)
def AddApplications(self, _modules, _pch="", _applicationDependenciesList=None, _holderName=None, _properties = None):
    """
    Creates extra CSnake projects, each project building one application in the 'Applications' subfolder of the current project.
    _modules - List of the subfolders within the 'Applications' subfolder that must be scanned for applications.
    _pch - If not "", this is the include file used to generate a precompiled header for each application.
    _applicationDependenciesList - Extra projects every application depends on (besides self).
    _holderName - Name of the container project; defaults to "<self.name>Applications".
    _properties - Extra properties forwarded to every application project.
    """
    # Bug fix: _properties used a mutable default ([]), which is shared across
    # calls in Python; use None as the sentinel instead.
    if _properties is None:
        _properties = []
    # Every application depends at least on self.
    dependencies = [self]
    if not _applicationDependenciesList is None:
        dependencies.extend(_applicationDependenciesList)
    if _holderName is None:
        _holderName = "%sApplications" % self.name
    csnProject.globalCurrentContext.SetSuperSubCategory("Applications", _holderName)
    # Create the container project once, and cross-link: the container builds
    # self, while self references the container without a build dependency.
    if self.applicationsProject is None:
        self.applicationsProject = csnBuild.Project(self.name + "Applications", "container", _sourceRootFolder = self.GetSourceRootFolder(), _categories = [_holderName])
        #self.applicationsProject.AddSources([csnUtility.GetDummyCppFilename()], _sourceGroup = "CSnakeGeneratedFiles")
        self.applicationsProject.AddProjects([self])
        self.AddProjects([self.applicationsProject], _dependency = 0)
    # look for an 'applications' or 'Applications' folder
    _modulesFolder = "%s/applications" % self.GetSourceRootFolder()
    if not os.path.exists(_modulesFolder):
        _modulesFolder = "%s/Applications" % self.GetSourceRootFolder()
    self.__AddApplications(self.applicationsProject, dependencies, _modules, _modulesFolder, _pch, _holderName, _properties)
def __AddApplications(self, _holderProject, _applicationDependenciesList, _modules, _modulesFolder, _pch = "", _holderName=None, _properties = None):
    """
    Creates application projects and adds them to _holderProject (using _holderProject.AddProject). The holder
    project does not depend on these application projects.
    It is assumed that _modules is a list containing subfolders of _modulesFolder.
    Each subfolder in _modules should contain source files (.cpp, .cxx or .cc), where each source file corresponds to a single application.
    Hence, each source file is used to create a new application project. For example, assuming that the _modulesFolder
    is called 'Applications', the file 'Applications/Small/Tiny.cpp' will be used to build the 'Tiny' application.
    _applicationDependenciesList - List of projects that each new application project is dependent on.
    _modulesFolder - Folder containing subfolders with applications.
    _modules = List of subfolders of _modulesFolder that should be processed.
    _pch - If not "", this is the C++ include file which is used for building a precompiled header file for each application.
    _properties - Extra properties applied to every generated application project.
    """
    # Bug fix: _properties used a mutable default ([]), shared across calls;
    # use None as the sentinel instead.
    if _properties is None:
        _properties = []
    for module in _modules:
        moduleFolder = "%s/%s" % (_modulesFolder, module)
        sourceFiles = []
        headerFiles = []
        for extension in csnUtility.GetSourceFileExtensions():
            sourceFiles.extend(_holderProject.Glob("%s/*.%s" % (moduleFolder, extension)))
        for extension in csnUtility.GetIncludeFileExtensions():
            headerFiles.extend(_holderProject.Glob("%s/*.%s" % (moduleFolder, extension)))
        for sourceFile in sourceFiles:
            if os.path.isdir(sourceFile):
                continue
            # One application project per source file, named after the file.
            name = os.path.splitext( os.path.basename(sourceFile) )[0]
            name = name.replace(' ', '_')
            if _holderName is None:
                _holderName = _holderProject.name
            app = csnBuild.Project("%s_%s" % (_holderName, name), "executable", _sourceRootFolder = _holderProject.GetSourceRootFolder())
            app.AddIncludeFolders([moduleFolder])
            app.AddProjects(_applicationDependenciesList)
            app.AddSources([sourceFile])
            app.AddProperties( _properties )
            # add header files so that they appear in visual studio
            app.AddSources(headerFiles)
            if( _pch != "" ):
                app.SetPrecompiledHeader(_pch)
            _holderProject.AddProjects([app])
|
csnake-org/CSnake
|
src/csnStandardModuleProject.py
|
Python
|
bsd-3-clause
| 7,313 | 0.012033 |
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mapproxy.util.collections import LRU, ImmutableDictList
from nose.tools import eq_, raises
class TestLRU(object):
    """Exercise the LRU mapping: lookups, membership, defaults, eviction."""

    @raises(KeyError)
    def test_missing_key(self):
        cache = LRU(10)
        cache['foo']

    def test_contains(self):
        cache = LRU(10)
        cache['foo1'] = 1
        assert 'foo1' in cache
        assert 'foo2' not in cache

    def test_repr(self):
        cache = LRU(10)
        cache['foo1'] = 1
        text = repr(cache)
        assert 'size=10' in text
        assert 'foo1' in text

    def test_getitem(self):
        cache = LRU(10)
        cache['foo1'] = 1
        cache['foo2'] = 2
        assert cache['foo1'] == 1
        assert cache['foo2'] == 2

    def test_get(self):
        cache = LRU(10)
        cache['foo1'] = 1
        assert cache.get('foo1') == 1
        # A present key wins over the supplied default.
        assert cache.get('foo1', 2) == 1

    def test_get_default(self):
        cache = LRU(10)
        cache['foo1'] = 1
        assert cache.get('foo2') is None
        assert cache.get('foo2', 2) == 2

    def test_delitem(self):
        cache = LRU(10)
        cache['foo1'] = 1
        assert 'foo1' in cache
        del cache['foo1']
        assert 'foo1' not in cache

    def test_empty(self):
        cache = LRU(10)
        assert not cache
        cache['foo1'] = '1'
        assert cache

    def test_setitem_overflow(self):
        # Capacity 2: inserting a third key evicts the least recently used one.
        cache = LRU(2)
        cache['foo1'] = 1
        cache['foo2'] = 2
        cache['foo3'] = 3
        assert 'foo1' not in cache
        assert 'foo2' in cache
        assert 'foo3' in cache

    def test_length(self):
        cache = LRU(2)
        assert len(cache) == 0
        cache['foo1'] = 1
        assert len(cache) == 1
        cache['foo2'] = 2
        assert len(cache) == 2
        # Overflow keeps the length capped at the configured size.
        cache['foo3'] = 3
        assert len(cache) == 2
        del cache['foo3']
        assert len(cache) == 1
class TestImmutableDictList(object):
    """ImmutableDictList supports both positional and named element access."""

    def test_named(self):
        coll = ImmutableDictList([('one', 10), ('two', 5), ('three', 3)])
        # Positional access...
        assert coll[0] == 10
        assert coll[2] == 3
        # ...and named access address the same entries.
        assert coll['one'] == 10
        assert coll['three'] == 3
        assert len(coll) == 3

    def test_named_iteritems(self):
        coll = ImmutableDictList([('one', 10), ('two', 5), ('three', 3)])
        items = coll.iteritems()
        assert next(items) == ('one', 10)
        assert next(items) == ('two', 5)
        assert next(items) == ('three', 3)
        # The iterator must be exhausted after the third pair.
        try:
            next(items)
        except StopIteration:
            pass
        else:
            assert False, 'StopIteration expected'
|
procrastinatio/mapproxy
|
mapproxy/test/unit/test_collections.py
|
Python
|
apache-2.0
| 3,064 | 0.001305 |
#!/usr/bin/env python
#########################################
#
# fasta2tax.py
#
########################################
import argparse
import json
import os
import subprocess
import sys

import pymysql as MySQLdb
# Root of the MBL sequencing pipeline checkout; the rdp helpers live in its bin/.
py_pipeline_path = os.path.expanduser('~/programming/py_mbl_sequencing_pipeline')
# NOTE(review): 'inputfile' is never defined in this module (and argparse is
# imported but unused) -- this file looks like a partial port of a Perl script
# (see the '#my $...' remnants) and will raise NameError as written.
#my $rdpFile = "$inputfile.rdp";
print "rdp file: start\n";
#my $rdpFile = dirname($inputfile)."/$project--$dataset.fa.rdp";
rdpFile = inputfile+".rdp";
#my $rdpFile = "$project--$dataset.fa.rdp";
# NOTE(review): this prints the literal text 'rdpFile', not the value.
print "rdp file: rdpFile\n";
# Work-file names derived from the input FASTA file.
loadFile1 = inputfile+".load1"
loadFile2 = inputfile+".load2"
outFile = inputfile+".rdpout"
logFile = inputfile+".rdplog"
# $logFile => /usr/local/www/vamps/tmp/fasta2tax.log
# NOTE(review): DEBUG and the variables echoed below are undefined here, and
# the prints show literal variable names rather than their values.
if DEBUG:
    print "DEBUG: Invoked with arguments (post processing):\n"
    print "DEBUG: user: user\n"
    print "DEBUG: inputfile: inputfile\n"
    print "DEBUG: project: project\n"
    print "DEBUG: dataset: dataset\n"
    print "DEBUG: path_to_apps: path_to_apps\n"
    print "DEBUG: database: database\n"
    print "DEBUG: table1: table1\n"
    print "DEBUG: table2: table2\n"
    print "DEBUG: db_user: db_user\n"
    print "DEBUG: db_password: db_password\n"
    print "DEBUG: db_hostname: db_hostname\n"
#######################################
#
# Do sanity checking for presence of
# values from argument processing...
#
#######################################
#######################################
#
# Run RDP and rdp_file_creator...
#
#######################################
def run(project):
path_to_rdp = py_pipeline_path+"/bin/rdp"
print path_to_rdp
rdpCmd = path_to_rdp+' ' +inputfile+' '+rdpFile
print "Preparing to execute RDP Command: rdpCmd\n";
rdpCmdOutput = subprocess.check_output(rdpCmd, shell=True)
#my $rdpCheckCmd = "$path_to_apps/rdp_checker -q -log $logFile -b 80 -project \"$project\" -dataset \"$dataset\" -f1 $loadFile1 -f2 $loadFile2 $rdpFile";
rdpCheckCmd = py_pipeline_path+"/bin/rdp_file_creator -s database -q -log logFile -b 80 -project \"$project\" -dataset \"$dataset\" -f1 $loadFile1 -f2 $loadFile2 $rdpFile";
rdpCheckOutput = subprocess.check_output(rdpCheckCmd, shell=True)
# $DEBUG && print "DEBUG: rdp_file_creator exited with result code: $rdpCheckExitCode<br><br>\n";
# if ($DEBUG) {
# my @rdpCheckOutput_lines = split /\n/, $rdpCheckOutput;
# foreach my $output_line (@rdpCheckOutput_lines) {
# print "DEBUG: $output_line<br>\n";
# }
# }
# my $rdpCheckExitString;
# if ($rdpCheckExitCode == 0) {
# $rdpCheckExitString = "0";
# } elsif ($rdpCheckExitCode == 253) {
# $rdpCheckExitString = "RDP boot score value is not valid.";
# } elsif ($rdpCheckExitCode == 254) {
# $rdpCheckExitString = "Taxonomy file is not valid.";
# } elsif ($rdpCheckExitCode == 255) {
# $rdpCheckExitString = "Internal error: Could not locate taxonomy file.";
# } else {
# $rdpCheckExitString = "Unknown error.";
# }
#
# if ($rdpCheckExitCode != 0) {
# print "Error performing RDP taxonomic checks: $rdpCheckExitString. Data has not been uploaded. Project=\"$project\", Dataset=\"$dataset\", User name=\"$user\"\n";
# exit $rdpCheckExitCode;
# }
#######################################
#
# Load the final taxonomy into the tables specified in the @tables array...
# It would be really nice if we could roll this back on failure.
#
#######################################
# my $dsn = "dbi:mysql:$database:$db_hostname";
# #$DEBUG && print "DEBUG: Connecting to database\n$dsn\n";
#
# my $dbh = DBI->connect($dsn, $db_user, $db_password) or die "Unable to connect to $database database\n";
#
# if ($use_transactions) {
# # Encapsulate the changes to these tables in a transaction...
# my $query = "START TRANSACTION";
# my $handle = $dbh->prepare($query) or die "Unable to prepare query: $query\n";
# $handle->execute or die "Unable to execute query: $query\n";
# }
#
# my %load_files = ($table1 => $loadFile1, $table2 => $loadFile2);
# foreach (keys %load_files) {
# # Get a table...
# # Table1 = vamps_data_cube_uploads, Table2 = vamps_junk_data_cube_pipe;
# my $table = $_;
#
# # Clear out the old data and replace with the new
# #$DEBUG && print "DEBUG: Removing old project/dataset records from table $dsn.$table...\n";
# my $cleanQuery = "delete from $table where project='" . $project ."' and dataset = '" . $dataset . "'";
# #$DEBUG && print "DEBUG: Preparing query: \"$cleanQuery\"...\n";
# my $clean_h = $dbh->prepare($cleanQuery) or die "Unable to prepare query: $cleanQuery\n";
# $clean_h->execute or die "Unable to execute query: $cleanQuery\n";
#
# # Add the new data into the table
# #$DEBUG && print "DEBUG: Loading final taxonomy into the table $dsn.$table...\n";
#
# # Set up the query to Load the data
# my $loadQuery = "load data local infile '" . $load_files{$table} . "' replace into table $table fields terminated by '\t' lines terminated by '\n'
# set classifier='RDP'";
#
# #$DEBUG && print "DEBUG: Preparing query: \"$loadQuery\"...\n";
#
# my $load_h = $dbh->prepare($loadQuery) or die "Unable to prepare query: $loadQuery\n";
#
# $load_h->execute or die "Unable to execute query: $loadQuery\n";
#
# if ($dbh->err) {
# if ($use_transactions) {
# # Encapsulate the changes to these tables in a transaction...
# my $query = "ROLLBACK";
# my $handle = $dbh->prepare($query) or die "Unable to prepare query: $query\n";
# $handle->execute or die "Unable to execute query: $query\n";
# }
# print "Application Error: An error has occured while trying to load the data into the MySQL database. The following query was used: \"$loadQuery\".\n";
# print "The database engine reports the error as: \"".$dbh->errstr."\".\n";
# print "This is a fatal error. Exiting.\n";
# exit 1;
# }
# }
#
# if ($use_transactions) {
# # commit the transaction...
# my $query = "COMMIT";
# my $handle = $dbh->prepare($query) or die "Unable to prepare query: $query\n";
# $handle->execute or die "Unable to execute query: $query\n";
# }
#$DEBUG && print "DEBUG: Cleaning out tmp files...\n";
# foreach my $i ($inputfile, $rdpFile, $loadFile1, $loadFile2, $logFile)
# {
# #my $rmErr = system("rm -f $i");
# }
#$DEBUG && print "DEBUG: Execution complete.\n";
#print "Done and clean from fasta2tax.pl<br>\n";
|
avoorhis/vamps-node.js
|
public/scripts/rdp/rdp_fasta2tax.py
|
Python
|
mit
| 6,320 | 0.024525 |
"""
Project additional elements
"""
from .menu_button import *
from .new_key_qframe import *
|
MindLoad/MonkeyManager
|
widgets/__init__.py
|
Python
|
gpl-3.0
| 94 | 0 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# This software is licensed as described in the README.rst and LICENSE
# files, which you should have received as part of this distribution.
import setuptools
# noinspection PyPep8Naming
from raspi_pir import __version__ as VERSION
# Runtime dependency: the shared RPi.Sensor base library.
DEPS = ['RPi.Sensor>=0.5.3']
# PyPI trove classifiers describing audience, platform and maturity.
CLASSIFIERS = [
    'Environment :: Console',
    'Intended Audience :: System Administrators',
    'Intended Audience :: Developers',
    'Intended Audience :: End Users/Desktop',
    'Operating System :: Unix',
    'Operating System :: POSIX :: Linux',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Development Status :: 4 - Beta',
    'Topic :: Utilities',
    'Topic :: Home Automation',
    'Topic :: System :: Hardware',
    'Topic :: Terminals'
]
# The README doubles as the PyPI long description.
with open("README.rst", "r") as fp:
    sensor_long_description = fp.read()
# Package metadata; VERSION is imported from the raspi_pir package above so
# the version number lives in exactly one place.
setuptools.setup(
    name='RPi.PIR',
    version=VERSION,
    author="Richard von Kellner",
    author_email="richard.kellner [at] gmail.com",
    url="https://github.com/ricco386/RPi",
    description='PIR sensor state monitor',
    long_description=sensor_long_description,
    license="MIT",
    packages=setuptools.find_packages(),
    classifiers=CLASSIFIERS,
    install_requires=DEPS,
    scripts=['bin/raspi-pir'],
    include_package_data=True
)
|
ricco386/broadcaster
|
RPi.PIR/setup.py
|
Python
|
bsd-3-clause
| 1,382 | 0 |
#!/usr/bin/python
#requires the following:
#sudo apt-get install curl
#curl http://apt.wxwidgets.org/key.asc | apt-key add -
#sudo apt-get update
#sudo apt-get install python-wxgtk2.8 python-wxtools wx2.8-i18n
#sudo apt-get install python-gdata
import wx
import os
import sys
def pinger():
    """Ping google.com once and return the index of the statistics header
    in the ping output, or -1 when the marker is absent (e.g. no network).

    The caller only needs the find() result; -1 signals failure.
    """
    # Read the whole output in one call instead of concatenating readlines()
    # in a loop (the original built the string with repeated += which is
    # quadratic and needlessly verbose).
    f = os.popen('ping -c 1 google.com')
    output = f.read()
    f.close()
    return output.find('--- google.com ping statistics ---')
#a = pinger()
"""abc = wx.ShowMessageDialog(None,-1,'No Internet connectoin found!. Will now exit','Error')
abc.ShowModal()
abc.Destroy()
self.Destroy()
return False"""
#uplist = ['115.com','2shared','4shared','Badongo','Data.hu','DepositFiles','divShare','dl.free.fr','Humyo','Mediafire*','Megaupload','Netload.in','Rapidshare*','Sendspace','Uploading.com','Usershare','x7.to','ZShare']
uplist = ['Megaupload','2shared','Mediafire*','ZShare']
uplist2 =['megaupload','2shared','mediafire','zshare']
class FinalFrame(wx.Frame):
    # Results window shown after the upload completes; collects per-site link
    # text via add() and builds the actual wx widgets only in display().
    def __init__(self):
        # Intentionally does NOT call wx.Frame.__init__; the frame is
        # initialised later in display() once the links are known.
        pass
    # Accumulated link text per upload site. These are class-level defaults;
    # '+=' in add() rebinds them as instance attributes on first use.
    mupl = ''
    tshl = ''
    medl = ''
    zshl = ''
    def add(self,typ,string):
        # typ indexes the upload site: 0=Megaupload, 1=2shared, 2=Mediafire,
        # 3=ZShare (matches the order of the module-level uplist2).
        if typ == 0:
            self.mupl += string + '\n\n'
        elif typ == 1:
            self.tshl += string + '\n\n'
        elif typ == 2:
            self.medl += string + '\n\n'
        elif typ == 3:
            self.zshl += string + '\n\n'
    def doit(self):
        # Build the widgets from the collected link text and show the frame.
        self.display(self.mupl,self.tshl,self.medl,self.zshl)
        self.Show()
    def display(self,megaupload_links,tshared_links,mediafire_links,zshare_links):
        # Late frame construction: one read-only multiline text box per site.
        wx.Frame.__init__(self,None,-1,'Upload Complete!',size=(600,550))
        self.panel = wx.Panel(self)
        wx.StaticText(self.panel,-1,'Your Upload has completed :) Here are your links:',pos = (30,30))
        wx.StaticText(self.panel,-1,'Megaupload links:',pos=(30,80))
        mupld_link_box = wx.TextCtrl(self.panel,-1,megaupload_links,size=(540,80),pos=(30,100),style=wx.TE_MULTILINE | wx.TE_READONLY)
        wx.StaticText(self.panel,-1,'2shared links:',pos=(30,190))
        tshrd_link_box = wx.TextCtrl(self.panel,-1,tshared_links,size=(540,80),pos=(30,210),style=wx.TE_MULTILINE | wx.TE_READONLY)
        wx.StaticText(self.panel,-1,'Mediafire links:',pos=(30,300))
        mfire_link_box = wx.TextCtrl(self.panel,-1,mediafire_links,size=(540,80),pos=(30,320),style=wx.TE_MULTILINE | wx.TE_READONLY)
        wx.StaticText(self.panel,-1,'ZShare Links:',pos=(30,410))
        zshre_link_box = wx.TextCtrl(self.panel,-1,zshare_links,size=(540,80),pos=(30,430),style=wx.TE_MULTILINE | wx.TE_READONLY)
class MyFrame(wx.Frame):
    # Main window: site selection, optional file browser, and the upload
    # driver that shells out to the plowshare upload.sh script.
    # Shared results window (class attribute, one per process).
    fframe = FinalFrame()
    def __init__(self):
        # Extra args for upload.sh (filled by login_mega with credentials).
        self.param = ''
        self.check=0
        # Files to upload come from the command line by default.
        self.args = sys.argv[1:]
        # No files given -> also offer a "Browse for files" button.
        if len(self.args)==0:
            self.check=1
        wx.Frame.__init__(self,None,-1,'Pshare',size=(600,330))
        self.panel = wx.Panel(self)
        wx.StaticText(self.panel,-1,'Welcome to the Plowshare Uploader GUI.\n\nThis app lets you upload any file to any of the supported file-sharing sites. To proceed, please select one (or more) of the uploading sites:',pos = (30,30), size = (540,70))
        wx.StaticText(self.panel,-1,'Available Sites to upload:',pos = (30,160))
        # Multi-select list of upload sites (module-level uplist).
        self.choice_box = wx.ListBox(self.panel,-1,(30,120),(540,100),uplist, wx.LB_EXTENDED | wx.LB_HSCROLL)
        wx.StaticText(self.panel,-1,'*Upload to these sites may NOT work at the moment; developers are trying to fix the issues',pos=(30,225),size=(540,50))
        if self.check==1:
            self.button_browse_files = wx.Button(self.panel,-1,'Browse for files',pos=(420,270),size=(150,30))
            self.button_upload = wx.Button(self.panel,-1,'Start Upload',pos=(30,270),size=(150,30))
            self.button_login_mupload = wx.Button(self.panel,-1,'Login to Megaupload Account',pos=(190,270),size = (220,30))
            self.Bind(wx.EVT_BUTTON,self.browsefiles,self.button_browse_files)
        else:
            self.button_upload = wx.Button(self.panel,-1,'Start Upload',pos=(30,270),size=(265,30))
            self.button_login_mupload = wx.Button(self.panel,-1,'Login to Megaupload Account',pos=(305,270),size = (265,30))
        self.Bind(wx.EVT_BUTTON,self.upload,self.button_upload)
        self.Bind(wx.EVT_BUTTON,self.login_mega,self.button_login_mupload)
    def upload(self,evt):
        # Validate input, then run upload.sh once per selected site while
        # driving a 0-60 progress dialog.
        temp1 = len(self.args)
        temp2 = len(self.choice_box.GetSelections())
        if temp1==0:
            nofile_dlg = wx.MessageDialog(None,'No files Chosen!\nChoose atleast 1 file','Error',wx.OK | wx.ICON_ERROR)
            nofile_dlg.ShowModal()
            nofile_dlg.Destroy()
            return
        if temp2==0:
            nofile_dlg = wx.MessageDialog(None,'No Upload sites Chosen!\nChoose atleast 1 Upload Site','Error',wx.OK | wx.ICON_ERROR)
            nofile_dlg.ShowModal()
            nofile_dlg.Destroy()
            return
        self.udlg = wx.ProgressDialog('Processing Request','Wait while we upload your file(s)',maximum=60)
        self.udlg.Update(1)
        y = 0
        # First 30 progress units are spread over the files being queued.
        temp2 = 30/temp1
        # Build the common command line: upload.sh <files...> <site>
        val = 'bash ~/.plowshare/src/upload.sh '
        for x in self.args:
            val += '\"' + x + '\" '
            y += temp2
            self.udlg.Update(y)
        y = 30
        self.linkss = []
        #print val
        temp3 = self.choice_box.GetSelections()
        #print temp3
        for x in temp3:
            temp4 = val
            # Megaupload optionally gets the stored credentials (login_mega).
            if uplist2[x] == 'megaupload':
                temp4 += self.param
            temp4 += uplist2[x]
            #print temp4
            file1=os.popen(temp4)
            file1_lines = file1.readlines()
            # Empty output means the upload script produced nothing -> abort.
            if len(file1_lines)==0:
                err_dlg = wx.MessageDialog(None,'Upload Failed! Possible Reasons:\n1. No Internet connection\n2. Upload error (choose different upload\nsite in this case)','Error',wx.OK | wx.ICON_ERROR)
                err_dlg.ShowModal()
                err_dlg.Destroy()
                self.udlg.Update(60)
                self.udlg.Destroy()
                return;
            for x2 in file1_lines:
                # Output lines of the form "<link>(<delete link>)" are split
                # into a labelled Link / Delete_link pair for display.
                ind = x2.find('(http:')
                if ind != -1:
                    x2 = 'Link\n====================\n' + x2[0:ind] + '\n\nDelete_link\n====================\n' + x2[ind+1:]
                self.fframe.add(x,x2)
            y += temp2
            self.udlg.Update(y)
        # Show the results window and tear down the main frame.
        self.fframe.doit()
        self.udlg.Update(60)
        self.udlg.Destroy()
        ##
        self.panel.Destroy()
        self.Destroy()
    def login_mega(self,evt):
        # Ask for Megaupload credentials and remember them as the "-a" flag
        # appended to the upload command for megaupload.
        self.username = ''
        self.password = ''
        ubox = wx.TextEntryDialog(None,"Please Enter Username","UserName",'username')
        if ubox.ShowModal()==wx.ID_OK:
            self.username = ubox.GetValue()
        ubox.Destroy()
        ubox = wx.TextEntryDialog(None,'Please Enter Password','Password','********',wx.TE_PASSWORD | wx.OK | wx.CANCEL)
        if ubox.ShowModal()==wx.ID_OK:
            self.password = ubox.GetValue()
            self.param = ' -a ' + self.username + ':' + self.password + ' '
            #print '\n\n' + self.param + '\n\n'
        ubox.Destroy()
    def browsefiles(self,evt):
        # Multi-file picker; replaces the upload list only if something was chosen.
        filed = wx.FileDialog(None,"Choose a file",style=wx.FD_MULTIPLE)
        filed.ShowModal()
        a = filed.GetPaths()
        # print a
        if len(a) > 0:
            self.args = a
        # print len(self.args)
        filed.Destroy()
class MyApp(wx.App):
    """wx application bootstrap: create and show the main Pshare window."""

    def OnInit(self):
        main_window = MyFrame()
        main_window.Show()
        return True
if __name__=='__main__':
    # redirect=True sends stdout/stderr to a wx output window instead of the console.
    app = MyApp(redirect=True)
    app.MainLoop()
|
kernt/linuxtools
|
gnome3-shell/nautilus-scripts/Archiving/PlowShare-Upload.py
|
Python
|
gpl-3.0
| 6,622 | 0.056025 |
# for Python3 we need a fully qualified name import
from mappyscript._mappyscript import version, version_number, load, loads, dumps, create_request, load_map_from_params, Layer, convert_sld
|
geographika/mappyscript
|
mappyscript/__init__.py
|
Python
|
mit
| 190 | 0.010526 |
import os
import pytest
import numpy as np
from imageio import imread
def compare_2_images(validator_path, output_path):
    """Assert that two images (paths relative to this test file) are
    pixel-for-pixel identical when decoded as RGB."""
    base_dir = os.path.dirname(__file__)
    expected = imread(os.path.join(base_dir, validator_path), pilmode='RGB')
    produced = imread(os.path.join(base_dir, output_path), pilmode='RGB')
    assert np.all(np.equal(expected, produced))
def clean_test_results(output_file_no_ext):
    """Delete the result images a pspnet run leaves under tests/.

    output_file_no_ext -- basename of the test fixture (e.g. "ade20k_test");
    the four generated "*_probs/_seg/_seg_blended/_seg_read.jpg" files derived
    from it are removed. Raises OSError if any of them is missing, exactly as
    the original per-file os.remove calls did.
    """
    # Loop over the known suffixes instead of four copy-pasted remove calls.
    for suffix in ("_probs.jpg", "_seg.jpg", "_seg_blended.jpg", "_seg_read.jpg"):
        os.remove(os.path.join("tests", output_file_no_ext + suffix))
def test_main_flip_ade20k(cli_args_ade):
    """End-to-end smoke test: run pspnet on the ADE20K fixture and compare
    each generated image against its checked-in validator image."""
    from pspnet import main
    main(cli_args_ade)
    compare_2_images("ade20k_test_probs.jpg", "validators/ade20k_test_probs.jpg")
    compare_2_images("ade20k_test_seg.jpg", "validators/ade20k_test_seg.jpg")
    compare_2_images("ade20k_test_seg_read.jpg", "validators/ade20k_test_seg_read.jpg")
    clean_test_results("ade20k_test")
# Skipped until the cityscapes validator images are added to the repo.
@pytest.mark.skip
def test_main_flip_cityscapes(cli_args_cityscapes):
    """
    TODO: Add images
    :param cli_args_cityscapes:
    :return:
    """
    from pspnet import main
    main(cli_args_cityscapes)
    compare_2_images("cityscapes_test_probs.jpg", "validators/cityscapes_test_probs.jpg")
    compare_2_images("cityscapes_test_seg.jpg", "validators/cityscapes_test_seg.jpg")
    compare_2_images("cityscapes_test_seg_read.jpg", "validators/cityscapes_test_seg_read.jpg")
    clean_test_results("cityscapes_test")
# Skipped until the Pascal VOC validator images are added to the repo.
@pytest.mark.skip
def test_main_flip_voc(cli_args_voc):
    """
    TODO: Add images
    :param cli_args_voc:
    :return:
    """
    from pspnet import main
    main(cli_args_voc)
    compare_2_images("pascal_voc_test_probs.jpg", "validators/pascal_voc_test_probs.jpg")
    compare_2_images("pascal_voc_test_seg.jpg", "validators/pascal_voc_test_seg.jpg")
    compare_2_images("pascal_voc_test_seg_read.jpg", "validators/pascal_voc_test_seg_read.jpg")
    clean_test_results("pascal_voc_test")
|
Vladkryvoruchko/PSPNet-Keras-tensorflow
|
tests/test_smoke.py
|
Python
|
mit
| 2,114 | 0.005676 |
from django.db import models
# from django.contrib.gis.geoip import GeoIP
#
# g = GeoIP()
# Create your models here.
class TempMail(models.Model):
    """Temporarily stored incoming mail awaiting processing."""
    mailfrom = models.EmailField()            # sender address
    mailsubj = models.CharField(max_length=20)
    mailrcvd = models.DateTimeField()         # when the mail was received
    # Bug fix: CharField requires max_length (Django system check fields.E120
    # rejects a bare CharField()); raw headers are unbounded text, so
    # TextField is the appropriate field type here.
    mailhdrs = models.TextField()
class SavedMail(models.Model):
    """Mail retained after processing, linked to the organization it matched."""
    mailrcvd = models.DateTimeField()  # when the mail was received
    # Bug fix: CharField requires max_length (Django system check fields.E120
    # rejects a bare CharField()); headers are unbounded text -> TextField.
    mailhdrs = models.TextField()
    # Explicit on_delete: CASCADE was the implicit default in Django < 2.0 and
    # the argument is mandatory from 2.0 on, so spelling it out is
    # behavior-preserving and forward-compatible.
    organization = models.ForeignKey('Organization', on_delete=models.CASCADE)
class Organization(models.Model):
    """An organization, identified by its email domain suffix."""
    # Email suffix (e.g. the part after '@') used to associate mail with
    # this organization -- presumably matched against TempMail.mailfrom;
    # TODO confirm against the processing code.
    emailsuffix = models.CharField(max_length=255)
class Follower(models.Model):
    """A subscriber identified only by an email address."""
    email = models.EmailField()
|
bitsoffreedom/baas
|
baas/boem/models.py
|
Python
|
gpl-2.0
| 607 | 0.008237 |
from django.contrib import admin
from api.models import *
# Expose every public API model in the Django admin with the default ModelAdmin.
admin.site.register(Question)
admin.site.register(Answer)
admin.site.register(Sighting)
admin.site.register(Picture)
admin.site.register(UserComment)
admin.site.register(ExpertComment)
admin.site.register(SightingFAQ)
|
Habitissimo/vespapp-web
|
api/admin.py
|
Python
|
gpl-3.0
| 278 | 0 |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from io import BytesIO
from devtools_testutils.aio import recorded_by_proxy_async
from azure.core.exceptions import ServiceRequestError
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer._generated.v2_1.models import AnalyzeOperationResult
from azure.ai.formrecognizer._response_handlers import prepare_prebuilt_models
from azure.ai.formrecognizer.aio import FormRecognizerClient
from azure.ai.formrecognizer import FormRecognizerApiVersion
from asynctestcase import AsyncFormRecognizerTest
from preparers import FormRecognizerPreparer
from preparers import GlobalClientPreparer as _GlobalClientPreparer
FormRecognizerClientPreparer = functools.partial(_GlobalClientPreparer, FormRecognizerClient)
class TestIdDocumentsAsync(AsyncFormRecognizerTest):
    """Async tests for FormRecognizerClient.begin_recognize_identity_documents
    (prebuilt ID-document model): input validation, raw-to-public result
    transformation, field extraction, continuation tokens and API-version
    gating."""

    def teardown(self):
        # Pause between tests to stay under the service rate limit.
        self.sleep(4)

    @pytest.mark.skip()
    @FormRecognizerPreparer()
    @recorded_by_proxy_async
    async def test_identity_document_bad_endpoint(self, formrecognizer_test_endpoint, formrecognizer_test_api_key, **kwargs):
        # A non-resolvable endpoint must surface as ServiceRequestError.
        with open(self.identity_document_license_jpg, "rb") as fd:
            my_file = fd.read()
        with pytest.raises(ServiceRequestError):
            client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(formrecognizer_test_api_key))
            async with client:
                poller = await client.begin_recognize_identity_documents(my_file)

    @FormRecognizerPreparer()
    @FormRecognizerClientPreparer()
    async def test_damaged_file_bytes_fails_autodetect_content_type(self, **kwargs):
        client = kwargs.pop("client")
        damaged_pdf = b"\x50\x44\x46\x55\x55\x55"  # doesn't match any magic file numbers
        # Content-type auto-detection must reject unrecognizable bytes.
        with pytest.raises(ValueError):
            async with client:
                poller = await client.begin_recognize_identity_documents(
                    damaged_pdf
                )

    @FormRecognizerPreparer()
    @FormRecognizerClientPreparer()
    async def test_damaged_file_bytes_io_fails_autodetect(self, **kwargs):
        client = kwargs.pop("client")
        damaged_pdf = BytesIO(b"\x50\x44\x46\x55\x55\x55")  # doesn't match any magic file numbers
        # Same auto-detection failure, but via a stream instead of raw bytes.
        with pytest.raises(ValueError):
            async with client:
                poller = await client.begin_recognize_identity_documents(
                    damaged_pdf
                )

    @FormRecognizerPreparer()
    @FormRecognizerClientPreparer()
    async def test_passing_bad_content_type_param_passed(self, **kwargs):
        client = kwargs.pop("client")
        with open(self.identity_document_license_jpg, "rb") as fd:
            my_file = fd.read()
        # An explicitly supplied but invalid content type must be rejected.
        with pytest.raises(ValueError):
            async with client:
                poller = await client.begin_recognize_identity_documents(
                    my_file,
                    content_type="application/jpeg"
                )

    @FormRecognizerPreparer()
    @FormRecognizerClientPreparer()
    async def test_auto_detect_unsupported_stream_content(self, **kwargs):
        client = kwargs.pop("client")
        # A Python source file is not a supported document format.
        with open(self.unsupported_content_py, "rb") as fd:
            my_file = fd.read()
        with pytest.raises(ValueError):
            async with client:
                poller = await client.begin_recognize_identity_documents(
                    my_file
                )

    @FormRecognizerPreparer()
    @FormRecognizerClientPreparer()
    @recorded_by_proxy_async
    async def test_identity_document_stream_transform_jpg(self, client):
        responses = []

        def callback(raw_response, _, headers):
            # Capture both the raw generated model and the public transform so
            # the two representations can be compared field by field below.
            analyze_result = client._deserialize(AnalyzeOperationResult, raw_response)
            extracted_id_document = prepare_prebuilt_models(analyze_result)
            responses.append(analyze_result)
            responses.append(extracted_id_document)

        with open(self.identity_document_license_jpg, "rb") as fd:
            my_file = fd.read()
        async with client:
            poller = await client.begin_recognize_identity_documents(
                identity_document=my_file,
                include_field_elements=True,
                cls=callback
            )
            result = await poller.result()
        raw_response = responses[0]
        returned_model = responses[1]
        id_document = returned_model[0]
        actual = raw_response.analyze_result.document_results[0].fields
        read_results = raw_response.analyze_result.read_results
        document_results = raw_response.analyze_result.document_results
        page_results = raw_response.analyze_result.page_results
        self.assertFormFieldsTransformCorrect(id_document.fields, actual, read_results)
        # check page range
        assert id_document.page_range.first_page_number == document_results[0].page_range[0]
        assert id_document.page_range.last_page_number == document_results[0].page_range[1]
        # Check page metadata
        self.assertFormPagesTransformCorrect(id_document.pages, read_results, page_results)

    @FormRecognizerPreparer()
    @FormRecognizerClientPreparer()
    @recorded_by_proxy_async
    async def test_identity_document_jpg_include_field_elements(self, client):
        with open(self.identity_document_license_jpg, "rb") as fd:
            id_document = fd.read()
        async with client:
            poller = await client.begin_recognize_identity_documents(id_document, include_field_elements=True)
            result = await poller.result()
        assert len(result) == 1
        id_document = result[0]
        self.assertFormPagesHasValues(id_document.pages)
        for field in id_document.fields.values():
            # CountryRegion/Region carry plain values; every other field must
            # also expose its underlying text elements when requested.
            if field.name == "CountryRegion":
                assert field.value == "USA"
                continue
            elif field.name == "Region":
                assert field.value == "Washington"
            else:
                self.assertFieldElementsHasValues(field.value_data.field_elements, id_document.page_range.first_page_number)

    @pytest.mark.live_test_only
    @FormRecognizerPreparer()
    @FormRecognizerClientPreparer()
    async def test_identity_document_continuation_token(self, **kwargs):
        client = kwargs.pop("client")
        with open(self.identity_document_license_jpg, "rb") as fd:
            id_document = fd.read()
        async with client:
            # A second poller resumed from the first one's continuation token
            # must reach the same result.
            initial_poller = await client.begin_recognize_identity_documents(id_document)
            cont_token = initial_poller.continuation_token()
            poller = await client.begin_recognize_identity_documents(None, continuation_token=cont_token)
            result = await poller.result()
            assert result is not None
            await initial_poller.wait()  # necessary so azure-devtools doesn't throw assertion error

    @FormRecognizerPreparer()
    @FormRecognizerClientPreparer(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
    async def test_identity_document_v2(self, **kwargs):
        client = kwargs.pop("client")
        with open(self.identity_document_license_jpg, "rb") as fd:
            id_document = fd.read()
        # The identity-document API is gated to service API version >= 2.1.
        with pytest.raises(ValueError) as e:
            async with client:
                await client.begin_recognize_identity_documents(id_document)
        assert "Method 'begin_recognize_identity_documents' is only available for API version V2_1 and up" in str(e.value)

    @FormRecognizerPreparer()
    @FormRecognizerClientPreparer()
    @recorded_by_proxy_async
    async def test_pages_kwarg_specified(self, client):
        with open(self.identity_document_license_jpg, "rb") as fd:
            id_document = fd.read()
        async with client:
            # The pages kwarg must be forwarded verbatim as a query parameter.
            poller = await client.begin_recognize_identity_documents(id_document, pages=["1"])
            assert '1' == poller._polling_method._initial_response.http_response.request.query['pages']
            result = await poller.result()
            assert result
|
Azure/azure-sdk-for-python
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_frc_identity_documents_async.py
|
Python
|
mit
| 8,158 | 0.003187 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2004 Robert Kaye
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from PyQt4 import QtGui, QtCore
import gettext
import locale
import getopt
import os.path
import shutil
import signal
import sys
from collections import deque
# Install gettext "noop" function.
# N_ marks strings for translation extraction without translating them at
# runtime; injecting it into builtins makes it available to every module.
import __builtin__
__builtin__.__dict__['N_'] = lambda a: a
# Py2exe 0.6.6 has broken fake_getline which doesn't work with Python 2.5
if hasattr(sys, "frozen"):
    import linecache
    def fake_getline(filename, lineno, module_globals = None):
        # Frozen builds have no source files on disk; always report no source.
        return ''
    linecache.getline = fake_getline
    del linecache, fake_getline
# A "fix" for http://python.org/sf/1438480
def _patched_shutil_copystat(src, dst):
try: _orig_shutil_copystat(src, dst)
except OSError: pass
_orig_shutil_copystat = shutil.copystat
shutil.copystat = _patched_shutil_copystat
import picard.resources
import picard.plugins
from picard import version_string, log, acoustid
from picard.album import Album, NatAlbum
from picard.browser.browser import BrowserIntegration
from picard.browser.filelookup import FileLookup
from picard.cluster import Cluster, ClusterList, UnmatchedFiles
from picard.config import Config
from picard.disc import Disc
from picard.file import File
from picard.formats import open as open_file
from picard.track import Track, NonAlbumTrack
from picard.releasegroup import ReleaseGroup
from picard.collection import load_user_collections
from picard.ui.mainwindow import MainWindow
from picard.ui.itemviews import BaseTreeView
from picard.plugin import PluginManager
from picard.acoustidmanager import AcoustIDManager
from picard.util import (
decode_filename,
encode_filename,
partial,
queue,
thread,
mbid_validate,
check_io_encoding
)
from picard.webservice import XmlWebService
class Tagger(QtGui.QApplication):
file_state_changed = QtCore.pyqtSignal(int)
listen_port_changed = QtCore.pyqtSignal(int)
cluster_added = QtCore.pyqtSignal(Cluster)
cluster_removed = QtCore.pyqtSignal(Cluster)
album_added = QtCore.pyqtSignal(Album)
album_removed = QtCore.pyqtSignal(Album)
__instance = None
def __init__(self, args, localedir, autoupdate, debug=False):
QtGui.QApplication.__init__(self, args)
self.__class__.__instance = self
self._args = args
self._autoupdate = autoupdate
self.config = Config()
if sys.platform == "win32":
userdir = os.environ.get("APPDATA", "~\\Application Data")
else:
userdir = os.environ.get("XDG_CONFIG_HOME", "~/.config")
self.userdir = os.path.join(os.path.expanduser(userdir), "MusicBrainz", "Picard")
# Initialize threading and allocate threads
self.thread_pool = thread.ThreadPool(self)
self.load_queue = queue.Queue()
self.save_queue = queue.Queue()
self.analyze_queue = queue.Queue()
self.other_queue = queue.Queue()
threads = self.thread_pool.threads
for i in range(4):
threads.append(thread.Thread(self.thread_pool, self.load_queue))
threads.append(thread.Thread(self.thread_pool, self.save_queue))
threads.append(thread.Thread(self.thread_pool, self.other_queue))
threads.append(thread.Thread(self.thread_pool, self.other_queue))
threads.append(thread.Thread(self.thread_pool, self.analyze_queue))
self.thread_pool.start()
self.stopping = False
# Setup logging
if debug or "PICARD_DEBUG" in os.environ:
self.log = log.DebugLog()
else:
self.log = log.Log()
self.log.debug("Starting Picard %s from %r", picard.__version__, os.path.abspath(__file__))
# TODO remove this before the final release
if sys.platform == "win32":
olduserdir = "~\\Local Settings\\Application Data\\MusicBrainz Picard"
else:
olduserdir = "~/.picard"
olduserdir = os.path.expanduser(olduserdir)
if os.path.isdir(olduserdir):
self.log.info("Moving %s to %s", olduserdir, self.userdir)
try:
shutil.move(olduserdir, self.userdir)
except:
pass
QtCore.QObject.tagger = self
QtCore.QObject.config = self.config
QtCore.QObject.log = self.log
check_io_encoding()
self.setup_gettext(localedir)
self.xmlws = XmlWebService()
load_user_collections()
# Initialize fingerprinting
self._acoustid = acoustid.AcoustIDClient()
self._acoustid.init()
# Load plugins
self.pluginmanager = PluginManager()
self.user_plugin_dir = os.path.join(self.userdir, "plugins")
if not os.path.exists(self.user_plugin_dir):
os.makedirs(self.user_plugin_dir)
self.pluginmanager.load_plugindir(self.user_plugin_dir)
if hasattr(sys, "frozen"):
self.pluginmanager.load_plugindir(os.path.join(os.path.dirname(sys.argv[0]), "plugins"))
else:
self.pluginmanager.load_plugindir(os.path.join(os.path.dirname(__file__), "plugins"))
self.acoustidmanager = AcoustIDManager()
self.browser_integration = BrowserIntegration()
self.files = {}
self.clusters = ClusterList()
self.albums = {}
self.release_groups = {}
self.mbid_redirects = {}
self.unmatched_files = UnmatchedFiles()
self.nats = None
self.window = MainWindow()
def remove_va_file_naming_format(merge=True):
if merge:
self.config.setting["file_naming_format"] = \
"$if($eq(%compilation%,1),\n$noop(Various Artist albums)\n"+\
"%s,\n$noop(Single Artist Albums)\n%s)" %\
(self.config.setting["va_file_naming_format"].toString(),
self.config.setting["file_naming_format"])
self.config.setting.remove("va_file_naming_format")
self.config.setting.remove("use_va_format")
if "va_file_naming_format" in self.config.setting\
and "use_va_format" in self.config.setting:
if self.config.setting["use_va_format"].toBool():
remove_va_file_naming_format()
self.window.show_va_removal_notice()
elif self.config.setting["va_file_naming_format"].toString() !=\
r"$if2(%albumartist%,%artist%)/%album%/$if($gt(%totaldiscs%,1),%discnumber%-,)$num(%tracknumber%,2) %artist% - %title%":
if self.window.confirm_va_removal():
remove_va_file_naming_format(merge=False)
else:
remove_va_file_naming_format()
else:
# default format, disabled
remove_va_file_naming_format(merge=False)
def setup_gettext(self, localedir):
"""Setup locales, load translations, install gettext functions."""
ui_language = self.config.setting["ui_language"]
if ui_language:
os.environ['LANGUAGE'] = ''
os.environ['LANG'] = ui_language
if sys.platform == "win32":
try:
locale.setlocale(locale.LC_ALL, os.environ["LANG"])
except KeyError:
os.environ["LANG"] = locale.getdefaultlocale()[0]
try:
locale.setlocale(locale.LC_ALL, "")
except:
pass
except:
pass
else:
if sys.platform == "darwin" and not ui_language:
try:
import Foundation
defaults = Foundation.NSUserDefaults.standardUserDefaults()
os.environ["LANG"] = defaults.objectForKey_("AppleLanguages")[0]
except:
pass
try:
locale.setlocale(locale.LC_ALL, "")
except:
pass
try:
self.log.debug("Loading gettext translation, localedir=%r", localedir)
self.translation = gettext.translation("picard", localedir)
self.translation.install(True)
ungettext = self.translation.ungettext
except IOError:
__builtin__.__dict__['_'] = lambda a: a
def ungettext(a, b, c):
if c == 1: return a
else: return b
__builtin__.__dict__['ungettext'] = ungettext
def move_files_to_album(self, files, albumid=None, album=None):
"""Move `files` to tracks on album `albumid`."""
if album is None:
album = self.load_album(albumid)
if album.loaded:
album.match_files(files)
else:
for file in list(files):
file.move(album.unmatched_files)
def move_file_to_album(self, file, albumid):
"""Move `file` to a track on album `albumid`."""
self.move_files_to_album([file], albumid)
def move_file_to_track(self, file, albumid, trackid):
"""Move `file` to track `trackid` on album `albumid`."""
album = self.load_album(albumid)
file.move(album.unmatched_files)
album.run_when_loaded(partial(album.match_file, file, trackid))
def create_nats(self):
if self.nats is None:
self.nats = NatAlbum()
self.albums["NATS"] = self.nats
self.album_added.emit(self.nats)
return self.nats
def move_file_to_nat(self, file, trackid, node=None):
self.create_nats()
file.move(self.nats.unmatched_files)
nat = self.load_nat(trackid, node=node)
nat.run_when_loaded(partial(file.move, nat))
if nat.loaded:
self.nats.update()
def exit(self):
self.stopping = True
self._acoustid.done()
self.thread_pool.stop()
self.browser_integration.stop()
self.xmlws.stop()
def _run_init(self):
if self._args:
files = []
for file in self._args:
if os.path.isdir(file):
self.add_directory(decode_filename(file))
else:
files.append(decode_filename(file))
if files:
self.add_files(files)
del self._args
def run(self):
self.browser_integration.start()
self.window.show()
QtCore.QTimer.singleShot(0, self._run_init)
res = self.exec_()
self.exit()
return res
def event(self, event):
if event.type() == QtCore.QEvent.FileOpen:
f = str(event.file())
self.add_files([f])
# We should just return True here, except that seems to
# cause the event's sender to get a -9874 error, so
# apparently there's some magic inside QFileOpenEvent...
return 1
return QtGui.QApplication.event(self, event)
    def _file_loaded(self, target, result=None, error=None):
        """Callback invoked when a file finished loading its metadata.

        Decides where the file goes: an explicit drop target wins; otherwise
        embedded MusicBrainz ids are used (unless disabled); otherwise the
        file may be queued for acoustic analysis.
        """
        file = result
        if file is not None and error is None and not file.has_error():
            trackid = file.metadata['musicbrainz_trackid']
            if target is not None:
                # The user dropped the file onto a specific target.
                self.move_files([file], target)
            elif not self.config.setting["ignore_file_mbids"]:
                albumid = file.metadata['musicbrainz_albumid']
                if mbid_validate(albumid):
                    if mbid_validate(trackid):
                        self.move_file_to_track(file, albumid, trackid)
                    else:
                        self.move_file_to_album(file, albumid)
                elif mbid_validate(trackid):
                    # Track id without an album id: non-album track.
                    self.move_file_to_nat(file, trackid)
                elif self.config.setting['analyze_new_files'] and file.can_analyze():
                    # No usable embedded ids: fall back to fingerprinting.
                    self.analyze([file])
            elif self.config.setting['analyze_new_files'] and file.can_analyze():
                # Embedded ids are ignored by configuration: analyze instead.
                self.analyze([file])
def move_files(self, files, target):
if isinstance(target, (Track, Cluster)):
for file in files:
file.move(target)
elif isinstance(target, File):
for file in files:
file.move(target.parent)
elif isinstance(target, Album):
self.move_files_to_album(files, album=target)
elif isinstance(target, ClusterList):
self.cluster(files)
def add_files(self, filenames, target=None):
"""Add files to the tagger."""
self.log.debug("Adding files %r", filenames)
new_files = []
for filename in filenames:
filename = os.path.normpath(os.path.realpath(filename))
if filename not in self.files:
file = open_file(filename)
if file:
self.files[filename] = file
new_files.append(file)
if new_files:
if target is None or target is self.unmatched_files:
self.unmatched_files.add_files(new_files)
target = None
for file in new_files:
file.load(partial(self._file_loaded, target))
def add_directory(self, path):
walk = os.walk(unicode(path))
def get_files():
try:
root, dirs, files = walk.next()
except StopIteration:
return None
else:
self.window.set_statusbar_message(N_("Loading directory %s"), root)
return (os.path.join(root, f) for f in files)
def process(result=None, error=None):
if result:
if error is None:
self.add_files(result)
self.other_queue.put((get_files, process, QtCore.Qt.LowEventPriority))
process(True, False)
def get_file_lookup(self):
"""Return a FileLookup object."""
return FileLookup(self, self.config.setting["server_host"],
self.config.setting["server_port"],
self.browser_integration.port)
def search(self, text, type, adv=False):
"""Search on the MusicBrainz website."""
lookup = self.get_file_lookup()
getattr(lookup, type + "Search")(text, adv)
def browser_lookup(self, item):
"""Lookup the object's metadata on the MusicBrainz website."""
lookup = self.get_file_lookup()
metadata = item.metadata
albumid = metadata["musicbrainz_albumid"]
trackid = metadata["musicbrainz_trackid"]
# Only lookup via MB IDs if matched to a DataObject; otherwise ignore and use metadata details
if isinstance(item, Track) and trackid:
lookup.trackLookup(trackid)
elif isinstance(item, Album) and albumid:
lookup.albumLookup(albumid)
else:
lookup.tagLookup(
metadata["albumartist"] if item.is_album_like() else metadata["artist"],
metadata["album"],
metadata["title"],
metadata["tracknumber"],
'' if item.is_album_like() else str(metadata.length),
item.filename if isinstance(item, File) else '')
def get_files_from_objects(self, objects, save=False):
"""Return list of files from list of albums, clusters, tracks or files."""
files = set()
for obj in objects:
files.update(obj.iterfiles(save))
return list(files)
def _file_saved(self, result=None, error=None):
if error is None:
file, old_filename, new_filename = result
del self.files[old_filename]
self.files[new_filename] = file
def save(self, objects):
"""Save the specified objects."""
files = self.get_files_from_objects(objects, save=True)
for file in files:
file.save(self._file_saved, self.tagger.config.setting)
def load_album(self, id, discid=None):
id = self.mbid_redirects.get(id, id)
album = self.albums.get(id)
if album:
return album
album = Album(id, discid=discid)
self.albums[id] = album
self.album_added.emit(album)
album.load()
return album
def load_nat(self, id, node=None):
self.create_nats()
nat = self.get_nat_by_id(id)
if nat:
return nat
nat = NonAlbumTrack(id)
self.nats.tracks.append(nat)
self.nats.update(True)
if node:
nat._parse_recording(node)
else:
nat.load()
return nat
def get_nat_by_id(self, id):
if self.nats is not None:
for nat in self.nats.tracks:
if nat.id == id:
return nat
def get_release_group_by_id(self, id):
return self.release_groups.setdefault(id, ReleaseGroup(id))
def remove_files(self, files, from_parent=True):
"""Remove files from the tagger."""
for file in files:
if self.files.has_key(file.filename):
file.clear_lookup_task()
self._acoustid.stop_analyze(file)
del self.files[file.filename]
file.remove(from_parent)
def remove_album(self, album):
"""Remove the specified album."""
self.log.debug("Removing %r", album)
album.stop_loading()
self.remove_files(self.get_files_from_objects([album]))
del self.albums[album.id]
if album.release_group:
album.release_group.remove_album(album.id)
if album == self.nats:
self.nats = None
self.album_removed.emit(album)
def remove_cluster(self, cluster):
"""Remove the specified cluster."""
if not cluster.special:
self.log.debug("Removing %r", cluster)
files = list(cluster.files)
cluster.files = []
cluster.clear_lookup_task()
self.remove_files(files, from_parent=False)
self.clusters.remove(cluster)
self.cluster_removed.emit(cluster)
def remove(self, objects):
"""Remove the specified objects."""
files = []
for obj in objects:
if isinstance(obj, File):
files.append(obj)
elif isinstance(obj, Track):
files.extend(obj.linked_files)
elif isinstance(obj, Album):
self.remove_album(obj)
elif isinstance(obj, Cluster):
self.remove_cluster(obj)
if files:
self.remove_files(files)
def _lookup_disc(self, disc, result=None, error=None):
self.restore_cursor()
if error is not None:
QtGui.QMessageBox.critical(self.window, _(u"CD Lookup Error"),
_(u"Error while reading CD:\n\n%s") % error)
else:
disc.lookup()
def lookup_cd(self, action):
"""Reads CD from the selected drive and tries to lookup the DiscID on MusicBrainz."""
if isinstance(action, QtGui.QAction):
device = unicode(action.text())
else:
device = self.config.setting["cd_lookup_device"].split(",", 1)[0]
disc = Disc()
self.set_wait_cursor()
self.other_queue.put((
partial(disc.read, encode_filename(device)),
partial(self._lookup_disc, disc),
QtCore.Qt.LowEventPriority))
@property
def use_acoustid(self):
return self.config.setting["fingerprinting_system"] == "acoustid"
def analyze(self, objs):
"""Analyze the file(s)."""
files = self.get_files_from_objects(objs)
for file in files:
file.set_pending()
if self.use_acoustid:
self._acoustid.analyze(file, partial(file._lookup_finished, 'acoustid'))
# =======================================================================
# Metadata-based lookups
# =======================================================================
def autotag(self, objects):
for obj in objects:
if obj.can_autotag():
obj.lookup_metadata()
# =======================================================================
# Clusters
# =======================================================================
def cluster(self, objs):
"""Group files with similar metadata to 'clusters'."""
self.log.debug("Clustering %r", objs)
if len(objs) <= 1 or self.unmatched_files in objs:
files = list(self.unmatched_files.files)
else:
files = self.get_files_from_objects(objs)
fcmp = lambda a, b: (
cmp(a.discnumber, b.discnumber) or
cmp(a.tracknumber, b.tracknumber) or
cmp(a.base_filename, b.base_filename))
for name, artist, files in Cluster.cluster(files, 1.0):
QtCore.QCoreApplication.processEvents()
cluster = self.load_cluster(name, artist)
for file in sorted(files, fcmp):
file.move(cluster)
def load_cluster(self, name, artist):
for cluster in self.clusters:
cm = cluster.metadata
if name == cm["album"] and artist == cm["artist"]:
return cluster
cluster = Cluster(name, artist)
self.clusters.append(cluster)
self.cluster_added.emit(cluster)
return cluster
# =======================================================================
# Utils
# =======================================================================
def set_wait_cursor(self):
"""Sets the waiting cursor."""
QtGui.QApplication.setOverrideCursor(
QtGui.QCursor(QtCore.Qt.WaitCursor))
def restore_cursor(self):
"""Restores the cursor set by ``set_wait_cursor``."""
QtGui.QApplication.restoreOverrideCursor()
def refresh(self, objs):
for obj in objs:
obj.load()
@classmethod
def instance(cls):
return cls.__instance
def num_files(self):
return len(self.files)
def help():
print """Usage: %s [OPTIONS] [FILE] [FILE] ...
Options:
-d, --debug enable debug-level logging
-h, --help display this help and exit
-v, --version display version information and exit
""" % (sys.argv[0],)
def version():
print """MusicBrainz Picard %s""" % (version_string)
def main(localedir=None, autoupdate=True):
    """Command-line entry point: parse options, build the Tagger, run it."""
    # Restore default SIGINT handling so Ctrl+C terminates the application.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    opts, args = getopt.getopt(sys.argv[1:], "hvd", ["help", "version", "debug"])
    kwargs = {}
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            return help()
        elif opt in ("-v", "--version"):
            return version()
        elif opt in ("-d", "--debug"):
            kwargs["debug"] = True
    # Remaining positional arguments are files/directories to load on start.
    tagger = Tagger(args, localedir, autoupdate, **kwargs)
    sys.exit(tagger.run())
|
mwiencek/picard
|
picard/tagger.py
|
Python
|
gpl-2.0
| 23,746 | 0.002653 |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM9_if_IsolatedLHS(HimesisPreConditionPatternLHS):
    # Generated (AToM3/Himesis) pre-condition pattern: an empty LHS graph
    # whose constraint always holds, i.e. the rule is always applicable.
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HMM9_if_IsolatedLHS.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HMM9_if_IsolatedLHS, self).__init__(name='HMM9_if_IsolatedLHS', num_nodes=0, edges=[])
        # Add the edges
        self.add_edges([])
        # Set the graph attributes
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        # Constraint source kept as a string; it is executed by the engine.
        self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM9_if')
        # Set the node attributes
        # Add the attribute equations
        self["equations"] = []
    def constraint(self, PreNode, graph):
        """
        Executable constraint code.
        @param PreNode: Function taking an integer as parameter
        and returns the node corresponding to that label.
        """
        #===============================================================================
        # This code is executed after the nodes in the LHS have been matched.
        # You can access a matched node labelled n by: PreNode('n').
        # To access attribute x of node n, use: PreNode('n')['x'].
        # The given constraint must evaluate to a boolean expression:
        # returning True enables the rule to be applied,
        # returning False forbids the rule from being applied.
        #===============================================================================
        return True
|
levilucio/SyVOLT
|
UMLRT2Kiltera_MM/Properties/from_thesis/HMM9_if_IsolatedLHS.py
|
Python
|
mit
| 2,478 | 0.010896 |
#!/usr/bin/env python
import argparse
import logging
import os
import shutil
import sys
import glob
# Location of saved templates
SAVE_DIR = os.environ.get("RECYCLE_TEMPLATES_DIR", "~/.recycle")
try:
input = raw_input
except NameError:
pass
def should_overwrite(typeOfThing, path):
    """Interactively ask whether the existing *path* should be replaced.

    ``typeOfThing`` is a human-readable label ("Template", "File or
    directory") used in the prompt.  Loops until 'y' or 'n' is entered;
    returns True to approve the overwrite.
    """
    assert os.path.exists(path)
    nameOfThing = get_name(path)
    logging.debug("{} already exists. Asking to overwrite...".format(path))
    res = ""
    while res != "y" and res != "n":
        prompt = "{0} {1} already exists. Do you want to replace it? " \
            "(y/n) ".format(typeOfThing, nameOfThing)
        res = input(prompt)
        res = res.lower()
    if res == "y":
        logging.debug("Overwrite approved. Deleting {}".format(path))
        return True
    else:
        logging.debug("Overwrite denied.")
        return False
def copy(contents, dest):
    """Copy each file/directory path in *contents* into directory *dest*.

    *dest* is created if it does not exist.  On a name collision the user
    is asked (via should_overwrite) whether to replace the existing entry;
    declined entries are skipped.

    Raises IOError if a source path is neither a file nor a directory.
    """
    if not os.path.isdir(dest):
        os.makedirs(dest)
    for obj in contents:
        name = os.path.basename(os.path.normpath(obj))
        destName = os.path.join(dest, name)
        if os.path.exists(destName):
            if should_overwrite("File or directory", destName):
                if os.path.isdir(destName):
                    shutil.rmtree(destName)
                else:
                    os.remove(destName)
            else:
                continue
        assert not os.path.exists(destName)
        if os.path.isdir(obj):
            shutil.copytree(obj, destName)
        elif os.path.isfile(obj):
            shutil.copy(obj, dest)
        else:
            # Fixed typo in the error message ("doest" -> "does").
            raise IOError("Source does not exist!")
def get_name(path):
    """Return the final path component of *path*, ignoring any trailing
    separator (e.g. '/a/b/' -> 'b')."""
    normalized = os.path.normpath(path)
    return os.path.basename(normalized)
def get_save_path(templateName):
    """Return the directory where the template *templateName* is stored."""
    global SAVE_DIR
    return os.path.join(SAVE_DIR, templateName)
def setup_logging():
    """Configure logging: DEBUG to a log file in SAVE_DIR, INFO to console."""
    global SAVE_DIR
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s %(levelname)-8s %(message)s",
                        datefmt="%m-%d %H:%M",
                        filename=os.path.join(SAVE_DIR, "recycle.log"),
                        filemode="w")
    console = logging.StreamHandler()
    # INFO or higher goes to console
    console.setLevel(logging.INFO)
    formatter = logging.Formatter("%(levelname)-8s %(message)s")
    console.setFormatter(formatter)
    logging.getLogger("").addHandler(console)
def init():
    """Normalize SAVE_DIR (expand ~ and env vars, make absolute), create it
    if missing, and configure logging."""
    global SAVE_DIR
    SAVE_DIR = os.path.expanduser(SAVE_DIR)
    SAVE_DIR = os.path.expandvars(SAVE_DIR)
    SAVE_DIR = os.path.abspath(SAVE_DIR)
    if not os.path.isdir(SAVE_DIR):
        os.makedirs(SAVE_DIR)
    setup_logging()
    logging.debug("Using Python version {}".format(sys.version))
def handle_new(name, files):
    """Create (or overwrite) the template *name* from the given glob patterns.

    Expands each pattern, deduplicates the matches and copies them into the
    template's save directory.  Asks before replacing an existing template.
    """
    save_path = get_save_path(name)
    fileList = []
    for filename in files:
        fileList += [os.path.abspath(f) for f in glob.glob(filename)]
    # Remove duplicates
    fileList = list(set(fileList))
    # Truthiness test instead of 'len(fileList) is 0': identity comparison
    # with an int literal only worked by CPython small-int caching.
    if not fileList:
        logging.error("No files found matching '{}'".format(files))
        return
    if os.path.isdir(save_path):
        # Template with that name already exists
        if should_overwrite("Template", save_path):
            handle_delete(name)
        else:
            return
    assert not os.path.isdir(save_path)
    logging.debug("Creating new template '{}' from {}".format(name, files))
    try:
        copy(fileList, save_path)
    except IOError as e:
        logging.error(e.strerror)
    assert os.path.isdir(save_path)
    logging.debug("Boilerplate created!")
def handle_use(name):
    """Copy the contents of the saved template *name* into the current
    working directory, logging an error if the template does not exist."""
    save_path = get_save_path(name)
    if os.path.isdir(save_path):
        logging.debug("Using template '{}'".format(name))
        contents = os.listdir(save_path)
        contentPaths = [os.path.join(save_path, c) for c in contents]
        try:
            copy(contentPaths, os.getcwd())
        except IOError as e:
            logging.error("Your recycle directory doesn't seem to exist...")
    else:
        logging.error("No template with the name '{}' was found!".format(name))
def handle_list():
    """Print the name of every saved template, one per line."""
    global SAVE_DIR
    assert os.path.isdir(SAVE_DIR)
    # First os.walk() tuple: index [1] is the list of directory names.
    names = next(os.walk(SAVE_DIR))[1]
    for line in names:
        # NOTE(review): entries here are bare directory names, so this
        # prefix strip looks unreachable (and [:-1] would also drop the
        # final character) -- confirm intent before removing.
        if line.startswith(SAVE_DIR):
            line = line[len(SAVE_DIR):-1]
        print(line)
def handle_delete(name):
    """Delete the saved template *name*, logging an error if it is absent."""
    target = get_save_path(name)
    if not os.path.isdir(target):
        logging.error("No template with the name '{}' was found!".format(name))
    else:
        shutil.rmtree(target)
    assert not os.path.isdir(target)
def handle_location():
    """Print the templates directory path, with a trailing separator."""
    global SAVE_DIR
    print(os.path.normpath(SAVE_DIR) + os.sep)
def parseargs():
    """Build the argument parser (sub-commands: new, use, list, delete,
    location) and return the parsed arguments.  Each sub-parser stores its
    command name in ``args.mode`` via set_defaults."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    new_parser = subparsers.add_parser(
        "new", help="Create a new template or overwrite an existing one")
    new_parser.add_argument(
        "name", type=str, help="The name under which to save this template")
    new_parser.add_argument(
        "files", type=str, nargs="+",
        help="The file or directory to save as the template")
    new_parser.set_defaults(mode="new")
    use_parser = subparsers.add_parser(
        "use", help="Insert existing template in the current directory")
    use_parser.add_argument(
        "name", type=str, help="The name of the template to use")
    use_parser.set_defaults(mode="use")
    list_parser = subparsers.add_parser(
        "list", help="List the available template")
    list_parser.set_defaults(mode="list")
    delete_parser = subparsers.add_parser(
        "delete", help="Delete a template")
    delete_parser.add_argument(
        "name", type=str, help="The name of the template to delete")
    delete_parser.set_defaults(mode="delete")
    location_parser = subparsers.add_parser(
        "location", help="Print the current location of the templates directory")
    location_parser.set_defaults(mode="location")
    return parser.parse_args()
def main():
    """Entry point: parse arguments, initialise state, dispatch to the
    handler for the chosen sub-command."""
    args = parseargs()
    init()
    # argparse only runs set_defaults on the chosen subparser, so 'mode'
    # may be missing entirely when no sub-command was given; getattr avoids
    # an AttributeError in that case.
    mode = getattr(args, "mode", None)
    if mode is None:
        logging.error("Mode must be provided. Use --help for more information.")
        return
    # Compare with '==', not 'is': identity of equal string literals is a
    # CPython interning detail and raises SyntaxWarning on modern Pythons.
    if mode == "new":
        handle_new(args.name, args.files)
    elif mode == "use":
        handle_use(args.name)
    elif mode == "list":
        handle_list()
    elif mode == "delete":
        handle_delete(args.name)
    elif mode == "location":
        handle_location()
    else:
        logging.error("Invalid mode")
if __name__ == "__main__":
main()
|
williamg/recycle
|
recycle/recycle.py
|
Python
|
mit
| 6,538 | 0.00153 |
# -*- coding: utf-8 -*-
"""
Ozone Bricklet Plugin
Copyright (C) 2015 Olaf Lüke <olaf@tinkerforge.com>
ozone.py: Ozone Bricklet Plugin Implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QVBoxLayout, QLabel, QHBoxLayout, QSpinBox
from brickv.plugin_system.plugin_base import PluginBase
from brickv.bindings.bricklet_ozone import BrickletOzone
from brickv.plot_widget import PlotWidget
from brickv.async_call import async_call
from brickv.callback_emulator import CallbackEmulator
class OzoneConcentrationLabel(QLabel):
    # QLabel that wraps every value set on it with a fixed caption and unit.
    def setText(self, text):
        text = "Ozone Concentration: " + text + " ppb (parts per billion)"
        super(OzoneConcentrationLabel, self).setText(text)
class Ozone(PluginBase):
    """Brick Viewer plugin for the Ozone Bricklet: shows the current ozone
    concentration (ppb) as a label and a plot, and lets the user configure
    the bricklet's moving-average length."""
    def __init__(self, *args):
        PluginBase.__init__(self, BrickletOzone, *args)
        self.ozone = self.device
        # Polls get_ozone_concentration periodically and delivers values to
        # cb_ozone_concentration; errors increment the plugin error counter.
        self.cbe_ozone_concentration = CallbackEmulator(self.ozone.get_ozone_concentration,
                                                        self.cb_ozone_concentration,
                                                        self.increase_error_count)
        self.ozone_concentration_label = OzoneConcentrationLabel('Ozone Concentration: ')
        self.current_value = None
        plot_list = [['', Qt.red, self.get_current_value]]
        self.plot_widget = PlotWidget('Ozone Concentration [ppb]', plot_list)
        layout_h2 = QHBoxLayout()
        layout_h2.addStretch()
        layout_h2.addWidget(self.ozone_concentration_label)
        layout_h2.addStretch()
        layout = QVBoxLayout(self)
        layout.addLayout(layout_h2)
        layout.addWidget(self.plot_widget)
        # Moving-average length spin box (bricklet accepts 1..50).
        self.spin_average = QSpinBox()
        self.spin_average.setMinimum(1)
        self.spin_average.setMaximum(50)
        self.spin_average.setSingleStep(1)
        self.spin_average.setValue(50)
        self.spin_average.editingFinished.connect(self.spin_average_finished)
        layout_h1 = QHBoxLayout()
        layout_h1.addWidget(QLabel('Length of moving average:'))
        layout_h1.addWidget(self.spin_average)
        layout_h1.addStretch()
        layout.addLayout(layout_h1)
    def get_moving_average_async(self, average):
        # Sync the spin box with the value read from the bricklet.
        self.spin_average.setValue(average)
    def start(self):
        async_call(self.ozone.get_moving_average, None, self.get_moving_average_async, self.increase_error_count)
        async_call(self.ozone.get_ozone_concentration, None, self.cb_ozone_concentration, self.increase_error_count)
        # Poll every 100 ms while the plugin tab is visible.
        self.cbe_ozone_concentration.set_period(100)
        self.plot_widget.stop = False
    def stop(self):
        self.cbe_ozone_concentration.set_period(0)
        self.plot_widget.stop = True
    def destroy(self):
        pass
    def get_url_part(self):
        return 'ozone'
    @staticmethod
    def has_device_identifier(device_identifier):
        return device_identifier == BrickletOzone.DEVICE_IDENTIFIER
    def get_current_value(self):
        # Data source for the plot widget.
        return self.current_value
    def cb_ozone_concentration(self, ozone_concentration):
        self.current_value = ozone_concentration
        self.ozone_concentration_label.setText(str(ozone_concentration))
    def spin_average_finished(self):
        # Push the new moving-average length to the bricklet.
        self.ozone.set_moving_average(self.spin_average.value())
|
D4wN/brickv
|
src/brickv/plugin_system/plugins/ozone/ozone.py
|
Python
|
gpl-2.0
| 3,949 | 0.002026 |
#!/usr/bin/python
import sys
import time
import datetime
import re
import ConfigParser
import os
from operator import attrgetter
# Rolling-snapshot driver: read one [section] per VM from the config file that
# sits next to this script, create a tier-tagged snapshot for each VM and
# rotate (delete) old snapshots of the same tier beyond the retention count.
scriptdir = os.path.abspath(os.path.dirname(sys.argv[0]))
conffile = scriptdir + "/ovirt-vm-rolling-snapshot.conf"
Config = ConfigParser.ConfigParser()
if not os.path.isfile(conffile):
    print "Config file %s does not exists. Exiting." % conffile
    sys.exit(1)
Config.read(conffile)
if len(Config.sections()) < 1:
    print "Config file is not valid. Exiting."
    sys.exit(1)
# Single timestamp shared by all VMs so every section sees the same "now".
basetime = datetime.datetime.now()
for vmname in Config.sections():
    starttime = time.time()
    try:
        # Retention counts per rotation tier (every run / hourly / daily / weekly / monthly).
        etime_to_keep = int(Config.get(vmname, 'etime_to_keep'))
        hourly_to_keep = int(Config.get(vmname, 'hourly_to_keep'))
        daily_to_keep = int(Config.get(vmname, 'daily_to_keep'))
        weekly_to_keep = int(Config.get(vmname, 'weekly_to_keep'))
        monthly_to_keep = int(Config.get(vmname, 'monthly_to_keep'))
        # Scheduling anchors: at which minute/hour/weekday the larger tiers fire.
        time_hours = "%02d" % int(Config.get(vmname, 'time_hours'))
        time_minutes = "%02d" % int(Config.get(vmname, 'time_minutes'))
        time_weekday = "%d" % int(Config.get(vmname, 'time_weekday'))
        time_monthweek = int(Config.get(vmname, 'time_monthweek'))
        # Clamp week-of-month into 1..5; map ISO weekday 7 to strftime's 0 (Sunday).
        if time_monthweek < 1 or time_monthweek > 5:
            time_monthweek = 1
        if time_weekday == "7":
            time_weekday = "0"
        # Keys encode which tiers the current run qualifies for, e.g. "HD__"
        # means the hourly and daily anchors both matched.
        last_to_keep = {"____": etime_to_keep, "H___": hourly_to_keep, "HD__": daily_to_keep, "HDW_": weekly_to_keep,
                        "HDWM": monthly_to_keep}
        hpos = dpos = wpos = mpos = "_"
        if basetime.strftime("%M") == time_minutes:  # configured minute reached -> hourly tier
            hpos = "H"
            if basetime.strftime("%H") == time_hours:  # configured hour reached -> daily tier
                dpos = "D"
                if basetime.strftime("%w") == time_weekday:  # configured weekday reached -> weekly tier
                    wpos = "W"
                    if (int(basetime.strftime("%d")) <= (7 * time_monthweek)) and (
                            int(basetime.strftime("%d")) > (7 * (time_monthweek - 1))):  # configured week of month -> monthly tier
                        mpos = "M"
        snap_time_id = hpos + dpos + wpos + mpos
        deleteonly = ''
        # CLI overrides: argv[1] forces a tier id (raises KeyError if it is not
        # one of the five known keys); argv[2] == 'deleteonly' skips creation
        # and only rotates old snapshots.
        if len(sys.argv) > 1:
            snap_time_id = sys.argv[1]
            if not last_to_keep[snap_time_id]:
                last_to_keep[snap_time_id] = 1
            if len(sys.argv) > 2:
                deleteonly = sys.argv[2]
        if last_to_keep[snap_time_id]:
            print
            print "------------------------------------------------------------"
            print "VM name: " + vmname
            # Import the oVirt SDK lazily and only once, and only when there
            # is actual work to do for some VM.
            try:
                ovirtsdk
            except:
                import ovirtsdk.api
                from ovirtsdk.xml import params
            api = ovirtsdk.api.API(
                url=Config.get(vmname, 'server'),
                username=Config.get(vmname, 'username'),
                password=Config.get(vmname, 'password'),
                insecure=True,
                debug=False
            )
            vm = api.vms.get(vmname)
            print "Begin backup of VM '%s' at %s" % (vmname, datetime.datetime.now().isoformat(" "))
            print "VM status: %s" % str(vm.get_status().state)
            if deleteonly == 'deleteonly':
                print "Skipping snapshot creation."
            else:
                snap_description = "Rolling snapshot " + snap_time_id + " at " + datetime.datetime.now().isoformat(" ")
                print "Creating Snapshot '" + snap_description + "'"
                snapcreation = vm.snapshots.add(params.Snapshot(description=snap_description))
                snaptoclone = ""
                snap_status = ""
                sys.stdout.write( "Snapshot in progress..." )
                sys.stdout.flush()
                # Poll until the new snapshot leaves the 'locked' (in-progress) state.
                while True:
                    snaptoclone = vm.snapshots.get(id=snapcreation.get_id())
                    snap_status = snaptoclone.get_snapshot_status()
                    if snap_status == "locked":
                        time.sleep(5)
                        sys.stdout.write('.')
                        sys.stdout.flush()
                    else:
                        print
                        break
                # Confirm the snapshot actually exists before declaring success.
                for snapi in vm.get_snapshots().list():
                    snapi_id = snapi.get_id()
                    if vm.snapshots.get(id=snapi_id).description == snap_description:
                        snap_status = "ok"
                        break
                    else:
                        snap_status = "error"
                if snap_status != "ok":
                    print "Snapshot creation ERROR!!!"
                    continue
                print "Snapshot done"
                time.sleep(1)
                snapshots_param = params.Snapshots(snapshot=[params.Snapshot(id=snaptoclone.get_id())])
            # Rotation: collect this tier's old snapshots, keep the newest N.
            snaptodel = []
            for snapi in vm.get_snapshots().list():
                snapi_id = snapi.get_id()
                snapi_descr = vm.snapshots.get(id=snapi_id).description
                snapi_time_match = re.match('^Rolling snapshot ' + snap_time_id + ' at', snapi_descr)
                if snapi_time_match:
                    snaptodel.append(snapi)
            snaptodel = sorted(snaptodel, key=attrgetter('creation_time'))
            if last_to_keep[snap_time_id] > 0:
                # Drop the newest N entries from the delete list so they are retained.
                del snaptodel[-last_to_keep[snap_time_id]:]
            for snapitodel in snaptodel:
                print "Deleting old snapshot '" + snapitodel.description + "'"
                snapitodel.delete(async=False)
                oldsndelstatus = sndelstatus = ''
                # Poll until the snapshot lookup raises (snapshot gone ->
                # deletion finished); a status back at 'ok' means the delete
                # request failed and the snapshot is still there.
                while True:
                    try:
                        sndelstatus = vm.snapshots.get(id=snapitodel.get_id()).get_snapshot_status()
                    except Exception, e:
                        break
                    if sndelstatus == oldsndelstatus:
                        sys.stdout.write('.')
                    else:
                        if sndelstatus == 'ok':
                            break
                        sys.stdout.write( "Delete snapshot in progress..." )
                        oldsndelstatus = sndelstatus
                    sys.stdout.flush()
                    time.sleep(5)
                print
                if sndelstatus == 'ok':
                    print "Delete snapshot ERROR!!!"
                else:
                    print "Delete snapshot done."
            eltime = time.time() - starttime
            print "Finished backup of VM '%s' at %s. %d seconds." % (vmname,
                                                                     datetime.datetime.now().isoformat(" "),
                                                                     eltime)
            print
    except Exception, e:
        print e
        print "Backup ERROR!!!"
|
daelmaselli/ovirt-vm-hot-backup
|
ovirt-vm-rolling-snapshot.py
|
Python
|
mit
| 6,894 | 0.003046 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Pytroll developers
# Author(s):
# Adam Dybbroe <Firstname.Lastname @ smhi.se>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Unittesting the helper functions for the AAPP-runner.
"""
import logging
import unittest
from datetime import datetime
from unittest.mock import patch
from aapp_runner.helper_functions import check_if_scene_is_unique
from aapp_runner.read_aapp_config import AappL1Config, AappRunnerConfig
from aapp_runner.tests.test_config import (TEST_YAML_CONTENT_OK,
create_config_from_yaml)
class TestProcessConfigChecking(unittest.TestCase):
    """Tests of the runtime (non-static) config checks done during processing."""

    def setUp(self):
        self.config_complete = create_config_from_yaml(TEST_YAML_CONTENT_OK)

    def _make_aapp_config(self):
        """Build a fresh AappL1Config for the 'norrkoping' / 'xl-band' setup."""
        run_config = AappRunnerConfig("/tmp/mytestfile", 'norrkoping', 'xl-band')
        return AappL1Config(run_config.config, 'xl-band')

    @staticmethod
    def _set_scene(cfg, platform, area, start, end):
        """Assign the scene-identifying fields on *cfg*."""
        cfg['platform_name'] = platform
        cfg['collection_area_id'] = area
        cfg['starttime'] = start
        cfg['endtime'] = end

    @patch('aapp_runner.read_aapp_config.load_config_from_file')
    def test_check_if_scene_is_unique_return_value(self, config):
        """A scene is unique unless an overlapping one was already registered."""
        config.return_value = self.config_complete
        aapp_config = self._make_aapp_config()
        self._set_scene(aapp_config, 'metop03', 'euron1',
                        datetime(2022, 1, 8, 12, 49, 50),
                        datetime(2022, 1, 8, 13, 0, 26))
        aapp_config.job_register = {}
        assert check_if_scene_is_unique(aapp_config)

        aapp_config.job_register = {'metop03': [(datetime(2022, 1, 8, 12, 49, 50),
                                                 datetime(2022, 1, 8, 13, 0, 26), 'euron1')]}
        # An EARS scene (same platform, overlapping time interval and the same
        # area of interest) arriving shortly after must not count as unique.
        self._set_scene(aapp_config, 'metop03', 'euron1',
                        datetime(2022, 1, 8, 12, 50),
                        datetime(2022, 1, 8, 13, 0))
        assert not check_if_scene_is_unique(aapp_config)

    @patch('aapp_runner.read_aapp_config.load_config_from_file')
    def test_check_if_scene_is_unique_logging(self, config):
        """The duplicate-scene check logs one INFO message when a scene is skipped."""
        config.return_value = self.config_complete
        aapp_config = self._make_aapp_config()
        aapp_config.job_register = {'metop03': [(datetime(2022, 1, 8, 12, 49, 50),
                                                 datetime(2022, 1, 8, 13, 0, 26), 'euron1')]}
        # An EARS scene (same platform, overlapping time interval and the same
        # area of interest) arrives shortly after the registered one:
        self._set_scene(aapp_config, 'metop03', 'euron1',
                        datetime(2022, 1, 8, 12, 50),
                        datetime(2022, 1, 8, 13, 0))

        expected_logging = ['INFO:aapp_runner.helper_functions:first message',
                            'INFO:aapp_runner.helper_functions:Processing of scene metop03 2022-01-08 12:49:50 2022-01-08 13:00:26 with overlapping time has been launched previously. Skip it!']
        with self.assertLogs('aapp_runner.helper_functions', level='INFO') as cm:
            logging.getLogger('aapp_runner.helper_functions').info('first message')
            _ = check_if_scene_is_unique(aapp_config)
            self.assertEqual(cm.output, expected_logging)

        with self.assertLogs('aapp_runner.helper_functions', level='WARNING') as cm:
            logging.getLogger('aapp_runner.helper_functions').warning('first message')
            _ = check_if_scene_is_unique(aapp_config)
            self.assertEqual(len(cm.output), 1)

        # A different satellite does not clash with the registered scene:
        self._set_scene(aapp_config, 'metop01', 'euron1',
                        datetime(2022, 1, 8, 12, 50),
                        datetime(2022, 1, 8, 13, 0))
        with self.assertLogs('aapp_runner.helper_functions', level='INFO') as cm:
            logging.getLogger('aapp_runner.helper_functions').info('first message')
            result = check_if_scene_is_unique(aapp_config)
            assert result
            self.assertEqual(len(cm.output), 1)
|
pytroll/pytroll-aapp-runner
|
aapp_runner/tests/test_helper_functions.py
|
Python
|
gpl-3.0
| 5,294 | 0.002456 |
from tests.test_helper import *
from braintree.resource import Resource
class TestResource(unittest.TestCase):
    """Unit tests for Resource.verify_keys signature validation."""

    def test_verify_keys_allows_wildcard_keys(self):
        sig = [{"foo": [{"bar": ["__any_key__"]}]}]
        params = {
            "foo[bar][lower]": "lowercase",
            "foo[bar][UPPER]": "uppercase",
            "foo[bar][123]": "numeric",
            "foo[bar][under_scores]": "underscores",
            "foo[bar][dash-es]": "dashes",
            "foo[bar][ABC-abc_123]": "all together",
        }
        Resource.verify_keys(params, sig)

    @raises(KeyError)
    def test_verify_keys_escapes_brackets_in_signature(self):
        # A flattened key must not be mistaken for a nested wildcard match.
        sig = [{"customer": [{"custom_fields": ["__any_key__"]}]}]
        Resource.verify_keys({"customer_id": "value"}, sig)

    def test_verify_keys_works_with_array_param(self):
        sig = [{"customer": ["one", "two"]}]
        Resource.verify_keys({"customer": {"one": "foo"}}, sig)

    @raises(KeyError)
    def test_verify_keys_raises_on_bad_array_param(self):
        sig = [{"customer": ["one", "two"]}]
        Resource.verify_keys({"customer": {"invalid": "foo"}}, sig)

    def test_verify_keys_works_with_arrays(self):
        sig = [{"add_ons": [{"update": ["existing_id", "quantity"]}]}]
        params = {"add_ons": {"update": [{"existing_id": "foo", "quantity": 10}]}}
        Resource.verify_keys(params, sig)

    @raises(KeyError)
    def test_verify_keys_raises_with_invalid_param_in_arrays(self):
        sig = [{"add_ons": [{"update": ["existing_id", "quantity"]}]}]
        params = {"add_ons": {"update": [{"invalid": "foo", "quantity": 10}]}}
        Resource.verify_keys(params, sig)

    def test_verify_keys_allows_text(self):
        text_string = u"text_string"
        assert isinstance(text_string, TestHelper.text_type)
        sig = [{"customer": [{"custom_fields": [text_string]}]}]
        params = {"customer": {"custom_fields": {text_string: text_string}}}
        Resource.verify_keys(params, sig)

    def test_verify_keys_allows_raw_data(self):
        raw_string = str.encode("raw_string")
        assert isinstance(raw_string, TestHelper.raw_type)
        sig = [{"customer": [{"custom_fields": [raw_string]}]}]
        params = {"customer": {"custom_fields": {raw_string: raw_string}}}
        Resource.verify_keys(params, sig)
|
braintree/braintree_python
|
tests/unit/test_resource.py
|
Python
|
mit
| 3,302 | 0.000909 |
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import sys
import argparse
import pkg_resources
from difflib import get_close_matches
from .iaas_client.actions import ActionManager as IaaSActionManager
from .qs_client.actions import ActionManager as QSActionManager
# Top-level services the CLI can dispatch to (qingcloud <service> <action>).
SERVICES = ('iaas', 'qs')
# Formatting helpers for the usage text printed by the exit_* helpers below.
INDENT = ' ' * 2
NEWLINE = '\n' + INDENT
def exit_due_to_invalid_service(suggest_services=None):
    """Print usage text listing the valid services (plus optional close-match
    suggestions) and terminate the process with a non-zero exit code."""
    usage = (NEWLINE + '%(prog)s <service> <action> [parameters]\n\n'
             + 'Here are valid services:\n\n'
             + INDENT + NEWLINE.join(SERVICES))
    if suggest_services:
        usage += ('\n\nInvalid service, maybe you meant:\n '
                  + ','.join(suggest_services))
    parser = argparse.ArgumentParser(prog='qingcloud', usage=usage)
    parser.print_help()
    sys.exit(-1)
def exit_due_to_invalid_action(service, suggest_actions=None):
    """Print usage text listing valid actions for *service* (plus optional
    close-match suggestions) and terminate with a non-zero exit code."""
    usage = (NEWLINE + '%(prog)s <action> [parameters]\n\n'
             + 'Here are valid actions:\n\n'
             + INDENT + NEWLINE.join(get_valid_actions(service)))
    if suggest_actions:
        usage += ('\n\nInvalid action, maybe you meant:\n '
                  + NEWLINE.join(suggest_actions))
    parser = argparse.ArgumentParser(prog='qingcloud %s' % service, usage=usage)
    parser.print_help()
    sys.exit(-1)
def get_valid_actions(service):
    """Return the list of valid action names for *service*.

    Returns None for an unknown service (callers validate first).
    """
    managers = {'iaas': IaaSActionManager, 'qs': QSActionManager}
    manager = managers.get(service)
    if manager is not None:
        return manager.get_valid_actions()
def get_action(service, action):
    """Look up the action handler for *service*/*action*.

    Returns None for an unknown service (callers validate first).
    """
    if service == 'iaas':
        return IaaSActionManager.get_action(action)
    if service == 'qs':
        return QSActionManager.get_action(action)
def check_argument(args):
    """Validate the CLI argument vector, exiting with usage help on any problem.

    Handles --version/-v, unknown services and unknown actions; on a near-miss
    the closest valid names are suggested.
    """
    if len(args) < 2:
        exit_due_to_invalid_service()

    if args[1].lower() in ('--version', '-v'):
        version = pkg_resources.require("qingcloud-cli")[0].version
        print('qingcloud-cli version %s' % version)
        sys.exit(0)

    service = args[1]
    if service not in SERVICES:
        exit_due_to_invalid_service(get_close_matches(service, SERVICES))

    if len(args) < 3:
        exit_due_to_invalid_action(service)

    action = args[2]
    valid_actions = get_valid_actions(service)
    if action not in valid_actions:
        exit_due_to_invalid_action(service, get_close_matches(action, valid_actions))
def main():
    """CLI entry point: validate sys.argv and dispatch to the selected action."""
    argv = sys.argv
    check_argument(argv)
    # argv[1] is the service, argv[2] the action, the rest are its parameters.
    get_action(argv[1], argv[2]).main(argv[3:])
|
yunify/qingcloud-cli
|
qingcloud/cli/driver.py
|
Python
|
apache-2.0
| 3,383 | 0.004138 |
# coding=utf-8
#
# This file is part of SickGear.
#
# SickGear is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickGear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
import re
import datetime
import traceback
from . import generic
from sickbeard import logger, tvcache, helpers
from sickbeard.bs4_parser import BS4Parser
from lib.unidecode import unidecode
class GrabTheInfoProvider(generic.TorrentProvider):
    """Torrent provider that scrapes the GrabTheInfo private tracker."""

    def __init__(self):
        generic.TorrentProvider.__init__(self, 'GrabTheInfo')
        self.url_base = 'http://grabthe.info/'
        # URL templates for login, browse (cache) and search requests.
        self.urls = {'config_provider_home_uri': self.url_base,
                     'login': self.url_base + 'takelogin.php',
                     'cache': self.url_base + 'browse.php?%s',
                     'search': '&search=%s',
                     'get': self.url_base + '%s'}
        # Tracker category ids appended to every browse request
        # (presumably the TV categories -- confirm against the site).
        self.categories = 'c56=1&c8=1&c61=1&c10=1&incldead=0&blah=0'
        self.url = self.urls['config_provider_home_uri']
        # Filled in from the user's provider settings by SickGear.
        self.username, self.password, self.minseed, self.minleech = 4 * [None]
        self.cache = GrabTheInfoCache(self)

    def _do_login(self):
        """Log in to the tracker; return True when the session is authenticated."""
        # The session is authenticated when both 'uid' and 'pass' cookies exist.
        logged_in = lambda: 'uid' in self.session.cookies and 'pass' in self.session.cookies
        if logged_in():
            return True
        if self._check_auth():
            login_params = {'username': self.username, 'password': self.password}
            response = helpers.getURL(self.urls['login'], post_data=login_params, session=self.session)
            if response and logged_in():
                return True
            msg = u'Failed to authenticate with %s, abort provider'
            if response and 'Username or password incorrect' in response:
                msg = u'Invalid username or password for %s. Check settings'
            logger.log(msg % self.name, logger.ERROR)
        return False

    def _do_search(self, search_params, search_mode='eponly', epcount=0, age=0):
        """Search the tracker and return a list of (title, url, seeders) tuples."""
        results = []
        if not self._do_login():
            return results
        items = {'Season': [], 'Episode': [], 'Cache': []}
        # Case-insensitive regexes used to spot detail/download links in rows.
        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'info': 'detail', 'get': 'download'}.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                if isinstance(search_string, unicode):
                    search_string = unidecode(search_string)
                search_url = self.urls['cache'] % self.categories
                if 'cache' != mode.lower():
                    search_url += self.urls['search'] % search_string
                html = self.get_url(search_url)
                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException
                    # Strip markup quirks that confuse the HTML parser.
                    html = html.replace('<?xml version="1.0" encoding="iso-8859-1"?>', '')
                    html = re.sub(r'(</td>)[^<]*</td>', r'\1', html)
                    html = re.sub(r'(<a[^<]*)<a[^<]*?href=details[^<]*', r'\1', html)
                    with BS4Parser(html, 'html.parser') as soup:
                        shows_found = False
                        torrent_rows = soup.find_all('tr')
                        # Locate the header row; torrent rows start right after it.
                        for index, row in enumerate(torrent_rows):
                            if 'type' == row.find_all('td')[0].get_text().strip().lower():
                                shows_found = index
                                break
                        if not shows_found or 2 > (len(torrent_rows) - shows_found):
                            raise generic.HaltParseException
                        for tr in torrent_rows[1 + shows_found:]:
                            try:
                                info = tr.find('a', href=rc['info'])
                                if None is info:
                                    continue
                                title = (('title' in info.attrs.keys() and info['title']) or info.get_text()).strip()
                                download_url = tr.find('a', href=rc['get'])
                                if None is download_url:
                                    continue
                                # Seeders/leechers are the last two cells of the row.
                                seeders, leechers = [int(tr.find_all('td')[x].get_text().strip()) for x in (-2, -1)]
                                if 'Cache' != mode and (seeders < self.minseed or leechers < self.minleech):
                                    continue
                            except (AttributeError, TypeError, KeyError):
                                # Malformed row: skip it rather than abort the page.
                                continue
                            if title:
                                items[mode].append((title, self.urls['get']
                                                    % str(download_url['href'].lstrip('/')), seeders))
                except generic.HaltParseException:
                    pass
                except Exception:
                    logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
                self._log_result(mode, len(items[mode]) - cnt, search_url)
            # for each search mode sort all the items by seeders
            'Cache' != mode and items[mode].sort(key=lambda tup: tup[2], reverse=True)
            results += items[mode]
        return results

    def find_propers(self, search_date=datetime.datetime.today()):
        # NOTE(review): the default is evaluated once at import time, so
        # today() is fixed for the process lifetime -- confirm intended.
        return self._find_propers(search_date)

    def _get_episode_search_strings(self, ep_obj, add_string='', **kwargs):
        """Build episode search strings using '|' as the date separator."""
        return generic.TorrentProvider._get_episode_search_strings(self, ep_obj, add_string, sep_date='|', use_or=False)
class GrabTheInfoCache(tvcache.TVCache):
    """Cache of recent releases fetched from the provider's browse page."""

    def __init__(self, this_provider):
        tvcache.TVCache.__init__(self, this_provider)
        self.minTime = 20  # cache update frequency (minutes between refreshes)

    def _getRSSData(self):
        """Fetch fresh entries from the provider for the cache."""
        return self.provider.get_cache_data()


# Module-level singleton picked up by SickGear's provider registry.
provider = GrabTheInfoProvider()
|
adam111316/SickGear
|
sickbeard/providers/grabtheinfo.py
|
Python
|
gpl-3.0
| 6,393 | 0.002659 |
###
# Copyright (c) 2004, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('MyPing')
except ImportError:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x:x
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified themself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('MyPing', True)
MyPing = conf.registerPlugin('MyPing')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(MyPing, 'someConfigVariableName',
# registry.Boolean(False, _("""Help for someConfigVariableName.""")))
conf.registerChannelValue(MyPing, 'enable',
registry.Boolean(False, """Should plugin work in this channel?"""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
Alcheri/Plugins
|
MyPing/config.py
|
Python
|
bsd-3-clause
| 2,709 | 0.001477 |
import pf
from Var import var
import numpy,string
from Glitch import Glitch
"""A faster version of nextTok(), using memory allocated (once only)
using numpy, and using functions written in C. The slow, pure
python module is NexusToken.py. This version is about twice as fast.
Which one is used is under the control of var.nexus_doFastNextTok.
This one does not work for CStrings, so we need to revert to the old
way whenever CStrings are encountered."""
class NexusToken(object):
    """Shared buffer object wired into the C tokenizer (pf).

    Holds pre-allocated numpy arrays that the C extension fills in place:
    the current token, any embedded comment, and their lengths.  `max` is
    the buffer capacity in characters.
    """
    def __init__(self, max):
        self.max = numpy.array([max], numpy.int32)
        # Length of the token currently held in `tok`.
        self.tokLen = numpy.array([0], numpy.int32)
        self.tok = numpy.array(['x'] * int(self.max), 'c')
        # Comment captured while reading, plus its length.
        self.embeddedCommentLen = numpy.array([0], numpy.int32)
        self.embeddedComment = numpy.array(['x'] * int(self.max), 'c')
        # Length of a comment saved by the C side for the *next* nextTok() call.
        self.savedCommentLen = numpy.array([0], numpy.int32)
        self.filePtr = None
        # Opaque handle to the C-side tokenizer state; the var._nexus_* flags
        # control which kinds of comments are surfaced as tokens.
        self.nexusToken = pf.newNexusToken(var._nexus_writeVisibleComments,
                                           var._nexus_getP4CommandComments,
                                           var._nexus_getWeightCommandComments,
                                           var._nexus_getAllCommandComments,
                                           var._nexus_getLineEndingsAsTokens,
                                           self.max,
                                           self.tokLen,
                                           self.tok,
                                           self.embeddedCommentLen,
                                           self.embeddedComment,
                                           self.savedCommentLen)

# Module-level tokenizer instance, re-allocated by checkLineLengths() when
# a file contains lines longer than the current buffer.
nt = NexusToken(300)
def checkLineLengths(flob):
    """Ensure the shared NexusToken buffer can hold the longest line of *flob*.

    Scans the whole file (restoring the read position to the start afterwards)
    and re-allocates the module-level ``nt`` when a longer line is found.
    """
    global nt
    flob.seek(0, 0)
    longest_line = pf.nexusTokenCheckLineLengths(nt.nexusToken, flob)
    flob.seek(0, 0)
    if longest_line > nt.max:
        nt = NexusToken(longest_line)
def nextTok(flob):
    """Return the next NEXUS token read from file-like object *flob*.

    Comments can be surfaced as tokens (depending on the var._nexus_* flags
    passed to the C tokenizer); they take precedence over the regular token.
    Returns None at end of input.
    """
    # A comment saved by the C side during a previous call is handed out first.
    if nt.savedCommentLen[0]:
        ret = nt.embeddedComment[:int(nt.savedCommentLen[0])].tostring()
        nt.savedCommentLen[0] = 0
        return ret
    # Ask the C extension to fill nt's shared buffers with the next token.
    pf.nextToken(nt.nexusToken, flob)
    # An embedded comment encountered during the read is returned before the
    # token itself.
    if nt.embeddedCommentLen[0]:
        ret = nt.embeddedComment[:int(nt.embeddedCommentLen[0])].tostring()
        nt.embeddedCommentLen[0] = 0
        return ret
    else:
        if nt.tokLen[0]:
            ret = nt.tok[:int(nt.tokLen[0])].tostring()
            nt.tokLen[0] = 0
            return ret
        else:
            # Nothing was read: end of file.
            return None
def safeNextTok(flob, caller=None):
    """Return the next token, raising Glitch (instead of returning None) at EOF.

    *caller* is an optional name included in the error message to show where
    the premature end of file was detected.
    """
    t = nextTok(flob)
    if not t:
        if caller:
            gm = ["safeNextTok(), called from %s" % caller]
        else:
            gm = ["safeNextTok()"]
        gm.append("Premature Death.")
        gm.append("Ran out of understandable things to read in nexus file.")
        raise Glitch, gm
    else:
        return t
def nexusSkipPastNextSemiColon(flob):
    """Consume input from *flob* up to and including the next ';' (via the C helper)."""
    pf.nexusSkipPastNextSemiColon(nt.nexusToken, flob)
def nexusSkipPastBlockEnd(flob):
    """Read up to and including a block 'end' or 'endblock' (plus its ';').

    Raises Glitch if the terminator is missing its semicolon or if the file
    ends before any 'end'/'endblock' is found.
    """
    # This should only ever be issued after a semi-colon
    complaintHead = '\nNexus: nexusSkipPastBlockEnd()'
    if hasattr(flob, 'name'):
        complaintHead += " file: %s" % flob.name
    while 1:
        tok = nextTok(flob)
        if tok:
            lowTok = string.lower(tok)
            if lowTok == 'end' or lowTok == 'endblock':
                # 'end'/'endblock' must be followed immediately by ';'.
                tok2 = nextTok(flob)
                if not tok2 or tok2 != ';':
                    gm = [complaintHead]
                    gm.append("    Expecting a semicolon after %s" % tok)
                    if not tok2:
                        gm.append("Got nothing.")
                    else:
                        gm.append("Got '%s'" % tok2)
                    raise Glitch, gm
                return
            elif lowTok == ';': # for pathological cases where the last command is a ';' by itself.
                continue
            else:
                # Any other token starts a command; skip the whole command.
                pf.nexusSkipPastNextSemiColon(nt.nexusToken, flob)
        else:
            break
    gm = [complaintHead]
    gm.append("Failed to find either 'end' or 'endblock'")
    gm.append("Premature end of file?")
    raise Glitch, gm
|
Linhua-Sun/p4-phylogenetics
|
p4/NexusToken2.py
|
Python
|
gpl-2.0
| 5,196 | 0.007313 |
import sqlite3
from sklearn import linear_model
import numpy as np
import pandas as pd
import datetime
import sys

# Open the tickmate sqlite database given on the command line.
conn = sqlite3.connect(sys.argv[1])
c = conn.cursor();
# Load all tracks (habits); the row order defines the matrix column order.
c.execute("select _id, name from tracks")
rows = c.fetchall()
track_names = pd.DataFrame([{'track_name': row[1]} for row in rows])
track_ids = [int(row[0]) for row in rows]
track_cnt = len(track_ids)
print "Found {0} tracks.".format(track_cnt)
# The most recent tick defines "today" for the regression window.
c.execute("select * from ticks")
last_tick = c.fetchall()[-1]
last_day = datetime.date(last_tick[2], last_tick[3], last_tick[4])
def window(day, n=20):
"return a matrix of the last `n` days before day `day`"
tick_date = "date(year || '-' || substr('0' || month, -2, 2) || " + \
"'-' || substr('0' || day, -2, 2))"
max_date = "date('{d.year:04d}-{d.month:02d}-{d.day:02d}')".\
format(d=day)
min_date = "date('{d.year:04d}-{d.month:02d}-{d.day:02d}')".\
format(d=day-datetime.timedelta(n))
c.execute("select * from ticks where {d} <= {max_date} and {d} >= {min_date}".\
format(d=tick_date, max_date=max_date, min_date=min_date))
# ticktrix is the matrix containing the ticks
ticktrix = np.zeros((n, track_cnt))
for row in c.fetchall():
print row
try:
row_date = datetime.date(row[2], row[3], row[4])
except ValueError:
print "Error constructing date from", row
x = -(row_date - day).days
y = track_ids.index(int(row[1]))
if x < n:
ticktrix[x, y] = 1
return ticktrix
# Fit a linear model predicting each track's tick on the target day from the
# 19 preceding days.  Each track is one sample; the 19 prior days are the
# features.
last_day -= datetime.timedelta(1)
print "Fitting for day:", last_day
my_window = window(last_day)
# Row 0 (the target day) is the regression target, rows 1..19 the features;
# transposed so tracks become samples.
target_data = my_window[0,:].T
training_data = my_window[1:,:].T
print "Target:", target_data.shape
print target_data
print "Training:", training_data.shape
print training_data
reg = linear_model.LinearRegression()
reg.fit(training_data, target_data)
print "Coefficents:", reg.coef_.shape
print reg.coef_
print "Applied to training data:"
print np.dot(training_data, reg.coef_)
print "Forecast"
# Forecast the next day: apply the learned weights to the latest 19-day window.
df = pd.DataFrame()
df['track'] = track_names
df['prob'] = pd.Series(np.dot(my_window[:19,:].T, reg.coef_) * 100.0)
print df
lordi/tickmate
|
analysis/tmkit/linear_regression.py
|
Python
|
gpl-3.0
| 2,249 | 0.006225 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Reverses 0002_advertiser_logo: drops the `logo` field from Advertiser.

    dependencies = [
        ('deals', '0002_advertiser_logo'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='advertiser',
            name='logo',
        ),
    ]
|
andela/troupon
|
troupon/deals/migrations/0003_remove_advertiser_logo.py
|
Python
|
mit
| 349 | 0 |
# -*- coding: utf-8 -*-
###############################################################################
#
# UpdateSigningCertificate
# Changes the status of the specified signing certificate from active to disabled, or vice versa. This action can be used to disable a user's signing certificate as part of a certificate rotation workflow.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateSigningCertificate(Choreography):
    """
    Choreo that flips an IAM signing certificate between Active and
    Inactive, e.g. as part of a certificate rotation workflow.
    """

    def __init__(self, temboo_session):
        """
        Create a new instance of the UpdateSigningCertificate Choreo.
        *temboo_session* must be a TembooSession holding valid credentials.
        """
        Choreography.__init__(self, temboo_session, '/Library/Amazon/IAM/UpdateSigningCertificate')

    def new_input_set(self):
        """Return an empty input set for this Choreo."""
        return UpdateSigningCertificateInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution result in the matching result-set type."""
        return UpdateSigningCertificateResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Wrap an in-flight execution in the matching execution type."""
        return UpdateSigningCertificateChoreographyExecution(session, exec_id, path)
class UpdateSigningCertificateInputSet(InputSet):
    """
    Inputs for the UpdateSigningCertificate Choreo.  Use the set_* methods
    to supply parameter values before executing the Choreo.
    """

    def _set(self, name, value):
        # Shared shorthand for registering an input value.
        super(UpdateSigningCertificateInputSet, self)._set_input(name, value)

    def set_AWSAccessKeyId(self, value):
        """(required, string) The Access Key ID provided by Amazon Web Services."""
        self._set('AWSAccessKeyId', value)

    def set_AWSSecretKeyId(self, value):
        """(required, string) The Secret Key ID provided by Amazon Web Services."""
        self._set('AWSSecretKeyId', value)

    def set_CertificateId(self, value):
        """(required, string) The ID of the signing certificate you want to update."""
        self._set('CertificateId', value)

    def set_ResponseFormat(self, value):
        """(optional, string) The response format: "xml" (the default) or "json"."""
        self._set('ResponseFormat', value)

    def set_Status(self, value):
        """(required, string) The status to assign: Active means the certificate can be used for API calls to AWS; Inactive means it cannot."""
        self._set('Status', value)

    def set_UserName(self, value):
        """(optional, string) Name of the user the signing certificate belongs to."""
        self._set('UserName', value)
class UpdateSigningCertificateResultSet(ResultSet):
    """
    Accessor for the results returned by an execution of the
    UpdateSigningCertificate Choreo.
    """

    def get_Response(self):
        """The "Response" output: the raw response from Amazon."""
        return self._output.get('Response', None)

    def getJSONFromString(self, str):
        """Parse *str* (note: parameter name shadows the builtin) as JSON."""
        return json.loads(str)
class UpdateSigningCertificateChoreographyExecution(ChoreographyExecution):
    """Tracks an asynchronous execution of the UpdateSigningCertificate Choreo."""
    def _make_result_set(self, response, path):
        # Wrap the raw response in the Choreo-specific result-set type.
        return UpdateSigningCertificateResultSet(response, path)
|
jordanemedlock/psychtruths
|
temboo/core/Library/Amazon/IAM/UpdateSigningCertificate.py
|
Python
|
apache-2.0
| 4,842 | 0.006196 |
from __future__ import print_function, unicode_literals
import inspect
import six
from django import forms
from django.forms.forms import DeclarativeFieldsMetaclass
from rest_framework import serializers
from .. import fields
from ..utils import (
initialize_class_using_reference_object,
reduce_attr_dict_from_base_classes,
)
from .fields import ISO8601DateTimeField
SERIALIZER_FORM_FIELD_MAPPING = {
fields.BooleanField: forms.BooleanField,
fields.CharField: forms.CharField,
fields.ChoiceField: forms.ChoiceField,
fields.DateTimeField: ISO8601DateTimeField,
fields.EmailField: forms.EmailField,
fields.IntegerField: forms.IntegerField,
serializers.BooleanField: forms.BooleanField,
serializers.CharField: forms.CharField,
serializers.ChoiceField: forms.ChoiceField,
serializers.DateTimeField: ISO8601DateTimeField,
serializers.EmailField: forms.EmailField,
serializers.IntegerField: forms.IntegerField,
}
class SerializerFormOptions(object):
    """Normalized view of a ``SerializerForm.Meta`` options object.

    Copies the recognised ``Meta`` attributes onto the instance (supplying
    defaults for the optional ones) and validates that a DRF serializer
    class was provided.
    """
    def __init__(self, options=None, name=None):
        # Each supported Meta attribute with its fallback value.  The
        # literals are re-evaluated per call, so defaults are never shared.
        supported = (
            ('serializer', None),
            ('fields', []),
            ('exclude', []),
            ('field_mapping', {}),
        )
        for attr, fallback in supported:
            setattr(self, attr, getattr(options, attr, fallback))
        # A serializer is mandatory and must actually be a DRF serializer.
        assert self.serializer is not None, (
            '{}.Meta.serializer must be provided'
            ''.format(name)
        )
        assert issubclass(self.serializer, serializers.BaseSerializer), (
            '{}.Meta.serializer must be a subclass of DRF serializer'
            ''.format(name)
        )
class SerializerFormMeta(DeclarativeFieldsMetaclass):
    """Metaclass that augments a form's declared fields with form fields
    generated from ``Meta.serializer``'s declared serializer fields."""
    def __new__(cls, name, bases, attrs):
        try:
            parents = [b for b in bases if issubclass(b, SerializerForm)]
        except NameError:
            # We are defining SerializerForm itself
            parents = None
        meta = attrs.pop('Meta', None)
        # Base classes (or classes flagged `_is_base`) skip field generation
        # entirely and are created as plain declarative-fields classes.
        if not parents or attrs.pop('_is_base', False):
            return super(SerializerFormMeta, cls).__new__(cls, name, bases, attrs)
        attrs['_meta'] = options = SerializerFormOptions(meta, name=name)
        new_attrs = cls.get_form_fields_from_serializer(bases, options)
        # attrs should take priority in case a specific field is overwritten
        new_attrs.update(attrs)
        return super(SerializerFormMeta, cls).__new__(cls, name, bases, new_attrs)
    @classmethod
    def get_field_mapping(cls, bases, options):
        """Merge serializer-field -> form-field mappings.

        Base-class mappings are layered over the module default, and this
        class's own ``Meta.field_mapping`` wins last.
        """
        mapping = reduce_attr_dict_from_base_classes(
            bases,
            lambda i: getattr(getattr(i, '_meta', None), 'field_mapping', {}),
            SERIALIZER_FORM_FIELD_MAPPING
        )
        mapping.update(options.field_mapping)
        return mapping
    @classmethod
    def get_form_fields_from_serializer(cls, bases, options):
        """Build django form fields mirroring the serializer's writable fields."""
        fields = {}
        mapping = cls.get_field_mapping(bases, options)
        for name, field in options.serializer._declared_fields.items():
            # Read-only serializer fields have no form counterpart.
            if field.read_only:
                continue
            # NOTE(review): with the default Meta.fields == [] this skips
            # every field, which makes Meta.fields effectively mandatory —
            # confirm whether an empty list was meant to mean "all fields".
            if name not in options.fields or name in options.exclude:
                continue
            form_field_class = mapping.get(type(field))
            if not form_field_class:
                raise TypeError(
                    '{} is not mapped to appropriate form field class. '
                    'Please add it to the mapping via `field_mapping` '
                    'Meta attribute.'
                    ''.format(type(field))
                )
            fields[name] = initialize_class_using_reference_object(field, form_field_class)
        return fields
class SerializerFormBase(forms.Form):
    """Form base that delegates validation to a DRF serializer.

    Subclasses (assembled by ``SerializerFormMeta``) carry a ``_meta``
    with the serializer class; during ``full_clean()`` the serializer
    re-validates the combined initial+cleaned data and its errors or
    ``validated_data`` are folded back into the form.
    """
    def __init__(self, *args, **kwargs):
        super(SerializerFormBase, self).__init__(*args, **kwargs)
        # instantiated during validation
        self.serializer = None
    def get_serializer_context(self):
        # Hook for subclasses; the returned dict becomes the serializer's
        # context.
        return {}
    def get_serializer_data(self):
        # Cleaned values take precedence over initial values for the same
        # key; cleaned_data may be falsy when field cleaning failed.
        data = self.initial.copy()
        data.update(self.cleaned_data or {})
        return data
    def get_serializer(self):
        # Build the serializer declared in Meta from the current form data.
        return self._meta.serializer(
            data=self.get_serializer_data(),
            context=self.get_serializer_context()
        )
    def _clean_form(self):
        # Run the normal form-level clean first, then let the serializer
        # have the final word on validity.
        super(SerializerFormBase, self)._clean_form()
        self.serializer = self.get_serializer()
        if not self.serializer.is_valid():
            # Surface serializer errors as form errors.
            self._errors.update(self.serializer.errors)
        else:
            self.cleaned_data = self.serializer.validated_data
class SerializerForm(six.with_metaclass(SerializerFormMeta, SerializerFormBase)):
    # Flag consumed (and popped) by SerializerFormMeta so that this base
    # class itself is not required to define Meta.serializer.
    _is_base = True
def form_from_serializer(serializer, **kwargs):
    """Dynamically create a ``SerializerForm`` subclass for *serializer*.

    Any extra keyword arguments become attributes of the generated ``Meta``
    class (e.g. ``fields``, ``exclude``, ``field_mapping``).
    """
    assert inspect.isclass(serializer) and issubclass(serializer, serializers.BaseSerializer), (
        'Can only create forms from DRF Serializers'
    )
    kwargs.update({'serializer': serializer})
    # str() keeps the generated type names native `str` on both Python 2
    # and Python 3 (this module imports six/unicode_literals).
    meta = type(str('Meta'), (object,), kwargs)
    return type(str('{}Form'.format(serializer.__name__)), (SerializerForm,), {'Meta': meta})
|
pombredanne/django-rest-framework-braces
|
drf_braces/forms/serializer_form.py
|
Python
|
mit
| 5,024 | 0.001194 |
import pytest
from diofant import Integer, SympifyError
from diofant.core.operations import AssocOp, LatticeOp
__all__ = ()
class MyMul(AssocOp):
    # Minimal concrete AssocOp: the identity element is 1, as for Mul.
    identity = Integer(1)
def test_flatten():
    # Nested applications of an associative op are flattened into one call.
    assert MyMul(2, MyMul(4, 3)) == MyMul(2, 4, 3)
class Join(LatticeOp):
    """Simplest possible Lattice class."""
    # Absorbing element: Join(0, x) == 0 for any x.
    zero = Integer(0)
    # Neutral element: Join(1, x) == x.
    identity = Integer(1)
def test_lattice_simple():
    # Associativity and commutativity.
    assert Join(Join(2, 3), 4) == Join(2, Join(3, 4))
    assert Join(2, 3) == Join(3, 2)
    # Zero absorbs, identity disappears, and the op is idempotent.
    assert Join(0, 2) == 0
    assert Join(1, 2) == 2
    assert Join(2, 2) == 2
    # Flattening, nullary/unary calls, and duplicate removal.
    assert Join(Join(2, 3), 4) == Join(2, 3, 4)
    assert Join() == 1
    assert Join(4) == 4
    assert Join(1, 4, 2, 3, 1, 3, 2) == Join(2, 3, 4)
def test_lattice_shortcircuit():
    # `object` cannot be sympified, so Join(object) raises...
    pytest.raises(SympifyError, lambda: Join(object))
    # ...but the absorbing zero short-circuits before sympification fails.
    assert Join(0, object) == 0
def test_lattice_print():
    # Arguments are rendered in sorted order regardless of call order.
    assert str(Join(5, 4, 3, 2)) == 'Join(2, 3, 4, 5)'
def test_lattice_make_args():
    # Non-Join values are wrapped in a singleton set; for a Join instance
    # the original argument set is exposed.
    assert Join.make_args(0) == {0}
    assert Join.make_args(1) == {1}
    assert Join.make_args(Join(2, 3, 4)) == {Integer(2), Integer(3), Integer(4)}
|
diofant/diofant
|
diofant/tests/core/test_operations.py
|
Python
|
bsd-3-clause
| 1,106 | 0.000904 |
# Point Django at the app configuration class to use for this app
# (pre-Django 3.2 convention for selecting a non-default AppConfig).
default_app_config = 'daiquiri.query.apps.QueryConfig'
|
aipescience/django-daiquiri
|
daiquiri/query/__init__.py
|
Python
|
apache-2.0
| 55 | 0 |
#!/usr/bin/python
"""
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Repackage expected/actual GM results as needed by our HTML rebaseline viewer.
"""
# System-level imports
import argparse
import fnmatch
import json
import logging
import os
import re
import sys
import time
# Imports from within Skia
#
# We need to add the 'gm' directory, so that we can import gm_json.py within
# that directory. That script allows us to parse the actual-results.json file
# written out by the GM tool.
# Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end*
# so any dirs that are already in the PYTHONPATH will be preferred.
PARENT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
GM_DIRECTORY = os.path.dirname(PARENT_DIRECTORY)
if GM_DIRECTORY not in sys.path:
sys.path.append(GM_DIRECTORY)
import gm_json
import imagediffdb
IMAGE_FILENAME_RE = re.compile(gm_json.IMAGE_FILENAME_PATTERN)
IMAGE_FILENAME_FORMATTER = '%s_%s.png' # pass in (testname, config)
FIELDS_PASSED_THRU_VERBATIM = [
gm_json.JSONKEY_EXPECTEDRESULTS_BUGS,
gm_json.JSONKEY_EXPECTEDRESULTS_IGNOREFAILURE,
gm_json.JSONKEY_EXPECTEDRESULTS_REVIEWED,
]
CATEGORIES_TO_SUMMARIZE = [
'builder', 'test', 'config', 'resultType',
gm_json.JSONKEY_EXPECTEDRESULTS_IGNOREFAILURE,
gm_json.JSONKEY_EXPECTEDRESULTS_REVIEWED,
]
RESULTS_ALL = 'all'
RESULTS_FAILURES = 'failures'
class Results(object):
""" Loads actual and expected results from all builders, supplying combined
reports as requested.
Once this object has been constructed, the results (in self._results[])
are immutable. If you want to update the results based on updated JSON
file contents, you will need to create a new Results object."""
  def __init__(self, actuals_root, expected_root, generated_images_root):
    """
    Args:
      actuals_root: root directory containing all actual-results.json files
      expected_root: root directory containing all expected-results.json files
      generated_images_root: directory within which to create all pixel diffs;
          if this directory does not yet exist, it will be created
    """
    time_start = int(time.time())
    self._image_diff_db = imagediffdb.ImageDiffDB(generated_images_root)
    self._actuals_root = actuals_root
    self._expected_root = expected_root
    # Eagerly load and diff everything; self._results is immutable afterwards.
    self._load_actual_and_expected()
    # Snapshot creation time in seconds past epoch (UTC).
    self._timestamp = int(time.time())
    logging.info('Results complete; took %d seconds.' %
                 (self._timestamp - time_start))
def get_timestamp(self):
"""Return the time at which this object was created, in seconds past epoch
(UTC).
"""
return self._timestamp
  def edit_expectations(self, modifications):
    """Edit the expectations stored within this object and write them back
    to disk.
    Note that this will NOT update the results stored in self._results[] ;
    in order to see those updates, you must instantiate a new Results object
    based on the (now updated) files on disk.
    Args:
      modifications: a list of dictionaries, one for each expectation to update:
        [
          {
            'builder': 'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug',
            'test': 'bigmatrix',
            'config': '8888',
            'expectedHashType': 'bitmap-64bitMD5',
            'expectedHashDigest': '10894408024079689926',
            'bugs': [123, 456],
            'ignore-failure': false,
            'reviewed-by-human': true,
          },
          ...
        ]
    """
    # Re-read expectations from disk so we edit the latest on-disk state.
    expected_builder_dicts = Results._read_dicts_from_root(self._expected_root)
    for mod in modifications:
      image_name = IMAGE_FILENAME_FORMATTER % (mod['test'], mod['config'])
      # TODO(epoger): assumes a single allowed digest per test
      allowed_digests = [[mod['expectedHashType'],
                          int(mod['expectedHashDigest'])]]
      new_expectations = {
          gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS: allowed_digests,
      }
      # Copy optional pass-through fields (bugs / ignore-failure / reviewed)
      # only when the modification actually provides them.
      for field in FIELDS_PASSED_THRU_VERBATIM:
        value = mod.get(field)
        if value is not None:
          new_expectations[field] = value
      builder_dict = expected_builder_dicts[mod['builder']]
      builder_expectations = builder_dict.get(gm_json.JSONKEY_EXPECTEDRESULTS)
      if not builder_expectations:
        # First expectation recorded for this builder.
        builder_expectations = {}
        builder_dict[gm_json.JSONKEY_EXPECTEDRESULTS] = builder_expectations
      builder_expectations[image_name] = new_expectations
    # Persist the updated expectations for every builder back to disk.
    Results._write_dicts_to_root(expected_builder_dicts, self._expected_root)
  def get_results_of_type(self, type):
    """Return results of some/all tests (depending on 'type' parameter).
    Args:
      type: string describing which types of results to include; must be one
            of the RESULTS_* constants
    Results are returned as a dictionary in this form:
       {
         'categories': # dictionary of categories listed in
                       # CATEGORIES_TO_SUMMARIZE, with the number of times
                       # each value appears within its category
         {
           'resultType': # category name
           {
             'failed': 29, # category value and total number found of that value
             'failure-ignored': 948,
             'no-comparison': 4502,
             'succeeded': 38609,
           },
           'builder':
           {
             'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug': 1286,
             'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Release': 1134,
             ...
           },
           ... # other categories from CATEGORIES_TO_SUMMARIZE
         }, # end of 'categories' dictionary
         'testData': # list of test results, with a dictionary for each
         [
           {
             'resultType': 'failed',
             'builder': 'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug',
             'test': 'bigmatrix',
             'config': '8888',
             'expectedHashType': 'bitmap-64bitMD5',
             'expectedHashDigest': '10894408024079689926',
             'actualHashType': 'bitmap-64bitMD5',
             'actualHashDigest': '2409857384569',
             'bugs': [123, 456],
             'ignore-failure': false,
             'reviewed-by-human': true,
           },
           ...
         ], # end of 'testData' list
       }
    """
    # NOTE(review): the parameter shadows the builtin `type`; renaming it
    # would change the keyword-argument interface, so it is left as-is.
    return self._results[type]
@staticmethod
def _ignore_builder(builder):
"""Returns True if we should ignore expectations and actuals for a builder.
This allows us to ignore builders for which we don't maintain expectations
(trybots, Valgrind, ASAN, TSAN), and avoid problems like
https://code.google.com/p/skia/issues/detail?id=2036 ('rebaseline_server
produces error when trying to add baselines for ASAN/TSAN builders')
Args:
builder: name of this builder, as a string
Returns:
True if we should ignore expectations and actuals for this builder.
"""
return (builder.endswith('-Trybot') or
('Valgrind' in builder) or
('TSAN' in builder) or
('ASAN' in builder))
  @staticmethod
  def _read_dicts_from_root(root, pattern='*.json'):
    """Read all JSON dictionaries within a directory tree.
    Args:
      root: path to root of directory tree
      pattern: which files to read within root (fnmatch-style pattern)
    Returns:
      A meta-dictionary containing all the JSON dictionaries found within
      the directory tree, keyed by the builder name of each dictionary.
    Raises:
      IOError if root does not refer to an existing directory
    """
    if not os.path.isdir(root):
      raise IOError('no directory found at path %s' % root)
    meta_dict = {}
    for dirpath, dirnames, filenames in os.walk(root):
      for matching_filename in fnmatch.filter(filenames, pattern):
        # The builder name is the directory that holds the JSON file.
        builder = os.path.basename(dirpath)
        if Results._ignore_builder(builder):
          continue
        fullpath = os.path.join(dirpath, matching_filename)
        # NOTE: if a builder directory holds several matching files, the
        # last one walked wins.
        meta_dict[builder] = gm_json.LoadFromFile(fullpath)
    return meta_dict
  @staticmethod
  def _write_dicts_to_root(meta_dict, root, pattern='*.json'):
    """Write all per-builder dictionaries within meta_dict to files under
    the root path.
    Security note: this will only write to files that already exist within
    the root path (as found by os.walk() within root), so we don't need to
    worry about malformed content writing to disk outside of root.
    However, the data written to those files is not double-checked, so it
    could contain poisonous data.
    Args:
      meta_dict: a builder-keyed meta-dictionary containing all the JSON
                 dictionaries we want to write out
      root: path to root of directory tree within which to write files
      pattern: which files to write within root (fnmatch-style pattern)
    Raises:
      IOError if root does not refer to an existing directory
      KeyError if the set of per-builder dictionaries written out was
               different than expected
    """
    if not os.path.isdir(root):
      raise IOError('no directory found at path %s' % root)
    actual_builders_written = []
    for dirpath, dirnames, filenames in os.walk(root):
      for matching_filename in fnmatch.filter(filenames, pattern):
        builder = os.path.basename(dirpath)
        if Results._ignore_builder(builder):
          continue
        # Only overwrite files for builders present in meta_dict.
        per_builder_dict = meta_dict.get(builder)
        if per_builder_dict is not None:
          fullpath = os.path.join(dirpath, matching_filename)
          gm_json.WriteToFile(per_builder_dict, fullpath)
          actual_builders_written.append(builder)
    # Check: did we write out the set of per-builder dictionaries we
    # expected to?
    expected_builders_written = sorted(meta_dict.keys())
    actual_builders_written.sort()
    if expected_builders_written != actual_builders_written:
      raise KeyError(
          'expected to write dicts for builders %s, but actually wrote them '
          'for builders %s' % (
              expected_builders_written, actual_builders_written))
  def _generate_pixel_diffs_if_needed(self, test, expected_image, actual_image):
    """If expected_image and actual_image both exist but are different,
    add the image pair to self._image_diff_db and generate pixel diffs.
    Args:
      test: string; name of test
      expected_image: (hashType, hashDigest) tuple describing the expected image
      actual_image: (hashType, hashDigest) tuple describing the actual image
    """
    # Identical images: nothing to diff.
    if expected_image == actual_image:
      return
    (expected_hashtype, expected_hashdigest) = expected_image
    (actual_hashtype, actual_hashdigest) = actual_image
    # Either image missing (e.g. no expectations yet): nothing to diff.
    if None in [expected_hashtype, expected_hashdigest,
                actual_hashtype, actual_hashdigest]:
      return
    expected_url = gm_json.CreateGmActualUrl(
        test_name=test, hash_type=expected_hashtype,
        hash_digest=expected_hashdigest)
    actual_url = gm_json.CreateGmActualUrl(
        test_name=test, hash_type=actual_hashtype,
        hash_digest=actual_hashdigest)
    # The ImageDiffDB downloads both images and computes the pixel diffs.
    self._image_diff_db.add_image_pair(
        expected_image_locator=expected_hashdigest,
        expected_image_url=expected_url,
        actual_image_locator=actual_hashdigest,
        actual_image_url=actual_url)
  def _load_actual_and_expected(self):
    """Loads the results of all tests, across all builders (based on the
    files within self._actuals_root and self._expected_root),
    and stores them in self._results.
    """
    logging.info('Reading actual-results JSON files from %s...' %
                 self._actuals_root)
    actual_builder_dicts = Results._read_dicts_from_root(self._actuals_root)
    logging.info('Reading expected-results JSON files from %s...' %
                 self._expected_root)
    expected_builder_dicts = Results._read_dicts_from_root(self._expected_root)
    categories_all = {}
    categories_failures = {}
    # Pre-seed both category dicts so every resultType shows up with a
    # count, even if it never occurs in the data.
    Results._ensure_included_in_category_dict(categories_all,
                                              'resultType', [
        gm_json.JSONKEY_ACTUALRESULTS_FAILED,
        gm_json.JSONKEY_ACTUALRESULTS_FAILUREIGNORED,
        gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON,
        gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED,
        ])
    Results._ensure_included_in_category_dict(categories_failures,
                                              'resultType', [
        gm_json.JSONKEY_ACTUALRESULTS_FAILED,
        gm_json.JSONKEY_ACTUALRESULTS_FAILUREIGNORED,
        gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON,
        ])
    data_all = []
    data_failures = []
    builders = sorted(actual_builder_dicts.keys())
    num_builders = len(builders)
    builder_num = 0
    for builder in builders:
      builder_num += 1
      logging.info('Generating pixel diffs for builder #%d of %d, "%s"...' %
                   (builder_num, num_builders, builder))
      actual_results_for_this_builder = (
          actual_builder_dicts[builder][gm_json.JSONKEY_ACTUALRESULTS])
      for result_type in sorted(actual_results_for_this_builder.keys()):
        results_of_this_type = actual_results_for_this_builder[result_type]
        if not results_of_this_type:
          continue
        for image_name in sorted(results_of_this_type.keys()):
          actual_image = results_of_this_type[image_name]
          # Default empty expectations; overwrite these if we find any real ones
          expectations_per_test = None
          expected_image = [None, None]
          try:
            expectations_per_test = (
                expected_builder_dicts
                [builder][gm_json.JSONKEY_EXPECTEDRESULTS][image_name])
            # TODO(epoger): assumes a single allowed digest per test
            expected_image = (
                expectations_per_test
                [gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS][0])
          except (KeyError, TypeError):
            # There are several cases in which we would expect to find
            # no expectations for a given test:
            #
            # 1. result_type == NOCOMPARISON
            #   There are no expectations for this test yet!
            #
            # 2. alternate rendering mode failures (e.g. serialized)
            #   In cases like
            #   https://code.google.com/p/skia/issues/detail?id=1684
            #   ('tileimagefilter GM test failing in serialized render mode'),
            #   the gm-actuals will list a failure for the alternate
            #   rendering mode even though we don't have explicit expectations
            #   for the test (the implicit expectation is that it must
            #   render the same in all rendering modes).
            #
            # Don't log type 1, because it is common.
            # Log other types, because they are rare and we should know about
            # them, but don't throw an exception, because we need to keep our
            # tools working in the meanwhile!
            if result_type != gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON:
              logging.warning('No expectations found for test: %s' % {
                  'builder': builder,
                  'image_name': image_name,
                  'result_type': result_type,
                  })
          # If this test was recently rebaselined, it will remain in
          # the 'failed' set of actuals until all the bots have
          # cycled (although the expectations have indeed been set
          # from the most recent actuals).  Treat these as successes
          # instead of failures.
          #
          # TODO(epoger): Do we need to do something similar in
          # other cases, such as when we have recently marked a test
          # as ignoreFailure but it still shows up in the 'failed'
          # category?  Maybe we should not rely on the result_type
          # categories recorded within the gm_actuals AT ALL, and
          # instead evaluate the result_type ourselves based on what
          # we see in expectations vs actual checksum?
          if expected_image == actual_image:
            updated_result_type = gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED
          else:
            updated_result_type = result_type
          # Split e.g. 'bigmatrix_8888.png' into test and config names.
          (test, config) = IMAGE_FILENAME_RE.match(image_name).groups()
          self._generate_pixel_diffs_if_needed(
              test=test, expected_image=expected_image,
              actual_image=actual_image)
          results_for_this_test = {
              'resultType': updated_result_type,
              'builder': builder,
              'test': test,
              'config': config,
              'actualHashType': actual_image[0],
              'actualHashDigest': str(actual_image[1]),
              'expectedHashType': expected_image[0],
              'expectedHashDigest': str(expected_image[1]),
              # FIELDS_PASSED_THRU_VERBATIM that may be overwritten below...
              gm_json.JSONKEY_EXPECTEDRESULTS_IGNOREFAILURE: False,
          }
          if expectations_per_test:
            for field in FIELDS_PASSED_THRU_VERBATIM:
              results_for_this_test[field] = expectations_per_test.get(field)
          if updated_result_type == gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON:
            pass # no diff record to calculate at all
          elif updated_result_type == gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED:
            results_for_this_test['numDifferingPixels'] = 0
            results_for_this_test['percentDifferingPixels'] = 0
            results_for_this_test['weightedDiffMeasure'] = 0
            results_for_this_test['maxDiffPerChannel'] = 0
          else:
            try:
              diff_record = self._image_diff_db.get_diff_record(
                  expected_image_locator=expected_image[1],
                  actual_image_locator=actual_image[1])
              results_for_this_test['numDifferingPixels'] = (
                  diff_record.get_num_pixels_differing())
              results_for_this_test['percentDifferingPixels'] = (
                  diff_record.get_percent_pixels_differing())
              results_for_this_test['weightedDiffMeasure'] = (
                  diff_record.get_weighted_diff_measure())
              results_for_this_test['maxDiffPerChannel'] = (
                  diff_record.get_max_diff_per_channel())
            except KeyError:
              # Diff generation failed earlier; report the test without
              # diff metrics rather than aborting the whole load.
              logging.warning('unable to find diff_record for ("%s", "%s")' %
                              (expected_image[1], actual_image[1]))
              pass
          Results._add_to_category_dict(categories_all, results_for_this_test)
          data_all.append(results_for_this_test)
          # TODO(epoger): In effect, we have a list of resultTypes that we
          # include in the different result lists (data_all and data_failures).
          # This same list should be used by the calls to
          # Results._ensure_included_in_category_dict() earlier on.
          if updated_result_type != gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED:
            Results._add_to_category_dict(categories_failures,
                                          results_for_this_test)
            data_failures.append(results_for_this_test)
    self._results = {
      RESULTS_ALL:
        {'categories': categories_all, 'testData': data_all},
      RESULTS_FAILURES:
        {'categories': categories_failures, 'testData': data_failures},
    }
@staticmethod
def _add_to_category_dict(category_dict, test_results):
"""Add test_results to the category dictionary we are building.
(See documentation of self.get_results_of_type() for the format of this
dictionary.)
Args:
category_dict: category dict-of-dicts to add to; modify this in-place
test_results: test data with which to update category_list, in a dict:
{
'category_name': 'category_value',
'category_name': 'category_value',
...
}
"""
for category in CATEGORIES_TO_SUMMARIZE:
category_value = test_results.get(category)
if not category_dict.get(category):
category_dict[category] = {}
if not category_dict[category].get(category_value):
category_dict[category][category_value] = 0
category_dict[category][category_value] += 1
@staticmethod
def _ensure_included_in_category_dict(category_dict,
category_name, category_values):
"""Ensure that the category name/value pairs are included in category_dict,
even if there aren't any results with that name/value pair.
(See documentation of self.get_results_of_type() for the format of this
dictionary.)
Args:
category_dict: category dict-of-dicts to modify
category_name: category name, as a string
category_values: list of values we want to make sure are represented
for this category
"""
if not category_dict.get(category_name):
category_dict[category_name] = {}
for category_value in category_values:
if not category_dict[category_name].get(category_value):
category_dict[category_name][category_value] = 0
def main():
  """Command-line entry point: load actual/expected GM results and write a
  JSON summary of all results to --outfile."""
  logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                      datefmt='%m/%d/%Y %H:%M:%S',
                      level=logging.INFO)
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--actuals', required=True,
      help='Directory containing all actual-result JSON files')
  parser.add_argument(
      '--expectations', required=True,
      help='Directory containing all expected-result JSON files')
  parser.add_argument(
      '--outfile', required=True,
      help='File to write result summary into, in JSON format')
  parser.add_argument(
      '--workdir', default='.workdir',
      help='Directory within which to download images and generate diffs')
  args = parser.parse_args()
  # Constructing Results does all the loading and diff generation.
  results = Results(actuals_root=args.actuals,
                    expected_root=args.expectations,
                    generated_images_root=args.workdir)
  gm_json.WriteToFile(results.get_results_of_type(RESULTS_ALL), args.outfile)
if __name__ == '__main__':
  main()
|
trevorlinton/skia
|
gm/rebaseline_server/results.py
|
Python
|
bsd-3-clause
| 22,208 | 0.005629 |
##########################################################################
#
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferScene
# Register user-visible documentation for the Transform node and its plugs.
Gaffer.Metadata.registerNodeDescription(
GafferScene.Transform,
"""Modifies the transforms of all locations matched by the filter.""",
"space",
"""The space in which the transform is applied.""",
"transform",
"""The transform to be applied.""",
)
# Present the "space" plug in the UI as an enum menu with the two
# supported transform spaces.
GafferUI.PlugValueWidget.registerCreator(
GafferScene.Transform,
"space",
GafferUI.EnumPlugValueWidget,
labelsAndValues = (
	( "World", GafferScene.Transform.Space.World ),
	( "Object", GafferScene.Transform.Space.Object ),
)
)
|
goddardl/gaffer
|
python/GafferSceneUI/TransformUI.py
|
Python
|
bsd-3-clause
| 2,347 | 0.008095 |
# Copyright (C) 2008, 2009 Jochen Voss <voss@seehuhn.de>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# This software is provided by the author "as is" and any express or
# implied warranties, including, but not limited to, the implied
# warranties of merchantability and fitness for a particular purpose
# are disclaimed. In no event shall the author be liable for any
# direct, indirect, incidental, special, exemplary, or consequential
# damages (including, but not limited to, procurement of substitute
# goods or services; loss of use, data, or profits; or business
# interruption) however caused and on any theory of liability, whether
# in contract, strict liability, or tort (including negligence or
# otherwise) arising in any way out of the use of this software, even
# if advised of the possibility of such damage.
def print_tree(tree, terminals, indent=0):
    """Print a parse tree to stdout.

    Terminal nodes are printed via repr() (the whole token tuple);
    non-terminal nodes print their symbol and recurse into their children
    one indentation level deeper.  NOTE: Python 2 syntax (print statement,
    unicode()).
    """
    prefix = " "*indent
    if tree[0] in terminals:
        print prefix + repr(tree)
    else:
        print prefix + unicode(tree[0])
        for x in tree[1:]:
            print_tree(x, terminals, indent+1)
class Parser(object):
"""LR(1) parser class template.
This class is only used to store source code sniplets for the
generated parser. Code is taken out via code inspection and
pasted into the output file.
"""
    class ParseErrors(Exception):
        """Exception class to represent a collection of parse errors.
        Instances of this class have two attributes, `errors` and `tree`.
        `errors` is a list of tuples, each describing one error.
        #@ IF error_stacks
        Each tuple consists of the first input token which could not
        be processed, the list of grammar symbols which were allowed
        at this point, and a list of partial parse trees which
        represent the input parsed so far.
        #@ ELSE
        Each tuple consists of the first input token which could not
        be processed and the list of grammar symbols which were allowed
        at this point.
        #@ ENDIF
        `tree` is a "repaired" parse tree which might be used for further
        error checking, or `None` if no repair was possible.
        """
        def __init__(self, errors, tree):
            # Summarise the error count in the exception message; full
            # details stay available via the `errors` attribute.
            msg = "%d parse errors"%len(errors)
            Exception.__init__(self, msg)
            self.errors = errors
            self.tree = tree
    def __init__(self, max_err=None, errcorr_pre=4, errcorr_post=4):
        """Create a new parser instance.
        The constructor arguments are all optional, they control the
        handling of parse errors: `max_err` can be given to bound the
        number of errors reported during one run of the parser.
        `errcorr_pre` controls how many tokens before an invalid token
        the parser considers when trying to repair the input.
        `errcorr_post` controls how far beyond an invalid token the
        parser reads when evaluating the quality of an attempted
        repair.
        """
        self.max_err = max_err
        # Short names `m`/`n` mirror errcorr_pre/errcorr_post and are used
        # by the error-recovery code in parse().
        self.m = errcorr_pre
        self.n = errcorr_post
    @staticmethod
    def leaves(tree):
        """Iterate over the leaves of a parse tree.
        This function can be used to reconstruct the input from a
        parse tree.
        """
        # Terminals are leaves; non-terminals recurse into their children
        # in order, yielding the original tokens left-to-right.
        if tree[0] in Parser.terminals:
            yield tree
        else:
            for x in tree[1:]:
                for t in Parser.leaves(x):
                    yield t
    def _parse(self, tokens, stack, state):
        """Internal function to construct a parse tree.
        'Tokens' is the input token stream, 'stack' is the initial stack
        and 'state' is the initial state of the automaton.
        Returns a 4-tuple (done, count, state, error). 'done' is a
        boolean indicating whether parsing is completed, 'count' is
        number of successfully shifted tokens, and 'error' is None on
        success or else the first token which could not be parsed.
        """
        # Standard LR driver loop: shift a token, reduce by a grammar rule,
        # or report failure.  The `#@ IF` lines are template directives
        # resolved when the parser source is generated.
        read_next = True
        count = 0
        while state != self._halting_state:
            if read_next:
                try:
                    # NOTE: Python 2 iterator protocol (`.next()`).
                    lookahead = tokens.next()
                except StopIteration:
                    # Input exhausted before reaching the halting state.
                    return (False,count,state,None)
                read_next = False
            token = lookahead[0]
            #@ IF parser_debugprint
            debug = [ ]
            for s in stack:
                debug.extend([str(s[0]), repr(s[1][0])])
            debug.append(str(state))
            print " ".join(debug)+" [%s]"%repr(token)
            #@ ENDIF parser_debugprint
            if (state,token) in self._shift:
                #@ IF parser_debugprint
                print "shift %s"%repr(token)
                #@ ENDIF
                stack.append((state,lookahead))
                state = self._shift[(state,token)]
                read_next = True
                count += 1
            elif (state,token) in self._reduce:
                X,n = self._reduce[(state,token)]
                if n > 0:
                    # Pop n items; the exposed state is the one under them.
                    state = stack[-n][0]
                    #@ IF transparent_tokens
                    tree = [ X ]
                    for s in stack[-n:]:
                        if s[1][0] in self._transparent:
                            tree.extend(s[1][1:])
                        else:
                            tree.append(s[1])
                    tree = tuple(tree)
                    #@ ELSE
                    tree = (X,) + tuple(s[1] for s in stack[-n:])
                    #@ ENDIF
                    #@ IF parser_debugprint
                    debug = [ s[1][0] for s in stack[-n:] ]
                    #@ ENDIF
                    del stack[-n:]
                else:
                    # Empty production: reduce without popping.
                    tree = (X,)
                    #@ IF parser_debugprint
                    debug = [ ]
                    #@ ENDIF
                #@ IF parser_debugprint
                print "reduce %s -> %s"%(repr(debug),repr(X))
                #@ ENDIF
                stack.append((state,tree))
                state = self._goto[(state,X)]
            else:
                #@ IF parser_debugprint
                print "parse error"
                #@ ENDIF
                return (False,count,state,lookahead)
        return (True,count,state,None)
    def _try_parse(self, tokens, stack, state):
        """Count how many of the given tokens the automaton can consume.

        Dry-run variant of _parse() used during error recovery: `tokens`
        is a list (not an iterator) of lookahead tuples, `stack` holds
        states only (no parse trees are built), and the return value is
        the number of tokens shifted before the automaton halts, exhausts
        the input, or gets stuck.
        """
        count = 0
        while state != self._halting_state and count < len(tokens):
            token = tokens[count][0]
            if (state,token) in self._shift:
                stack.append(state)
                state = self._shift[(state,token)]
                count += 1
            elif (state,token) in self._reduce:
                X,n = self._reduce[(state,token)]
                if n > 0:
                    # Pop n states; continue from the uncovered one.
                    state = stack[-n]
                    del stack[-n:]
                stack.append(state)
                state = self._goto[(state,X)]
            else:
                # Stuck: neither shift nor reduce is possible.
                break
        return count
    def parse(self, tokens):
        """Parse the tokens from `tokens` and construct a parse tree.

        `tokens` must be an iterable over tuples.  The first element
        of each tuple must be a terminal symbol of the grammar which
        is used for parsing.  All other elements of the tuple are just
        copied into the constructed parse tree.

        If `tokens` is invalid, a ParseErrors exception is raised.
        Otherwise the function returns the parse tree.
        """
        errors = []
        # Terminate the input with the EOF marker the automaton expects.
        tokens = chain(tokens, [(self.EOF,)])
        stack = []
        state = 0
        while True:
            # Run the LR automaton until it accepts or hits a bad token.
            done,_,state,lookahead = self._parse(tokens, stack, state)
            if done:
                break
            # Record which terminals would have been acceptable in the
            # current state, for the error report.
            expect = [ t for s,t in self._reduce.keys()+self._shift.keys()
                       if s == state ]
            #@ IF error_stacks
            errors.append((lookahead, expect, [ s[1] for s in stack ]))
            #@ ELSE
            errors.append((lookahead, expect))
            #@ ENDIF
            if self.max_err is not None and len(errors) >= self.max_err:
                raise self.ParseErrors(errors, None)
            #@ IF parser_debugprint
            print "backtrack for error recovery"
            #@ ENDIF
            # Error recovery: replay the already-consumed leaves, keeping
            # the last self.m of them (plus the offending lookahead) in
            # `queue` so the region around the error can be repaired.
            queue = []
            def split_input(m, stack, lookahead, queue):
                # Yield consumed leaves, retaining a tail window of m
                # tokens in `queue` for later mutation.
                for s in stack:
                    for t in self.leaves(s[1]):
                        queue.append(t)
                        if len(queue) > m:
                            yield queue.pop(0)
                queue.append(lookahead)
            in2 = split_input(self.m, stack, lookahead, queue)
            stack = []
            done,_,state,lookahead = self._parse(in2, stack, 0)
            m = len(queue)
            # Pull up to self.n extra lookahead tokens into the repair window.
            for i in range(0, self.n):
                try:
                    queue.append(tokens.next())
                except StopIteration:
                    break
            def vary_queue(queue, m):
                # Generate all one-token edits (insertion, replacement,
                # deletion) of the first m window entries, positions
                # closest to the error first.
                for i in range(m-1, -1, -1):
                    for t in self.terminals:
                        yield queue[:i]+[(t,)]+queue[i:]
                    if queue[i][0] == self.EOF:
                        continue
                    for t in self.terminals:
                        if t == queue[i]:
                            continue
                        yield queue[:i]+[(t,)]+queue[i+1:]
                    yield queue[:i]+queue[i+1:]
            # Choose the repair letting the parser consume the most tokens;
            # best_val counts tokens left unconsumed (lower is better).
            best_val = len(queue)-m+1
            best_queue = queue
            for q2 in vary_queue(queue, m):
                pos = self._try_parse(q2, [ s[0] for s in stack ], state)
                val = len(q2) - pos
                if val < best_val:
                    best_val = val
                    best_queue = q2
                    # NOTE(review): breaking on val == len(q2) means pos == 0
                    # (nothing consumed), which looks inverted for an early
                    # "perfect repair" exit — confirm against upstream wisent.
                    if val == len(q2):
                        break
            if best_val >= len(queue)-m+1:
                # No single-token edit improves matters; give up.
                raise self.ParseErrors(errors, None)
            # Restart parsing with the repaired token stream prepended.
            tokens = chain(best_queue, tokens)
            #@ IF parser_debugprint
            debug = " ".join(repr(x[0]) for x in best_queue)
            print "restart with repaired input: "+debug
            #@ ENDIF
        tree = stack[0][1]
        if errors:
            raise self.ParseErrors(errors, tree)
        return tree
|
seehuhn/wisent
|
template.py
|
Python
|
gpl-2.0
| 10,909 | 0.006508 |
"""Trivial Interfaces and Adaptation from PyProtocols.
This package is a subset of the files from Phillip J. Eby's
PyProtocols package. They are only included here to help remove dependencies
on external packages from the Traits package. The code has been reorganized to
address circular imports that were discovered when explicit relative imports
were added.
"""
|
HyperloopTeam/FullOpenMDAO
|
lib/python2.7/site-packages/traits-4.3.0-py2.7-macosx-10.10-x86_64.egg/traits/protocols/__init__.py
|
Python
|
gpl-2.0
| 365 | 0 |
# -*- coding: utf-8 -*-
#
# Minimum amount of settings to run the googlytics test suite
#
# googlytics options are often overridden during tests
GOOGLE_ANALYTICS_KEY = 'U-TEST-XXX'  # dummy tracking id, never sent anywhere
DEBUG = True
# Throw-away on-disk SQLite database for the test run.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'googlytics_test.sqlite3'
    }
}
# Only the apps the test suite actually needs.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'googlytics',
)
# Expose the googlytics context processor to templates under test.
TEMPLATE_CONTEXT_PROCESSORS = (
    'googlytics.context_processors.googlytics',
)
|
rikpg/django-googlytics
|
test_settings.py
|
Python
|
bsd-3-clause
| 510 | 0 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import logging.config
import os
import unittest
import six
from airflow.models import TaskInstance, DAG, DagRun
from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils.timezone import datetime
from airflow.utils.log.logging_mixin import set_context
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.db import create_session
from airflow.utils.state import State
DEFAULT_DATE = datetime(2016, 1, 1)  # fixed execution date shared by all tests
TASK_LOGGER = 'airflow.task'  # logger that carries the file task handler
FILE_TASK_HANDLER = 'file.task'  # handler name set by the default config
class TestFileTaskLogHandler(unittest.TestCase):
    """End-to-end checks of the default file task log handler: wiring,
    per-try log file creation, and reading the logs back."""

    def cleanUp(self):
        # Not a unittest hook; called explicitly from setUp/tearDown to
        # clear DagRun and TaskInstance rows left by a previous test.
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TaskInstance).delete()

    def setUp(self):
        super(TestFileTaskLogHandler, self).setUp()
        # Restore the stock logging configuration in case another test
        # replaced or disabled it.
        logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
        logging.root.disabled = False
        self.cleanUp()
        # We use file task handler by default.

    def tearDown(self):
        self.cleanUp()
        super(TestFileTaskLogHandler, self).tearDown()

    def test_default_task_logging_setup(self):
        # file task handler is used by default.
        logger = logging.getLogger(TASK_LOGGER)
        handlers = logger.handlers
        self.assertEqual(len(handlers), 1)
        handler = handlers[0]
        self.assertEqual(handler.name, FILE_TASK_HANDLER)

    def test_file_task_handler(self):
        def task_callable(ti, **kwargs):
            ti.log.info("test")
        dag = DAG('dag_for_testing_file_task_handler', start_date=DEFAULT_DATE)
        task = PythonOperator(
            task_id='task_for_testing_file_log_handler',
            dag=dag,
            python_callable=task_callable,
            provide_context=True
        )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        logger = ti.log
        ti.log.disabled = False
        file_handler = next((handler for handler in logger.handlers
                             if handler.name == FILE_TASK_HANDLER), None)
        self.assertIsNotNone(file_handler)
        # set_context attaches the per-task-instance handler with a
        # concrete log file path.
        set_context(logger, ti)
        self.assertIsNotNone(file_handler.handler)
        # We expect set_context generates a file locally.
        log_filename = file_handler.handler.baseFilename
        self.assertTrue(os.path.isfile(log_filename))
        # First try, hence the "1.log" suffix.
        self.assertTrue(log_filename.endswith("1.log"), log_filename)
        ti.run(ignore_ti_state=True)
        file_handler.flush()
        file_handler.close()
        self.assertTrue(hasattr(file_handler, 'read'))
        # Return value of read must be a list.
        logs = file_handler.read(ti)
        self.assertTrue(isinstance(logs, list))
        self.assertEqual(len(logs), 1)
        target_re = r'\n\[[^\]]+\] {test_log_handlers.py:\d+} INFO - test\n'
        # We should expect our log line from the callable above to appear in
        # the logs we read back
        six.assertRegex(
            self,
            logs[0],
            target_re,
            "Logs were " + str(logs)
        )
        # Remove the generated tmp log file.
        os.remove(log_filename)

    def test_file_task_handler_running(self):
        def task_callable(ti, **kwargs):
            ti.log.info("test")
        dag = DAG('dag_for_testing_file_task_handler', start_date=DEFAULT_DATE)
        task = PythonOperator(
            task_id='task_for_testing_file_log_handler',
            dag=dag,
            python_callable=task_callable,
            provide_context=True
        )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        # Simulate a task on its second try that is still running.
        ti.try_number = 2
        ti.state = State.RUNNING
        logger = ti.log
        ti.log.disabled = False
        file_handler = next((handler for handler in logger.handlers
                             if handler.name == FILE_TASK_HANDLER), None)
        self.assertIsNotNone(file_handler)
        set_context(logger, ti)
        self.assertIsNotNone(file_handler.handler)
        # We expect set_context generates a file locally.
        log_filename = file_handler.handler.baseFilename
        self.assertTrue(os.path.isfile(log_filename))
        self.assertTrue(log_filename.endswith("2.log"), log_filename)
        logger.info("Test")
        # Return value of read must be a list.
        logs = file_handler.read(ti)
        self.assertTrue(isinstance(logs, list))
        # Logs for running tasks should show up too.
        self.assertEqual(len(logs), 2)
        # Remove the generated tmp log file.
        os.remove(log_filename)
class TestFilenameRendering(unittest.TestCase):
    """Verify FileTaskHandler log filename rendering for both
    ``str.format``-style and Jinja-style templates."""

    def setUp(self):
        rendering_dag = DAG('dag_for_testing_filename_rendering', start_date=DEFAULT_DATE)
        rendering_task = DummyOperator(task_id='task_for_testing_filename_rendering', dag=rendering_dag)
        self.ti = TaskInstance(task=rendering_task, execution_date=DEFAULT_DATE)

    def test_python_formatting(self):
        handler = FileTaskHandler('', '{dag_id}/{task_id}/{execution_date}/{try_number}.log')
        rendered = handler._render_filename(self.ti, 42)
        expected = 'dag_for_testing_filename_rendering/task_for_testing_filename_rendering/%s/42.log' % DEFAULT_DATE.isoformat()
        self.assertEqual(expected, rendered)

    def test_jinja_rendering(self):
        handler = FileTaskHandler('', '{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log')
        rendered = handler._render_filename(self.ti, 42)
        expected = 'dag_for_testing_filename_rendering/task_for_testing_filename_rendering/%s/42.log' % DEFAULT_DATE.isoformat()
        self.assertEqual(expected, rendered)
|
OpringaoDoTurno/airflow
|
tests/utils/test_log_handlers.py
|
Python
|
apache-2.0
| 6,367 | 0.001099 |
#!/usr/bin/env python
# Written by Filippo Bonazzi <f.bonazzi@davide.it> 2016
#
# Convert an integer from its decimal representation into its hexadecimal
# representation.
# TODO: add argparse
import sys
import math


def decimal_str_to_int(s):
    """Convert a string of decimal digits into an int.

    Uses pure integer arithmetic: the previous implementation built the
    value with int(math.pow(10, i)), which goes through a float and
    silently yields wrong results for inputs longer than ~15 digits.
    """
    value = 0
    for c in s:
        value = value * 10 + int(c)
    return value


def main():
    """Read a decimal number from argv[1] and print it in hexadecimal."""
    # Strip any whitespace so inputs like "1 000 000" are accepted.
    s = "".join(sys.argv[1].split())
    for c in s:
        if c not in "1234567890":
            print("Bad string \"{}\"".format(s))
            sys.exit(1)
    print("{0:#x}".format(decimal_str_to_int(s)))


if __name__ == "__main__":
    main()
|
Fbonazzi/Scripts
|
binary/int_to_byte.py
|
Python
|
gpl-3.0
| 471 | 0 |
"""
Application-class that implements pyFoamChangeGGIBoundary.py
Modification of GGI and cyclicGGI interface parameters in
constant/polymesh/boundary file.
Author:
Martin Beaudoin, Hydro-Quebec, 2009. All rights reserved
"""
from PyFoam.Applications.PyFoamApplication import PyFoamApplication
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
from PyFoam.ThirdParty.six import print_
from os import path
import sys
import re
class ChangeGGIBoundary(PyFoamApplication):
    """PyFoam application that edits GGI / cyclicGGI patch parameters in a
    case's constant/polyMesh/boundary file."""

    def __init__(self,args=None):
        description="""\
Change GGI boundary condition parameters
"""
        PyFoamApplication.__init__(self,
                                   args=args,
                                   description=description,
                                   usage="%prog <caseDirectory> ggiPatchName",
                                   interspersed=True,
                                   changeVersion=False,
                                   nr=2)

    def addOptions(self):
        # Current option names plus their deprecated aliases, which are
        # still honoured (with a warning) in run() below.
        self.parser.add_option("--shadowPatch",
                               action="store",
                               dest="shadowPatch",
                               default=None,
                               help='Name of the shadowPatch')
        self.parser.add_option("--shadowName",
                               action="store",
                               dest="shadowName",
                               default=None,
                               help='Name of the shadowPatch. Deprecated. Use --shadowPatch instead')
        self.parser.add_option("--zone",
                               action="store",
                               dest="zone",
                               default=None,
                               help='Name of the zone for the GGI patch')
        self.parser.add_option("--patchZoneName",
                               action="store",
                               dest="patchZoneName",
                               default=None,
                               help='Name of the zone for the GGI patch. Deprecated. Use --zone instead')
        self.parser.add_option("--bridgeOverlap",
                               action="store",
                               dest="bridgeOverlap",
                               default=None,
                               help='bridgeOverlap flag (on/off)')
        self.parser.add_option("--bridgeOverlapFlag",
                               action="store",
                               dest="bridgeOverlapFlag",
                               default=None,
                               help='bridgeOverlap flag (on/off). Deprecated. Use --bridgeOverlap instead')
        self.parser.add_option("--rotationAxis",
                               action="store",
                               dest="rotationAxis",
                               default=None,
                               help='rotation axis for cyclicGgi')
        self.parser.add_option("--rotationAngle",
                               action="store",
                               dest="rotationAngle",
                               default=None,
                               help='rotation axis angle for cyclicGgi')
        self.parser.add_option("--separationOffset",
                               action="store",
                               dest="separationOffset",
                               default=None,
                               help='separation offset for cyclicGgi')
        self.parser.add_option("--test",
                               action="store_true",
                               default=False,
                               dest="test",
                               help="Only print the new boundary file")

    def run(self):
        # Positional arguments: case directory and the patch to modify.
        fName=self.parser.getArgs()[0]
        bName=self.parser.getArgs()[1]
        boundary=ParsedParameterFile(path.join(".",fName,"constant","polyMesh","boundary"),debug=False,boundaryDict=True)
        bnd=boundary.content
        if type(bnd)!=list:
            self.error("Problem with boundary file (not a list)")
        # The parsed boundary dict is a flat list alternating patch names
        # and their dictionaries: [name0, dict0, name1, dict1, ...], so
        # after matching bName the next element is its dictionary.
        found=False
        for val in bnd:
            if val==bName:
                found=True
            elif found:
                bcType=val["type"]
                if re.match("cyclicGgi", bcType)!= None or re.match("ggi", bcType)!= None:
                    if self.parser.getOptions().shadowPatch!=None:
                        shadowPatch=self.parser.getOptions().shadowPatch
                        val["shadowPatch"]=shadowPatch
                        # bnd also contains all patch names, so this checks
                        # that the shadow patch actually exists.
                        if shadowPatch not in bnd:
                            self.error("\n Option --shadowPatch for patch:",bName,": there is no patch called",shadowPatch,"\n")
                    if self.parser.getOptions().zone!=None:
                        val["zone"]=self.parser.getOptions().zone
                    if self.parser.getOptions().bridgeOverlap!=None:
                        val["bridgeOverlap"]=self.parser.getOptions().bridgeOverlap
                    # Rotation/separation parameters only apply to cyclicGgi.
                    if val["type"]=="cyclicGgi":
                        if self.parser.getOptions().rotationAxis!=None:
                            val["rotationAxis"]=self.parser.getOptions().rotationAxis
                        if self.parser.getOptions().rotationAngle!=None:
                            val["rotationAngle"]=self.parser.getOptions().rotationAngle
                        if self.parser.getOptions().separationOffset!=None:
                            val["separationOffset"]=self.parser.getOptions().separationOffset
                    # Deprecated
                    if self.parser.getOptions().shadowName!=None:
                        self.warning("\n PatchName:",bName,": Option --shadowName is deprecated. Use --shadowPatch instead\n")
                        shadowName=self.parser.getOptions().shadowName
                        val["shadowPatch"]=shadowName
                        if shadowName not in bnd:
                            self.error("\n Option --shadowName for patch:",bName,": there is no patch called",shadowName,"\n")
                    # Deprecated
                    if self.parser.getOptions().patchZoneName!=None:
                        self.warning("\n PatchName:",bName,": Option --patchZoneName is deprecated. Use --zone instead\n")
                        val["zone"]=self.parser.getOptions().patchZoneName
                    # Deprecated
                    if self.parser.getOptions().bridgeOverlapFlag!=None:
                        self.warning("\n PatchName:",bName,": Option --bridgeOverlapFlag is deprecated. Use --bridgeOverlap instead\n")
                        val["bridgeOverlap"]=self.parser.getOptions().bridgeOverlapFlag
                else:
                    print_("Unsupported GGI type '",bcType,"' for patch",bName)
                break
        if not found:
            # bnd[::2] is the list of patch names (every other element).
            self.error("Boundary",bName,"not found in",bnd[::2])
        if self.parser.getOptions().test:
            print_(boundary)
        else:
            boundary.writeFile()
|
Unofficial-Extend-Project-Mirror/foam-extend-foam-extend-3.2
|
ThirdParty/LocalDev/Hydro-Quebec/PyFoam/ChangeGGIBoundary.py
|
Python
|
gpl-3.0
| 7,040 | 0.012642 |
# TODO: Handle timestamp generation for modinput and set sample.timestamp properly for timestamp replacement
import datetime
import json
import os
import pprint
import random
import re
import time
import uuid
import six.moves.urllib.error
import six.moves.urllib.parse
import six.moves.urllib.request
from splunk_eventgen.lib.logging_config import logger
from splunk_eventgen.lib.timeparser import timeDelta2secs
class Token(object):
    """Contains data and methods for replacing a token in a given sample"""

    # Token configuration: the regex to look for, the replacement
    # strategy name and the strategy's argument string.
    token = None
    replacementType = None
    replacement = None
    sample = None
    # Maps replacement-file path -> columns of the row last picked from a
    # multi-value (mvfile) file, so every column comes from the same row.
    mvhash = {}

    # Lazily populated internal caches (regex match objects, file
    # contents, timestamps).
    _replaytd = None
    _lastts = None
    _tokenfile = None
    _tokents = None
    _earliestTime = None
    _latestTime = None
    _replacementFile = None
    _replacementColumn = None
    _integerMatch = None
    _floatMatch = None
    _hexMatch = None
    _stringMatch = None
    _listMatch = None
    _tokenfilecounter = 0

    def __init__(self, sample=None):
        self._earliestTime = (None, None)
        self._latestTime = (None, None)

    def __str__(self):
        """Only used for debugging, outputs a pretty printed representation of this token"""
        # Eliminate recursive going back to parent
        temp = dict(
            [(key, value) for (key, value) in self.__dict__.items() if key != "sample"]
        )
        return pprint.pformat(temp)

    def __repr__(self):
        return self.__str__()

    def _match(self, event):
        """Executes regular expression match and returns the re.Match object"""
        return re.match(self.token, event)

    def _search(self, event):
        """Executes regular expression search and returns the re.Match object"""
        return re.search(self.token, event)

    def _finditer(self, event):
        """Executes regular expression finditer and returns the re.Match object"""
        return re.finditer(self.token, event)

    def _findall(self, event):
        """Executes regular expression findall and returns the list of matches"""
        return re.findall(self.token, event)

    def replace(self, event, et=None, lt=None, s=None, pivot_timestamp=None):
        """Replaces all instances of this token in provided event and returns event"""
        offset = 0
        tokenMatch = list(self._finditer(event))
        if len(tokenMatch) > 0:
            # Compute the replacement once from the first match; for
            # replaytimestamp it is recomputed per match below.
            replacement = self._getReplacement(
                event[tokenMatch[0].start(0) : tokenMatch[0].end(0)],
                et,
                lt,
                s,
                pivot_timestamp=pivot_timestamp,
            )
            if replacement is not None or self.replacementType == "replaytimestamp":
                # logger.debug("Replacement: '%s'" % replacement)
                # Iterate matches
                for match in tokenMatch:
                    # logger.debug("Match: %s" % (match))
                    # Prefer capture group 1 when the pattern has one;
                    # `offset` keeps positions valid as earlier replacements
                    # change the event's length.
                    try:
                        matchStart = match.start(1) + offset
                        matchEnd = match.end(1) + offset
                        startEvent = event[:matchStart]
                        endEvent = event[matchEnd:]
                        # In order to not break legacy which might replace the same timestamp
                        # with the same value in multiple matches, here we'll include
                        # ones that need to be replaced for every match
                        if self.replacementType == "replaytimestamp":
                            replacement = lt.strftime(self.replacement)
                        offset += len(replacement) - len(match.group(1))
                    # Bare except: presumably guarding the IndexError raised
                    # when the pattern has no group 1 — fall back to group 0.
                    except:
                        matchStart = match.start(0) + offset
                        matchEnd = match.end(0) + offset
                        startEvent = event[:matchStart]
                        endEvent = event[matchEnd:]
                        # In order to not break legacy which might replace the same timestamp
                        # with the same value in multiple matches, here we'll include
                        # ones that need to be replaced for every match
                        if self.replacementType == "replaytimestamp":
                            replacement = lt.strftime(self.replacement)
                        offset += len(replacement) - len(match.group(0))
                    # logger.debug("matchStart %d matchEnd %d offset %d" % (matchStart, matchEnd, offset))
                    event = startEvent + replacement + endEvent
                # Reset replay internal variables for this token
                self._replaytd = None
                self._lastts = None
        return event

    def _getReplacement(
        self, old=None, earliestTime=None, latestTime=None, s=None, pivot_timestamp=None
    ):
        # Dispatch on the configured replacement strategy; `old` is the
        # matched text and is returned unchanged on any error.
        if self.replacementType == "static":
            return self.replacement
        # This logic is done in replay.py
        elif self.replacementType == "replaytimestamp":
            pass
        elif self.replacementType == "timestamp":
            if s.earliest and s.latest:
                if earliestTime and latestTime:
                    if latestTime >= earliestTime:
                        if pivot_timestamp:
                            replacementTime = pivot_timestamp
                        elif s.timestamp is None:
                            # Pick a random time in [earliestTime, latestTime]
                            # and remember it on the sample so subsequent
                            # tokens in the same event reuse it.
                            minDelta = 0
                            # Compute timeDelta as total_seconds
                            td = latestTime - earliestTime
                            if not type(td) == float:
                                maxDelta = timeDelta2secs(td)
                            else:
                                maxDelta = td
                            # Get random timeDelta
                            randomDelta = datetime.timedelta(
                                seconds=random.randint(minDelta, maxDelta),
                                microseconds=random.randint(
                                    0,
                                    latestTime.microsecond
                                    if latestTime.microsecond > 0
                                    else 999999,
                                ),
                            )
                            # Compute replacmentTime
                            replacementTime = latestTime - randomDelta
                            s.timestamp = replacementTime
                        else:
                            replacementTime = s.timestamp
                        # "%s" in the format stands for epoch seconds.
                        replacement = self.replacement.replace(
                            "%s",
                            str(round(time.mktime(replacementTime.timetuple())))
                            .rstrip("0")
                            .rstrip("."),
                        )
                        replacementTime = replacementTime.strftime(replacement)
                        # replacementTime == replacement for invalid strptime specifiers
                        if replacementTime != self.replacement.replace("%", ""):
                            return replacementTime
                        else:
                            logger.error(
                                "Invalid strptime specifier '%s' detected; will not replace"
                                % (self.replacement)
                            )
                            return old
                    # earliestTime/latestTime not proper
                    else:
                        logger.error(
                            (
                                "Earliest specifier '%s', value '%s' is greater than latest specifier '%s'"
                                + "value '%s' for sample '%s'; will not replace"
                            )
                            % (s.earliest, earliestTime, s.latest, latestTime, s.name)
                        )
                        return old
            # earliest/latest not proper
            else:
                logger.error(
                    "Earliest or latest specifier were not set; will not replace"
                )
                return old
        elif self.replacementType in ("random", "rated"):
            # Validations:
            # The compiled-regex matches of the replacement spec are cached
            # on the instance so they are computed only once per token.
            if self._integerMatch is not None:
                integerMatch = self._integerMatch
            else:
                integerRE = re.compile(r"integer\[([-]?\d+):([-]?\d+)\]", re.I)
                integerMatch = integerRE.match(self.replacement)
                self._integerMatch = integerMatch
            if self._floatMatch is not None:
                floatMatch = self._floatMatch
            else:
                floatRE = re.compile(
                    r"float\[(-?\d+|-?\d+\.(\d+)):(-?\d+|-?\d+\.(\d+))\]", re.I
                )
                floatMatch = floatRE.match(self.replacement)
                self._floatMatch = floatMatch
            if self._stringMatch is not None:
                stringMatch = self._stringMatch
            else:
                stringRE = re.compile(r"string\((\d+)\)", re.I)
                stringMatch = stringRE.match(self.replacement)
                self._stringMatch = stringMatch
            if self._hexMatch is not None:
                hexMatch = self._hexMatch
            else:
                hexRE = re.compile(r"hex\((\d+)\)", re.I)
                hexMatch = hexRE.match(self.replacement)
                self._hexMatch = hexMatch
            if self._listMatch is not None:
                listMatch = self._listMatch
            else:
                listRE = re.compile(r"list(\[[^\]]+\])", re.I)
                listMatch = listRE.match(self.replacement)
                self._listMatch = listMatch
            # Valid replacements: ipv4 | ipv6 | integer[<start>:<end>] | string(<i>)
            if self.replacement.lower() == "ipv4":
                # Four random dotted-decimal octets.
                x = 0
                replacement = ""
                while x < 4:
                    replacement += str(random.randint(0, 255)) + "."
                    x += 1
                replacement = replacement.strip(".")
                return replacement
            elif self.replacement.lower() == "ipv6":
                # Eight random colon-separated 16-bit hex groups.
                x = 0
                replacement = ""
                while x < 8:
                    replacement += hex(random.randint(0, 65535))[2:] + ":"
                    x += 1
                replacement = replacement.strip(":")
                return replacement
            elif self.replacement.lower() == "mac":
                x = 0
                replacement = ""
                # Give me 6 blocks of 2 hex
                while x < 6:
                    y = 0
                    while y < 2:
                        replacement += hex(random.randint(0, 15))[2:]
                        y += 1
                    replacement += ":"
                    x += 1
                replacement = replacement.strip(":")
                return replacement
            elif self.replacement.lower() == "guid":
                return str(uuid.uuid4())
            elif integerMatch:
                startInt = int(integerMatch.group(1))
                endInt = int(integerMatch.group(2))
                if endInt >= startInt:
                    replacementInt = random.randint(startInt, endInt)
                    if self.replacementType == "rated":
                        # Scale the value by the configured hour-of-day and
                        # day-of-week rate factors.
                        rateFactor = 1.0
                        if type(s.hourOfDayRate) == dict:
                            try:
                                rateFactor *= s.hourOfDayRate[str(s.now())]
                            except KeyError:
                                import traceback

                                stack = traceback.format_exc()
                                logger.error(
                                    "Hour of day rate failed for token %s. Stacktrace %s"
                                    % stack
                                )
                        if type(s.dayOfWeekRate) == dict:
                            try:
                                # Map Python's Monday=0 weekday onto a
                                # Sunday=0 numbering for the rate table.
                                weekday = datetime.date.weekday(s.now())
                                if weekday == 6:
                                    weekday = 0
                                else:
                                    weekday += 1
                                rateFactor *= s.dayOfWeekRate[str(weekday)]
                            except KeyError:
                                import traceback

                                stack = traceback.format_exc()
                                logger.error(
                                    "Day of week rate failed. Stacktrace %s" % stack
                                )
                        replacementInt = int(round(replacementInt * rateFactor, 0))
                    replacement = str(replacementInt)
                    return replacement
                else:
                    logger.error(
                        "Start integer %s greater than end integer %s; will not replace"
                        % (startInt, endInt)
                    )
                    return old
            elif floatMatch:
                try:
                    startFloat = float(floatMatch.group(1))
                    endFloat = float(floatMatch.group(3))
                    # Number of decimal places requested by the spec.
                    significance = 0
                    if floatMatch.group(2) is not None:
                        significance = len(floatMatch.group(2))
                    if endFloat >= startFloat:
                        floatret = round(
                            random.uniform(startFloat, endFloat), significance
                        )
                        if self.replacementType == "rated":
                            rateFactor = 1.0
                            now = s.now()
                            if type(s.hourOfDayRate) == dict:
                                try:
                                    rateFactor *= s.hourOfDayRate[str(now.hour)]
                                except KeyError:
                                    import traceback

                                    stack = traceback.format_exc()
                                    logger.error(
                                        "Hour of day rate failed for token %s. Stacktrace %s"
                                        % stack
                                    )
                            if type(s.dayOfWeekRate) == dict:
                                try:
                                    weekday = datetime.date.weekday(now)
                                    if weekday == 6:
                                        weekday = 0
                                    else:
                                        weekday += 1
                                    rateFactor *= s.dayOfWeekRate[str(weekday)]
                                except KeyError:
                                    import traceback

                                    stack = traceback.format_exc()
                                    logger.error(
                                        "Day of week rate failed. Stacktrace %s"
                                        % stack
                                    )
                            floatret = round(floatret * rateFactor, significance)
                        floatret = str(floatret)
                        return floatret
                    else:
                        logger.error(
                            "Start float %s greater than end float %s; will not replace"
                            % (startFloat, endFloat)
                        )
                        return old
                except ValueError:
                    logger.error(
                        "Could not parse float[%s:%s]"
                        % (floatMatch.group(1), floatMatch.group(4))
                    )
                    return old
            elif stringMatch:
                strLength = int(stringMatch.group(1))
                if strLength == 0:
                    return ""
                elif strLength > 0:
                    replacement = ""
                    while len(replacement) < strLength:
                        # Generate a random ASCII between dec 33->126
                        replacement += chr(random.randint(33, 126))
                        # Practice safe strings
                        replacement = re.sub(
                            "%[0-9a-fA-F]+",
                            "",
                            six.moves.urllib.parse.quote(replacement),
                        )
                    return replacement
                else:
                    logger.error(
                        "Length specifier %s for string replacement must be greater than 0; will not replace"
                        % (strLength)
                    )
                    return old
            elif hexMatch:
                strLength = int(hexMatch.group(1))
                replacement = ""
                hexList = [
                    "0",
                    "1",
                    "2",
                    "3",
                    "4",
                    "5",
                    "6",
                    "7",
                    "8",
                    "9",
                    "A",
                    "B",
                    "C",
                    "D",
                    "E",
                    "F",
                ]
                while len(replacement) < strLength:
                    replacement += hexList[random.randint(0, 15)]
                return replacement
            elif listMatch:
                try:
                    value = json.loads(listMatch.group(1))
                except:
                    logger.error(
                        "Could not parse json for '%s' in sample '%s'"
                        % (listMatch.group(1), s.name)
                    )
                    return old
                return random.choice(value)
            else:
                logger.error(
                    "Unknown replacement value '%s' for replacementType '%s'; will not replace"
                    % (self.replacement, self.replacementType)
                )
                return old
        elif self.replacementType in ("file", "mvfile", "seqfile"):
            # Parse "path[:column]" once and cache the result.
            if self._replacementFile is not None:
                replacementFile = self._replacementFile
                replacementColumn = self._replacementColumn
            else:
                try:
                    paths = self.replacement.split(":")
                    if len(paths) == 1:
                        replacementColumn = 0
                    else:
                        try:  # When it's not a mvfile, there's no number on the end:
                            replacementColumn = int(paths[-1])
                        except (ValueError):
                            replacementColumn = 0
                    if replacementColumn > 0:
                        # This supports having a drive-letter colon
                        replacementFile = s.pathParser(":".join(paths[0:-1]))
                    else:
                        replacementFile = s.pathParser(self.replacement)
                except ValueError:
                    logger.error(
                        "Replacement string '%s' improperly formatted. Should be /path/to/file or /path/to/file:column"
                        % self.replacement
                    )
                    return old
                self._replacementFile = replacementFile
                self._replacementColumn = replacementColumn
            # If we've seen this file before, simply return already read results
            # This applies only if we're looking at a multivalue file and we want to
            # return the same random pick on every iteration
            if replacementColumn > 0 and replacementFile in self.mvhash:
                if replacementColumn > len(self.mvhash[replacementFile]):
                    logger.error(
                        "Index for column '%s' in replacement file '%s' is out of bounds"
                        % (replacementColumn, replacementFile)
                    )
                    return old
                else:
                    # logger.debug("Returning mvhash: %s" % self.mvhash[replacementFile][replacementColumn-1])
                    return self.mvhash[replacementFile][replacementColumn - 1]
            else:
                # Adding caching of the token file to avoid reading it every iteration
                if self._tokenfile is not None:
                    replacementLines = self._tokenfile
                # Otherwise, lets read the file and build our cached results, pick a result and return it
                else:
                    # logger.debug("replacementFile: %s replacementColumn: %s" %
                    #              (replacementFile, replacementColumn))
                    replacementFile = os.path.abspath(replacementFile)
                    logger.debug("Normalized replacement file %s" % replacementFile)
                    if os.path.exists(replacementFile) and os.path.isfile(
                        replacementFile
                    ):
                        # NOTE(review): the "rU" mode was removed in Python
                        # 3.11 — confirm the target runtime, or switch to "r".
                        replacementFH = open(replacementFile, "rU")
                        replacementLines = replacementFH.readlines()
                        replacementFH.close()
                        if len(replacementLines) == 0:
                            logger.error(
                                "Replacement file '%s' is empty; will not replace"
                                % (replacementFile)
                            )
                            return old
                        else:
                            self._tokenfile = replacementLines
                    else:
                        logger.error("File '%s' does not exist" % (replacementFile))
                        return old
                if self.replacementType == "seqfile":
                    # pick value one by one from replacement file
                    replacement = replacementLines[
                        self._tokenfilecounter % len(replacementLines)
                    ].strip()
                    self._tokenfilecounter += 1
                else:
                    # pick value randomly from replacement file
                    replacement = replacementLines[
                        random.randint(0, len(replacementLines) - 1)
                    ].strip()
                if replacementColumn > 0:
                    self.mvhash[replacementFile] = replacement.split(",")
                    if replacementColumn > len(self.mvhash[replacementFile]):
                        logger.error(
                            "Index for column '%s' in replacement file '%s' is out of bounds"
                            % (replacementColumn, replacementFile)
                        )
                        return old
                    else:
                        return self.mvhash[replacementFile][replacementColumn - 1]
                else:
                    return replacement
        elif self.replacementType == "integerid":
            # Monotonically increasing id: return the current value and
            # store the incremented one for the next call.
            temp = self.replacement
            self.replacement = str(int(self.replacement) + 1)
            return temp
        else:
            logger.error(
                "Unknown replacementType '%s'; will not replace" % self.replacementType
            )
            return old
|
splunk/eventgen
|
splunk_eventgen/lib/eventgentoken.py
|
Python
|
apache-2.0
| 23,368 | 0.002225 |
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
import socket
import os
import re
import time

import requests
import json
from hashlib import sha256
from urlparse import urljoin
from urllib import quote

from PyQt4.QtGui import *
from PyQt4.QtCore import *

import electrum
from electrum import bitcoin
from electrum.bitcoin import *
from electrum.mnemonic import Mnemonic
from electrum import version
from electrum.wallet import Wallet_2of3
from electrum.i18n import _
from electrum.plugins import BasePlugin, run_hook, hook

from electrum_gui.qt.util import *
from electrum_gui.qt.qrcodewidget import QRCodeWidget
from electrum_gui.qt.amountedit import AmountEdit
from electrum_gui.qt.main_window import StatusBarButton

from decimal import Decimal
# signing_xpub is hardcoded so that the wallet can be restored from seed, without TrustedCoin's server
signing_xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
# presumably the key TrustedCoin billing addresses derive from — verify against the plugin's billing code
billing_xpub = "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
# Seed prefix marking two-factor (2FA) wallet seeds.
SEED_PREFIX = version.SEED_PREFIX_2FA
class TrustedCoinException(Exception):
    """Error raised for failed TrustedCoin API requests; carries the HTTP
    status code of the response (0 when not applicable)."""

    def __init__(self, message, status_code=0):
        super(TrustedCoinException, self).__init__(message)
        self.status_code = status_code
class TrustedCoinCosignerClient(object):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/', debug=False):
self.base_url = base_url
self.debug = debug
self.user_agent = user_agent
def send_request(self, method, relative_url, data=None):
kwargs = {'headers': {}}
if self.user_agent:
kwargs['headers']['user-agent'] = self.user_agent
if method == 'get' and data:
kwargs['params'] = data
elif method == 'post' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['content-type'] = 'application/json'
url = urljoin(self.base_url, relative_url)
if self.debug:
print '%s %s %s' % (method, url, data)
response = requests.request(method, url, **kwargs)
if self.debug:
print response.text
print
if response.status_code != 200:
message = str(response.text)
if response.headers.get('content-type') == 'application/json':
r = response.json()
if 'message' in r:
message = r['message']
raise TrustedCoinException(message, response.status_code)
if response.headers.get('content-type') == 'application/json':
return response.json()
else:
return response.text
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
return self.send_request('get', 'cosigner/%s' % quote(id))
    def sign(self, id, transaction, otp):
        """
        Ask the server to add its signature to a partially signed transaction.
        (Docstring fixed: summary was copy-pasted from auth().)
        :param id: the id of the cosigner
        :param transaction: the hex encoded [partially signed] compact transaction to sign
        :param otp: the one time password
        """
        payload = {
            'otp': otp,
            'transaction': transaction
        }
        return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)
    def transfer_credit(self, id, recipient, otp, signature_callback):
        """
        Transfer a cosigner's credits to another cosigner.
        :param id: the id of the sending cosigner
        :param recipient: the id of the recipient cosigner
        :param otp: the one time password (of the sender)
        :param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
        """
        payload = {
            'otp': otp,
            'recipient': recipient,
            'timestamp': int(time.time()),
        }
        relative_url = 'cosigner/%s/transfer' % quote(id)
        full_url = urljoin(self.base_url, relative_url)
        # The transfer must be authorized by the sender's key: the signature
        # covers the full URL plus the JSON-encoded payload.
        headers = {
            'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
        }
        return self.send_request('post', relative_url, payload, headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Wallet_2of3):
    """2-of-3 multisig wallet whose third key (x3/) is held by the
    TrustedCoin remote cosigning service."""

    wallet_type = '2fa'

    def get_action(self):
        """Return the next setup-wizard action, or None when setup is done."""
        mpks = self.master_public_keys
        if mpks.get("x2/") is None:
            # No cold key yet: either show the disclaimer first, or go
            # straight to seed creation if the user already consented.
            if not self.storage.get('use_trustedcoin'):
                return 'show_disclaimer'
            return 'create_extended_seed'
        if mpks.get("x3/") is None:
            return 'create_remote_key'
        if not self.accounts:
            return 'create_accounts'

    def make_seed(self):
        """Generate a 256-bit seed carrying the TrustedCoin seed prefix."""
        return Mnemonic('english').make_seed(num_bits=256, prefix=SEED_PREFIX)

    def estimated_fee(self, tx):
        """Base 2-of-3 fee estimate plus any extra fee charged by plugins."""
        fee = Wallet_2of3.estimated_fee(self, tx)
        extra = run_hook('extra_fee', tx)
        if extra:
            fee += extra
        return fee

    def get_tx_fee(self, tx):
        """Actual 2-of-3 fee plus any extra fee charged by plugins."""
        fee = Wallet_2of3.get_tx_fee(self, tx)
        extra = run_hook('extra_fee', tx)
        if extra:
            fee += extra
        return fee
class Plugin(BasePlugin):
wallet = None
    def __init__(self, x, y):
        """Standard BasePlugin constructor; *x* and *y* are passed through."""
        BasePlugin.__init__(self, x, y)
        # Seed validator: only accepts seeds carrying the 2FA prefix.
        self.seed_func = lambda x: bitcoin.is_new_seed(x, SEED_PREFIX)
        # Billing data fetched from the TrustedCoin server (None until fetched).
        self.billing_info = None
        # True while the user is paying a TrustedCoin billing address.
        self.is_billing = False
    def constructor(self, s):
        """Wallet factory used by Electrum for wallet storage *s*."""
        return Wallet_2fa(s)
def is_available(self):
if not self.wallet:
return False
if self.wallet.storage.get('wallet_type') == '2fa':
return True
return False
    def requires_settings(self):
        """This plugin exposes a settings button in the plugins dialog."""
        return True
    def set_enabled(self, enabled):
        """Persist the enabled flag ('use_<plugin name>') in wallet storage."""
        self.wallet.storage.put('use_' + self.name, enabled)
def is_enabled(self):
if not self.is_available():
return False
if self.wallet.master_private_keys.get('x2/'):
return False
return True
    def make_long_id(self, xpub_hot, xpub_cold):
        # Deterministic, order-independent wallet id: sha256 over the sorted xpubs.
        return bitcoin.sha256(''.join(sorted([xpub_hot, xpub_cold])))
    def get_user_id(self):
        """Return (long_id, short_id) identifying this wallet to TrustedCoin."""
        xpub_hot = self.wallet.master_public_keys["x1/"]
        xpub_cold = self.wallet.master_public_keys["x2/"]
        long_id = self.make_long_id(xpub_hot, xpub_cold)
        # short_id is the hex digest form used in server URLs.
        short_id = hashlib.sha256(long_id).hexdigest()
        return long_id, short_id
    def make_xpub(self, xpub, s):
        """Derive a child xpub of *xpub* using *s* as the derivation material."""
        _, _, _, c, cK = deserialize_xkey(xpub)
        cK2, c2 = bitcoin._CKD_pub(cK, c, s)
        # Re-serialize with mainnet xpub version bytes and zeroed
        # depth/fingerprint/child-number.  NOTE: str.decode("hex") is
        # Python-2-only syntax for hex decoding.
        xpub2 = ("0488B21E" + "00" + "00000000" + "00000000").decode("hex") + c2 + cK2
        return EncodeBase58Check(xpub2)
    def make_billing_address(self, num):
        """Locally derive billing address number *num* from the billing xpub."""
        long_id, short_id = self.get_user_id()
        xpub = self.make_xpub(billing_xpub, long_id)
        _, _, _, c, cK = deserialize_xkey(xpub)
        cK, c = bitcoin.CKD_pub(cK, c, num)
        address = public_key_to_bc_address( cK )
        return address
    def create_extended_seed(self, wallet, window):
        """Wizard step: create the seed and derive the two local keys."""
        seed = wallet.make_seed()
        if not window.show_seed(seed, None):
            return
        if not window.verify_seed(seed, None, self.seed_func):
            return
        password = window.password_dialog()
        wallet.storage.put('seed_version', wallet.seed_version, True)
        wallet.storage.put('use_encryption', password is not None, True)
        words = seed.split()
        # First half of the words becomes the hot cosigner seed (x1/), the
        # second half only an xpub (x2/).  NOTE: '/' is Python-2 integer
        # division here; under Python 3 this would produce a float index.
        n = len(words)/2
        wallet.add_cosigner_seed(' '.join(words[0:n]), 'x1/', password)
        wallet.add_cosigner_xpub(' '.join(words[n:]), 'x2/')
        msg = [
            _('Your wallet file is:') + " %s"%os.path.abspath(wallet.storage.path),
            _('You need to be online in order to complete the creation of your wallet.'),
            _('If you generated your seed on an offline computer, click on "%s" to close this window, move your wallet file to an online computer and reopen it with Electrum.') % _('Close'),
            _('If you are online, click on "%s" to continue.') % _('Next')
        ]
        return window.question('\n\n'.join(msg), no_label=_('Close'), yes_label=_('Next'))
    def show_disclaimer(self, wallet, window):
        """Wizard step: explain the TrustedCoin service and ask for consent."""
        msg = [
            _("Two-factor authentication is a service provided by TrustedCoin.") + ' ',
            _("It uses a multi-signature wallet, where you own 2 of 3 keys.") + ' ',
            _("The third key is stored on a remote server that signs transactions on your behalf.") + ' ',
            _("To use this service, you will need a smartphone with Google Authenticator.") + '\n\n',
            _("A small fee will be charged on each transaction that uses the remote server.") + ' ',
            _("You may check and modify your billing preferences once the installation is complete.") + '\n\n',
            _("Note that your coins are not locked in this service.") + ' ',
            _("You may withdraw your funds at any time and at no cost, without the remote server, by using the 'restore wallet' option with your wallet seed.") + '\n\n',
            _('The next step will generate the seed of your wallet.') + ' ',
            _('This seed will NOT be saved in your computer, and it must be stored on paper.') + ' ',
            _('To be safe from malware, you may want to do this on an offline computer, and move your wallet later to an online computer.')
        ]
        icon = QPixmap(':icons/trustedcoin.png')
        if not window.question(''.join(msg), icon=icon):
            return False
        # Consent given: remember the wallet and persist the choice so the
        # wizard can resume at the next step.
        self.wallet = wallet
        self.set_enabled(True)
        return True
    def restore_third_key(self, wallet):
        """Recompute the server's xpub (x3/) deterministically during restore."""
        long_user_id, short_id = self.get_user_id()
        xpub3 = self.make_xpub(signing_xpub, long_user_id)
        wallet.add_master_public_key('x3/', xpub3)
    @hook
    def do_clear(self):
        # Send tab was cleared: no longer paying a billing address.
        self.is_billing = False
    @hook
    def load_wallet(self, wallet):
        """Add the status-bar button and fetch billing info in the background."""
        self.trustedcoin_button = StatusBarButton( QIcon(":icons/trustedcoin.png"), _("Network"), self.settings_dialog)
        self.window.statusBar().addPermanentWidget(self.trustedcoin_button)
        self.xpub = self.wallet.master_public_keys.get('x1/')
        self.user_id = self.get_user_id()[1]
        # Billing info requires a network round-trip; do it off the GUI thread.
        t = threading.Thread(target=self.request_billing_info)
        t.setDaemon(True)
        t.start()
    @hook
    def close_wallet(self):
        # Undo load_wallet's status-bar addition.
        self.window.statusBar().removeWidget(self.trustedcoin_button)
    @hook
    def get_wizard_action(self, window, wallet, action):
        # Map wizard action names (from Wallet_2fa.get_action) to our methods.
        if hasattr(self, action):
            return getattr(self, action)
    @hook
    def installwizard_restore(self, window, storage):
        """Restore a 2FA wallet from seed; both key halves become local seeds,
        so the restored wallet can sign without the server."""
        if storage.get('wallet_type') != '2fa':
            return
        seed = window.enter_seed_dialog("Enter your seed", None, func=self.seed_func)
        if not seed:
            return
        wallet = Wallet_2fa(storage)
        self.wallet = wallet
        password = window.password_dialog()
        wallet.add_seed(seed, password)
        words = seed.split()
        # NOTE: Python-2 integer division.
        n = len(words)/2
        wallet.add_cosigner_seed(' '.join(words[0:n]), 'x1/', password)
        wallet.add_cosigner_seed(' '.join(words[n:]), 'x2/', password)
        self.restore_third_key(wallet)
        wallet.create_main_account(password)
        # disable plugin: with both private keys local, the server is unneeded
        self.set_enabled(False)
        return wallet
def create_remote_key(self, wallet, window):
self.wallet = wallet
self.window = window
if wallet.storage.get('wallet_type') != '2fa':
raise
return
email = self.accept_terms_of_use(window)
if not email:
return
xpub_hot = wallet.master_public_keys["x1/"]
xpub_cold = wallet.master_public_keys["x2/"]
# Generate third key deterministically.
long_user_id, self.user_id = self.get_user_id()
xpub3 = self.make_xpub(signing_xpub, long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub_hot, xpub_cold, email)
except socket.error:
self.window.show_message('Server not reachable, aborting')
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
raise e
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
self.window.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
try:
assert _id == self.user_id, ("user id error", _id, self.user_id)
assert xpub3 == _xpub3, ("xpub3 error", xpub3, _xpub3)
except Exception as e:
self.window.show_message(str(e))
return
if not self.setup_google_auth(self.window, self.user_id, otp_secret):
return
self.wallet.add_master_public_key('x3/', xpub3)
return True
    def need_server(self, tx):
        """Return True if signing *tx* requires the TrustedCoin server key."""
        from electrum.account import BIP32_Account
        # Detect if the server is needed
        long_id, short_id = self.get_user_id()
        xpub3 = self.wallet.master_public_keys['x3/']
        for x in tx.inputs_to_sign():
            # 'ff' prefix marks a BIP32 extended pubkey in the x_pubkey encoding.
            if x[0:2] == 'ff':
                xpub, sequence = BIP32_Account.parse_xpubkey(x)
                if xpub == xpub3:
                    return True
        return False
    @hook
    def send_tx(self, tx):
        """Prompt for a 2FA code when the server's signature will be needed."""
        self.print_error("twofactor:send_tx")
        if self.wallet.storage.get('wallet_type') != '2fa':
            return
        if not self.need_server(tx):
            self.print_error("twofactor: xpub3 not needed")
            self.auth_code = None
            return
        self.auth_code = self.auth_dialog()
    @hook
    def before_send(self):
        # request billing info before forming the transaction
        self.billing_info = None
        self.waiting_dialog = WaitingDialog(self.window, 'please wait...', self.request_billing_info)
        self.waiting_dialog.start()
        self.waiting_dialog.wait()
        if self.billing_info is None:
            self.window.show_message('Could not contact server')
            # Returning True aborts the send.
            return True
        return False
    @hook
    def extra_fee(self, tx):
        """Return the TrustedCoin fee (in satoshis) to add to *tx*, or 0."""
        if self.billing_info.get('tx_remaining'):
            # Prepaid transactions remain: nothing to charge.
            return 0
        if self.is_billing:
            # The billing payment itself is not charged.
            return 0
        # trustedcoin won't charge if the total inputs is lower than their fee
        price = int(self.price_per_tx.get(1))
        assert price <= 100000
        if tx.input_value() < price:
            self.print_error("not charging for this tx")
            return 0
        return price
    @hook
    def make_unsigned_transaction(self, tx):
        # Append the billing output (if a fee is due) before the tx is signed.
        price = self.extra_fee(tx)
        if not price:
            return
        tx.outputs.append(('address', self.billing_info['billing_address'], price))
    @hook
    def sign_transaction(self, tx, password):
        """Send the partially signed tx to TrustedCoin for its signature."""
        self.print_error("twofactor:sign")
        if self.wallet.storage.get('wallet_type') != '2fa':
            self.print_error("twofactor: aborting")
            return
        self.long_user_id, self.user_id = self.get_user_id()
        if not self.auth_code:
            # No 2FA code collected: the server key was not needed (see send_tx).
            return
        if tx.is_complete():
            return
        tx_dict = tx.as_dict()
        raw_tx = tx_dict["hex"]
        try:
            r = server.sign(self.user_id, raw_tx, self.auth_code)
        except Exception as e:
            # Surface the failure on the tx object for the GUI to report.
            tx.error = str(e)
            return
        self.print_error( "received answer", r)
        if not r:
            return
        # Replace our partial tx with the server's fully signed version.
        raw_tx = r.get('transaction')
        tx.update(raw_tx)
        self.print_error("twofactor: is complete", tx.is_complete())
    def auth_dialog(self ):
        """Modal dialog asking for the Google Authenticator code.

        Returns the code as an int, or None if the dialog was cancelled.
        """
        d = QDialog(self.window)
        d.setModal(1)
        vbox = QVBoxLayout(d)
        pw = AmountEdit(None, is_int = True)
        msg = _('Please enter your Google Authenticator code')
        vbox.addWidget(QLabel(msg))
        grid = QGridLayout()
        grid.setSpacing(8)
        grid.addWidget(QLabel(_('Code')), 1, 0)
        grid.addWidget(pw, 1, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            # Cancelled.
            return
        return pw.get_amount()
    def settings_widget(self, window):
        # Button shown in the plugins dialog.
        return EnterButton(_('Settings'), self.settings_dialog)
    def settings_dialog(self):
        # Refresh billing info first; the dialog is shown from the callback.
        self.waiting_dialog = WaitingDialog(self.window, 'please wait...', self.request_billing_info, self.show_settings_dialog)
        self.waiting_dialog.start()
    def show_settings_dialog(self, success):
        """Show TrustedCoin pricing/billing information.

        *success* is the outcome of request_billing_info (run beforehand by
        settings_dialog's WaitingDialog).
        """
        if not success:
            self.window.show_message(_('Server not reachable.'))
            return
        d = QDialog(self.window)
        d.setWindowTitle("TrustedCoin Information")
        d.setMinimumSize(500, 200)
        vbox = QVBoxLayout(d)
        hbox = QHBoxLayout()
        logo = QLabel()
        logo.setPixmap(QPixmap(":icons/trustedcoin.png"))
        msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
            + _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
        label = QLabel(msg)
        label.setOpenExternalLinks(1)
        hbox.addStretch(10)
        hbox.addWidget(logo)
        hbox.addStretch(10)
        hbox.addWidget(label)
        hbox.addStretch(10)
        vbox.addLayout(hbox)
        vbox.addStretch(10)
        msg = _('TrustedCoin charges a fee per co-signed transaction. You may pay on each transaction (an extra output will be added to your transaction), or you may purchase prepaid transaction using this dialog.') + '<br/>'
        label = QLabel(msg)
        label.setWordWrap(1)
        vbox.addWidget(label)
        vbox.addStretch(10)
        grid = QGridLayout()
        vbox.addLayout(grid)
        v = self.price_per_tx.get(1)
        grid.addWidget(QLabel(_("Price per transaction (not prepaid):")), 0, 0)
        grid.addWidget(QLabel(self.window.format_amount(v) + ' ' + self.window.base_unit()), 0, 1)
        i = 1
        # Always offer at least a 10-pack, defaulting to 10x the unit price.
        if 10 not in self.price_per_tx:
            self.price_per_tx[10] = 10 * self.price_per_tx.get(1)
        for k, v in sorted(self.price_per_tx.items()):
            if k == 1:
                continue
            grid.addWidget(QLabel("Price for %d prepaid transactions:"%k), i, 0)
            grid.addWidget(QLabel("%d x "%k + self.window.format_amount(v/k) + ' ' + self.window.base_unit()), i, 1)
            b = QPushButton(_("Buy"))
            # k=k, v=v freeze the loop variables in the lambda (late binding).
            b.clicked.connect(lambda b, k=k, v=v: self.on_buy(k, v, d))
            grid.addWidget(b, i, 2)
            i += 1
        n = self.billing_info.get('tx_remaining', 0)
        grid.addWidget(QLabel(_("Your wallet has %d prepaid transactions.")%n), i, 0)
        # transfer button
        #def on_transfer():
        #    server.transfer_credit(self.user_id, recipient, otp, signature_callback)
        #    pass
        #b = QPushButton(_("Transfer"))
        #b.clicked.connect(on_transfer)
        #grid.addWidget(b, 1, 2)
        #grid.addWidget(QLabel(_("Next Billing Address:")), i, 0)
        #grid.addWidget(QLabel(self.billing_info['billing_address']), i, 1)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()
    def on_buy(self, k, v, d):
        """Prefill the send tab to buy *k* prepaid transactions for *v* satoshis."""
        d.close()
        if self.window.pluginsdialog:
            self.window.pluginsdialog.close()
        uri = "bitcoin:" + self.billing_info['billing_address'] + "?message=TrustedCoin %d Prepaid Transactions&amount="%k + str(Decimal(v)/100000000)
        self.is_billing = True
        self.window.pay_from_URI(uri)
        # Freeze the fields so the billing payment cannot be edited.
        self.window.payto_e.setFrozen(True)
        self.window.message_e.setFrozen(True)
        self.window.amount_e.setFrozen(True)
    def request_billing_info(self):
        """Fetch billing info from the server and verify the billing address."""
        billing_info = server.get(self.user_id)
        # Re-derive the billing address locally so a malicious server cannot
        # substitute its own address.
        billing_address = self.make_billing_address(billing_info['billing_index'])
        assert billing_address == billing_info['billing_address']
        self.billing_info = billing_info
        self.price_per_tx = dict(self.billing_info['price_per_tx'])
        return True
    def accept_terms_of_use(self, window):
        """Show the TOS and collect the user's e-mail address.

        Returns the e-mail as a str, or None if the user cancelled.
        """
        vbox = QVBoxLayout()
        window.set_layout(vbox)
        vbox.addWidget(QLabel(_("Terms of Service")))
        tos_e = QTextEdit()
        tos_e.setReadOnly(True)
        vbox.addWidget(tos_e)
        vbox.addWidget(QLabel(_("Please enter your e-mail address")))
        email_e = QLineEdit()
        vbox.addWidget(email_e)
        vbox.addStretch()
        accept_button = OkButton(window, _('Accept'))
        accept_button.setEnabled(False)
        vbox.addLayout(Buttons(CancelButton(window), accept_button))
        # The TOS is fetched over the network on a daemon thread; the result
        # is delivered back to the GUI thread via the 'twofactor:TOS' signal.
        def request_TOS():
            tos = server.get_terms_of_service()
            self.TOS = tos
            window.emit(SIGNAL('twofactor:TOS'))
        def on_result():
            tos_e.setText(self.TOS)
        window.connect(window, SIGNAL('twofactor:TOS'), on_result)
        t = threading.Thread(target=request_TOS)
        t.setDaemon(True)
        t.start()
        # Enable "Accept" only once the text looks like an e-mail address.
        regexp = r"[^@]+@[^@]+\.[^@]+"
        email_e.textChanged.connect(lambda: accept_button.setEnabled(re.match(regexp,email_e.text()) is not None))
        email_e.setFocus(True)
        if not window.exec_():
            return
        email = str(email_e.text())
        return email
def setup_google_auth(self, window, _id, otp_secret):
vbox = QVBoxLayout()
window.set_layout(vbox)
if otp_secret is not None:
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
vbox.addWidget(QLabel("Please scan this QR code in Google Authenticator."))
qrw = QRCodeWidget(uri)
vbox.addWidget(qrw, 1)
msg = _('Then, enter your Google Authenticator code:')
else:
label = QLabel("This wallet is already registered, but it was never authenticated. To finalize your registration, please enter your Google Authenticator Code. If you do not have this code, delete the wallet file and start a new registration")
label.setWordWrap(1)
vbox.addWidget(label)
msg = _('Google Authenticator code:')
hbox = QHBoxLayout()
hbox.addWidget(QLabel(msg))
pw = AmountEdit(None, is_int = True)
pw.setFocus(True)
hbox.addWidget(pw)
hbox.addStretch(1)
vbox.addLayout(hbox)
b = OkButton(window, _('Next'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(window), b))
pw.textChanged.connect(lambda: b.setEnabled(len(pw.text())==6))
while True:
if not window.exec_():
return False
otp = pw.get_amount()
try:
server.auth(_id, otp)
return True
except:
QMessageBox.information(self.window, _('Message'), _('Incorrect password'), _('OK'))
pw.setText('')
|
edb1rd/BTC
|
plugins/trustedcoin.py
|
Python
|
gpl-3.0
| 25,459 | 0.003928 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-02 18:22
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the custom ``Person`` user model.

    Auto-generated by Django 1.9 — do not hand-edit the field definitions;
    the model mirrors ``AbstractUser`` plus a free-form ``description``.
    """
    initial = True
    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
    ]
    operations = [
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('description', models.TextField(blank=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name_plural': 'users',
                'abstract': False,
                'verbose_name': 'user',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
|
Nikola-K/django-template
|
users/migrations/0001_initial.py
|
Python
|
mit
| 3,082 | 0.004218 |
from lightbulb.api.api_native import LightBulb
import base64
# Example script (Python 2): run LightBulb's SFADiff algorithm comparing a
# browser-side HTML filter against a remote HTTP security check.
lightbulbapp = LightBulb()
path = "/test/env/bin/lightbulb" #Path to binary
# Grammar/alphabet configuration for the two automata being differenced.
configuration_A = {'TESTS_FILE_TYPE': 'None', 'ALPHABET': '32-57,58-64,65-126', 'SEED_FILE_TYPE': 'FLEX', 'TESTS_FILE': 'None','DFA1_MINUS_DFA2': 'True', 'SAVE': 'False', 'HANDLER': 'None', 'SEED_FILE': '{library}/regex/BROWSER/html_p_attribute.y'}
configuration_B = {'TESTS_FILE_TYPE': 'None', 'ALPHABET': '32-57,58-64,65-126', 'SEED_FILE_TYPE': 'FLEX', 'TESTS_FILE': 'None','DFA1_MINUS_DFA2': 'True', 'SAVE': 'False', 'HANDLER': 'None', 'SEED_FILE': '{library}/regex/BROWSER/html_p_attribute.y'}
# Handler A drives a local browser over websockets; handler B probes a web
# application endpoint over HTTP.
handlerconfig_A = {'WSPORT': '5000','WBPORT': '5080', 'BROWSERPARSE': 'True', 'DELAY': '50', 'HOST': 'localhost'}
handlerconfig_B = {'URL': 'http://127.0.0.1/~fishingspot/securitycheck/index.php', 'BLOCK':'Impact', 'REQUEST_TYPE':'GET','PARAM':'input','BYPASS':'None', 'PROXY_SCHEME': 'None', 'PROXY_HOST': 'None', 'PROXY_PORT': 'None', 'PROXY_USERNAME': 'None', 'PROXY_PASSWORD': 'None','USER_AGENT': "Mozilla/5.0", 'REFERER': "http://google.com"}
stats = lightbulbapp.start_sfadiff_algorithm(
    path,
    configuration_A,
    configuration_B,
    handlerconfig_A,
    handlerconfig_B,
    "BrowserHandler",
    "HTTPHandler")
print stats
|
lightbulb-framework/lightbulb-framework
|
examples/test_custom_api_native_example_1.py
|
Python
|
mit
| 1,286 | 0.012442 |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from PySide import QtGui, QtCore
from traits.trait_types import Event
from traitsui.api import View, UItem
from traitsui.basic_editor_factory import BasicEditorFactory
from traitsui.editors.api import TableEditor
from traitsui.handler import Controller
from traitsui.qt4.editor import Editor
from traitsui.qt4.key_event_to_name import key_event_to_name
from traitsui.table_column import ObjectColumn
# ============= standard library imports ========================
# ============= local library imports ==========================
# from traitsui.basic_editor_factory import BasicEditorFactory
from pychron.envisage.key_bindings import keybinding_exists
class KeyBindingsEditor(Controller):
    """Modal dialog listing key bindings; the binding column is edited with
    the custom KeyBindingEditor defined below."""
    def traits_view(self):
        cols = [
            ObjectColumn(name="binding", editor=KeyBindingEditor()),
            ObjectColumn(name="description", editable=False, width=400),
        ]
        v = View(
            UItem("bindings", editor=TableEditor(columns=cols)),
            width=500,
            height=600,
            title="Edit Key Bindings",
            kind="livemodal",
            buttons=["OK", "Cancel"],
            resizable=True,
        )
        return v
class KeyBindingControl(QtGui.QLabel):
    """Label widget that captures key presses and forwards them to its editor."""
    def keyPressEvent(self, event):
        """Handle keyboard keys being pressed."""
        # Ignore presses of the control and shift keys.
        if event.key() not in (QtCore.Qt.Key_Control, QtCore.Qt.Key_Shift):
            # Fires _KeyBindingEditor._key_changed via the `key` Event trait.
            self.editor.key = event
class _KeyBindingEditor(Editor):
    """TraitsUI editor showing a key binding and letting the user re-record
    it by pressing the new key combination."""
    # Fired by KeyBindingControl.keyPressEvent with the Qt key event.
    key = Event
    # clear = Event
    # refresh_needed = Event
    # dump_needed = Event
    def dispose(self):
        # override Editor.dispose. don't break reference to control
        if self.ui is None:
            return
        name = self.extended_name
        if name != "None":
            self.context_object.on_trait_change(self._update_editor, name, remove=True)
        if self._user_from is not None:
            for name, handler in self._user_from:
                self.on_trait_change(handler, name, remove=True)
        if self._user_to is not None:
            for object, name, handler in self._user_to:
                object.on_trait_change(handler, name, remove=True)
        # self.object = self.ui = self.item = self.factory = self.control = \
        # self.label_control = self.old_value = self._context_object = None
    def init(self, parent):
        # Create the key-capture label; its `editor` back-reference is set
        # inside _create_control.
        self.control = self._create_control()
        # self.sync_value(self.factory.refresh_needed, 'refresh_needed', mode='to')
        # self.sync_value(self.factory.refresh_needed, 'dump_needed', mode='to')
    def _create_control(self):
        ctrl = KeyBindingControl()
        ctrl.editor = self
        return ctrl
    def update_editor(self):
        """Updates the editor when the object trait changes externally to the
        editor.
        """
        if self.control:
            self.control.setText(self.value)
    def _key_changed(self, event):
        # Convert the Qt event to a name like "Ctrl+S" and check for clashes.
        key_name = key_event_to_name(event)
        key_name = key_name.replace("-", "+")
        desc = keybinding_exists(key_name)
        if desc:
            # Already assigned elsewhere: let the user confirm or abort.
            if (
                QtGui.QMessageBox.question(
                    self.control,
                    "Duplicate Key Definition",
                    "'%s' has already been assigned to '%s'.\n"
                    "Do you wish to continue?" % (key_name, desc),
                    QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
                    QtGui.QMessageBox.No,
                )
                != QtGui.QMessageBox.Yes
            ):
                return
        # else:
        # clear_keybinding(desc)
        # self.refresh_needed = True
        self.value = key_name
        self.control.setText(key_name)
class KeyBindingEditor(BasicEditorFactory):
    """Editor factory wiring up the _KeyBindingEditor implementation above."""
    klass = _KeyBindingEditor
    # refresh_needed = Str
# ============= EOF =============================================
|
USGSDenverPychron/pychron
|
pychron/core/ui/qt/keybinding_editor.py
|
Python
|
apache-2.0
| 4,794 | 0.001043 |
#!/usr/bin/python2.7
import sys
import csv
import yaml
import codecs
TO_BE_TRANSLATED_MARK = "***TO BE TRANSLATED***"
def collect(result, node, prefix=None):
    """Flatten the nested dict *node* into *result* using dotted keys.

    Leaf values are stored under "a.b.c"-style keys built from the path of
    dict keys leading to them.
    """
    for key, value in node.items():
        path = key if prefix is None else prefix + "." + key
        if isinstance(value, dict):
            collect(result, value, path)
        else:
            result[path] = value
def collect_old_csv(filename):
    """Read previously translated key/value pairs from *filename*.

    Rows still carrying the TO_BE_TRANSLATED mark are skipped.
    Returns a dict mapping translation keys to unicode values.
    """
    result = {}
    # Bug fix: the file handle was never closed; use a context manager.
    with open(filename) as f:
        reader = csv.reader(f)
        for row in reader:
            if TO_BE_TRANSLATED_MARK not in row[1]:
                value = row[1]
                # On Python 2 csv yields byte strings that must be decoded;
                # on Python 3 they are already str.
                if isinstance(value, bytes):
                    value = value.decode("utf-8")
                result[row[0]] = value
    return result
def flatten(namespace=None, old_csv=None):
    """Emit a CSV of translation keys to stdout.

    Reads ``<namespace>en.yml`` and ``<namespace>ja.yml``, flattens both to
    dotted keys, and writes one row per English key.  The Japanese value is
    taken from ja.yml, then from *old_csv*, and otherwise the English text
    prefixed with the TO-BE-TRANSLATED mark.
    """
    namespace = "" if namespace is None else namespace + "."
    # Bug fix: yaml.load without a Loader is unsafe on untrusted input (and
    # deprecated in PyYAML >= 5.1); safe_load suffices for plain locale data.
    # Also close the files deterministically instead of leaking the handles.
    with open("%sen.yml" % namespace) as f:
        en_src = yaml.safe_load(f)
    with open("%sja.yml" % namespace) as f:
        ja_src = yaml.safe_load(f)
    en = {}
    collect(en, en_src["en"])
    ja = {}
    collect(ja, ja_src["ja"])
    ja_old = collect_old_csv(old_csv) if old_csv else {}
    writer = csv.writer(sys.stdout)
    for key, value in sorted(en.items()):
        val = TO_BE_TRANSLATED_MARK + value
        if key in ja:
            val = ja[key]
        elif key in ja_old:
            val = ja_old[key]
        writer.writerow([key, val.encode("UTF-8")])
# CLI entry point (Python 2): namespace plus an optional CSV of previous
# translations to carry forward.
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: yaml2csv.py namespace('server'|'client') [old-translated-csv-file]"
        sys.exit(1)
    flatten(sys.argv[1], None if len(sys.argv) < 3 else sys.argv[2])
|
shimarin/discourse-ja-translation
|
yaml2csv.py
|
Python
|
gpl-2.0
| 1,474 | 0.00882 |
import functools
import logging
import random
from hazelcast.future import Future
from hazelcast.proxy.base import Proxy
from hazelcast.cluster import VectorClock
from hazelcast.protocol.codec import (
pn_counter_add_codec,
pn_counter_get_codec,
pn_counter_get_configured_replica_count_codec,
)
from hazelcast.errors import NoDataMemberInClusterError
_logger = logging.getLogger(__name__)
class PNCounter(Proxy):
"""PN (Positive-Negative) CRDT counter.
The counter supports adding and subtracting values as well as
retrieving the current counter value.
Each replica of this counter can perform operations locally without
coordination with the other replicas, thus increasing availability.
The counter guarantees that whenever two nodes have received the
same set of updates, possibly in a different order, their state is
identical, and any conflicting updates are merged automatically.
If no new updates are made to the shared state, all nodes that can
communicate will eventually have the same data.
When invoking updates from the client, the invocation is remote.
This may lead to indeterminate state - the update may be applied but the
response has not been received. In this case, the caller will be notified
with a TargetDisconnectedError.
The read and write methods provide monotonic read and RYW (read-your-write)
guarantees. These guarantees are session guarantees which means that if
no replica with the previously observed state is reachable, the session
guarantees are lost and the method invocation will throw a
ConsistencyLostError. This does not mean
that an update is lost. All of the updates are part of some replica and
will be eventually reflected in the state of all other replicas. This
exception just means that you cannot observe your own writes because
all replicas that contain your updates are currently unreachable.
After you have received a ConsistencyLostError, you can either
wait for a sufficiently up-to-date replica to become reachable in which
case the session can be continued or you can reset the session by calling
the reset() method. If you have called the reset() method,
a new session is started with the next invocation to a CRDT replica.
Notes:
The CRDT state is kept entirely on non-lite (data) members. If there
aren't any and the methods here are invoked on a lite member, they will
fail with an NoDataMemberInClusterError.
"""
_EMPTY_ADDRESS_LIST = []
    def __init__(self, service_name, name, context):
        super(PNCounter, self).__init__(service_name, name, context)
        # Highest replica timestamps observed by this session; backs the
        # monotonic-read and read-your-writes guarantees.
        self._observed_clock = VectorClock()
        # Configured replica count (0 until fetched from the cluster).
        self._max_replica_count = 0
        # Replica this proxy currently pins its CRDT operations to.
        self._current_target_replica_address = None
    def get(self):
        """Returns the current value of the counter.

        Returns:
            hazelcast.future.Future[int]: The current value of the counter.

        Raises:
            NoDataMemberInClusterError: if the cluster does not contain any data members.
            ConsistencyLostError: if the session guarantees have been lost.
        """
        # Read-only operation: no delta arguments needed.
        return self._invoke_internal(pn_counter_get_codec)
    def get_and_add(self, delta):
        """Adds the given value to the current value and returns the previous value.

        Args:
            delta (int): The value to add.

        Returns:
            hazelcast.future.Future[int]: The previous value.

        Raises:
            NoDataMemberInClusterError: if the cluster does not contain any data members.
            ConsistencyLostError: if the session guarantees have been lost.
        """
        # get_before_update=True -> the pre-update value is returned.
        return self._invoke_internal(pn_counter_add_codec, delta=delta, get_before_update=True)
    def add_and_get(self, delta):
        """Adds the given value to the current value and returns the updated value.

        Args:
            delta (int): The value to add.

        Returns:
            hazelcast.future.Future[int]: The updated value.

        Raises:
            NoDataMemberInClusterError: if the cluster does not contain any data members.
            ConsistencyLostError: if the session guarantees have been lost.
        """
        # get_before_update=False -> the post-update value is returned.
        return self._invoke_internal(pn_counter_add_codec, delta=delta, get_before_update=False)
    def get_and_subtract(self, delta):
        """Subtracts the given value from the current value and returns the previous value.

        Args:
            delta (int): The value to subtract.

        Returns:
            hazelcast.future.Future[int]: The previous value.

        Raises:
            NoDataMemberInClusterError: if the cluster does not contain any data members.
            ConsistencyLostError: if the session guarantees have been lost.
        """
        # Subtraction is an addition of the negated delta.
        return self._invoke_internal(pn_counter_add_codec, delta=-1 * delta, get_before_update=True)
    def subtract_and_get(self, delta):
        """Subtracts the given value from the current value and returns the updated value.

        Args:
            delta (int): The value to subtract.

        Returns:
            hazelcast.future.Future[int]: The updated value.

        Raises:
            NoDataMemberInClusterError: if the cluster does not contain any data members.
            ConsistencyLostError: if the session guarantees have been lost.
        """
        # Subtraction is an addition of the negated delta.
        return self._invoke_internal(
            pn_counter_add_codec, delta=-1 * delta, get_before_update=False
        )
    def get_and_decrement(self):
        """Decrements the counter value by one and returns the previous value.

        Returns:
            hazelcast.future.Future[int]: The previous value.

        Raises:
            NoDataMemberInClusterError: if the cluster does not contain any data members.
            ConsistencyLostError: if the session guarantees have been lost.
        """
        # Fixed delta of -1, returning the pre-update value.
        return self._invoke_internal(pn_counter_add_codec, delta=-1, get_before_update=True)
    def decrement_and_get(self):
        """Decrements the counter value by one and returns the updated value.

        Returns:
            hazelcast.future.Future[int]: The updated value.

        Raises:
            NoDataMemberInClusterError: if the cluster does not contain any data members.
            ConsistencyLostError: if the session guarantees have been lost.
        """
        # Fixed delta of -1, returning the post-update value.
        return self._invoke_internal(pn_counter_add_codec, delta=-1, get_before_update=False)
    def get_and_increment(self):
        """Increments the counter value by one and returns the previous value.

        Returns:
            hazelcast.future.Future[int]: The previous value.

        Raises:
            NoDataMemberInClusterError: if the cluster does not contain any data members.
            ConsistencyLostError: if the session guarantees have been lost.
        """
        # Fixed delta of +1, returning the pre-update value.
        return self._invoke_internal(pn_counter_add_codec, delta=1, get_before_update=True)
def increment_and_get(self):
    """Atomically increments the counter by one and returns the value *after* the update.

    Returns:
        hazelcast.future.Future[int]: The updated value.

    Raises:
        NoDataMemberInClusterError: if the cluster does not contain any data members.
        UnsupportedOperationError: if the cluster version is less than 3.10.
        ConsistencyLostError: if the session guarantees have been lost.
    """
    return self._invoke_internal(
        pn_counter_add_codec, delta=1, get_before_update=False
    )
def reset(self):
    """Resets the observed state by this PN counter.

    This method may be used after a method invocation has thrown a ``ConsistencyLostError``
    to reset the proxy and to be able to start a new session.
    """
    # Dropping the locally observed vector clock forgets the session
    # guarantees, so the next invocation starts a fresh session.
    self._observed_clock = VectorClock()
def _invoke_internal(self, codec, **kwargs):
    """Start a CRDT invocation and hand back a future for its eventual result.

    The actual request/retry logic lives in ``_set_result_or_error``; this
    method only creates the future that the caller will observe and seeds
    the first attempt with the empty exclusion sentinel and no prior error.
    """
    result_future = Future()
    self._set_result_or_error(result_future, PNCounter._EMPTY_ADDRESS_LIST, None, codec, **kwargs)
    return result_future
def _set_result_or_error(
    self, delegated_future, excluded_addresses, last_error, codec, **kwargs
):
    """Attempt the CRDT operation on some replica and settle ``delegated_future``.

    Picks a target replica (skipping ``excluded_addresses``), sends the
    encoded request to it and registers ``_check_invocation_result`` as the
    completion callback; that callback either resolves the future or calls
    back into this method to retry on another replica.

    Args:
        delegated_future: Future handed out to the caller; failed here when
            no target is available, otherwise settled by the callback.
        excluded_addresses: Replicas that already failed and must not be
            retried (``_EMPTY_ADDRESS_LIST`` sentinel on the first attempt).
        last_error: Most recent failure, surfaced to the caller when no
            replica remains; ``None`` on the first attempt.
        codec: Client-protocol codec used to encode/decode the message.
        **kwargs: Extra codec arguments (e.g. ``delta``, ``get_before_update``).
    """
    target = self._get_crdt_operation_target(excluded_addresses)
    if not target:
        # No replica left to try: propagate the last seen error if there
        # was one, otherwise report that the cluster has no data members.
        if last_error:
            delegated_future.set_exception(last_error)
            return
        delegated_future.set_exception(
            NoDataMemberInClusterError(
                "Cannot invoke operations on a CRDT because "
                "the cluster does not contain any data members"
            )
        )
        return
    # The observed vector-clock entries travel with the request so the
    # target replica can detect lost session guarantees.
    request = codec.encode_request(
        name=self.name,
        replica_timestamps=self._observed_clock.entry_set(),
        target_replica_uuid=target.uuid,
        **kwargs
    )
    future = self._invoke_on_target(request, target.uuid, codec.decode_response)
    # Bind all retry context into the callback so a failure can re-enter
    # this method with the same codec arguments.
    checker_func = functools.partial(
        self._check_invocation_result,
        delegated_future=delegated_future,
        excluded_addresses=excluded_addresses,
        target=target,
        codec=codec,
        **kwargs
    )
    future.add_done_callback(checker_func)
def _check_invocation_result(
    self, future, delegated_future, excluded_addresses, target, codec, **kwargs
):
    """Completion callback for one replica invocation.

    On success, records the replica timestamps and resolves
    ``delegated_future`` with the counter value.  On any failure, logs it,
    marks ``target`` as excluded and retries on another replica via
    ``_set_result_or_error``.
    """
    try:
        result = future.result()
        self._update_observed_replica_timestamp(result["replica_timestamps"])
        delegated_future.set_result(result["value"])
    except Exception as ex:
        _logger.exception(
            "Exception occurred while invoking operation on target %s, "
            "choosing different target",
            target,
        )
        if excluded_addresses == PNCounter._EMPTY_ADDRESS_LIST:
            # Never mutate the shared sentinel list; switch to a fresh
            # per-invocation list before appending the failed target.
            excluded_addresses = []
        excluded_addresses.append(target)
        # Retry elsewhere, remembering this exception as the latest error.
        self._set_result_or_error(delegated_future, excluded_addresses, ex, codec, **kwargs)
def _get_crdt_operation_target(self, excluded_addresses):
if (
self._current_target_replica_address
and self._current_target_replica_address not in excluded_addresses
):
return self._current_target_replica_address
self._current_target_replica_address = self._choose_target_replica(excluded_addresses)
return self._current_target_replica_address
def _choose_target_replica(self, excluded_addresses):
replica_addresses = self._get_replica_addresses(excluded_addresses)
if len(replica_addresses) == 0:
return None
random_replica_index = random.randrange(0, len(replica_addresses))
return replica_addresses[random_replica_index]
def _get_replica_addresses(self, excluded_addresses):
data_members = self._context.cluster_service.get_members(
lambda member: not member.lite_member
)
replica_count = self._get_max_configured_replica_count()
current_count = min(replica_count, len(data_members))
replica_addresses = []
for i in range(current_count):
member_address = data_members[i]
if member_address not in excluded_addresses:
replica_addresses.append(member_address)
return replica_addresses
def _get_max_configured_replica_count(self):
    """Return the replica count configured on the cluster, fetching and caching it on first use."""
    if self._max_replica_count <= 0:
        # Not cached yet: ask the cluster (blocking call) and remember it.
        request = pn_counter_get_configured_replica_count_codec.encode_request(self.name)
        self._max_replica_count = self._invoke(
            request, pn_counter_get_configured_replica_count_codec.decode_response
        ).result()
    return self._max_replica_count
def _update_observed_replica_timestamp(self, observed_timestamps):
observed_clock = self._to_vector_clock(observed_timestamps)
if observed_clock.is_after(self._observed_clock):
self._observed_clock = observed_clock
def _to_vector_clock(self, timestamps):
    """Build a ``VectorClock`` from an iterable of ``(replica_id, timestamp)`` pairs."""
    clock = VectorClock()
    for replica_id, stamp in timestamps:
        clock.set_replica_timestamp(replica_id, stamp)
    return clock
|
hazelcast/hazelcast-python-client
|
hazelcast/proxy/pn_counter.py
|
Python
|
apache-2.0
| 12,287 | 0.002442 |
import os
import unittest
import moderngl
import pycodestyle
class TestCase(unittest.TestCase):
    """Code-style checks for the moderngl package sources."""

    def test_style(self):
        """Fail when pycodestyle reports any violation (E402 is ignored)."""
        here = os.path.dirname(__file__)
        config_file = os.path.join(here, '..', 'tox.ini')
        guide = pycodestyle.StyleGuide(config_file=config_file, ignore='E402')
        sources = [
            os.path.join(here, relative)
            for relative in ('../moderngl/__init__.py', '../moderngl/__main__.py')
        ]
        report = guide.check_files(sources)
        self.assertEqual(report.total_errors, 0)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
cprogrammer1994/ModernGL
|
tests/test_code_style.py
|
Python
|
mit
| 587 | 0 |
import time
from menu.ncolor import *
from menu.showMainMenu import *
from command.shell import *
from write.dnsmasq_write import *
class Sub_Menu():
    """Interactive configuration sub-menus of QuickAp.

    Each method implements one screen (AP name, password, security type,
    DHCP, DNS, network interfaces).  The methods take no ``self`` and are
    invoked as ``Sub_Menu.<name>(...)``; each loops on user input and
    returns the possibly updated setting(s).
    """

    # Banner shown at the top of the DNS redirect screen.
    dns_message = """ you can add a redirect entry in this menu or edit the dnsmasq configuration
file located in""" + color.BLEU + """ '/etc/redirect/dnsmasq.host'\n """ + color.ENDC

    def nameMenu(ssid):
        """Screen for renaming the access point.

        Args:
            ssid: currently configured AP name.

        Returns:
            The new (or unchanged) AP name.
        """
        while True:
            print ("\nthe current name of the access point is " + color.VERT + "'" + ssid + "'" + color.ENDC)
            print("")
            print("%49s" % ("current options" + color.ENDC))
            print("%58s" % (color.DARKCYAN + "-----------------------" + color.ENDC))
            print("%48s" % ("(1) choose a new name."))
            print("%41s" % ("(5) main menu.\n"))
            while True:
                NameChoice = input(color.BLEU + "name > " + color.ENDC)
                if NameChoice == "1":
                    print(color.DARKYELLOW + "enter the new name of the ap..." + color.ENDC)
                    ssid = input(color.BLEU + "name > " + color.DARKROUGE + "new name > " + color.ENDC)
                    print (color.VERT + "[+]" + color.ENDC + " changing the name for " + color.VERT + "'" + ssid + "'" + color.ENDC)
                    time.sleep(1)
                    return ssid
                elif NameChoice == "5":
                    print(color.VERT + "[+]" + color.ENDC + " going back to main menu.")
                    time.sleep(0.3)
                    return ssid
                else:
                    print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid option!")

    def PassordMenu(crypt, password):
        """Screen for changing the AP password.

        A password may only be chosen after a security type has been picked
        (``crypt != "N/A"``); candidate passwords are validated through
        ``PassHandle``.  Returns the new (or unchanged) password.
        """
        while True:
            if crypt != "N/A":
                print("")
                print("%48s" % ("current options" + color.ENDC))
                print("%56s" % (color.DARKCYAN + "-----------------------" + color.ENDC))
                print("%48s" % ("(1) choose new password."))
                print("%39s" % ("(5) main menu.\n"))
                while True:
                    PasswordChoice = input(color.BLEU + "password > " + color.ENDC)
                    if PasswordChoice == "1":
                        print(color.DARKYELLOW + "enter the new password for the ap..." + color.ENDC)
                        error = False
                        # keep prompting until PassHandle accepts the password
                        while error == False:
                            password = input(color.BLEU + "password > " + color.DARKROUGE + "new password > " + color.ENDC)
                            error = Sub_Menu.PassHandle(crypt, password)
                        print (color.VERT + "[+]" + color.ENDC + " changing the password to " + color.VERT + "'" + password + "'" + color.ENDC)
                        time.sleep(1)
                        return password
                    elif PasswordChoice == "5":
                        print(color.VERT + "[+]" + color.ENDC + " going back to main menu.")
                        time.sleep(0.3)
                        return password
                    else:
                        print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid option!")
            else:
                print(color.ROUGE + "[*]" + color.ENDC + " please select a security type if you want to choose a password.")
                time.sleep(1.5)
                return password

    def securityMenu(crypt, password):
        """Screen for choosing the wireless security type (WPA2/WPA/WEP/none).

        Selecting a type immediately prompts for a matching password; the
        previous password is discarded.  Returns ``(crypt, password)``.
        """
        while True:
            security_text = color.BLEU + color.BOLD + """
-WPA2 """ + color.ENDC + """is the most advanced wifi security protocol curently used by most
router by default. The passphrase must have a minimum of 8 character.""" + color.BLEU + color.BOLD + """\n
-WPA""" + color.ENDC + """ wpa is older and less secure than wpa2. it is using an older
encryption (TKIP). Like wpa2 you need to put at least 8 charactere. """ + color.BLEU + color.BOLD + """\n
-WEP""" + color.ENDC + """ wep is deprecated and can be very easely cracked. your wep key must
be at least 10 charactere and only contain hexadecimal character."""
            print(security_text)
            print ("\n - the current security of the access point is " + color.VERT + "'" + crypt + "'" + color.ENDC)
            print("")
            print("%53s" % ("current options" + color.ENDC))
            print("%61s" % (color.DARKCYAN + "-----------------------" + color.ENDC))
            print("%38s" % ("(1) WPA2."))
            print("%44s" % ("(2) WPA (TKIP)."))
            print("%47s" % ("(3) WEP (64 bits)."))
            print("%45s" % ("(4) no security."))
            print("%44s" % ("(5) main menu.\n"))
            while True:
                NameChoice = input(color.BLEU + "security > " + color.ENDC)
                # the old password is reset whenever a new type is picked
                pwd = ""
                if NameChoice == "1":
                    Sec = "WPA2"
                    crypt, password = Sub_Menu.AskPassword(Sec, pwd)
                    return crypt, password
                elif NameChoice == "2":
                    Sec = "WPA"
                    crypt, password = Sub_Menu.AskPassword(Sec, pwd)
                    return crypt, password
                elif NameChoice == "3":
                    Sec = "WEP"
                    crypt, password = Sub_Menu.AskPassword(Sec, pwd)
                    return crypt, password
                elif NameChoice == "4":
                    print (color.VERT + "[+]" + color.ENDC + " deleting the " + color.VERT + crypt + color.ENDC + " security.")
                    time.sleep(1)
                    crypt = "N/A"
                    password = "N/A"
                    return crypt, password
                elif NameChoice == "5":
                    print(color.VERT + "[+]" + color.ENDC + " going back to main menu.")
                    time.sleep(0.3)
                    return crypt, password
                else:
                    print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid option!")

    def dhcpMenu(dhcp):
        """Screen toggling the dnsmasq DHCP server between 'ON' and 'N/A'.

        Returns the new (or unchanged) DHCP status string.
        """
        while True:
            #putting some information for the dhcp in variable
            couleur = color.Color_check(dhcp)
            dhcpPool = "10.0.0.10-250"
            dhcpLease = "12h"
            # show the appropriate option in the menu
            if dhcp == "N/A":
                dhcpOPTION = "(1) set dhcp server to" + color.VERT + " 'on'" + color.ENDC
            else:
                dhcpOPTION = "%47s" % " (1) set dhcp server to" + color.ROUGE + " 'off'" + color.ENDC
            print ("""\n the dhcp server should always be on. If the dhcp is set to 'N/A' the client
will need to have is adresse, gateway and dns set manualy.\n""")
            print (color.BOLD + " dhcp status: " + color.ENDC + couleur + "'" + dhcp + "'" + color.ENDC)
            print (color.BOLD + " dhcp pool: " + color.ENDC + color.BLEU + dhcpPool + color.ENDC)
            print (color.BOLD + " dhcp lease: " + color.ENDC + color.BLEU + dhcpLease + color.ENDC)
            print("")
            print("%49s" % ("current options" + color.ENDC))
            print("%57s" % (color.DARKCYAN + "-----------------------" + color.ENDC))
            print("%61s" % (dhcpOPTION))
            print("%40s" % ("(5) main menu.\n"))
            while True:
                DhcpChoice = input(color.BLEU + "dhcp > " + color.ENDC)
                #check the last dhcp value and take the decision to put it to on or off
                if DhcpChoice == "1":
                    if dhcp == "N/A":
                        dhcp = "ON"
                    else:
                        dhcp = "N/A"
                    print (color.VERT + "[+]" + color.ENDC + " changing dhcp status to " + color.VERT + "'" + dhcp + "'" + color.ENDC)
                    time.sleep(1)
                    return dhcp
                #if this option is chosen to go back to main menu
                elif DhcpChoice == "5":
                    print(color.VERT + "[+]" + color.ENDC + " going back to main menu.")
                    time.sleep(0.3)
                    return dhcp
                else:
                    print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid option!")

    def dnsMenu(dns):
        """Screen controlling the dnsmasq DNS forwarder and redirect entries.

        Option 1 toggles forwarding, option 2 manages redirect entries in
        '/etc/redirect/dnsmasq.host', option 3 deletes entries.  Returns the
        new (or unchanged) DNS status string.
        """
        while True:
            couleur = color.Color_check(dns)
            # show the appropriate option in the menu
            if dns == "N/A":
                dnsOPTION = "(1) set dns server to" + color.VERT + " 'on' " + color.ENDC
            else:
                dnsOPTION = "(1) set dns server to" + color.ROUGE + " 'off'" + color.ENDC
            print ("""\n if dns fowarding is set to 'on' dnsmasq will start the dns server and
start fowarding all the request to the google dns server. When the dns
server is active its possible to redirect the client to the ip adresse
of your choice """)
            print (color.BOLD + "\n dns status:" + color.ENDC + couleur + " '" + dns + "'" + color.ENDC)
            print("%51s" % ("current options" + color.ENDC))
            print("%59s" % (color.DARKCYAN + "-----------------------" + color.ENDC))
            print("%63s" % (dnsOPTION))
            print("%47s" % ("(2) redirect client."))
            print("%46s" % ("(3) cleaning entry."))
            print("%42s" % ("(5) main menu.\n"))
            # NOTE: this is an if/if/if/if-else chain (not elif); it behaves
            # correctly because every matching branch returns or breaks
            # before the next test is reached.
            while True:
                DnsChoice = input(color.BLEU + "dns > " + color.ENDC)
                if DnsChoice == "1":
                    if dns == "N/A":
                        dns = "ON"
                    else:
                        dns = "N/A"
                    print (color.VERT + "[+]" + color.ENDC + " changing dns status to " + color.VERT + "'" + dns + "'" + color.ENDC)
                    time.sleep(1)
                    return dns
                if DnsChoice == "2":
                    while True:
                        # read the dnsmasq.host file and print the message.
                        print(Sub_Menu.dns_message)
                        entry_number = read_dnsmasq_host()
                        # give the user de choice to do a new entry.
                        print(color.DARKYELLOW + "\ndo you want to write an entry in the file? (y/n)" + color.ENDC)
                        choice = input(color.BLEU + "dns > " + color.ENDC)
                        # if choice is yes, we ask the user to enter the entry withthe spicified format.
                        if choice == "y":
                            error = False
                            print (color.DARKCYAN + "enter the new entry with the adresse and the domain separated only by a single")
                            print("space. Example: (192.168.1.60 www.google.com)")
                            # if an error is detected in the checkup of the pattern, we stay in the loop.
                            while not error:
                                entry = input(color.BLEU + "dns > " + color.DARKROUGE + "entry > " + color.ENDC)
                                error = Entry_handeling(entry)
                        else:
                            break
                    break
                if DnsChoice == "3":
                    # handle the delete of the entry.
                    delete_handeling()
                    break
                if DnsChoice == "5":
                    print(color.VERT + "[+]" + color.ENDC + " going back to main menu.")
                    time.sleep(0.3)
                    return dns
                else:
                    print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid option!")

    def interfaceMenu(inside, outside):
        """Screen for selecting the inside (hostapd wifi) and outside (uplink) interfaces.

        Returns the possibly updated ``(inside, outside)`` interface names.
        """
        print("""\n Quick ap will use the interface that you have selected to apply the ip tables
rules on them and make the hotspot working. The inside interface is the wifi
card that will be use whith hostapd for creating the hotspot. The outside
interface will be use to share the connection with the victims. You need to
make sure that the outside interface have an addresse if you want to share
the Internet. \n""")
        while True:
            #put genral status of the interface in the variables and return false if interface is down.
            addresse_in, addresse_out, check_in, check_out = command.nic_selectedStatus(inside, outside)
            #color status of the interface are put into varirables.
            color_in = color.color_checkINT(inside, check_in)
            color_out = color.color_checkINT(outside, check_out)
            #show the status of the selected interface with the help of the method nic_selected
            print("%50s" % (" interface status" + color.ENDC))
            print("%59s" % (color.DARKCYAN + "=======================" + color.ENDC))
            print("\t\t\t [" + color_in + inside + color.ENDC + "]" + " <-> " + addresse_in)
            print("\t\t\t [" + color_out + outside + color.ENDC + "]" + " <-> " + addresse_out + "\n")
            print("%50s" % ("current options" + color.ENDC))
            print("%59s" % (color.DARKCYAN + "-----------------------" + color.ENDC))
            print("%48s" % ("(1) choose interface."))
            print("%39s" % ("(2) refresh."))
            print("%41s" % ("(5) main menu."))
            #first menu choice.
            interfaceChoiceFirst = input(color.BLEU + "\nnetwork > " + color.ENDC)
            if interfaceChoiceFirst == "1":
                Menu = True
                while Menu == True:
                    print("%52s" % (" available interface" + color.ENDC))
                    print("%59s" % (color.VERT + "=======================" + color.ENDC))
                    #looping to all interface disponible and show their status and if they are selected
                    interface = command.nic_status(inside, outside)
                    print("")
                    print("%51s" % ("current options" + color.ENDC))
                    print("%59s" % (color.VERT + "-----------------------" + color.ENDC))
                    print("%49s" % ("(1) choose inside nic."))
                    print("%50s" % ("(2) choose outside nic."))
                    print("%48s" % ("(3) deselect all nic."))
                    print("%39s" % ("(4) refresh."))
                    print("%41s" % ("(5) main menu."))
                    interfaceChoice = input(color.BLEU + "\nnetwork > " + color.ENDC + color.DARKROUGE + "nic > " + color.ENDC)
                    if interfaceChoice == "1":
                        print(color.DARKYELLOW + "enter the name of the inside interface that you want to select..." + color.ENDC)
                        insideChoice = input(color.BLEU + "network > " + color.ENDC + color.DARKROUGE + "inside > " + color.ENDC)
                        #checking in the list of interface to see if the interface is in the choice
                        interface_check = command.choice_check(insideChoice, interface)
                        #make sure that the interface selected is not the same has the outside interface.
                        duplicate = command.nic_duplicate("inside", insideChoice, "", inside, outside)
                        # if the duplicate is detected the statement continu make the program skip the conditions
                        # and go back to the start of the loop
                        if duplicate == True:
                            continue
                        #if interface_check return false, return the user to main menu.
                        elif interface_check == False:
                            print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid interface. Press 'refresh' to scan interface again.")
                            time.sleep(1.5)
                            print("\n")
                        #run sevral check to see if the choice is wireless compactible etc... If the choice is
                        #not accepted the last_choice is returned by wifi check
                        else:
                            last_choice_in = inside
                            inside = command.wifi_check(insideChoice, last_choice_in)
                    elif interfaceChoice == "2":
                        print(color.DARKYELLOW + "enter the name of the outside interface that you want to select..." + color.ENDC)
                        outsideChoice = input(color.BLEU + "network > " + color.ENDC + color.DARKROUGE + "outside > " + color.ENDC)
                        interface_check = command.choice_check(outsideChoice, interface)
                        duplicate = command.nic_duplicate("outside", "", outsideChoice, inside, outside)
                        if duplicate == True:
                            continue
                        elif interface_check == False:
                            print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid interface. Press 'refresh' to scan interface again.")
                            time.sleep(1.5)
                            print("\n")
                        else:
                            outside = command.out_check(outsideChoice)
                    elif interfaceChoice == "3":
                        inside = "N/A"
                        outside = "N/A"
                        print(color.VERT + "[+]" + color.ENDC + " unselecting all network interface!")
                        time.sleep(1)
                        print("\n")
                    elif interfaceChoice == "4":
                        print (color.VERT + "[+] " + color.ENDC + "refreshing!")
                        time.sleep(0.3)
                    elif interfaceChoice == "5":
                        print (color.VERT + "[+] " + color.ENDC + "main menu.")
                        time.sleep(0.3)
                        return inside, outside
                    else:
                        print (color.ROUGE + "[-] " + color.ENDC + "please enter a valid option!\n")
                        time.sleep(0.3)
            elif interfaceChoiceFirst == "2":
                print (color.VERT + "[+] " + color.ENDC + "refreshing!")
                time.sleep(0.3)
            elif interfaceChoiceFirst == "5":
                print (color.VERT + "[+] " + color.ENDC + "main menu")
                time.sleep(0.3)
                return inside, outside
            else:
                print(color.ROUGE + "[-] " + color.ENDC + "please enter a valid choice!\n")
                time.sleep(0.3)
        # NOTE(review): unreachable — the loop above only exits via return.
        return inside, outside

    def PassHandle(handleSEC, handlePASS):
        """Validate a candidate password for the given security type.

        WPA/WPA2 passphrases need at least 8 characters; WEP keys must be
        exactly 10 lowercase hexadecimal digits.  Prints an error and
        returns False on rejection, returns True on success.
        """
        pass_length = len(handlePASS)
        # Hexadecimal digits permitted in a WEP key.  BUGFIX: the original
        # set ("123456789abcdef") was missing '0', wrongly rejecting valid
        # keys containing a zero.
        allowed = set("0123456789" + "abcdef")
        if handleSEC == "WPA2" or handleSEC == "WPA":
            if pass_length < 8:
                print (color.ROUGE + "[*]" + color.ENDC + " the wpa password must be at least 8 charactere!")
                return False
            else:
                return True
        elif handleSEC == "WEP":
            if set(handlePASS) <= allowed and pass_length == 10:
                return True
            else:
                print (color.ROUGE + "[*]" + color.ENDC + " the wep password must have 10 charactere and use HEX only")
                return False

    def AskPassword(Sec, pwd):
        """Prompt until a password valid for security type ``Sec`` is entered.

        Returns ``(Sec, pwd)`` with the accepted password.
        """
        error = False
        print(color.DARKYELLOW + "enter the new " + Sec + " password for the ap..." + color.ENDC)
        while error == False:
            pwd = input(color.BLEU + "security > " + color.DARKROUGE + Sec + " > " + color.ENDC)
            error = Sub_Menu.PassHandle(Sec, pwd)
        print (color.VERT + "[+]" + color.ENDC + " changing the security to " + color.VERT + "'" + Sec + "'" + color.ENDC)
        print (color.VERT + "[+]" + color.ENDC + " changing the password to " + color.VERT + "'" + pwd + "'" + color.ENDC)
        time.sleep(1)
        return Sec, pwd
|
blackice5514/QuickAp
|
menu/showSubMenu.py
|
Python
|
gpl-3.0
| 20,613 | 0.027119 |
"""
====================================================
Shuffle channels' data in the time domain and plot.
====================================================
"""
# Author: Eberhard Eich
# Praveen Sripad
#
# License: BSD (3-clause)
import numpy as np
import os.path as op
import mne
from jumeg.jumeg_utils import (get_files_from_list, time_shuffle_slices,
channel_indices_from_list)
from mne.datasets import sample
data_path = sample.data_path()
raw_fname = str(data_path + '/MEG/sample/sample_audvis_raw.fif')
# shuffle all MEG channels that begin with number 11
shflchanlist = ['MEG 11..']
# shuffle the whole length of the data
tmin, tmax = 0., None
# apply the shuffling
# time_shuffle_slices(raw_fname, shufflechans=shflchanlist, tmin=tmin, tmax=tmax)
plot_things = True
if plot_things:
    # time_shuffle_slices() writes its output as
    # "<basename>,tperm-raw.fif" next to the original raw file
    permname = op.join(op.dirname(raw_fname),
                       op.basename(raw_fname).split('-')[0]) + ',tperm-raw.fif'
    rawraw = mne.io.Raw(raw_fname,preload=True)
    # indices of the channels selected for shuffling
    shflpick = channel_indices_from_list(rawraw.info['ch_names'][:],
                                         shflchanlist)
    procdperm = mne.io.Raw(permname, preload=True)
    # PSD of the shuffled channels in the ORIGINAL recording
    figraw = rawraw.plot_psd(fmin=0., fmax=300., tmin=0., color=(1,0,0), picks=shflpick)
    axisraw = figraw.gca()
    axisraw.set_ylim([-300., -250.])
    # procdnr.plot_psd(fmin=0.,fmax=300., color=(0,0,1), picks=shflpick)
    # PSD of the same channels AFTER time shuffling
    figshfl = procdperm.plot_psd(fmin=0., fmax=300., tmin=0., color=(1,0,0), picks=shflpick)
    axisshfl = figshfl.gca()
    axisshfl.set_ylim([-300., -250.])
    # PSD over all MEG channels, before and after shuffling
    megpick = mne.pick_types(rawraw.info, meg=True, ref_meg=False, eeg=False, eog=False, stim=False)
    figraw1 = rawraw.plot_psd(fmin=0., fmax=300., tmin=0., color=(0,0,1), picks=megpick)
    axisraw1 = figraw1.gca()
    axisraw1.set_ylim([-300., -250.])
    figshfl1 = procdperm.plot_psd(fmin=0., fmax=300., tmin=0., color=(0,0,1), picks=megpick)
    axisshfl1 = figshfl1.gca()
    axisshfl1.set_ylim([-300., -250.])
    # PSD restricted to the MEG channels that were NOT shuffled;
    # these spectra should be identical between the two files
    megnochgpick = np.setdiff1d(megpick, shflpick)
    figraw2 = rawraw.plot_psd(fmin=0., fmax=300., tmin=0., color=(0,1,0), picks=megnochgpick)
    axisraw2 = figraw2.gca()
    axisraw2.set_ylim([-300., -250.])
    figshfl2 = procdperm.plot_psd(fmin=0., fmax=300., tmin=0., color=(0,1,0), picks=megnochgpick)
    axisshfl2 = figshfl2.gca()
    axisshfl2.set_ylim([-300., -250.])
|
fboers/jumeg
|
examples/connectivity/plot_shuffle_time_slices.py
|
Python
|
bsd-3-clause
| 2,449 | 0.008575 |
# -*- coding: utf-8 -*-
# entry.py, part for evparse : EisF Video Parse, evdh Video Parse.
# entry: evparse/lib/hunantv
# version 0.1.0.0 test201505151816
# author sceext <sceext@foxmail.com> 2009EisF2015, 2015.05.
# copyright 2015 sceext
#
# This is FREE SOFTWARE, released under GNU GPLv3+
# please see README.md and LICENSE for more information.
#
# evparse : EisF Video Parse, evdh Video Parse.
# Copyright (C) 2015 sceext <sceext@foxmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# import
import re
from .. import error
from . import get_base_info
from . import get_video_info
# global vars
# version of this extractor
THIS_EXTRACTOR_VERSION = 'evparse lib/hunantv version 0.1.0.0 test201505151816'
# example play-page URLs this extractor supports:
# http://www.hunantv.com/v/2/150668/f/1518250.html#
# http://www.hunantv.com/v/2/51717/f/692063.html#
# http://www.hunantv.com/v/2/107768/f/1517224.html#
# regex deciding whether a URL is supported by this extractor
RE_SUPPORT_URL = '^http://www\.hunantv\.com/v/2/[0-9]+/f/[0-9]+\.html'
# regex capturing the numeric video id from a supported URL
RE_VID = 'http://www\.hunantv\.com/v/2/[0-9]+/f/([0-9]+)\.html'
# global config obj
# NOTE: populated via set_config() before parse() is called
etc = {} # NOTE should be set
etc['flag_debug'] = False
etc['hd_min'] = 0
etc['hd_max'] = 0
# functions
def set_config(config):
    """Copy the relevant runtime options from ``config`` into the module-level ``etc`` dict."""
    for key in ('flag_debug', 'hd_min', 'hd_max'):
        etc[key] = config[key]
# get vid
def get_vid(url_to):
    """Extract the numeric video id from a hunantv play-page URL.

    Returns:
        dict: ``{'url': <original url>, 'vid': <video id string>}``.
    """
    matches = re.findall(RE_VID, url_to)
    return {'url': url_to, 'vid': matches[0]}
def parse(url_to):
    """Site entry point: parse one hunantv video page URL into an evinfo dict.

    Raises:
        error.NotSupportURLError: when ``url_to`` is not a supported hunantv URL.
    """
    # frist re-check url, if supported by this
    if not re.match(RE_SUPPORT_URL, url_to):
        raise error.NotSupportURLError('not support this url', url_to)
    # skeleton of the result object, with base info filled in
    evinfo = {
        'info': {
            'url': url_to,
            'site': 'hunantv',
        },
        'video': [],
    }
    # get vid
    vid_info = get_vid(url_to)
    # DEBUG info
    if etc['flag_debug']:
        print('lib.hunantv: DEBUG: got vid \"' + vid_info['vid'] + '\" ')
    # get base, more info
    info, more = get_base_info.get_info(vid_info, flag_debug=etc['flag_debug'])
    # add more info
    evinfo['info']['title'] = more['title']
    evinfo['info']['title_sub'] = more['sub_title']
    evinfo['info']['title_short'] = more['short_title']
    evinfo['info']['title_no'] = more['no']
    # get video info
    evinfo['video'] = get_video_info.get_info(info, hd_min=etc['hd_min'], hd_max=etc['hd_max'], flag_debug=etc['flag_debug'])
    # done
    return evinfo
# end entry.py
|
liuguanyu/evparse
|
lib/hunantv/entry.py
|
Python
|
gpl-3.0
| 3,213 | 0.006225 |
from b_hash import b_hash
from b_hash import NoData
from jenkins import jenkins
from h3_hash import h3_hash
from jenkins import jenkins_fast, jenkins_wrapper
from graph import *
from collections import deque
from bitstring import BitArray
import math
class bdz(b_hash):
"""Class for perfect hash function generated by the BDZ algorithm. This algorithms uses uniform random hypergraph."""
def __init__(self):
b_hash.__init__(self)
self.known_keys = False #Keyset is not set
self.function_number = 3 #random 3-graph
self.iteration_limit = 5
self.ratio = 1.24 #ratio between keyset size and theconsumed memory
self.limit = -1
self.m = -1
self.g = None;
def get_g(self):
"""This function return values of the g array. It can not be called before the generate_seed, since it is part of the seed"""
return self.g
def get_range(self):
"""This function returns the size of the biggest possible hash value. If the range is not known yet, the -1 is returned"""
return self.m
def get_ratio(self):
"""Return ratio c between keyset and the size of the memory"""
return self.ratio
def set_ratio(self,ratio):
"""sets the ration and therefore size of the data structure of the PHF"""
self.ratio = ratio
def set_limit(self, limit):
"""Sets the size of the memory bank for one hash function. This function can be used instead of the set ratio. BDZ computes three hash functions with nonoverlapping outputs. Outputs of these hash functions are used as a pointers to the memory. If user know amount of the memory, he may set the limit as 1/3 of the available memory. The ration and other parameters are computed when the key set is given. The limit value always take precedents before the ratio. To stop using limit value, limit should be set to the negative value."""
self.limit = limit;
def get_iteration_limit(self):
"""The BDZ algorithm may have fail to create PHF. The iteration_limit is used to limit the number of attempts of PHF creation"""
return self.iteration_limit
def set_iteration_limit(self,iteration_limit):
"""The BDZ algorithm may have fail to create PHF. The iteration_limit is used to limit the number of attempts of PHF creation"""
self.iteration_limit = iteration_limit
def get_order(self):
"""This function return the number of uniform hash function used to create hypergraph"""
return self.function_number
def set_order(self,number):
"""This function sets the number of hash function used for the creation of the hypergraph. It can not be changed after generation of the PHF"""
self.function_number = number
def set_keys(self, key_set):
"""This is a perfect hash function. For the construction of the PHF, the set of keys has to be known. This function gives set of keys to the function, so generate_seed can build correct function"""
self.key_set = key_set
self.known_keys = True
if self.limit > 0 :
#The limit is set, recompute ratio for the given limit
self.ratio = (3.0*self.limit)/len(key_set)
def is_key_set(self):
"""This function return information, if the set of keys is prepared for the generation of the PHF"""
return self.known_keys
def _found_graph(self):
"""This is internal function. It generate random hypergraph according to the specification in the bdz class. It returns a queue of the edge and changes internal datastructure of BDZ class. Returned edges are ordered in such way, that they can be used for the construction of the PHF"""
#First step is to initialize seed
self.seed = dict()
#Second step is to generate the random hash functions
hashes = list()
for i in range(0,self.function_number):
x = jenkins_wrapper()
x.generate_seed()
# x = h3_hash()
# x.set_bitsize(16)
# x.set_input_size(len(self.key_set[0]))
# x.generate_seed()
hashes.append(x)
self.seed["hashes"] = hashes
#setting m
self.m = int(math.ceil(self.ratio * len(self.key_set)))
limit = int(math.ceil(float(self.m) /self.function_number))
self.m = 3*limit
#print("XXXXXXXXXXXXXXX",limit, self.m)
#Generation of hypergraph
hyper = graph()
hyper.set_order(self.function_number)
hyper.add_vertices(self.m)
#Generation of the edges of the hypergraph
for x in self.key_set:
values = list()
for i in self.seed["hashes"]:
#print("test",i.hash(x)%limit,limit*len(values))
vertex = (i.hash(x) % limit) + limit*len(values)
values.append(vertex)
#Add this edge into the hypergraph
e = hyper.add_edge(values)
# print(e.get_vertices())
#Add edge to the vertices
for v in values:
hyper.get_vertex(v).add_edge(e)
#Generate queue for the edge evaluation
queue_list = []
queue = deque()
#Boolean vector of the used edges
used = [False] * hyper.get_edge_number()
#First remove edges that have at least one vertex with degree 1
for i in range(0,hyper.get_edge_number()):
vert = hyper.get_edge(i).get_vertices()
#print([hyper.get_vertex(x).get_degree() for x in vert])
Deg = [hyper.get_vertex(x).get_degree() == 1 for x in vert]
if sum(Deg) > 0 and used[i] == False:
#This edge has at least one vertex with degree 1
used[i] = True
queue_list.append(i)
queue.append(i)
#Removing edges that have unique vertex (on the stack)
#adding a new edges with unique vertex into stack
while(len(queue)>0):
edge = queue.popleft()
#remove edge from the graph (only from vertex and decrease degree)
for v in hyper.get_edge(edge).get_vertices():
hyper.get_vertex(v).get_edges().remove(hyper.get_edge(edge))
deg = hyper.get_vertex(v).get_degree() - 1
#print("KVIK",deg)
hyper.get_vertex(v).set_degree(deg)
#if degree decrease to 1, the remaining edge should be added
#into the queue
if(deg == 1):
#Found the edge position
e1 = hyper.get_vertex(v).get_edges()[0]
position = hyper.get_edge_position(e1)
#If it is not in the queue, put it there
if used[position] == False:
queue.append(position)
queue_list.append(position)
used[position] = True
self.hyper = hyper
return queue_list
def _found_g(self,v,ed,vi):
"""This function computes value of the g array for given vertex. It uses plus operation."""
s = [self.g[s1] for s1 in self.hyper.get_edge(ed).get_vertices()]
sum1 = sum(s)-s[vi];
self.g[v] = (vi-sum1)%len(s)
return True;
def _found_g2(self, v, ed, vi):
    """Assign ``g[v]`` (XOR variant) so edge *ed* resolves to index *vi*.

    Assumes a two-bit representation of the g array entries.  Always returns
    True so the caller can record that the assignment happened.
    """
    edge_values = [self.g[u] for u in self.hyper.get_edge(ed).get_vertices()]
    acc = 0
    for value in edge_values:
        acc ^= value
    acc ^= edge_values[vi]  # drop vertex vi's own contribution from the fold
    # Mask with 3 (binary 11) to clear every bit above the two low bits.
    self.g[v] = (vi ^ acc) & 3
    return True
def generate_seed(self):
    """Generate the PHF function according to the BDZ algorithm.

    Rebuilds the random hypergraph until it peels completely (every key's
    edge ends up in the peeling queue) or ``self.iteration_limit`` attempts
    are exhausted.  On success, fills the g array by replaying the peeling
    order backwards; returns False when no suitable graph was found.

    Raises:
        NoData: when the key set is unknown.
    """
    if not self.known_keys:
        raise NoData("The key set is unknown")

    key_count = len(self.key_set)
    peeled = 0
    attempt = 0
    # Retry with fresh hash seeds until the whole graph peels.
    while peeled != key_count and attempt < self.iteration_limit:
        queue = self._found_graph()
        peeled = len(queue)
        attempt += 1

    if len(queue) != key_count:
        return False

    # 3 (binary 11) marks a still-unassigned slot in the two-bit g array.
    self.g = [3] * self.m
    marked = [False] * self.m

    # Walk edges in reverse peeling order; each edge still owns exactly one
    # unmarked vertex, which receives the g value for that edge.
    while queue:
        edge = queue.pop()
        assigned = False
        vertices = self.hyper.get_edge(edge).get_vertices()
        for vi, vertex in enumerate(vertices):
            if not marked[vertex] and not assigned:
                assigned = self._found_g2(vertex, edge, vi)
            marked[vertex] = True
def hash(self, key):
    """Evaluate the perfect hash function for *key*.

    XORs the g-array entries the key maps to (one per seed hash function,
    truncated to two bits to match ``_found_g2``) to select which hash row
    answers, then returns that row's slot in the output range.

    Returns:
        The hash value in ``[0, m)``, or -1 when the selector points past
        the available hash functions — i.e. the key was not part of the
        key set the function was built from.
    """
    limit = int(self.m / self.function_number)
    # Row-local hash of the key under every seed hash function.
    hashes = [fn.hash(key) % limit for fn in self.seed["hashes"]]
    # Absolute positions of those row hashes in the concatenated g array.
    positions = [hashes[i] + i * limit for i in range(len(hashes))]
    g_val = [self.g[pos] for pos in positions]

    selector = 0
    for value in g_val:
        selector ^= value
    selector &= 3  # keep two bits, matching the g-array representation

    if selector >= len(hashes):
        # Nonexistent key: the selector addresses no hash row.
        # (Removed a dead `h = 0` assignment that preceded this return.)
        return -1
    return hashes[selector] + (limit * selector)
|
vhavlena/appreal
|
netbench/pattern_match/bin/library/bdz.py
|
Python
|
gpl-2.0
| 9,513 | 0.01356 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 20:47:53 2017

@author: fernando
"""
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt

# Apply ggplot-like styling to every plot produced by this script.
plt.style.use('ggplot')

# NOTE(review): hard-coded absolute path — this only runs on the author's
# machine; confirm where wheat.data lives before reusing.
df = pd.read_csv("/home/fernando/CoursePythonDS/DAT210x/Module3/Datasets/wheat.data")

# Python 2 print statement (script is explicitly #! python2).
print df.describe()

# Overlay normalised asymmetry histograms for the two groove classes
# (groove > 5 vs groove <= 5); the alpha values keep both visible.
df[df.groove>5].asymmetry.plot.hist(alpha=0.3, normed=True)
df[df.groove<=5].asymmetry.plot.hist(alpha=0.5, normed=True)
plt.show()
|
FernanOrtega/DAT210x
|
Module3/notes/histogram_example.py
|
Python
|
mit
| 446 | 0.008969 |
from pypers.core.step import Step
from pypers.steps.mothur import Mothur
import os
import json
import re
import glob
class MothurSummarySeqs(Mothur):
    """
    Summarizes the quality of sequences in an unaligned or aligned
    fasta-formatted sequence file by running mothur's summary.seqs command.
    """

    # Pipeline step specification consumed by the pypers framework.
    spec = {
        'name'    : 'MothurSummarySeqs',
        'version' : '20150512',
        'descr'   : [
            'Summarizes the quality of sequences in an unaligned or aligned fasta-formatted sequence file'
        ],
        'url' : 'www.mothur.org/wiki/Summary.seqs',
        'args' : {
            'inputs'  : [
                    {
                        'name'     : 'input_fasta',
                        'type'     : 'file',
                        'iterable' : True,
                        'descr'    : 'input fasta filename'
                    },
                    {
                        'name'     : 'input_names',
                        'type'     : 'file',
                        'iterable' : True,
                        'required' : False,
                        'descr'    : 'input names filename'
                    },
                    {
                        'name'     : 'input_counts',
                        'type'     : 'file',
                        'iterable' : True,
                        'required' : False,
                        'descr'    : 'input counts filename'
                    }
            ],
            'outputs' : [
                    {
                        'name'  : 'output_summary',
                        'type'  : 'file',
                        'value' : '*.summary',
                        'descr' : 'output summary filename'
                    },
                    {
                        'name'  : 'output_log',
                        'type'  : 'file',
                        'value' : '*.log.txt',
                        'descr' : 'output summary logfile with tile summary table'
                    }
            ]
        },
        'requirements' : {
            'cpus' : '8'
        }
    }

    def process(self):
        """
        Create the necessary input file links and run the mothur command.

        Iterable inputs may arrive either as lists or as single values;
        single values are wrapped so the loop below can index them uniformly.
        """
        # isinstance instead of `type(x) != list`: same intent, idiomatic.
        if not isinstance(self.input_fasta, list):
            self.input_fasta = [self.input_fasta]
        if not isinstance(self.input_names, list):
            self.input_names = [self.input_names]
        if not isinstance(self.input_counts, list):
            self.input_counts = [self.input_counts]

        for idx, input_fasta in enumerate(self.input_fasta):
            # Link the fasta into the output dir and point mothur at the link.
            self.mk_links([input_fasta], self.output_dir)
            input_fasta = os.path.join(self.output_dir, os.path.basename(input_fasta))
            extra_params = {'fasta': input_fasta}
            # Optional companion files: names/counts entries are positionally
            # paired with the fasta at the same index and may be falsy.
            if self.input_names[idx]:
                input_names = os.path.join(self.output_dir, os.path.basename(self.input_names[idx]))
                self.mk_links([self.input_names[idx]], self.output_dir)
                extra_params['name'] = input_names
            if self.input_counts[idx]:
                input_counts = os.path.join(self.output_dir, os.path.basename(self.input_counts[idx]))
                self.mk_links([self.input_counts[idx]], self.output_dir)
                extra_params['count'] = input_counts
            self.run_cmd('summary.seqs', extra_params)
|
frankosan/pypers
|
pypers/steps/mothur/MothurSummarySeqs.py
|
Python
|
gpl-3.0
| 3,305 | 0.014221 |
import pytest
import datetime
import os
from helpers import ensure_dir
def pytest_configure(config):
    """Create per-run input/result directories and record them on *config*.

    Skipped when *config* already carries an ``input`` attribute (assumed to
    be set by a wrapping process — TODO confirm).  Stores the screenshot and
    logcat directories as ``config.screen_shot_dir`` / ``config.logcat_dir``
    for the fixtures below.
    """
    if not hasattr(config, 'input'):
        # Timestamped run directory.  Fix: the original format used
        # '%H_%S' (hour_second, no minutes), so two runs in the same hour
        # could collide or sort misleadingly; include %M.
        current_day = '{:%Y_%m_%d_%H_%M_%S}'.format(datetime.datetime.now())
        ensure_dir(os.path.join(os.path.dirname(__file__), 'input', current_day))
        result_dir = os.path.join(os.path.dirname(__file__), 'results', current_day)
        ensure_dir(result_dir)
        config.screen_shot_dir = os.path.join(result_dir, 'screenshots')
        config.logcat_dir = os.path.join(result_dir, 'logcat')
        ensure_dir(config.screen_shot_dir)
        ensure_dir(config.logcat_dir)
class DeviceLogger:
    """Value object holding the per-run logcat and screenshot directories."""

    def __init__(self, logcat_dir, screenshot_dir):
        self.logcat_dir = logcat_dir
        self.screenshot_dir = screenshot_dir
@pytest.fixture(scope='function')
def device_logger(request):
    """Provide a DeviceLogger pointing at this run's output directories.

    Reads the directories that pytest_configure stored on the config object.
    """
    cfg = request.config
    return DeviceLogger(cfg.logcat_dir, cfg.screen_shot_dir)
|
appium/appium
|
sample-code/python/test/conftest.py
|
Python
|
apache-2.0
| 1,108 | 0.002708 |
# import asyncio
#
# async def compute(x, y):
# print("Compute %s + %s ..." % (x, y))
# await asyncio.sleep(1.0)
# return x + y
#
# async def print_sum(x, y):
# for i in range(10):
# result = await compute(x, y)
# print("%s + %s = %s" % (x, y, result))
#
# loop = asyncio.get_event_loop()
# loop.run_until_complete(print_sum(1,2))
# asyncio.ensure_future(print_sum(1, 2))
# asyncio.ensure_future(print_sum(3, 4))
# asyncio.ensure_future(print_sum(5, 6))
# loop.run_forever()
import asyncio
async def display_date(who, num):
    """Print ``num + 1`` numbered messages tagged *who*, one per second."""
    for tick in range(num + 1):
        print('{}: Before loop {}'.format(who, tick))
        await asyncio.sleep(1)
# Schedule both printers on the default event loop; their output interleaves
# because each awaits asyncio.sleep(1) between prints.
loop = asyncio.get_event_loop()
asyncio.ensure_future(display_date('AAA', 4))
asyncio.ensure_future(display_date('BBB', 6))
# NOTE(review): run_forever() never returns, even after both tasks finish —
# the script must be interrupted manually.
loop.run_forever()
|
fs714/concurrency-example
|
asynchronous/py36/asyncio/async_test.py
|
Python
|
apache-2.0
| 868 | 0.002304 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
import django.contrib.auth.models
class Migration(migrations.Migration):
    """Mark several ManyToMany fields as blank-able (removes implicit null)."""

    dependencies = [
        ('core', '0011_atmosphere_user_manager_update'),
    ]

    operations = [
        migrations.AlterField(
            model_name='allocationstrategy',
            name='refresh_behaviors',
            field=models.ManyToManyField(to='core.RefreshBehavior', blank=True),
        ),
        migrations.AlterField(
            model_name='allocationstrategy',
            name='rules_behaviors',
            field=models.ManyToManyField(to='core.RulesBehavior', blank=True),
        ),
        migrations.AlterField(
            model_name='machinerequest',
            name='new_machine_licenses',
            field=models.ManyToManyField(to='core.License', blank=True),
        ),
        migrations.AlterField(
            model_name='project',
            name='applications',
            field=models.ManyToManyField(
                related_name='projects', to='core.Application', blank=True),
        ),
        migrations.AlterField(
            model_name='project',
            name='instances',
            field=models.ManyToManyField(
                related_name='projects', to='core.Instance', blank=True),
        ),
        migrations.AlterField(
            model_name='project',
            name='volumes',
            field=models.ManyToManyField(
                related_name='projects', to='core.Volume', blank=True),
        ),
        migrations.AlterField(
            model_name='providermachine',
            name='licenses',
            field=models.ManyToManyField(to='core.License', blank=True),
        ),
    ]
|
CCI-MOC/GUI-Backend
|
core/migrations/0012_remove_null_from_many_many.py
|
Python
|
apache-2.0
| 1,573 | 0.006357 |
from setuptools import setup

# Single-sourced release version, passed to setup() below.
version = '1.4'

# Optional dependency sets, exposed through extras_require.
testing_extras = ['nose', 'coverage']
docs_extras = ['Sphinx']

setup(
    name='WebOb',
    version=version,
    description="WSGI request and response object",
    long_description="""\
WebOb provides wrappers around the WSGI request environment, and an
object to help create WSGI responses.
The objects map much of the specified behavior of HTTP, including
header parsing and accessors for other standard parts of the
environment.
You may install the `in-development version of WebOb
<https://github.com/Pylons/webob/zipball/master#egg=WebOb-dev>`_ with
``pip install WebOb==dev`` (or ``easy_install WebOb==dev``).
* `WebOb reference <http://docs.webob.org/en/latest/reference.html>`_
* `Bug tracker <https://github.com/Pylons/webob/issues>`_
* `Browse source code <https://github.com/Pylons/webob>`_
* `Mailing list <http://bit.ly/paste-users>`_
* `Release news <http://docs.webob.org/en/latest/news.html>`_
* `Detailed changelog <https://github.com/Pylons/webob/commits/master>`_
""",
    # Trove classifiers describing maturity, license, and supported runtimes.
    classifiers=[
        "Development Status :: 6 - Mature",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Topic :: Internet :: WWW/HTTP :: WSGI",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    keywords='wsgi request web http',
    author='Ian Bicking',
    author_email='ianb@colorstudy.com',
    maintainer='Pylons Project',
    url='http://webob.org/',
    license='MIT',
    packages=['webob'],
    zip_safe=True,
    # Test discovery/requirements for `python setup.py test` (nose-based).
    test_suite='nose.collector',
    tests_require=['nose'],
    extras_require = {
        'testing':testing_extras,
        'docs':docs_extras,
    },
)
|
nirmeshk/oh-mainline
|
vendor/packages/webob/setup.py
|
Python
|
agpl-3.0
| 2,150 | 0.00186 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import os
import codecs
from setuptools import setup
def read(fname):
    """Return the UTF-8 decoded contents of *fname*, resolved relative to
    this file's directory.

    Fix: the original called ``codecs.open(...).read()`` without closing the
    file, leaking the handle; the ``with`` block guarantees it is closed.
    """
    file_path = os.path.join(os.path.dirname(__file__), fname)
    with codecs.open(file_path, encoding='utf-8') as handle:
        return handle.read()
setup(
    name='pytest-typehints',
    version='0.1.0',
    author='Edward Dunn Ekelund',
    author_email='edward.ekelund@gmail.com',
    maintainer='Edward Dunn Ekelund',
    maintainer_email='edward.ekelund@gmail.com',
    license='BSD-3',
    url='https://github.com/eddie-dunn/pytest-typehints',
    description='Pytest plugin that checks for type hinting',
    # README doubles as the PyPI long description (see read() above).
    long_description=read('README.rst'),
    py_modules=['pytest_typehints'],
    install_requires=['pytest>=2.9.2'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Pytest',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Testing',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: BSD License',
    ],
    # Register the module as a pytest plugin so pytest auto-loads it.
    entry_points={
        'pytest11': [
            'typehints = pytest_typehints',
        ],
    },
)
|
eddie-dunn/pytest-typehints
|
setup.py
|
Python
|
bsd-3-clause
| 1,385 | 0 |
"""
Copyright (C) 2014 Vahid Rafiei (@vahid_r)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
from testbuilder.utils import get_version
class TestUtilsModuleFunctions(unittest.TestCase):
    """Tests for the module-level functions in ``testbuilder.utils``."""

    def test_version(self):
        """get_version() must report the current release string."""
        # assertEqual: `assertEquals` is a deprecated alias (removed in
        # Python 3.12); behavior is otherwise identical.
        self.assertEqual("0.9", get_version(), "The current version should be 0.9")
# Allow running this test module directly: `python test_utils.py`.
if __name__ == "__main__":
    unittest.main()
|
vahidR/test-builder
|
tests/test_utils.py
|
Python
|
gpl-2.0
| 1,040 | 0.005769 |
"""
Convert between bytestreams and higher-level AMQP types.
2007-11-05 Barry Pederson <bp@barryp.org>
"""
# Copyright (C) 2007 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import
import calendar
import sys
from datetime import datetime
from decimal import Decimal
from io import BytesIO
from struct import pack, unpack
from .exceptions import FrameSyntaxError
from .five import int_types, long_t, string, string_t, items
# True when running on Python 3.x; used to bridge bytes/str differences.
IS_PY3K = sys.version_info[0] >= 3

if IS_PY3K:
    def byte(n):
        # Build a single-byte bytes object from an integer 0..255.
        return bytes([n])
else:
    # On Python 2, chr() already yields a one-byte str.
    byte = chr

# Error-message templates for unsupported table value types (see
# AMQPWriter.write_item).
ILLEGAL_TABLE_TYPE_WITH_KEY = """\
Table type {0!r} for key {1!r} not handled by amqp. [value: {2!r}]
"""

ILLEGAL_TABLE_TYPE = """\
Table type {0!r} not handled by amqp. [value: {1!r}]
"""
class AMQPReader(object):
    """Read higher-level AMQP types from a bytestream."""

    def __init__(self, source):
        """Source should be either a file-like object with a read() method, or
        a plain (non-unicode) string."""
        if isinstance(source, bytes):
            self.input = BytesIO(source)
        elif hasattr(source, 'read'):
            self.input = source
        else:
            raise ValueError(
                'AMQPReader needs a file-like object or plain string')

        # Bit-unpacking state: `bits` buffers the current octet of packed
        # booleans, `bitcount` counts how many bits of it remain unread.
        self.bitcount = self.bits = 0

    def close(self):
        self.input.close()

    def read(self, n):
        """Read n bytes."""
        # Any non-bit read resets the bit buffer (AMQP only packs
        # *consecutive* bit fields into a shared octet).
        self.bitcount = self.bits = 0
        return self.input.read(n)

    def read_bit(self):
        """Read a single boolean value."""
        if not self.bitcount:
            # Refill the bit buffer from the next octet.
            self.bits = ord(self.input.read(1))
            self.bitcount = 8
        result = (self.bits & 1) == 1
        self.bits >>= 1
        self.bitcount -= 1
        return result

    def read_octet(self):
        """Read one byte, return as an integer"""
        self.bitcount = self.bits = 0
        return unpack('B', self.input.read(1))[0]

    def read_short(self):
        """Read an unsigned 16-bit integer"""
        self.bitcount = self.bits = 0
        return unpack('>H', self.input.read(2))[0]

    def read_long(self):
        """Read an unsigned 32-bit integer"""
        self.bitcount = self.bits = 0
        return unpack('>I', self.input.read(4))[0]

    def read_longlong(self):
        """Read an unsigned 64-bit integer"""
        self.bitcount = self.bits = 0
        return unpack('>Q', self.input.read(8))[0]

    def read_float(self):
        """Read an 8-byte big-endian double and return it as a float."""
        self.bitcount = self.bits = 0
        return unpack('>d', self.input.read(8))[0]

    def read_shortstr(self):
        """Read a short string that's stored in up to 255 bytes.

        The encoding isn't specified in the AMQP spec, so assume it's utf-8.
        """
        self.bitcount = self.bits = 0
        slen = unpack('B', self.input.read(1))[0]
        return self.input.read(slen).decode('utf-8')

    def read_longstr(self):
        """Read a string that's up to 2**32 bytes.

        The encoding isn't specified in the AMQP spec, so assume it's utf-8.
        """
        self.bitcount = self.bits = 0
        slen = unpack('>I', self.input.read(4))[0]
        return self.input.read(slen).decode('utf-8')

    def read_table(self):
        """Read an AMQP table, and return as a Python dictionary."""
        self.bitcount = self.bits = 0
        tlen = unpack('>I', self.input.read(4))[0]
        # Parse the table payload with a nested reader so the position
        # bookkeeping (tell() vs tlen) stays local to this table.
        table_data = AMQPReader(self.input.read(tlen))
        result = {}
        while table_data.input.tell() < tlen:
            name = table_data.read_shortstr()
            val = table_data.read_item()
            result[name] = val
        return result

    def read_item(self):
        # Dispatch on the one-byte field-type tag that precedes each value.
        ftype = ord(self.input.read(1))

        # 'S': long string
        if ftype == 83:
            val = self.read_longstr()
        # 's': short string
        elif ftype == 115:
            val = self.read_shortstr()
        # 'b': short-short int
        # NOTE(review): '>B' is *unsigned* although the tag comment says
        # signed; the unpack codes for 'b' (98) and 'B' (66) appear swapped
        # relative to their comments — confirm against the AMQP field-table
        # spec before changing, as it affects wire compatibility.
        elif ftype == 98:
            val, = unpack('>B', self.input.read(1))
        # 'B': short-short unsigned int
        elif ftype == 66:
            val, = unpack('>b', self.input.read(1))
        # 'U': short int
        elif ftype == 85:
            val, = unpack('>h', self.input.read(2))
        # 'u': short unsigned int
        elif ftype == 117:
            val, = unpack('>H', self.input.read(2))
        # 'I': long int
        elif ftype == 73:
            val, = unpack('>i', self.input.read(4))
        # 'i': long unsigned int
        elif ftype == 105:  # 'l'
            val, = unpack('>I', self.input.read(4))
        # 'L': long long int
        elif ftype == 76:
            val, = unpack('>q', self.input.read(8))
        # 'l': long long unsigned int
        elif ftype == 108:
            val, = unpack('>Q', self.input.read(8))
        # 'f': float
        elif ftype == 102:
            val, = unpack('>f', self.input.read(4))
        # 'd': double
        elif ftype == 100:
            val = self.read_float()
        # 'D': decimal
        elif ftype == 68:
            # Stored as (scale octet, signed 32-bit value).
            d = self.read_octet()
            n, = unpack('>i', self.input.read(4))
            val = Decimal(n) / Decimal(10 ** d)
        # 'F': table
        elif ftype == 70:
            val = self.read_table()  # recurse
        # 'A': array
        elif ftype == 65:
            val = self.read_array()
        # 't' (bool)
        elif ftype == 116:
            val = self.read_bit()
        # 'T': timestamp
        elif ftype == 84:
            val = self.read_timestamp()
        # 'V': void
        elif ftype == 86:
            val = None
        else:
            raise FrameSyntaxError(
                'Unknown value in table: {0!r} ({1!r})'.format(
                    ftype, type(ftype)))
        return val

    def read_array(self):
        # An array is a length-prefixed sequence of tagged items.
        array_length = unpack('>I', self.input.read(4))[0]
        array_data = AMQPReader(self.input.read(array_length))
        result = []
        while array_data.input.tell() < array_length:
            val = array_data.read_item()
            result.append(val)
        return result

    def read_timestamp(self):
        """Read an AMQP timestamp, which is a 64-bit integer representing
        seconds since the Unix epoch in 1-second resolution.

        Return as a naive Python datetime.datetime object in UTC
        (datetime.utcfromtimestamp attaches no tzinfo).
        """
        return datetime.utcfromtimestamp(self.read_longlong())
class AMQPWriter(object):
    """Convert higher-level AMQP types to bytestreams."""

    def __init__(self, dest=None):
        """dest may be a file-type object (with a write() method).  If None
        then a BytesIO is created, and the contents can be accessed with
        this class's getvalue() method."""
        self.out = BytesIO() if dest is None else dest
        # Bit-packing state: each entry of `bits` is a partially filled
        # octet of packed booleans; `bitcount` is the running bit total.
        self.bits = []
        self.bitcount = 0

    def _flushbits(self):
        # Emit any buffered boolean octets before a non-bit value follows.
        if self.bits:
            out = self.out
            for b in self.bits:
                out.write(pack('B', b))
            self.bits = []
            self.bitcount = 0

    def close(self):
        """Pass through if possible to any file-like destinations."""
        try:
            self.out.close()
        except AttributeError:
            pass

    def flush(self):
        """Pass through if possible to any file-like destinations."""
        try:
            self.out.flush()
        except AttributeError:
            pass

    def getvalue(self):
        """Get what's been encoded so far if we're working with a BytesIO."""
        self._flushbits()
        return self.out.getvalue()

    def write(self, s):
        """Write a plain Python string with no special encoding in Python 2.x,
        or bytes in Python 3.x"""
        self._flushbits()
        self.out.write(s)

    def write_bit(self, b):
        """Write a boolean value."""
        b = 1 if b else 0
        shift = self.bitcount % 8
        if shift == 0:
            # Start a fresh octet every 8 bits.
            self.bits.append(0)
        self.bits[-1] |= (b << shift)
        self.bitcount += 1

    def write_octet(self, n):
        """Write an integer as an unsigned 8-bit value."""
        if n < 0 or n > 255:
            raise FrameSyntaxError(
                'Octet {0!r} out of range 0..255'.format(n))
        self._flushbits()
        self.out.write(pack('B', n))

    def write_short(self, n):
        """Write an integer as an unsigned 16-bit value."""
        if n < 0 or n > 65535:
            raise FrameSyntaxError(
                'Octet {0!r} out of range 0..65535'.format(n))
        self._flushbits()
        self.out.write(pack('>H', int(n)))

    def write_long(self, n):
        """Write an integer as an unsigned 32-bit value."""
        # NOTE(review): the guard enforces 0..2**32-1 but the error-message
        # text says 2**31-1; message left untouched to preserve behavior.
        if n < 0 or n >= 4294967296:
            raise FrameSyntaxError(
                'Octet {0!r} out of range 0..2**31-1'.format(n))
        self._flushbits()
        self.out.write(pack('>I', n))

    def write_longlong(self, n):
        """Write an integer as an unsigned 64-bit value."""
        if n < 0 or n >= 18446744073709551616:
            raise FrameSyntaxError(
                'Octet {0!r} out of range 0..2**64-1'.format(n))
        self._flushbits()
        self.out.write(pack('>Q', n))

    def write_shortstr(self, s):
        """Write a string up to 255 bytes long (after any encoding).

        If passed a unicode string, encode with UTF-8.
        """
        self._flushbits()
        if isinstance(s, string):
            s = s.encode('utf-8')
        if len(s) > 255:
            raise FrameSyntaxError(
                'Shortstring overflow ({0} > 255)'.format(len(s)))
        self.write_octet(len(s))
        self.out.write(s)

    def write_longstr(self, s):
        """Write a string up to 2**32 bytes long after encoding.

        If passed a unicode string, encode as UTF-8.
        """
        self._flushbits()
        if isinstance(s, string):
            s = s.encode('utf-8')
        self.write_long(len(s))
        self.out.write(s)

    def write_table(self, d):
        """Write out a Python dictionary made of up string keys, and values
        that are strings, signed integers, Decimal, datetime.datetime, or
        sub-dictionaries following the same constraints."""
        self._flushbits()
        # Encode into a scratch writer first so the byte length can be
        # prefixed before the payload.
        table_data = AMQPWriter()
        for k, v in items(d):
            table_data.write_shortstr(k)
            table_data.write_item(v, k)
        table_data = table_data.getvalue()
        self.write_long(len(table_data))
        self.out.write(table_data)

    def write_item(self, v, k=None):
        # Each value is written as a one-byte field-type tag plus payload;
        # *k* is only used to improve the error message for bad types.
        if isinstance(v, (string_t, bytes)):
            if isinstance(v, string):
                v = v.encode('utf-8')
            self.write(b'S')
            self.write_longstr(v)
        elif isinstance(v, bool):
            # Checked before int_types: bool is a subclass of int.
            self.write(pack('>cB', b't', int(v)))
        elif isinstance(v, float):
            self.write(pack('>cd', b'd', v))
        elif isinstance(v, int_types):
            # NOTE(review): assumes the value fits a signed 32-bit 'I'
            # field; larger ints will raise struct.error here.
            self.write(pack('>ci', b'I', v))
        elif isinstance(v, Decimal):
            self.write(b'D')
            # Encode as (scale octet, signed 32-bit significand).
            sign, digits, exponent = v.as_tuple()
            v = 0
            for d in digits:
                v = (v * 10) + d
            if sign:
                v = -v
            self.write_octet(-exponent)
            self.write(pack('>i', v))
        elif isinstance(v, datetime):
            self.write(b'T')
            self.write_timestamp(v)
        elif isinstance(v, dict):
            self.write(b'F')
            self.write_table(v)
        elif isinstance(v, (list, tuple)):
            self.write(b'A')
            self.write_array(v)
        elif v is None:
            self.write(b'V')
        else:
            err = (ILLEGAL_TABLE_TYPE_WITH_KEY.format(type(v), k, v) if k
                   else ILLEGAL_TABLE_TYPE.format(type(v), v))
            raise FrameSyntaxError(err)

    def write_array(self, a):
        # Same length-prefix scheme as write_table, but items are untagged
        # by key: just a sequence of tagged values.
        array_data = AMQPWriter()
        for v in a:
            array_data.write_item(v)
        array_data = array_data.getvalue()
        self.write_long(len(array_data))
        self.out.write(array_data)

    def write_timestamp(self, v):
        """Write out a Python datetime.datetime object as a 64-bit integer
        representing seconds since the Unix epoch."""
        # timegm interprets the tuple as UTC (no local-timezone shift).
        self.out.write(pack('>Q', long_t(calendar.timegm(v.utctimetuple()))))
class GenericContent(object):
    """Abstract base class for AMQP content.

    Subclasses should override the PROPERTIES attribute.
    """
    # (name, amqp-type) pairs; the ordering defines the property-flag bits
    # used by _load_properties/_serialize_properties below.
    PROPERTIES = [('dummy', 'shortstr')]

    def __init__(self, **props):
        """Save the properties appropriate to this AMQP content type
        in a 'properties' dictionary."""
        d = {}
        for propname, _ in self.PROPERTIES:
            if propname in props:
                d[propname] = props[propname]
        # FIXME: should we ignore unknown properties?
        self.properties = d

    def __eq__(self, other):
        """Check if this object has the same properties as another
        content object."""
        try:
            return self.properties == other.properties
        except AttributeError:
            return NotImplemented

    def __getattr__(self, name):
        """Look for additional properties in the 'properties'
        dictionary, and if present - the 'delivery_info'
        dictionary."""
        if name == '__setstate__':
            # Allows pickling/unpickling to work
            raise AttributeError('__setstate__')

        if name in self.properties:
            return self.properties[name]

        if 'delivery_info' in self.__dict__ \
                and name in self.delivery_info:
            return self.delivery_info[name]

        raise AttributeError(name)

    def _load_properties(self, raw_bytes):
        """Given the raw bytes containing the property-flags and property-list
        from a content-frame-header, parse and insert into a dictionary
        stored in this object as an attribute named 'properties'."""
        r = AMQPReader(raw_bytes)

        #
        # Read 16-bit shorts until we get one with a low bit set to zero
        # (the low bit is a continuation marker for >15 properties).
        #
        flags = []
        while 1:
            flag_bits = r.read_short()
            flags.append(flag_bits)
            if flag_bits & 1 == 0:
                break

        shift = 0
        d = {}
        for key, proptype in self.PROPERTIES:
            if shift == 0:
                # Move on to the next flag word (bit 15 is the first flag).
                if not flags:
                    break
                flag_bits, flags = flags[0], flags[1:]
                shift = 15
            if flag_bits & (1 << shift):
                # Bit set: the property is present; read it with the
                # matching AMQPReader.read_<type> method.
                d[key] = getattr(r, 'read_' + proptype)()
            shift -= 1

        self.properties = d

    def _serialize_properties(self):
        """Serialize the 'properties' attribute (a dictionary) into
        the raw bytes making up a set of property flags and a
        property list, suitable for putting into a content frame header."""
        shift = 15
        flag_bits = 0
        flags = []
        raw_bytes = AMQPWriter()
        for key, proptype in self.PROPERTIES:
            val = self.properties.get(key, None)
            if val is not None:
                if shift == 0:
                    # Current flag word full: store it and start another.
                    flags.append(flag_bits)
                    flag_bits = 0
                    shift = 15
                flag_bits |= (1 << shift)
                if proptype != 'bit':
                    # 'bit' properties are carried entirely by their flag.
                    getattr(raw_bytes, 'write_' + proptype)(val)
            shift -= 1
        flags.append(flag_bits)

        result = AMQPWriter()
        for flag_bits in flags:
            result.write_short(flag_bits)
        result.write(raw_bytes.getvalue())
        return result.getvalue()
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/amqp/serialization.py
|
Python
|
agpl-3.0
| 16,315 | 0 |
#
# Copyright 2006-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""XLIFF classes specifically suited for handling the PO representation in
XLIFF.
This way the API supports plurals as if it was a PO file, for example.
"""
import re
from lxml import etree
from translate.misc.multistring import multistring
from translate.misc.xml_helpers import setXMLspace
from translate.storage import base, lisa, poheader, xliff
from translate.storage.placeables import general
def hasplurals(thing):
    """Return True when *thing* is a multistring carrying more than one form."""
    return isinstance(thing, multistring) and len(thing.strings) > 1
class PoXliffUnit(xliff.xliffunit):
    """A class to specifically handle the plural units created from a po file."""

    rich_parsers = general.parsers

    def __init__(self, source=None, empty=False, **kwargs):
        self._rich_source = None
        self._rich_target = None
        self._state_n = 0
        # Sub-units: one plain xliffunit per plural form (empty for
        # singular units, which behave like a normal xliffunit).
        self.units = []

        if empty:
            return

        if not hasplurals(source):
            super().__init__(source)
            return

        # Plural source: represent the unit as an xliff <group> carrying
        # one trans-unit per plural form.
        self.xmlelement = etree.Element(self.namespaced("group"))
        self.xmlelement.set("restype", "x-gettext-plurals")
        self.source = source

    def __eq__(self, other):
        if isinstance(other, PoXliffUnit):
            if len(self.units) != len(other.units):
                return False
            if not super().__eq__(other):
                return False
            # Compare the remaining plural sub-units pairwise (the first
            # one is already covered by the super() comparison).
            for i in range(len(self.units) - 1):
                if not self.units[i + 1] == other.units[i + 1]:
                    return False
            return True
        if len(self.units) <= 1:
            if isinstance(other, lisa.LISAunit):
                return super().__eq__(other)
            else:
                return self.source == other.source and self.target == other.target
        return False

    # XXX: We don't return language nodes correctly at the moment
    # def getlanguageNodes(self):
    #    if not self.hasplural():
    #        return super().getlanguageNodes()
    #    else:
    #        return self.units[0].getlanguageNodes()

    @property
    def source(self):
        if not self.hasplural():
            return super().source
        return multistring([unit.source for unit in self.units])

    @source.setter
    def source(self, source):
        self.setsource(source, sourcelang="en")

    def setsource(self, source, sourcelang="en"):
        """Set the source text, rebuilding the plural sub-units if needed."""
        # TODO: consider changing from plural to singular, etc.
        self._rich_source = None
        if not hasplurals(source):
            super().setsource(source, sourcelang)
        else:
            # Preserve the current target across the rebuild of sub-units.
            target = self.target
            for unit in self.units:
                try:
                    self.xmlelement.remove(unit.xmlelement)
                except ValueError:
                    pass
            self.units = []
            for s in source.strings:
                newunit = xliff.xliffunit(s)
                # newunit.namespace = self.namespace #XXX?necessary?
                self.units.append(newunit)
                self.xmlelement.append(newunit.xmlelement)
            self.target = target

    # We don't support any rich strings yet
    multistring_to_rich = base.TranslationUnit.multistring_to_rich
    rich_to_multistring = base.TranslationUnit.rich_to_multistring

    rich_source = base.TranslationUnit.rich_source
    rich_target = base.TranslationUnit.rich_target

    def gettarget(self, lang=None):
        if self.hasplural():
            strings = [unit.target for unit in self.units]
            if strings:
                return multistring(strings)
            else:
                return None
        else:
            return super().gettarget(lang)

    def settarget(self, target, lang="xx", append=False):
        self._rich_target = None
        if self.target == target:
            return
        if not self.hasplural():
            super().settarget(target, lang, append)
            return
        if not isinstance(target, multistring):
            target = multistring(target)
        source = self.source
        sourcel = len(source.strings)
        targetl = len(target.strings)
        if sourcel < targetl:
            # More target forms than source forms: pad the source by
            # repeating its last form, keeping the unit id intact.
            sources = source.strings + [source.strings[-1]] * (targetl - sourcel)
            targets = target.strings
            id = self.getid()
            self.source = multistring(sources)
            self.setid(id)
        elif targetl < sourcel:
            # Fewer target forms: pad the targets with empty strings.
            targets = target.strings + [""] * (sourcel - targetl)
        else:
            targets = target.strings

        for i in range(len(self.units)):
            self.units[i].target = targets[i]

    def addnote(self, text, origin=None, position="append"):
        """Add a note specifically in a "note" tag"""
        note = etree.SubElement(self.xmlelement, self.namespaced("note"))
        note.text = text
        if origin:
            note.set("from", origin)
        # Propagate the note to the remaining plural sub-units.
        for unit in self.units[1:]:
            unit.addnote(text, origin)

    def getnotes(self, origin=None):
        # NOTE: We support both <context> and <note> tags in xliff files for comments
        if origin == "translator":
            notes = super().getnotes("translator")
            trancomments = self.gettranslatorcomments()
            # Deduplicate when one text contains the other.
            if notes == trancomments or trancomments.find(notes) >= 0:
                notes = ""
            elif notes.find(trancomments) >= 0:
                trancomments = notes
                notes = ""
            return trancomments + notes
        elif origin in ["programmer", "developer", "source code"]:
            devcomments = super().getnotes("developer")
            autocomments = self.getautomaticcomments()
            if devcomments == autocomments or autocomments.find(devcomments) >= 0:
                devcomments = ""
            elif devcomments.find(autocomments) >= 0:
                autocomments = devcomments
                devcomments = ""
            return autocomments
        else:
            return super().getnotes(origin)

    def markfuzzy(self, value=True):
        super().markfuzzy(value)
        for unit in self.units[1:]:
            unit.markfuzzy(value)

    def marktranslated(self):
        super().marktranslated()
        for unit in self.units[1:]:
            unit.marktranslated()

    def setid(self, id):
        super().setid(id)
        # Plural sub-units get indexed ids: "<id>[0]", "<id>[1]", ...
        if len(self.units) > 1:
            for i in range(len(self.units)):
                self.units[i].setid("%s[%d]" % (id, i))

    def getlocations(self):
        """Returns all the references (source locations)"""
        groups = self.getcontextgroups("po-reference")
        references = []
        for group in groups:
            sourcefile = ""
            linenumber = ""
            for (type, text) in group:
                if type == "sourcefile":
                    sourcefile = text
                elif type == "linenumber":
                    linenumber = text
            assert sourcefile
            if linenumber:
                sourcefile = sourcefile + ":" + linenumber
            references.append(sourcefile)
        return references

    def getautomaticcomments(self):
        """Returns the automatic comments (x-po-autocomment), which corresponds
        to the #. style po comments.
        """

        def hasautocomment(grp):
            return grp[0] == "x-po-autocomment"

        groups = self.getcontextgroups("po-entry")
        comments = []
        for group in groups:
            commentpairs = filter(hasautocomment, group)
            for (type, text) in commentpairs:
                comments.append(text)
        return "\n".join(comments)

    def gettranslatorcomments(self):
        """Returns the translator comments (x-po-trancomment), which
        corresponds to the # style po comments.
        """

        def hastrancomment(grp):
            return grp[0] == "x-po-trancomment"

        groups = self.getcontextgroups("po-entry")
        comments = []
        for group in groups:
            commentpairs = filter(hastrancomment, group)
            for (type, text) in commentpairs:
                comments.append(text)
        return "\n".join(comments)

    def isheader(self):
        return "gettext-domain-header" in (self.getrestype() or "")

    def istranslatable(self):
        return super().istranslatable() and not self.isheader()

    @classmethod
    def createfromxmlElement(cls, element, namespace=None):
        # A plain trans-unit maps to a singular unit ...
        if element.tag.endswith("trans-unit"):
            object = cls(None, empty=True)
            object.xmlelement = element
            object.namespace = namespace
            return object
        # ... while a <group> wraps the plural forms as sub-units.
        assert element.tag.endswith("group")
        group = cls(None, empty=True)
        group.xmlelement = element
        group.namespace = namespace
        units = list(element.iterdescendants(group.namespaced("trans-unit")))
        for unit in units:
            subunit = xliff.xliffunit.createfromxmlElement(unit)
            subunit.namespace = namespace
            group.units.append(subunit)
        return group

    def hasplural(self):
        return self.xmlelement.tag == self.namespaced("group")
class PoXliffFile(xliff.xlifffile, poheader.poheader):
    """a file for the po variant of Xliff files"""

    UnitClass = PoXliffUnit

    def __init__(self, *args, **kwargs):
        # PO files default to en-US as the source language unless overridden.
        if "sourcelanguage" not in kwargs:
            kwargs["sourcelanguage"] = "en-US"
        xliff.xlifffile.__init__(self, *args, **kwargs)

    def createfilenode(self, filename, sourcelanguage="en-US", datatype="po"):
        """Create a ``<file>`` node for *filename*.

        The *sourcelanguage* parameter is deliberately ignored in favour of
        the file-level setting; PO files will probably be one language.
        """
        return super().createfilenode(
            filename, sourcelanguage=self.sourcelanguage, datatype="po"
        )

    def _insert_header(self, header):
        # Tag the unit as the gettext domain header (the PO msgid "" entry)
        # and preserve its whitespace exactly.
        header.xmlelement.set("restype", "x-gettext-domain-header")
        header.xmlelement.set("approved", "no")
        setXMLspace(header.xmlelement, "preserve")
        self.addunit(header)

    def addheaderunit(self, target, filename):
        """Add and return a gettext domain header unit holding *target*."""
        unit = self.addsourceunit(target, filename, True)
        unit.target = target
        unit.xmlelement.set("restype", "x-gettext-domain-header")
        unit.xmlelement.set("approved", "no")
        setXMLspace(unit.xmlelement, "preserve")
        return unit

    def addplural(self, source, target, filename, createifmissing=False):
        """This method should now be unnecessary, but is left for reference"""
        assert isinstance(source, multistring)
        if not isinstance(target, multistring):
            target = multistring(target)
        sourcel = len(source.strings)
        targetl = len(target.strings)
        if sourcel < targetl:
            # Pad the source list with its last entry so both lists match in
            # length.  The subtraction must be parenthesised: the previous
            # "[x] * targetl - sourcel" computed list*int - int, a TypeError.
            sources = source.strings + [source.strings[-1]] * (targetl - sourcel)
            targets = target.strings
        else:
            sources = source.strings
            targets = target.strings
        self._messagenum += 1
        pluralnum = 0
        # parse() recognises plural groups via restype "x-gettext-plurals"
        # (see ispluralgroup below); use the same spelling so groups created
        # here are recognised when the file is re-parsed.
        group = self.creategroup(filename, True, restype="x-gettext-plurals")
        for (src, tgt) in zip(sources, targets):
            unit = self.UnitClass(src)
            unit.target = tgt
            unit.setid("%d[%d]" % (self._messagenum, pluralnum))
            pluralnum += 1
            group.append(unit.xmlelement)
            self.units.append(unit)
        # More source forms than targets: add untranslatable placeholders
        # for the remaining source strings.
        if pluralnum < sourcel:
            for string in sources[pluralnum:]:
                # Previously created from the stale loop variable ``src``;
                # each placeholder must use its own source string.
                unit = self.UnitClass(string)
                unit.xmlelement.set("translate", "no")
                unit.setid("%d[%d]" % (self._messagenum, pluralnum))
                pluralnum += 1
                group.append(unit.xmlelement)
                self.units.append(unit)
        return self.units[-pluralnum]

    def parse(self, xml):
        """Populates this object from the given xml string"""
        # TODO: Make more robust

        def ispluralgroup(node):
            """Determines whether the xml node refers to a gettext plural."""
            return node.get("restype") == "x-gettext-plurals"

        def isnonpluralunit(node):
            """Determines whether the xml node contains a plural like id.

            We want to filter out all the plural nodes, except the very first
            one in each group.
            """
            return re.match(r".+\[[123456]\]$", node.get("id") or "") is None

        def pluralunits(pluralgroups):
            for pluralgroup in pluralgroups:
                yield self.UnitClass.createfromxmlElement(
                    pluralgroup, namespace=self.namespace
                )

        self.filename = getattr(xml, "name", "")
        if hasattr(xml, "read"):
            # File-like input: rewind and read the whole document.
            xml.seek(0)
            xmlsrc = xml.read()
            xml = xmlsrc
        # resolve_entities=False guards against XML entity-expansion attacks.
        parser = etree.XMLParser(resolve_entities=False)
        self.document = etree.fromstring(xml, parser).getroottree()
        self.initbody()
        root_node = self.document.getroot()
        assert root_node.tag == self.namespaced(self.rootNode)
        groups = root_node.iterdescendants(self.namespaced("group"))
        pluralgroups = filter(ispluralgroup, groups)
        termEntries = root_node.iterdescendants(
            self.namespaced(self.UnitClass.rootNode)
        )

        singularunits = list(filter(isnonpluralunit, termEntries))
        if len(singularunits) == 0:
            return
        pluralunit_iter = pluralunits(pluralgroups)
        nextplural = next(pluralunit_iter, None)

        for entry in singularunits:
            term = self.UnitClass.createfromxmlElement(entry, namespace=self.namespace)
            if nextplural and str(term.getid()) == ("%s[0]" % nextplural.getid()):
                # This entry is the first form of a plural group: add the
                # whole group unit instead of the lone trans-unit.
                self.addunit(nextplural, new=False)
                nextplural = next(pluralunit_iter, None)
            else:
                self.addunit(term, new=False)
|
miurahr/translate
|
translate/storage/poxliff.py
|
Python
|
gpl-2.0
| 14,579 | 0.000549 |
# encoding: utf-8
from bs4 import BeautifulSoup
from okscraper.base import BaseScraper
from okscraper.sources import UrlSource, ScraperSource
from okscraper.storages import ListStorage, DictStorage
from lobbyists.models import LobbyistHistory, Lobbyist, LobbyistData, LobbyistRepresent, LobbyistRepresentData
from persons.models import Person
from django.core.exceptions import ObjectDoesNotExist
from datetime import datetime
from lobbyist_represent import LobbyistRepresentScraper
class LobbyistScraperDictStorage(DictStorage):
    """
    This storage first determines if a new Lobbyist object needs to be created:
    it searches for a Lobbyist object with the same source_id and first / last name
    if such an object exists - it uses that object, otherwise created a new Lobbyist
    It then updates the lobbyist.data:
    it gets the last LobbyistData object for this lobbyist and compares that to the current data
    if it matches - then that object is used and a new object is not created
    else - a new LobbyistData object is created and appended to the lobbyist.data
    This storage returns the lobbyist object
    """
    # NOTE(review): presumably -1 disables interval-based commits so commit()
    # runs only when called explicitly -- confirm against DictStorage.
    _commitInterval = -1
    def _get_data_keys(self):
        # Field names copied verbatim from the scraped dict onto LobbyistData.
        return ['first_name', 'family_name', 'profession', 'corporation_name', 'corporation_id', 'faction_member', 'faction_name', 'permit_type']
    def _get_represents_data(self, source_id):
        # Delegate to the nested scraper for the lobbyist's represented clients.
        return LobbyistRepresentScraper().scrape(source_id)
    def _get_latest_lobbyist_data(self, lobbyist):
        # Separate method so subclasses/tests can override the lookup.
        return lobbyist.latest_data
    def _get_last_lobbyist_data(self, lobbyist, data):
        # Return the lobbyist's latest LobbyistData if it matches the freshly
        # scraped ``data`` (all plain fields plus the represents list);
        # return None when there is no previous data or anything differs,
        # signalling commit() to create a new LobbyistData snapshot.
        try:
            last_lobbyist_data = self._get_latest_lobbyist_data(lobbyist)
        except ObjectDoesNotExist:
            # Lobbyist has no stored data yet.
            last_lobbyist_data = None
        if last_lobbyist_data is not None:
            for key in self._get_data_keys():
                if data[key] != getattr(last_lobbyist_data, key):
                    # Any changed field invalidates the previous snapshot.
                    last_lobbyist_data = None
                    break
        if last_lobbyist_data is not None:
            # Compare the represent objects sorted by id so mere ordering
            # differences do not register as a change.  (Despite the names,
            # these are lists of model objects, not raw ids.)
            represent_ids = sorted(data['represents'], key=lambda represent: represent.id)
            last_represent_ids = sorted(last_lobbyist_data.represents.all(), key=lambda represent: represent.id)
            if represent_ids != last_represent_ids:
                last_lobbyist_data = None
        return last_lobbyist_data
    def commit(self):
        super(LobbyistScraperDictStorage, self).commit()
        data = self._data
        source_id = data['id']
        # Scrape the represents list up front; it participates in the
        # change-detection comparison below.
        data['represents'] = self._get_represents_data(source_id)
        full_name = '%s %s' % (data['first_name'], data['family_name'])
        # Reuse an existing Lobbyist matching both source id and full name,
        # otherwise create a fresh Person + Lobbyist pair.
        q = Lobbyist.objects.filter(source_id=source_id, person__name=full_name)
        if q.count() > 0:
            lobbyist = q[0]
        else:
            lobbyist = Lobbyist.objects.create(person=Person.objects.create(name=full_name), source_id=source_id)
        # The storage's result value becomes the Lobbyist object itself.
        self._data = lobbyist
        last_lobbyist_data = self._get_last_lobbyist_data(lobbyist, data)
        if last_lobbyist_data is None:
            # First scrape or changed data: persist a new LobbyistData snapshot.
            kwargs = {}
            for key in self._get_data_keys():
                kwargs[key] = data[key]
            kwargs['source_id'] = source_id
            lobbyist_data = LobbyistData.objects.create(**kwargs)
            for represent in data['represents']:
                lobbyist_data.represents.add(represent)
            lobbyist_data.scrape_time = datetime.now()
            lobbyist_data.save()
            lobbyist.data.add(lobbyist_data)
        else:
            # Nothing changed: re-link the existing snapshot.
            lobbyist.data.add(last_lobbyist_data)
        lobbyist.save()
class LobbyistScraper(BaseScraper):
    """
    Fetches a single lobbyist record by id from the knesset api and stores it
    through LobbyistScraperDictStorage.
    """
    def __init__(self):
        super(LobbyistScraper, self).__init__()
        self.source = UrlSource('http://online.knesset.gov.il/WsinternetSps/KnessetDataService/LobbyistData.svc/View_lobbyist(<<id>>)')
        self.storage = LobbyistScraperDictStorage()
    def _storeLobbyistDataFromSoup(self, soup):
        # Extract and trim the text content of a single <d:...> element.
        def field(tag):
            return soup.find(tag).text.strip()
        lobbyist_id = field('d:lobbyist_id')
        self._getLogger().info('got lobbyist id "%s"', lobbyist_id)
        record = {'id': lobbyist_id}
        # Map storage keys to the api element names.  Note the api's own
        # misspelling of 'lobyst_permit_type' -- it must be kept as-is.
        for key, tag in (
            ('first_name', 'd:first_name'),
            ('family_name', 'd:family_name'),
            ('profession', 'd:profession'),
            ('corporation_name', 'd:corporation_name'),
            ('corporation_id', 'd:corporation_id'),
            ('faction_member', 'd:faction_member'),
            ('faction_name', 'd:faction_name'),
            ('permit_type', 'd:lobyst_permit_type'),
        ):
            record[key] = field(tag)
        self.storage.storeDict(record)
        self._getLogger().debug(record)
    def _scrape(self, lobbyist_id):
        soup = BeautifulSoup(self.source.fetch(lobbyist_id))
        return self._storeLobbyistDataFromSoup(soup)
|
otadmor/Open-Knesset
|
lobbyists/scrapers/lobbyist.py
|
Python
|
bsd-3-clause
| 5,096 | 0.002747 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.