text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34)
---|---|---|---|---|---|---
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_backend_test.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| semorale/backend-test | django_backend_test/manage.py | Python | mit | 262 | 0.003817 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for GRAMPS.
"""
from ._hascitation import HasCitation
from ._allcitations import AllCitations
from ._changedsince import ChangedSince
from ._citationprivate import CitationPrivate
from ._hasgallery import HasGallery
from ._hasidof import HasIdOf
from ._hasnote import HasNote
from ._hasnotematchingsubstringof import HasNoteMatchingSubstringOf
from ._hasnoteregexp import HasNoteRegexp
from ._hasreferencecountof import HasReferenceCountOf
from ._hassource import HasSource
from ._hassourceidof import HasSourceIdOf
from ._hassourcenoteregexp import HasSourceNoteRegexp
from ._matchesfilter import MatchesFilter
from ._matchespagesubstringof import MatchesPageSubstringOf
from ._matchesrepositoryfilter import MatchesRepositoryFilter
from ._matchessourcefilter import MatchesSourceFilter
from ._regexpidof import RegExpIdOf
from ._regexpsourceidof import RegExpSourceIdOf
from ._hastag import HasTag
editor_rule_list = [
HasCitation,
AllCitations,
ChangedSince,
CitationPrivate,
HasGallery,
HasIdOf,
HasNote,
HasNoteRegexp,
HasReferenceCountOf,
HasSource,
HasSourceIdOf,
HasSourceNoteRegexp,
MatchesFilter,
MatchesPageSubstringOf,
MatchesRepositoryFilter,
MatchesSourceFilter,
RegExpIdOf,
RegExpSourceIdOf,
HasTag
]
| beernarrd/gramps | gramps/gen/filters/rules/citation/__init__.py | Python | gpl-2.0 | 2,223 | 0 |
from django.db import models
from django.test import TestCase
from ..models.compat import YAMLField
class TestYAMLModel(models.Model):
yaml_field = YAMLField()
class TestYAMLField(TestCase):
...
def test_to_python(self):
yaml_data = """
main:
- 1
- 2
- 3
"""
yaml_field = YAMLField()
yaml_field.to_python(yaml_data)
yaml_data = ""
yaml_field = YAMLField()
self.assertEqual(None, yaml_field.to_python(yaml_data))
yaml_data = """`"""
yaml_field = YAMLField()
with self.assertRaises(Exception):
yaml_field.to_python(yaml_data)
def test_get_prep_value(self):
yaml_field = YAMLField()
self.assertEqual("", yaml_field.get_prep_value(None))
yaml_field = YAMLField()
data = {"aaa": "aaa😺",}
self.assertEqual(
"aaa: aaa😺\n",
yaml_field.get_prep_value(data)
)
| salexkidd/restframework-definable-serializer | definable_serializer/tests/test_compat.py | Python | mit | 985 | 0.001021 |
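A minimal round-trip sketch of the two methods exercised above; the import path follows the test's relative import (`definable_serializer.models.compat`), and the exact YAML text produced by `get_prep_value` depends on the field's dump options, so it is not asserted here.

```python
from definable_serializer.models.compat import YAMLField

field = YAMLField()
text = field.get_prep_value({"main": [1, 2, 3]})  # YAML text, e.g. "main:\n- 1\n- 2\n- 3\n"
data = field.to_python(text)                      # parsed back into Python objects
assert data == {"main": [1, 2, 3]}
```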
#!/usr/bin/env python3
from collections import namedtuple
from pdfrw import PdfName, PdfDict, PdfObject, PdfString
PageLabelTuple = namedtuple("PageLabelScheme",
"startpage style prefix firstpagenum")
defaults = {"style": "arabic", "prefix": '', "firstpagenum": 1}
styles = {"arabic": PdfName('D'),
"roman lowercase": PdfName('r'),
"roman uppercase": PdfName('R'),
"letters lowercase": PdfName('a'),
"letters uppercase": PdfName('A')}
stylecodes = {v: a for a, v in styles.items()}
class PageLabelScheme(PageLabelTuple):
"""Represents a page numbering scheme.
startpage : the index in the pdf (starting from 0) of the
first page the scheme will be applied to.
style : page numbering style (arabic, roman [lowercase|uppercase], letters [lowercase|uppercase])
prefix: a prefix to be prepended to all page labels
firstpagenum : where to start numbering
"""
__slots__ = tuple()
def __new__(cls, startpage,
style=defaults["style"],
prefix=defaults["prefix"],
firstpagenum=defaults["firstpagenum"]):
if style not in styles:
raise ValueError("PageLabel style must be one of %s" % cls.styles())
return super().__new__(cls, int(startpage), style, str(prefix), int(firstpagenum))
@classmethod
def from_pdf(cls, pagenum, opts):
"""Returns a new PageLabel using options from a pdfrw object"""
return cls(pagenum,
style=stylecodes.get(opts.S, defaults["style"]),
prefix=(opts.P and opts.P.decode() or defaults["prefix"]),
firstpagenum=(opts.St or defaults["firstpagenum"]))
@staticmethod
def styles():
"""List of the allowed styles"""
return styles.keys()
def pdfobjs(self):
"""Returns a tuple of two elements to insert in the PageLabels.Nums
entry of a pdf"""
page_num = PdfObject(self.startpage)
opts = PdfDict(S=styles[self.style])
if self.prefix != defaults["prefix"]:
opts.P = PdfString.encode(self.prefix)
if self.firstpagenum != defaults["firstpagenum"]:
opts.St = PdfObject(self.firstpagenum)
return page_num, opts
| lovasoa/pagelabels-py | pagelabels/pagelabelscheme.py | Python | gpl-3.0 | 2,320 | 0.001293 |
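A short usage sketch for `PageLabelScheme` (assuming the module is importable as `pagelabels.pagelabelscheme`, per the record's path): build a scheme and inspect the pdfrw objects it would contribute to a document's `PageLabels.Nums` array.

```python
from pagelabels.pagelabelscheme import PageLabelScheme

# Lowercase roman numerals with a prefix, applied from the first physical page.
scheme = PageLabelScheme(startpage=0,
                         style="roman lowercase",
                         prefix="intro-",
                         firstpagenum=1)
page_num, opts = scheme.pdfobjs()
# page_num is a PdfObject for index 0; opts carries /S = /r and /P = (intro-),
# and omits /St because firstpagenum is left at its default of 1.
```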
import operator
from functools import reduce
from collections import namedtuple
from django.db.models import Q
from mi.models import Target
from wins.models import HVC
HVCStruct = namedtuple('HVCStruct', ['campaign_id', 'financial_year'])
def get_all_hvcs_referenced_by_targets(financial_years=None):
"""
Get a list of all hvcs that need to be created that are referenced by Targets
:param financial_years: optional, you can manually define the financial years
instead of getting them from the Target
:type financial_years: List[int]
:returns a list of hvc (campaign_id, financial year) tuples that don't already exist: List[HVCStruct]
"""
hvc_ids_expected_by_targets = Target.objects.all().values_list('campaign_id', flat=True).distinct()
if not financial_years:
financial_years = Target.objects.all().values_list('financial_year', flat=True).distinct()
to_create = [
HVCStruct(campaign_id=campaign_id,
financial_year=int(str(financial_year)[-2:]))
for campaign_id in hvc_ids_expected_by_targets for financial_year in financial_years
]
filter_q = reduce(
operator.or_,
[Q(campaign_id=data.campaign_id, financial_year=data.financial_year)
for data in to_create]
)
already_existing = [
HVCStruct(**data) for data in HVC.objects.filter(filter_q).values('campaign_id', 'financial_year')
]
to_create_without_already_existing = set(to_create) - set(already_existing)
return to_create_without_already_existing
| UKTradeInvestment/export-wins-data | fixturedb/utils/hvc.py | Python | gpl-3.0 | 1,557 | 0.004496 |
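A hedged consumer sketch for the helper above (the import path is taken from the record's path; whether `campaign_id` and `financial_year` alone are enough to create an `HVC` row is an assumption):

```python
from wins.models import HVC
from fixturedb.utils.hvc import get_all_hvcs_referenced_by_targets

def create_missing_hvcs():
    """Create the HVC rows that Targets reference but which do not exist yet."""
    missing = get_all_hvcs_referenced_by_targets()
    for item in missing:
        # Assumes these two fields are sufficient; any other required HVC
        # fields would need defaults supplied here.
        HVC.objects.create(campaign_id=item.campaign_id,
                           financial_year=item.financial_year)
    return len(missing)
```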
#!/usr/bin/env python
"""
NPR 2017-01-22
www.npr.org/2017/01/22/511046359/youve-got-to-comb-together-to-solve-this-one
The numbers 5,000, 8,000, and 9,000 share a property that only five integers altogether have.
Identify the property and the two other integers that have it.
"""
# The property is that they are supervocalic (one each of aeiou).
# This code will simply try to find the other such numbers.
def is_supervocalic(w):
'''
Determine if a word has one each of a, e, i, o, u
We also want it not to have a 'y'
'''
vowels = 'aeiou'
for vowel in vowels:
if w.lower().count(vowel) != 1:
return False
if 'y' in w.lower():
return False
return True
# Thanks to http://stackoverflow.com/a/19193721
def numToWords(num,join=True):
    '''convert an integer number into words'''
units = ['','one','two','three','four','five','six','seven','eight','nine']
teens = ['','eleven','twelve','thirteen','fourteen','fifteen','sixteen', \
'seventeen','eighteen','nineteen']
tens = ['','ten','twenty','thirty','forty','fifty','sixty','seventy', \
'eighty','ninety']
thousands = ['','thousand','million','billion','trillion','quadrillion', \
'quintillion','sextillion','septillion','octillion', \
'nonillion','decillion','undecillion','duodecillion', \
'tredecillion','quattuordecillion','sexdecillion', \
'septendecillion','octodecillion','novemdecillion', \
'vigintillion']
words = []
if num==0: words.append('zero')
else:
numStr = '%d'%num
numStrLen = len(numStr)
groups = (numStrLen+2)/3
numStr = numStr.zfill(groups*3)
for i in range(0,groups*3,3):
h,t,u = int(numStr[i]),int(numStr[i+1]),int(numStr[i+2])
g = groups-(i/3+1)
if h>=1:
words.append(units[h])
words.append('hundred')
if t>1:
words.append(tens[t])
if u>=1: words.append(units[u])
elif t==1:
if u>=1: words.append(teens[u])
else: words.append(tens[t])
else:
if u>=1: words.append(units[u])
if (g>=1) and ((h+t+u)>0): words.append(thousands[g])
if join: return ' '.join(words)
return words
# Note that every integer greater than 100,000 has a repeated vowel
for i in range(100000):
word = numToWords(i)
if is_supervocalic(word):
print i, word
| boisvert42/npr-puzzle-python | 2017/0122_unusual_numbers.py | Python | cc0-1.0 | 2,574 | 0.029915 |
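A quick standalone check of the claimed property (written for Python 3, unlike the Python 2 script above): each of the three given numbers spells out with exactly one of each vowel and no 'y'.

```python
def is_supervocalic(word):
    word = word.lower()
    return all(word.count(v) == 1 for v in "aeiou") and "y" not in word

for name in ("five thousand", "eight thousand", "nine thousand"):
    print(name, is_supervocalic(name))  # each prints True
```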
"""
Django settings for testproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n$(okl9n*#au0%^wxgu$c#x(f%lby3v_j)wuti&6q-nx_35uj6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'testproject.urls'
WSGI_APPLICATION = 'testproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| adamhaney/django-ipython-notebook-reports | testproject/testproject/settings.py | Python | mit | 1,987 | 0 |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rotor sensing parameters."""
from makani.config import mconfig
from makani.control import system_types as m
@mconfig.Config
def MakeParams():
common_rotor_sensor = {
# Calibration for rotor speed and torque. This applies both to
# the speed sensed by and commanded to the motor controllers.
#
# TODO: The sign convention for rotor
# velocity should be reversed both here and in the motor
# controller.
'omega_cal': {'scale': -1.0, 'bias': 0.0, 'bias_count': 0},
'torque_cal': {'scale': -1.0, 'bias': 0.0, 'bias_count': 0},
}
return [common_rotor_sensor for _ in range(m.kNumMotors)]
| google/makani | config/m600/rotor_sensors.py | Python | apache-2.0 | 1,234 | 0.001621 |
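An illustrative sketch of how a scale/bias calibration like the one above is commonly applied; the exact formula used by the Makani control code is not shown in this file, so `scale * raw + bias` is an assumption. With scale -1.0 and bias 0.0 it simply reverses the sign convention of the sensed speed and torque.

```python
def apply_cal(raw_value, cal):
    # Assumed convention: calibrated = scale * raw + bias.
    return cal['scale'] * raw_value + cal['bias']

omega_cal = {'scale': -1.0, 'bias': 0.0, 'bias_count': 0}
print(apply_cal(150.0, omega_cal))  # -150.0: same magnitude, reversed sign
```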
# # -*- coding: utf-8 -*-
# from datetime import date, time, datetime, timedelta
# from dateutil.relativedelta import relativedelta
#
# from django.conf import settings
# from django.core.urlresolvers import reverse
# from django.test import TestCase
#
# from eventtools.utils import datetimeify
# from eventtools_testapp.models import *
#
# from _fixture import bigfixture, reload_films
# from _inject_app import TestCaseWithApp as AppTestCase
#
# class TestViews(AppTestCase):
#
# def setUp(self):
# if hasattr(settings, 'OCCURRENCES_PER_PAGE'):
# self._old_OCCURRENCES_PER_PAGE = settings.OCCURRENCES_PER_PAGE
# settings.OCCURRENCES_PER_PAGE = 20
# super(TestViews, self).setUp()
#
# def tearDown(self):
# if hasattr(self, '_old_OCCURRENCES_PER_PAGE'):
# settings.OCCURRENCES_PER_PAGE = self._old_OCCURRENCES_PER_PAGE
# else:
# delattr(settings, 'OCCURRENCES_PER_PAGE')
# super(TestViews, self).tearDown()
#
# def test_purls(self):
# """
# An occurrence has a pURL based on its id.
# You can view a page for an occurrence.
# """
#
# e = self.daily_tour
# o = e.occurrences.all()[0]
#
# #occurrence page
# ourl = reverse('occurrence', args=(o.id,))
# self.assertEqual(o.get_absolute_url(), ourl)
# self.assertTrue(str(o.id) in ourl)
# r1 = self.client.get(ourl)
# self.assertEqual(r1.status_code, 200)
#
# self.assertContains(r1, "Daily Tour")
# self.assertContains(r1, "1 January 2010")
# self.assertNotContains(r1, "00:00")
# self.assertNotContains(r1, "12am")
# self.assertNotContains(r1, "midnight")
#
# e2 = self.weekly_talk
# ourl = reverse('occurrence', args=(e2.occurrences.all()[0].id,))
# r1 = self.client.get(ourl)
# self.assertContains(r1, "Weekly Talk")
# self.assertContains(r1, "1 January 2010, 10am–noon")
#
# def test_list_view(self):
# """
# You can view a paginated list of occurrences for an event qs, following a given day, using ?startdate=2010-10-22&page=2.
# Each page shows n=20 occurrences and paginates by that amount.
# The occurrences are in chronological order.
# The times of all-day events do not appear.
# If there are no events in a given day, the day is not shown.
# The occurrences are grouped by day (and thus a day's occurrences may span several pages - this makes computation easier).
# TODO if a day is unfinished, show 'more on page n+1'..
# If there are no events in a given page, a 'no events match' message is shown.
# """
# url = reverse('occurrence_list',)
# r = self.client.get(url, {'startdate':'2010-01-01'})
# self.assertEqual(r.context['occurrence_pool'].count(), 109)
# self.assertEqual(len(r.context['occurrence_page']), 20)
# self.assertEqual(r.context['occurrence_page'][0].start.date(), date(2010,1,1))
#
# #check results in chrono order
# d = r.context['occurrence_pool'][0].start
# for occ in r.context['occurrence_pool']:
# self.assertTrue(occ.start >= d)
# d = occ.start
#
# #should have some pagination (6 pages)
# self.assertNotContains(r, "Earlier") #it's the first page
# self.assertContains(r, "Later")
# self.assertContains(r, "Showing 1–20 of 109")
#
# self.assertContains(r, "Friday, 1 January 2010", 1) #only print the date once
# self.assertNotContains(r, "Saturday, 2 January 2010") #there are no events
# self.assertContains(r, "Sunday, 3 January 2010", 1) #only print the date once
#
# self.assertContains(r, "10am–​noon")
# self.assertNotContains(r, "12am")# these are all-day
# self.assertNotContains(r, "00:00")# these are all-day
# self.assertNotContains(r, "midnight") # these are all-day
#
# #doesn't matter how far back you go.
# r2 = self.client.get(url, {'startdate':'2000-01-01'})
# self.assertEqual(list(r.context['occurrence_pool']), list(r2.context['occurrence_pool']))
#
# #links
# o = r.context['occurrence_page'][0]
# ourl = reverse('occurrence', args=(o.id,))
# self.assertContains(r, ourl)
#
# #show a 'not found' message
# r = self.client.get(url, {'startdate':'2020-01-01'})
# self.assertEqual(r.context['occurrence_page'].count(), 0)
# self.assertContains(r, "Sorry, no events were found")
# self.assertNotContains(r, "Earlier")
# self.assertNotContains(r, "Later")
# self.assertNotContains(r, "Showing")
# self.assertEqual(r.status_code, 200) #not 404
#
#
# def test_date_range_view(self):
# """
# You can show all occurrences between two days on one page, by adding ?enddate=2010-10-24. Pagination adds or subtracts the difference in days (+1 - consider a single day) to the range.
# For some ranges, pagination is by a different amount:
# TODO: Precisely a month (paginate by month)
# TODO: Precisely a year (paginate by year)
# """
#
# url = reverse('occurrence_list',)
# r = self.client.get(url, {'startdate':'2010-01-01', 'enddate':'2010-01-05'})
# self.assertEqual(r.context['occurrence_pool'].count(), 109)
# self.assertEqual(len(r.context['occurrence_page']), 5)
# self.assertEqual(r.context['occurrence_page'][0].start.date(), date(2010,1,1))
# self.assertEqual(r.context['occurrence_page'].reverse()[0].start.date(), date(2010,1,5))
#
# self.assertContains(r, "Showing 1–5 January 2010")
# self.assertContains(r, '<a href="?startdate=2009-12-27&enddate=2009-12-31">Earlier</a>')
# self.assertContains(r, '<a href="?startdate=2010-01-06&enddate=2010-01-10">Later</a>')
#
# r = self.client.get(url, {'startdate':'2010-01-01', 'enddate':'2010-01-31'})
# self.assertContains(r, "Showing January 2010")
# # self.assertContains(r, '<a href="?datefrom=2009-12-01&dateto=2009-12-31">December 2009</a>')
# # self.assertContains(r, '<a href="?datefrom=2010-02-01&dateto=2010-02-28">February 2010</a>')
#
# def test_event_view(self):
# """
# You can view a paginated list of occurrences for an event.
# """
# #event page
# e = self.daily_tour
# eurl = reverse('event', kwargs={'event_slug': e.slug})
# self.assertEqual(e.get_absolute_url(), eurl)
# r3 = self.client.get(eurl, {'page': 2})
# self.assertEqual(r3.status_code, 200)
#
# #should have some pagination (3 pages)
# self.assertEqual(r3.context['occurrence_page'].count(), 20)
# self.assertContains(r3, "Earlier")
# self.assertContains(r3, "Later")
# self.assertContains(r3, "Showing 21–40 of 49")
#
# def test_ical(self):
# """
# You can view an ical for an occurrence.
# The ical is linked from the occurrence page.
# You can view an ical for a collection of occurrences.
# (TODO: do large icals perform well? If not we might have to make it a feed.)
# """
# e = self.daily_tour
# o = e.occurrences.all()[0]
#
# o_url = reverse('occurrence', kwargs={'occurrence_id': o.id })
# o_ical_url = reverse('occurrence_ical', kwargs={'occurrence_id': o.id })
# r = self.client.get(o_ical_url)
# self.assertEqual(r.status_code, 200)
#
# self.assertContains(r, "BEGIN:VCALENDAR", 1)
# self.assertContains(r, "BEGIN:VEVENT", 1)
#
# self.assertContains(r, "SUMMARY:Daily Tour", 1)
# self.assertContains(r, "DTSTART;VALUE=DATE:20100101", 1)
# self.assertContains(r, "DTEND;VALUE=DATE:20100101", 1)
# self.assertContains(r, "URL:http://testserver%s" % o_url, 1)
# # etc.
#
# #Multiple occurrences
# e_ical_url = reverse('event_ical', kwargs={'event_slug': e.slug })
# r = self.client.get(e_ical_url)
# self.assertEqual(r.status_code, 200)
#
# self.assertContains(r, "BEGIN:VCALENDAR", 1)
# self.assertContains(r, "BEGIN:VEVENT", 49)
# self.assertContains(r, "SUMMARY:Daily Tour", 49)
# self.assertContains(r, "DTSTART;VALUE=DATE:20100101", 1)
# self.assertContains(r, "DTEND;VALUE=DATE:20100101", 1)
#
# def test_hcal(self):
# """
# The occurrence page uses hCalendar microformat.
# The occurrence listing page uses hCalendar microformat.
# """
#
# def test_feeds(self):
# """
# You can view an RSS feed for an iterable of occurrences.
# """
#
# """
# CALENDAR
#
# A template tag shows a calendar of eventoccurrences in a given month.
#
# Calendar's html gives classes for 'today', 'date selection', 'has_events', 'no_events', 'prev_month' 'next_month'.
#
# Calendar optionally shows days.
#
# Calendar optionally hides leading or trailing empty weeks.
#
# Calendar can optionally navigate to prev/next months, which set a start_date to the 1st of the next month.
#
#
#
# API (TODO)
#
# """
| ixc/glamkit-eventtools | eventtools/tests/views.py | Python | bsd-3-clause | 9,613 | 0.006971 |
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import IECore
import Gaffer
import GafferUI
class CompoundDataPlugValueWidget( GafferUI.CompoundPlugValueWidget ) :
def __init__( self, plug, collapsed=True, label=None, summary=None, editable=True, **kw ) :
GafferUI.CompoundPlugValueWidget.__init__( self, plug, collapsed, label, summary, **kw )
self.__editable = True
self.__footerWidget = None
def _childPlugWidget( self, childPlug ) :
return _MemberPlugValueWidget( childPlug, self._label( childPlug ) )
def _footerWidget( self ) :
if self.__footerWidget is not None :
return self.__footerWidget
if self.__class__ is CompoundDataPlugValueWidget : # slight hack so that SectionedCompoundDataPlugValueWidget doesn't get a plus button
self.__footerWidget = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
self.__footerWidget.append( GafferUI.Spacer( IECore.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) ) )
self.__footerWidget.append(
GafferUI.MenuButton( image="plus.png", hasFrame=False, menu=GafferUI.Menu( self.__addMenuDefinition() ) )
)
self.__footerWidget.append( GafferUI.Spacer( IECore.V2i( 1 ), IECore.V2i( 999999, 1 ) ), expand = True )
return self.__footerWidget
## May be reimplemented by derived classes to return a suitable label
# for the member represented by childPlug.
def _label( self, childPlug ) :
if not childPlug.getFlags( Gaffer.Plug.Flags.Dynamic ) :
return childPlug["name"].getValue()
return None
def __addMenuDefinition( self ) :
result = IECore.MenuDefinition()
result.append( "/Add/Bool", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.BoolData( False ) ) } )
result.append( "/Add/Float", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.FloatData( 0 ) ) } )
result.append( "/Add/Int", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.IntData( 0 ) ) } )
result.append( "/Add/NumericDivider", { "divider" : True } )
result.append( "/Add/String", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.StringData( "" ) ) } )
result.append( "/Add/StringDivider", { "divider" : True } )
result.append( "/Add/V2i", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.V2iData( IECore.V2i( 0 ) ) ) } )
result.append( "/Add/V3i", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.V3iData( IECore.V3i( 0 ) ) ) } )
result.append( "/Add/V2f", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.V2fData( IECore.V2f( 0 ) ) ) } )
result.append( "/Add/V3f", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.V3fData( IECore.V3f( 0 ) ) ) } )
result.append( "/Add/VectorDivider", { "divider" : True } )
result.append( "/Add/Color3f", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.Color3fData( IECore.Color3f( 0 ) ) ) } )
result.append( "/Add/Color4f", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.Color4fData( IECore.Color4f( 0, 0, 0, 1 ) ) ) } )
return result
def __addItem( self, name, value ) :
with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode.staticTypeId() ) ) :
self.getPlug().addOptionalMember( name, value, enabled=True )
class _MemberPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, childPlug, label=None ) :
self.__row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )
GafferUI.PlugValueWidget.__init__( self, self.__row, childPlug )
if label is not None or not childPlug.getFlags( Gaffer.Plug.Flags.Dynamic ) :
nameWidget = GafferUI.LabelPlugValueWidget(
childPlug,
horizontalAlignment = GafferUI.Label.HorizontalAlignment.Right,
verticalAlignment = GafferUI.Label.VerticalAlignment.Top,
)
if label is not None :
nameWidget.label().setText( label )
nameWidget.label()._qtWidget().setFixedWidth( GafferUI.PlugWidget.labelWidth() )
else :
nameWidget = GafferUI.StringPlugValueWidget( childPlug["name"] )
nameWidget.textWidget()._qtWidget().setFixedWidth( GafferUI.PlugWidget.labelWidth() )
self.__row.append( nameWidget )
if "enabled" in childPlug :
self.__row.append(
GafferUI.BoolPlugValueWidget(
childPlug["enabled"],
displayMode = GafferUI.BoolWidget.DisplayMode.Switch
)
)
self.__row.append( GafferUI.PlugValueWidget.create( childPlug["value"] ), expand = True )
self._updateFromPlug()
def setPlug( self, plug ) :
GafferUI.PlugValueWidget.setPlug( self, plug )
if isinstance( self.__row[0], GafferUI.LabelPlugValueWidget ) :
self.__row[0].setPlug( plug )
else :
self.__row[0].setPlug( plug["name"] )
if "enabled" in plug :
self.__row[1].setPlug( plug["enabled"] )
self.__row[-1].setPlug( plug["value"] )
def hasLabel( self ) :
return True
def childPlugValueWidget( self, childPlug, lazy=True ) :
for w in self.__row :
if w.getPlug().isSame( childPlug ) :
return w
return None
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
GafferUI.PlugValueWidget.setReadOnly( self, readOnly )
for w in self.__row :
w.setReadOnly( readOnly )
def _updateFromPlug( self ) :
if "enabled" in self.getPlug() :
with self.getContext() :
enabled = self.getPlug()["enabled"].getValue()
if isinstance( self.__row[0], GafferUI.StringPlugValueWidget ) :
self.__row[0].setEnabled( enabled )
self.__row[-1].setEnabled( enabled )
GafferUI.PlugValueWidget.registerType( Gaffer.CompoundDataPlug.staticTypeId(), CompoundDataPlugValueWidget )
GafferUI.PlugValueWidget.registerType( Gaffer.CompoundDataPlug.MemberPlug.staticTypeId(), _MemberPlugValueWidget )
| davidsminor/gaffer | python/GafferUI/CompoundDataPlugValueWidget.py | Python | bsd-3-clause | 7,763 | 0.064666 |
#!/usr/bin/env python
import datetime
from run_utils import *
class TestSuite(object):
def __init__(self, options, cache):
self.options = options
self.cache = cache
self.nameprefix = "opencv_" + self.options.mode + "_"
self.tests = self.cache.gatherTests(self.nameprefix + "*", self.isTest)
def getOS(self):
return getPlatformVersion() or self.cache.getOS()
def getHardware(self):
res = []
if self.cache.getArch() in ["x86", "x64"] and self.cache.withCuda():
res.append("CUDA")
return res
def getLogName(self, app, timestamp):
app = self.getAlias(app)
rev = self.cache.getGitVersion()
if isinstance(timestamp, datetime.datetime):
timestamp = timestamp.strftime("%Y%m%d-%H%M%S")
if self.options.longname:
small_pieces = [self.getOS(), self.cache.getArch()] + self.cache.getDependencies() + self.getHardware() + [self.cache.getSIMDFeatures()]
big_pieces = [app, str(rev), timestamp, "_".join([p for p in small_pieces if p])]
l = "__".join(big_pieces)
else:
pieces = [app, self.cache.getOS(), self.cache.getArch()] + self.getHardware() + [rev, timestamp]
lname = "_".join([p for p in pieces if p])
lname = re.sub(r'[\(\)\[\]\s,]', '_', lname)
l = re.sub(r'_+', '_', lname)
return l + ".xml"
def listTests(self, short = False, main = False):
if len(self.tests) == 0:
raise Err("No tests found")
for t in self.tests:
if short:
t = self.getAlias(t)
if not main or self.cache.isMainModule(t):
log.info("%s", t)
def getAlias(self, fname):
return sorted(self.getAliases(fname), key = len)[0]
def getAliases(self, fname):
def getCuts(fname, prefix):
# filename w/o extension (opencv_test_core)
noext = re.sub(r"\.(exe|apk)$", '', fname)
# filename w/o prefix (core.exe)
nopref = fname
if fname.startswith(prefix):
nopref = fname[len(prefix):]
# filename w/o prefix and extension (core)
noprefext = noext
if noext.startswith(prefix):
noprefext = noext[len(prefix):]
return noext, nopref, noprefext
# input is full path ('/home/.../bin/opencv_test_core') or 'java'
res = [fname]
fname = os.path.basename(fname)
res.append(fname) # filename (opencv_test_core.exe)
for s in getCuts(fname, self.nameprefix):
res.append(s)
if self.cache.build_type == "Debug" and "Visual Studio" in self.cache.cmake_generator:
res.append(re.sub(r"d$", '', s)) # MSVC debug config, remove 'd' suffix
log.debug("Aliases: %s", set(res))
return set(res)
def getTest(self, name):
# return stored test name by provided alias
for t in self.tests:
if name in self.getAliases(t):
return t
raise Err("Can not find test: %s", name)
def getTestList(self, white, black):
res = [t for t in white or self.tests if self.getAlias(t) not in black]
if len(res) == 0:
raise Err("No tests found")
return set(res)
def isTest(self, fullpath):
if fullpath == "java":
return True
if not os.path.isfile(fullpath):
return False
if self.cache.getOS() == "nt" and not fullpath.endswith(".exe"):
return False
return os.access(fullpath, os.X_OK)
def wrapInValgrind(self, cmd = []):
if self.options.valgrind:
res = ['valgrind']
if self.options.valgrind_supp:
res.append("--suppressions=%s" % self.options.valgrind_supp)
res.extend(self.options.valgrind_opt)
return res + cmd
return cmd
def runTest(self, path, logfile, workingDir, args = []):
args = args[:]
exe = os.path.abspath(path)
if path == "java":
cmd = [self.cache.ant_executable, "-Dopencv.build.type=%s" % self.cache.build_type, "buildAndTest"]
ret = execute(cmd, cwd = self.cache.java_test_binary_dir + "/.build")
return None, ret
else:
if isColorEnabled(args):
args.append("--gtest_color=yes")
cmd = self.wrapInValgrind([exe] + args)
tempDir = TempEnvDir('OPENCV_TEMP_PATH', "__opencv_temp.")
tempDir.init()
log.warning("Run: %s" % " ".join(cmd))
ret = execute(cmd, cwd = workingDir)
tempDir.clean()
hostlogpath = os.path.join(workingDir, logfile)
if os.path.isfile(hostlogpath):
return hostlogpath, ret
return None, ret
def checkPrerequisites(self):
if self.cache.getArch() == "x64" and hostmachine == "x86":
raise Err("Target architecture is incompatible with current platform")
def runTests(self, tests, black, workingDir, args = []):
self.checkPrerequisites()
args = args[:]
logs = []
test_list = self.getTestList(tests, black)
date = datetime.datetime.now()
if len(test_list) != 1:
args = [a for a in args if not a.startswith("--gtest_output=")]
ret = 0
for test in test_list:
more_args = []
exe = self.getTest(test)
userlog = [a for a in args if a.startswith("--gtest_output=")]
if len(userlog) == 0:
logname = self.getLogName(exe, date)
more_args.append("--gtest_output=xml:" + logname)
else:
logname = userlog[0][userlog[0].find(":")+1:]
log.debug("Running the test: %s (%s) ==> %s in %s", exe, args + more_args, logname, workingDir)
if self.options.dry_run:
logfile, r = None, 0
else:
logfile, r = self.runTest(exe, logname, workingDir, args + more_args)
log.debug("Test returned: %s ==> %s", r, logfile)
if r != 0:
ret = r
if logfile:
logs.append(os.path.relpath(logfile, workingDir))
return logs, ret
#===================================================================================================
if __name__ == "__main__":
log.error("This is utility file, please execute run.py script")
| DamianPilot382/Rubiks-Cube-Solver | opencv/sources/modules/ts/misc/run_suite.py | Python | apache-2.0 | 6,541 | 0.005198 |
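A standalone illustration of the name cuts computed in `getAliases` above, re-implemented here rather than imported, with a hypothetical `opencv_perf_` prefix:

```python
import re

def cuts(fname, prefix="opencv_perf_"):
    noext = re.sub(r"\.(exe|apk)$", '', fname)                        # drop the extension
    nopref = fname[len(prefix):] if fname.startswith(prefix) else fname
    noprefext = noext[len(prefix):] if noext.startswith(prefix) else noext
    return noext, nopref, noprefext

print(cuts("opencv_perf_core.exe"))
# ('opencv_perf_core', 'core.exe', 'core')
```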
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from snf_django.management.commands import SynnefoCommand, CommandError
from optparse import make_option
from synnefo.management import common
from synnefo.plankton.backend import PlanktonBackend
from snf_django.management import utils
class Command(SynnefoCommand):
args = "<snapshot_id>"
help = "Display available information about a snapshot"
option_list = SynnefoCommand.option_list + (
make_option(
'--user',
dest='userid',
default=None,
help="The UUID of the owner of the snapshot. Required"
"if snapshot is not public"),
make_option(
'--public',
dest='public',
default=False,
action="store_true",
help="Use this option if the snapshot is public"),
)
@common.convert_api_faults
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Please provide a snapshot ID")
snapshot_id = args[0]
userid = options["userid"]
public = options["public"]
if (userid is None) and (public is False):
raise CommandError("'user' option or 'public' option is required")
try:
with PlanktonBackend(userid) as backend:
snapshot = backend.get_snapshot(snapshot_id)
except:
raise CommandError("An error occurred, verify that snapshot and "
"user ID are valid")
utils.pprint_table(out=self.stdout, table=[snapshot.values()],
headers=snapshot.keys(), vertical=True)
| apyrgio/synnefo | snf-cyclades-app/synnefo/volume/management/commands/snapshot-show.py | Python | gpl-3.0 | 2,301 | 0.000435 |
#!/usr/bin/env python
import re
import sys
snappy_ver = "v3.0"
html_header_string = """\
<html>
<head>
<title>Snappy Assembly</title>
<style>
BODY {
margin-bottom: 200px;
}
TABLE TD {
vertical-align: middle;
}
H2 {
margin-bottom: 5px;
margin-top: 24px;
font-size: 20pt;
}
LI.section {
font-size: 20pt;
font-weight: bold;
}
H3 {
margin-left: -15px;
margin-bottom: 5px;
margin-top: 18px;
font-size: 16pt;
}
LI.step {
padding-left: 15px;
margin-left: 0;
font-size: 16pt;
font-weight: bold;
list-style-type: none;
}
DIV.desc {
margin-bottom: 15px;
font-size: 12pt;
font-weight: normal;
}
OL {
margin-left: 30px;
}
UL {
padding-left: 5px;
}
</style>
</head>
<body>
<h1>Snappy RepRap Assembly Instructions</h1>
<ol>
"""
class GenAssemblyIndex(object):
indexfile = "docs/assembly/index.html"
markdownfile = "wiki/{0}-Assembly.md".format(snappy_ver)
sourcefile = "full_assembly.scad"
modules = []
modinfo = {}
def write_index(self):
with open(self.indexfile, "w") as f:
f.write(html_header_string)
for mod_eng in self.modules:
f.write('<li class="section">')
f.write('<h2>{0}</h2>\n'.format(mod_eng))
stepcnt = len(self.modinfo[mod_eng])
if stepcnt > 1:
f.write('<ul>\n')
for stepinfo in self.modinfo[mod_eng]:
if stepcnt > 1:
f.write('<li class="step">')
f.write('<h3>Step {step}</h3>\n'.format(**stepinfo))
f.write(
'<div class="desc">{desc}</div>\n'
'<table>'
'<tr>'
'<td class="befor">'
'<img src="{module}_before.png">'
'</td>'
'<td class="arrow"><img src="arrow.png"></td>'
'<td class="after"><img src="{module}_after.png"></td>'
'</tr>'
'</table>\n'
.format(**stepinfo)
)
if stepcnt > 1:
f.write('</li>\n')
if stepcnt > 1:
f.write('</ul>\n')
f.write('</li>\n')
f.write('<li class="section">\n')
f.write('<h2>{0}</h2>\n'.format("RAMPS Wiring"))
f.write('<div class="desc">\n')
f.write('<p>Heres a diagram of what needs to be connected where on a RAMPS 1.4 controller board.</p>\n')
f.write('<p><a href="RAMPS_Wiring_For_Snappy.png"><img width="800" height="600" src="RAMPS_Wiring_For_Snappy.png"></a></p>')
f.write('<p>Click on the image to enlarge.</p>\n\n')
f.write('</div>\n')
f.write('</li>\n')
f.write('<li class="section">\n')
f.write('<h2>{0}</h2>\n'.format("Marlin Firmware for RAMPS"))
f.write('<div class="desc">\n')
f.write('You can find Marlin firmware pre-configured for the Snappy with a RAMPS 1.4 controller at\n')
f.write('<a href="https://github.com/revarbat/snappy-reprap/tree/v3.0/firmware">https://github.com/revarbat/snappy-reprap/tree/v3.0/firmware</a>\n')
f.write('</div>\n')
f.write('</li>\n')
f.write('</ol>\n')
f.write('</body>\n')
f.write('</html>\n')
def write_markdown(self):
with open(self.markdownfile, "w") as f:
f.write("# Snappy RepRap Assembly Instructions\n\n")
for mod_eng in self.modules:
f.write('## {0}\n\n'.format(mod_eng))
stepcnt = len(self.modinfo[mod_eng])
for stepinfo in self.modinfo[mod_eng]:
stepinfo['base'] = (
'https://raw.githubusercontent.com/'
'revarbat/snappy-reprap/{0}/docs/assembly/'
).format(snappy_ver)
if stepcnt > 1:
f.write('### Step {step}\n\n'.format(**stepinfo))
f.write(
'{desc}\n\n'
'Before | After\n'
'------ | -----\n'
'![{module} Step {step} Before]'
'({base}{module}_before.png) | '
'![{module} Step {step} After]'
'({base}{module}_after.png)\n\n'
.format(**stepinfo)
)
f.write('## {0}\n\n'.format("RAMPS Wiring"))
f.write('Heres a diagram of what needs to be connected where on a RAMPS 1.4 controller board.\n\n')
f.write('[]({0}-RAMPS_Wiring_For_Snappy.png)\n'.format(snappy_ver))
f.write('Click on the image to enlarge.\n\n')
f.write('## {0}\n\n'.format("Marlin Firmware for RAMPS"))
f.write('You can find Marlin firmware pre-configured for the Snappy with a RAMPS 1.4 controller at\n')
f.write('https://github.com/revarbat/snappy-reprap/tree/{0}/firmware\n'.format(snappy_ver))
def process_module(self, module, desc):
print("module: {0}".format(module))
step = 1
mod_eng = module.replace('_', ' ') \
.title() \
.replace('Xy', 'XY') \
.replace('Yz', 'YZ')
mod_split = mod_eng.split(" ")
if mod_split[-1].isdigit():
step = int(mod_split[-1])
mod_eng = " ".join(mod_split[:-1])
if mod_eng not in self.modules:
self.modules.append(mod_eng)
self.modinfo[mod_eng] = [
{
'module': module,
'step': step,
'desc': desc
},
]
else:
self.modinfo[mod_eng].append(
{
'module': module,
'step': step,
'desc': desc
},
)
def generate_index(self):
mod_re = re.compile(
r'module *([a-z_][a-z0-9_]*_assembly(_[0-9]+)?) *\('
)
desc_re = re.compile(r'// *desc: *(.*)$')
module = ""
desc = ""
with open(self.sourcefile, "r") as f:
for line in f.readlines():
mod_res = mod_re.search(line)
if mod_res:
if module:
self.process_module(module, desc)
module = mod_res.group(1)
desc = ""
desc_res = desc_re.search(line)
if desc_res:
desc += desc_res.group(1)
if module:
self.process_module(module, desc)
self.write_index()
self.write_markdown()
def main():
genidx = GenAssemblyIndex()
genidx.generate_index()
sys.exit(0)
if __name__ == "__main__":
main()
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
| revarbat/snappy-reprap | gen_assembly_index.py | Python | gpl-2.0 | 7,135 | 0.001261 |
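A standalone trace of the name handling in `process_module` above, using a hypothetical module name: the identifier becomes a section title plus an optional step number.

```python
module = "xy_carriage_assembly_2"
mod_eng = module.replace('_', ' ').title().replace('Xy', 'XY').replace('Yz', 'YZ')
mod_split = mod_eng.split(" ")
step = 1
if mod_split[-1].isdigit():
    step = int(mod_split[-1])
    mod_eng = " ".join(mod_split[:-1])
print(step, mod_eng)  # 2 XY Carriage Assembly
```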
#!/usr/bin/python
"""
skeleton code for k-means clustering mini-project
"""
import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
""" some plotting code designed to help you visualize your clusters """
### plot each cluster with a different color--add more colors for
### drawing more than 4 clusters
colors = ["b", "c", "k", "m", "g"]
for ii, pp in enumerate(pred):
plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])
### if you like, place red stars over points that are POIs (just for funsies)
if mark_poi:
for ii, pp in enumerate(pred):
if poi[ii]:
plt.scatter(features[ii][0], features[ii][1], color="r", marker="*")
plt.xlabel(f1_name)
plt.ylabel(f2_name)
plt.savefig(name)
plt.show()
### load in the dict of dicts containing all the data on each person in the dataset
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)
### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
poi = "poi"
features_list = [poi, feature_1, feature_2]
data = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data )
### in the "clustering with 3 features" part of the mini-project,
### you'll want to change this line to
### for f1, f2, _ in finance_features:
### (as it's currently written, line below assumes 2 features)
for f1, f2 in finance_features:
plt.scatter( f1, f2 )
plt.show()
from sklearn.cluster import KMeans
features_list = ["poi", feature_1, feature_2]
data2 = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data2 )
clf = KMeans(n_clusters=2)
pred = clf.fit_predict( finance_features )
Draw(pred, finance_features, poi, name="clusters_before_scaling.pdf", f1_name=feature_1, f2_name=feature_2)
### cluster here; create predictions of the cluster labels
### for the data and store them to a list called pred
try:
Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
print "no predictions object named pred found, no clusters to plot"
| moosemaniam/learning | ud120-projects/k_means/k_means_cluster.py | Python | cc0-1.0 | 2,570 | 0.019066 |
# -*- coding: utf-8 -*-
from django.http import HttpResponse,HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.shortcuts import render_to_response
from django import forms
from django.forms import ModelForm
from django.db.models import Q, F, Avg, Max, Min, Count
from django.utils import simplejson
from django.utils.http import urlencode
from django.contrib.auth.decorators import login_required, user_passes_test
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist, FieldError, MultipleObjectsReturned
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.utils.translation import ugettext as _ , activate
from django.core.mail import send_mail
from django.views.decorators.cache import cache_page
import re
import datetime
from urlparse import urlparse
from django.utils.http import urlquote
from notebook.notes.models import Note, Tag, create_model, WorkingSet, getW
from notebook.bookmarks.models import Bookmark
from notebook.scraps.models import Scrap
from notebook.social.models import *
from notebook.areas.models import Area, Area_Group
from notebook.notes.views import User, getT, getlogger, getFolder, get_public_notes, __get_folder_context, __get_pre_url
from notebook.notes.views import getSearchResults, __getQStr, __get_view_theme, Pl, ALL_VAR, __get_lang
from notebook.notes.util import *
from notebook.notes.constants import *
from notebook.notebook_data.data import *
log = getlogger('social.views')
#TODO: make this the same as in notebook.notes.views
#TODO:belows seems not useful at all. Think of removing them
#book_entry_dict = {'notebook':'note', 'snippetbook':'snippet','bookmarkbook':'bookmark', 'scrapbook': 'scrap'}
#below are not used except in wall implementation, which is commented out for now
#===============================================================================
# def getN(username):
# return create_model("N_"+str(username), Note, username)
#
# def getB(username):
# return create_model("B_"+str(username), Bookmark, username)
#
# def getS(username):
# return create_model("S_"+str(username), Scrap, username)
#===============================================================================
#TODO: for now, social db is not used. Instead default is used to put those social data because social data needs user which is in default db
#otherwise, need to copy the user table over to social
G = Group
SN = Social_Note
ST = Social_Tag
import notebook.settings as settings
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
class AddGroupForm(ModelForm):
error_css_class = 'error'
required_css_class = 'required'
class Meta:
model = Group
class EditGroupForm(ModelForm):
error_css_class = 'error'
required_css_class = 'required'
class Meta:
model = Group
exclude = ('tags', 'members', 'creator', 'admins')
#fields = ('type_of_linkage','desc','tags','title','private',
# 'attachment','notes')
#===============================================================================
#
#
# @login_required
# def wall_snippets(request, username):
# N = getN(username)
# ns = N.objects.all().order_by('-init_date')[:30]
# #get groups for this user
# gs = G.objects.filter(members__username=username)
# return render_to_response('social/wall_snippets.html', {'ns':ns, 'gs':gs, 'profile_username':username, 'book_name': 'snippets'}, context_instance=RequestContext(request))
#
#
# @login_required
# def wall_bookmarks(request, username):
# N = getN(username)
# ns = N.objects.all().order_by('init_date')[:30]
# #get groups for this user
# gs = G.objects.filter(members__username=username)
# return render_to_response('social/wall_snippets.html', {'ns':ns, 'gs':gs, 'profile_username':username, 'book_name': 'snippets'}, context_instance=RequestContext(request))
#
#
# @login_required
# def wall_scraps(request, username):
# N = getN(username)
# ns = N.objects.all().order_by('init_date')[:30]
# #get groups for this user
# gs = G.objects.filter(members__username=username)
# return render_to_response('social/wall_snippets.html', {'ns':ns, 'gs':gs, 'profile_username':username, 'book_name': 'snippets'}, context_instance=RequestContext(request))
#
#
#
# @login_required
# def wall(request, username):
# N = getN(username)
# B = getB(username)
# S = getS(username)
# today = datetime.date.today()
# yesterday = today - datetime.timedelta(1)
# day_bf_yest = today - datetime.timedelta(2)
# ns = N.objects.all().order_by('init_date')[:10]
# bs = B.objects.all().order_by('init_date')[:10]
# ss = S.objects.all().order_by('init_date')[:10]
#
# #get groups for this user
# gs = G.objects.filter(members__username=username)
#
# return render_to_response('social/wall.html', {'ns':ns, 'bs':bs, 'ss':ss, 'gs':gs}, context_instance=RequestContext(request))
#===============================================================================
@login_required
def group_index(request, groupid):
return HttpResponseRedirect('/groups/'+groupid+'/snippetbook/notes/')
def get_groups_created_by_self(request, username):
if username == request.user.username:
gs_created_by_self = G.objects.filter(admins__username=username)
else:
gs_created_by_self = G.objects.filter(admins__username=username, private=False)
return gs_created_by_self
def get_groups_following(request, username):
if username == request.user.username:
gs_created_by_self = G.objects.filter(members__username=username).exclude(admins__username=username)
else:
gs_created_by_self = G.objects.filter(members__username=username, private=False).exclude(admins__username=username)
return gs_created_by_self
def get_groups_list(request, username):
gs_created_by_self = get_groups_created_by_self(request, username)
gs_created_by_self_list = [g for g in gs_created_by_self]
gs_following = get_groups_following(request, username)
gs_following_list = [g for g in gs_following]
group_set = set(gs_created_by_self_list).union(set(gs_following_list))
return list(group_set)
def profile(request, username):
gs_created = get_groups_created_by_self(request, username)
gs_following = get_groups_following(request, username)
profile_member = get_object_or_404(Member, username=username)
areas = Area.objects.using(username).filter(private=False)
return render_to_response('social/profile.html', {'gs_created':gs_created, 'gs_following':gs_following, \
'profile_user':User.objects.get(username=username), \
'profile_member':profile_member, 'profile_username':username,\
'areas':areas}, context_instance=RequestContext(request))
@login_required
def friends(request, username):
profile_member = get_object_or_404(Member, username=username)
friends = profile_member.get_friends()
sorted_members = [[m, m.get_public_notes_count()] for m in friends]
sorted_members.sort(key=lambda r:r[1],reverse = True)
if (not request.user.is_anonymous()) and request.user.username == username:
Notice.objects.filter(notice_type__label='friends_add', recipient=request.user).update(unseen=False)
return render_to_response('social/friends.html', { 'profile_username':username, 'friends':sorted_members}, context_instance=RequestContext(request, {}))
@login_required
def friends_notes2(request, username, bookname):
#print 'username:',username
#print 'bookname:',bookname
friends = request.user.member.get_friends()
q = Q(owner__in=friends, private=False)
note_list = getSN(bookname).objects.filter(q)
qstr = __getQStr(request)
note_list = getSearchResults(note_list, qstr)
sort, order_type, paged_notes, cl = __get_notes_context(request, note_list)
#tags = get_group_tags(request, groupname, bookname)
return render_to_response('social/notes/friends_notes.html', {'note_list':paged_notes,'sort':sort, 'bookname':bookname, \
'tags':None, 'qstr':qstr, \
'appname':'friends', 'cl':cl, 'profile_username':username},\
context_instance=RequestContext(request, {'book_uri_prefix':'/'+username+'/friends',
'note_type':bookname_note_type_dict.get(bookname),
'pick_empty':request.GET.get('pick_empty', 'all'),
'pick_plan':request.GET.get('pick_plan', 'all'),
'pick_lang': __get_lang(request)
}))
#notes of all users
def all_notes(request, bookname):
note_list = getSN(bookname).objects.filter(private=False)
qstr = __getQStr(request)
note_list = getSearchResults(note_list, qstr)
sort, order_type, paged_notes, cl = __get_notes_context(request, note_list)
return render_to_response('social/notes/all_notes.html', {'note_list':paged_notes,'sort':sort, 'bookname':bookname, \
'tags':None, 'qstr':qstr, \
'appname':'all', 'cl':cl},\
context_instance=RequestContext(request, {'book_uri_prefix':'/all',
'note_type':bookname_note_type_dict.get(bookname),
'pick_empty':request.GET.get('pick_empty', 'all'),
'pick_plan':request.GET.get('pick_plan', 'all'),
'pick_lang': __get_lang(request)
}))
def learners(request):
members = Member.objects.filter(is_active=True)#.order_by('-get_public_notes_count')
sorted_members = [[m, m.get_public_notes_count()] for m in members if m.get_public_notes_count() > 10 and m.username not in ['test', 'guest']]
sorted_members.sort(key=lambda r:r[1],reverse = True)
return render_to_response('social/learners.html', {'learners':sorted_members}, \
context_instance=RequestContext(request))
#viewing all people
@user_passes_test(lambda u: u.username=='leon')
@login_required
def people(request):
members = Member.objects.filter(is_active=True)#.order_by('-get_public_notes_count')
sorted_members = [[m, m.get_public_notes_count()] for m in members]
sorted_members.sort(key=lambda r:r[1],reverse = True)
return render_to_response('social/people.html', {'learners':sorted_members}, \
context_instance=RequestContext(request))
def groups(request):
gs = G.objects.filter(private=False).annotate(num_members=Count('members')).order_by('-num_members')
return render_to_response('social/group/groups.html', {'groups':gs}, \
context_instance=RequestContext(request))
#TODO: think of getting username arg, since you can get it from request. Also think of
# get rid of it in the url.py, changing to personal, or my
@login_required
def my_groups(request, username):
gs_created_by_self = get_groups_created_by_self(request, username)
gs_following = get_groups_following(request, username)
addGroupForm = AddGroupForm(initial={'admins': [username]})
T = getT(username)
tags = T.objects.filter(private=False)
#posting for adding a group
if request.method == 'POST':
post = request.POST.copy()
#print 'post', post
#TODO: move logic below to group.add_tags
tag_names = post.getlist('item[tags][]')
#print 'tag_names', tag_names
#if not tag_names:
# messages.error(request, _("No tags are entered!"))
# log.error("No tags are entered when generating a group!")
# return HttpResponseRedirect('/'+username+'/groups/')
#If tag doesn't exist yet, it will be created.
tag_ids = [ST.objects.get_or_create(name=tag_name)[0].id for tag_name in tag_names]
post.setlist('tags', tag_ids)
#TODO: check if the group already exists
g = G()
#TODO: whether new tags can be created in the social space?
f = AddGroupForm(post, instance=g)
if not f.is_valid():
log.debug("add group form errors:"+str(f.errors))
addGroupForm = f
else:
f.save()
#TODO: handel tags that are created in the social space (might need to push back to the
#personal space.)
#print 'newly created group name:', g.name
#add sharinggroup:groupname as a new tag for this group
gtn = "sharinggroup:"+g.name
st = ST(name=gtn, private=g.private)
st.save()
g.tags.add(st)
#TODO: might modify Group model's save method to push group tags all back to user personal space
#push this group tag back to the user's personal space
#T = getT(username=gtn)
#T.objects.get_or_create(name=gtn, private=g.private)
push_group_tags_back(g, request.user.username)
#TODO: add the creator to the admin?
#print 'tags:', tags
return render_to_response('social/group/mygroups.html', {'gs_created_by_self':gs_created_by_self, 'gs_following':gs_following,\
'addGroupForm':addGroupForm, 'tags':tags, 'profile_username':username}, \
context_instance=RequestContext(request))
@login_required
@cache_page(30)
def groups_notes(request, username, bookname):
group_list = get_groups_list(request, username)
tag_names = [tag.name for tag in group_list[0].tags.all()]
q0a = Q(tags__name__in=tag_names, owner__in=group_list[0].members.all(), private=False)
q0b = Q(tags__name="sharinggroup:"+group_list[0].name, private=True)
q = q0a | q0b
    for g in group_list[1:]:
        # OR in the equivalent tag/membership filter for each remaining group.
        tag_names = [tag.name for tag in g.tags.all()]
        qa = Q(tags__name__in=tag_names, owner__in=g.members.all(), private=False)
        qb = Q(tags__name="sharinggroup:"+g.name, private=True)
        q = q | (qa | qb)
note_list = getSN(bookname).objects.filter(q)
qstr = __getQStr(request)
note_list = getSearchResults(note_list, qstr)
sort, order_type, paged_notes, cl = __get_notes_context(request, note_list)
#tags = get_group_tags(request, groupname, bookname)
return render_to_response('social/notes/groups_notes.html', {'note_list':paged_notes,'sort':sort, 'bookname':bookname, \
'tags':None, 'qstr':qstr,\
'appname':'friends', 'cl':cl, 'profile_username':username},\
context_instance=RequestContext(request,
{'book_uri_prefix':'/'+username+'/groups', 'note_type':bookname_note_type_dict.get(bookname),
'pick_empty':request.GET.get('pick_empty', 'all'),
'pick_plan':request.GET.get('pick_plan', 'all')}))
#TODO: so far automatically push all back. In the future, after for user confirmation
#push group tags back to the user's space, also create a working set with the group name
@login_required
def push_group_tags_back_to_delete(request, groupname):
username = request.user.username
T = getT(username)
g = Group.objects.get(name=groupname)
sts = g.tags.all()
W = getW(username)
w1, created = W.objects.get_or_create(name='snippetbook')
w2, created = W.objects.get_or_create(name='bookmarkbook')
w3, created = W.objects.get_or_create(name='scrapbook')
w, created = W.objects.get_or_create(name="sharinggroup:"+g.name)
for st in sts:
t, created = T.objects.get_or_create(name=st.name)
        if created:
            t.private = st.private
            t.save()
w1.tags.add(t)
w2.tags.add(t)
w3.tags.add(t)
w.tags.add(t)
@cache_page(100)
def notes(request, username, bookname):
#===============================================================================
# if 'framebook' == bookname:
# return frames(request, username, 'notebook')
#===============================================================================
#profile_user =
note_list = getSN(bookname).objects.filter(owner__username=username)
note_list_taken = getSN(bookname).objects.filter(social_note_taken__taker__username=username)
#print 'note_list_taken', note_list_taken
#print 'note_list size:',len(note_list)
note_list = note_list | note_list_taken #search across table,, so cannot be merged? TODO:
#print 'note_list size after merge:',len(note_list)
#print 'notelist obtained:', note_list
qstr = __getQStr(request)
note_list = getSearchResults(note_list, qstr)
sort, order_type, paged_notes, cl = __get_notes_context(request, note_list)
#print 'paged_notes:',paged_notes
#For now, no tags in a user's social page. Later might bring the tags from user's own db and display here.
tags = []
#So far, get folders from users' personal space, but need to be public folders TODO:
F = getFolder(username, bookname)
#folders = F.objects.all()
# if request.user.username != username:
# log.debug('Not the owner, getting public folders only...')
folders = F.objects.filter(private=False).order_by('name')
profile_member = Member.objects.get(username=username)
pick_lang = __get_lang(request)
return render_to_response('social/include/notes/notes.html', {'note_list':paged_notes,'sort':sort, 'bookname':bookname, 'pick_lang':pick_lang, \
'folders':folders, 'profile_username':username, 'profile_member':profile_member, 'appname':'social', 'cl':cl},\
context_instance=RequestContext(request, {'book_uri_prefix':'/social/'+username,
'note_type':bookname_note_type_dict.get(bookname),
'pick_empty':request.GET.get('pick_empty', 'all'),
'pick_plan':request.GET.get('pick_plan', 'all')}))
@cache_page(100)
def note(request, username, bookname, note_id):
log.debug('Getting the note:'+note_id)
N = getSN(bookname)
#TODO: if cannot find the note, tell the user such note doesn't exist and send an email to admin
try:
note = N.objects.filter(owner__username=username).get(id=note_id)
except (ObjectDoesNotExist, ValueError):
raise Http404
if note.private:
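        #Private notes are only visible to members of the groups named in their
        #"sharinggroup:<groupname>" tags; anyone else is sent to the login page.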
sharing_groups = [tag.name.split(':')[1] for tag in note.tags.all() if tag.name.startswith('sharinggroup:')]
#print 'sharing_groups:', sharing_groups
if request.user.is_anonymous() or not request.user.member.is_in_groups(sharing_groups):
#flash a msg here? TODO:
#messages.error(request, _("This is from a private group! You need to be a member to view it."))
return HttpResponseRedirect('/login?next='+request.get_full_path())
#if not request.user.member.is_in_groups(sharing_groups):
# raise Http404
#get the backlink
referer = request.META.get('HTTP_REFERER')
if referer:
r = urlparse(referer)
if r.hostname not in notebook_host_names:
snb, created = Social_Note_Backlink.objects.get_or_create(note=note, url=referer)
if 'framebook' == bookname:
return frame(request, username, bookname, note_id)
#===============================================================================
# print 'note type is:', note.get_note_type()
#
# #linkages = note.linkagenote_set.all()
# frames = None
# if note.get_note_type() != 'Frame':
# frames = note.in_frames.all() #notes_included??TODO:
#
notes_included = None
if note.get_note_type() == 'Frame':
notes_included = note.social_frame.notes.all()
# print 'notes_included:', notes_included
#===============================================================================
pick_lang = __get_lang(request)
profile_member = Member.objects.get(username=username)
return render_to_response('social/include/notes/note/note.html', {'note':note,\
#'frames':frames, \
'notes_included':notes_included,\
'profile_username':username,'profile_member':profile_member,\
'pick_lang':pick_lang, 'appname':'social'
},\
context_instance=RequestContext(request, {'bookname': bookname,'aspect_name':'notes',\
'book_uri_prefix':'/social/'+username}))
#===============================================================================
# #TODO: think of whether get rid of appname
# @login_required
# def frames(request, username, bookname):
# #TODO: allow filter on delete
#
# #TODO: get linkages according to bookname
#
#
# note_list = Social_Frame.objects.filter(owner__username=username, deleted=False)
#
#
# sort, order_type, paged_notes, cl = __get_notes_context(request, note_list)
#
#
#
#
# return render_to_response('social/framebook/notes/notes.html', {'note_list':paged_notes,'sort':sort, 'bookname':bookname, \
# 'profile_username':username, 'appname':'social', 'cl':cl},\
# context_instance=RequestContext(request))
#===============================================================================
#===============================================================================
#
# return render_to_response('framebook/notes/notes.html', {'note_list': paged_notes,
# #'tags':tags,
# 'view_mode':view_mode,
# 'sort':sort, 'delete':delete, 'private':private, 'day':now.day, 'month':now.month, 'year':now.year, 'cl':cl,
# 'folders':folders,'qstr':qstr, 'profile_username':username, 'aspect_name':'linkagenotes', 'date_range':date_range,
# 'order_type':order_type, 'with_attachment':with_attachment, 'users':User.objects.all(),
# 'current_ws':request.session.get("current_ws", None),'included_aspect_name':'notes'},
# context_instance=RequestContext(request,{'bookname': bookname,}))
#===============================================================================
@cache_page(300)
def frame(request, username, bookname, frame_id):
frame = Social_Frame.objects.get(owner__username=username, id=frame_id)
if frame.private:
sharing_groups = [tag.name.split(':')[1] for tag in frame.tags.all() if tag.name.startswith('sharinggroup:')]
#if request.user.is_anonymous() or not request.user.member.is_in_groups(sharing_groups):
if not request.user.member.is_in_groups(sharing_groups):
raise Http404
#===============================================================================
# if request.user.username == username:
# frame_notes_display = frame.display_notes()
# else:
# frame_notes_display = frame.display_public_notes()
# #tags of each note has to be added as below since it again needs to know which user database to use.
# #The same for note type TODO: but for Social_Frame, it actually is all in default db. So?
# for n in frame_notes_display:
# note_id = n[0]
# N = getSN('notebook')
# note = N.objects.get(id=note_id)
# type = note.get_note_type()
# n.append(type)
# n.append(note.get_tags())
# if type == 'Bookmark':
# n.append(note.social_bookmark.url)
# elif type == 'Scrap':
# n.append(note.social_scrap.url)
# else:
# n.append('')
#===============================================================================
sort = request.GET.get('sort')
if request.user.username == username:
notes_in_frame = frame.get_notes_in_order(sort)
else:
notes_in_frame = frame.get_public_notes_in_order(sort)
pick_lang = __get_lang(request)
profile_member = Member.objects.get(username=username)
return render_to_response('social/framebook/notes/note/note.html', {'note':frame, 'notes_in_frame':notes_in_frame,'sort':sort, \
#'frame_notes_display':frame_notes_display, \
'profile_username':username,'profile_member':profile_member, \
'pick_lang':pick_lang, 'appname':'social',\
'pagename':'note'}, \
context_instance=RequestContext(request,{'bookname': bookname,\
'book_uri_prefix':'/social/'+username}))
def folders(request, username, bookname, foldername):
F = getFolder(username, bookname)
#T = getT(username)
SN = getSN(bookname)
note_list = SN.objects.filter(owner__username=username)
if request.user.username != username:
log.debug( 'Not the owner of the notes requested, getting public notes only...')
#For the time being, still apply this. TODO:
note_list = get_public_notes(note_list)
qstr = ""
current_folder = None
if foldername:
current_folder = F.objects.get(name=foldername)
qstr = current_folder.value
note_list = getSearchResults(note_list, qstr)
sort, order_type, paged_notes, cl = __get_notes_context(request, note_list)
folders = F.objects.filter(private=False).order_by('name')
return render_to_response('social/folders.html', {'note_list':paged_notes,'sort':sort, 'bookname':bookname, \
'folders':folders, 'is_in_folders':True, 'current_folder':current_folder,
'profile_username':username, 'appname':'social', 'cl':cl},\
context_instance=RequestContext(request,{'book_uri_prefix':'/social/'+username,
'note_type':bookname_note_type_dict.get(bookname),
'profile_member':Member.objects.get(username=username),}))
@login_required
def add_friend(request, username):
m1 = request.user.member
m2 = Member.objects.get(username=username)
f = Friend_Rel(friend1=m1, friend2=m2)
    #So far, make it confirmed automatically. TODO:
f.comfirmed = True
f.save()
notification.send([m2], "friends_add", {"from_user": request.user})
    log.debug('adding friend notification sent')
return HttpResponseRedirect('/social/'+username+'/')
@login_required
def remove_friend(request, username):
m1 = request.user.member
m2 = Member.objects.get(username=username)
f = Friend_Rel.objects.get(friend1=m1, friend2=m2)
f.delete()
return HttpResponseRedirect('/social/'+username+'/')
#what to do with tags in the user space if the group is removed? I think it is to keep it there.
#But how about the special sharinggroup tag? Should be removed, right?
@login_required
def join_group(request, groupid):
user = request.user
group = G.objects.get(id=groupid)
group.members.add(user.member)
group.save()
push_group_tags_back(request, group.name)
return HttpResponseRedirect('/groups/'+groupid+'/snippetbook/notes/')
@login_required
def group_admin(request, groupid):
g = G.objects.get(id=groupid)
#TODO: move below to a decorator
if request.user.member not in g.admins.all():
return HttpResponse("You are not an admin of this group, and thus cannot admin this group.", mimetype="text/plain") #TODO: translate
tags = Social_Tag.objects.all().order_by('name')
users = User.objects.exclude(username__in=[m.username for m in g.members.all()])
editGroupForm = EditGroupForm(instance=g)
return render_to_response('social/admin/group.html', {'group':g,'tags':tags, 'users':users, 'editGroupForm':editGroupForm \
}, context_instance=RequestContext(request))
#delete a member won't remove the group:groupname workingset from member's personal space
@login_required
def group_delete_member(request, groupid):
#groupname = request.POST.get('group_name')
g = G.objects.get(id=groupid)
member_id = request.POST.get('member_id')
member = User.objects.get(id=member_id)
#TODO: move below to a decorator
#user can remove himself. But to remove group members other than himself. he has to be in the group admin
if request.user.member not in g.admins.all() and member.username != request.user.member.username:
#return HttpResponse("You are not an admin of this group, and thus cannot admin this group.", mimetype="text/plain")
return HttpResponse(simplejson.dumps({'type':'error','msg':_('You are not an admin of this group, and thus cannot admin this group.')}), "application/json")
g.members.remove(member)
g.admins.remove(member)
#return HttpResponse('successful', mimetype="text/plain")
#TODO: use this type of response for all returns that do not refresh the page, to notify the front page that what the result of processing is
return HttpResponse(simplejson.dumps({'type':'success','msg':_('You have successfully removed the member from the group.')}), "application/json")
#===below seems not used============================================================================
# @login_required
# def group_update_tags(request, groupname):
# tag_names = request.POST.getlist('item[tags][]')
# group = G.objects.get(name=groupname)
# for tag_name in tag_names:
# if not tag_name in group.get_tag_names():
# group_add_tags(request, groupname)
# for tag_name in group.get_tag_names():
# if not tag_name in tag_names:
# group_remove_tag(request, groupname)
#===============================================================================
@login_required
def update_group(request, groupid):
group = G.objects.get(id=groupid)
editGroupForm = EditGroupForm(request.POST, request.FILES, instance=group)
#TODO:record last modified by who?
#username = request.user.username
log.debug('form errors:'+str(editGroupForm.errors))
editGroupForm.save()
return HttpResponseRedirect('/groups/'+groupid+'/admin/')
#TODO: check if admin
@login_required
def group_add_users(request, groupid):
user_names = request.POST.getlist('item[tags][]')
#TODO:what is below for?
#tags = [ST.objects.get(name=tag_name).name for tag_name in tag_names]
if not user_names:
#TODO: give an error page, also validation on the form
messages.error(request, _("No users are entered!"))
group = G.objects.get(id=groupid)
for uname in user_names:
member = Member.objects.get(username=uname)
group.members.add(member)
#TODO:push_group_tags_back
if member.default_lang:
activate(member.default_lang)
url = urlquote('www.91biji.com/groups/' + groupid + '/')
content = _('You are added to the group ')+group.name+'\n\n'+\
_('You can visit this group at ')+ 'http://' + url +'\n\n'+\
_('If you do not want to join this group, you can remove yourself from this group on your groups page. ') +\
urlquote('www.91biji.com/'+uname+'/groups/')
send_mail(_('You are added to group ')+group.name, content.encode('utf-8'), u'sys@opensourcelearning.org', [member.email])
if request.user.member.default_lang:
activate(request.user.member.default_lang)
messages.success(request, _("You have successfully added the user to the group!"))
return HttpResponseRedirect('/groups/'+groupid+'/admin/')
#TODO: check if admin
@login_required
def group_invite_users(request, groupid):
user_names = request.POST.getlist('item[tags][]')
#TODO:what is below for?
#tags = [ST.objects.get(name=tag_name).name for tag_name in tag_names]
if not user_names:
#TODO: give an error page, also validation on the form
messages.error(request, _("No users are entered!"))
group = G.objects.get(id=groupid)
for uname in user_names:
member = Member.objects.get(username=uname)
#for now, only invite, don't add the member directly
#group.members.add(member)
if member.default_lang:
activate(member.default_lang)
url = urlquote('www.91biji.com/groups/' + groupid + '/')
content = _('You are invited to join the group ')+group.name+'\n\n'+\
_('You can visit this group at ')+ 'http://' + url +'\n\n'+\
_('If you want to join this group, you can click on the "join the group" button.')+'\n\n'+\
_('After joining, if you want to remove yourself from this group, you can do that in your groups page.')
send_mail(_('You are invited to group ')+group.name, content.encode('utf-8'), u'sys@opensourcelearning.org', [member.email])
if request.user.member.default_lang:
activate(request.user.member.default_lang)
messages.success(request, _("You have successfully sent the group invitation!"))
return HttpResponseRedirect('/groups/'+groupid+'/admin/')
@login_required
def group_add_tags(request, groupid):
"""
Add tags to a group. Non-existing tags can be added, and they will be pushed back to each user's personal space
"""
tag_names = request.POST.getlist('item[tags][]')
#TODO:what is below for?
#tags = [ST.objects.get(name=tag_name).name for tag_name in tag_names]
if not tag_names:
#TODO: give an error page, also validation on the form
messages.error(request, "No tags are entered!")
group = G.objects.get(id=groupid)
username = request.user.username
#group.add_tags(request.username, tags)
#TODO: separate Tag, workingSet out of notebook.notes.models otherwise cannot import Tag, WorkingSet
for tag_name in tag_names:
#TODO: add try block to below to revert back if any error happen in the middle
#In the social space, the tag cannot be created alone. It has to be already existing.
t, created = Social_Tag.objects.get_or_create(name=tag_name)
#TODO: if not created, whether add this tag to all three books?
if created:
#TODO:should do this for every user in this group
#should push back to the user's space
for group_member in group.members.all():
                log.info('Push the tag back to the user: %s' % group_member.username)
user_tag, created = Tag.objects.using(group_member.username).get_or_create(name=tag_name)
#add this user_tag to users' three books
W = getW(group_member.username)
w = W.objects.get(name='snippetbook')
try:
w.tags.add(user_tag)
except Exception as inst:
log.error(type(inst))
log.error(inst.args)
log.error(inst)
w = W.objects.get(name='bookmarkbook')
try:
w.tags.add(user_tag)
except Exception as inst:
log.error(type(inst))
log.error(inst.args)
log.error(inst)
w = W.objects.get(name='scrapbook')
try:
w.tags.add(user_tag)
except Exception as inst:
log.error(type(inst))
log.error(inst.args)
log.error(inst)
group.tags.add(t)
group.save()
return HttpResponseRedirect('/groups/'+groupid+'/admin/')
@login_required
def group_remove_tag(request, groupid):
tag_id = request.POST.get('tag_id')
tag = ST.objects.get(id=tag_id)
group = G.objects.get(id=groupid)
group.tags.remove(tag)
#TODO: update user's group working set
return HttpResponse('successful', mimetype="text/plain")
def group(request, groupid, bookname):
gs = G.objects.filter(members__username=request.user.username)
group = G.objects.get(id=groupid)
#tags = [t for t in group.tags.all()]
log.debug('tags of the group:'+str(group.tags.all()))
if group.private and group.name not in [g.name for g in gs]:
raise Http404
note_list = group.get_notes(bookname)
qstr = __getQStr(request)
note_list = getSearchResults(note_list, qstr)
#print 'group notes:', note_list
#sort, delete, private, date_range, order_type, with_attachment, paged_notes, cl = __get_notes_context(request, note_list)
sort, order_type, paged_notes, cl = __get_notes_context(request, note_list)
#group.tags.all()
#tags = Social_Tag.objects.filter(group=group).order_by('name')#.annotate(Count('social_'+book_entry_dict.get(bookname))).order_by('name')
tags = get_group_tags(request, group.id, bookname)
profile_member = group#Group.objects.get(name=group.name)
#For now, get the learning area from the creator's db TODO:
try:
ag = Area_Group.objects.using(group.creator.username).get(group_id=group.id)
ag.owner_name = group.creator.username
area = ag.area
except ObjectDoesNotExist:
area = None
#in the template, to get the right url for each note, it still have to call each note's get_note_bookname method. So it might be better to get rid of passing note_type here TODO:
#But the current way might be a little faster since it reduce the num of calls
return render_to_response('social/notes/group_notes.html', {'group':group, 'gs':gs, 'note_list':paged_notes,'sort':sort, 'bookname':bookname, \
'tags':tags, 'qstr':qstr,\
'profile_member':profile_member,\
'appname':'groups', 'cl':cl, 'area':area},\
context_instance=RequestContext(request, {'book_uri_prefix':'/groups/'+groupid,
'note_type':bookname_note_type_dict.get(bookname),
'pick_empty':request.GET.get('pick_empty', 'all'),
'pick_plan':request.GET.get('pick_plan', 'all'),
'pick_lang': __get_lang(request)}))
def group_tagframes(request, groupid):
group = G.objects.get(id=groupid)
#print 'group', group
tfs = group.get_tag_frames()
return render_to_response('social/group/group_tagframes.html', {'group':group,
'tag_frames':tfs, 'appname':'groups',
},\
context_instance=RequestContext(request, {'book_uri_prefix':'/groups/'+groupid}))
@cache_page(30)
def notes_tag(request, username, bookname, tag_name):
if tag_name.startswith('takenfrom:'):
note_list = getSN(bookname).objects.filter(tags__name__startswith=tag_name, owner__username=username)
elif tag_name == 'untagged':
note_list = getSN(bookname).objects.filter(tags__name=None, owner__username=username)
else:
note_list = getSN(bookname).objects.filter(tags__name=tag_name, owner__username=username)
qstr = __getQStr(request)
note_list = getSearchResults(note_list, qstr)
sort, order_type, paged_notes, cl = __get_notes_context(request, note_list)
#TODO: provide tags for social public notebook
tags = []#Social_Tag.objects.filter(notes_set=note_list).order_by('name')
profile_member = Member.objects.get(username=username)
return render_to_response('social/include/notes/notes.html', {'note_list':paged_notes,'sort':sort, 'current_tag':tag_name, 'bookname':bookname,\
'profile_username':username, 'profile_member':profile_member, 'tags':tags, 'appname':'social', 'cl':cl},\
context_instance=RequestContext(request, {'book_uri_prefix':'/social/'+username,
'note_type':bookname_note_type_dict.get(bookname),
'pick_empty':request.GET.get('pick_empty', 'all'),
'pick_plan':request.GET.get('pick_plan', 'all')})) #TODO: refactor, merge getting pick_* parameters together
def get_group_tags(request, groupid, bookname):
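    #For each tag of the group, count the public notes of that tag written by group
    #members in the given book; returns a list of {name, private, note_count} dicts.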
group = G.objects.get(id=groupid)
tags_qs = group.tags.all().order_by('name')
SN = getSN(bookname)
tags = []
for tag in tags_qs:
count = SN.objects.filter(tags=tag, owner__in=group.members.all(), private=False).count()
t = {'name':tag.name, 'private':tag.private, 'note_count':count}
tags.append(t)
return tags
@login_required
def group_tag(request, groupid, bookname, tag_name):
group = G.objects.get(id=groupid)
# if bookname == 'scraps':
# note_list = Social_Scrap.objects.filter(tags__name=tag_name, owner__in=group.members.all())
# elif bookname == 'bookmarks':
# note_list = Social_Bookmark.objects.filter(tags__name=tag_name, owner__in=group.members.all())
# else:
# #default snippets
# note_list = Social_Snippet.objects.filter(tags__name=tag_name, owner__in=group.members.all())
#print 'getting notes of tag:', tag_name, ' of bookname:', bookname
note_list = getSN(bookname).objects.filter(tags__name=tag_name, owner__in=group.members.all())
#print 'notes of the tag:', note_list
qstr = __getQStr(request)
note_list = getSearchResults(note_list, qstr)
sort, order_type, paged_notes, cl = __get_notes_context(request, note_list)
tags = get_group_tags(request, groupid, bookname)
profile_member = group#Group.objects.get(name=groupname)
return render_to_response('social/notes/group_notes.html', {'group':group, 'note_list':paged_notes,'sort':sort, 'current_tag':tag_name, 'bookname':bookname,\
'profile_member':profile_member, 'tags':tags, 'appname':'groups', 'cl':cl},\
context_instance=RequestContext(request, {'note_type':bookname_note_type_dict.get(bookname),
'pick_empty':request.GET.get('pick_empty', 'all'),
'pick_plan':request.GET.get('pick_plan', 'all')
}))
@login_required
def vote_useful(request):
note_id = request.POST.get('id')
note = SN.objects.get(id=note_id)
snv, created = Social_Note_Vote.objects.get_or_create(note=note,voter=request.user.member)
snv.useful=True
snv.save()
result = str(note.get_useful_votes())+'/'+str(note.get_total_votes())
return HttpResponse(result, mimetype="text/plain")
@login_required
def vote_unuseful(request):
note_id = request.POST.get('id')
note = SN.objects.get(id=note_id)
snv, created = Social_Note_Vote.objects.get_or_create(note=note,voter=request.user.member)
snv.useful=False
snv.save()
result = str(note.get_useful_votes())+'/'+str(note.get_total_votes())
return HttpResponse(result, mimetype="text/plain")
@login_required
def take(request):
note_id = request.POST.get('id')
sn = SN.objects.get(id=note_id)
#below can be used for track record for now. TODO:
snt, snt_created = Social_Note_Taken.objects.get_or_create(note=sn, taker=request.user.member)
#note is taken into the taker's db, with a tag: takenfrom:owner_name:owner_note_id
t, t_created = Tag.objects.using(request.user.username).get_or_create(name='takenfrom:'+sn.owner.username+':'+str(sn.id))
try:
if sn.get_note_bookname()=='bookmarkbook':
#print 'note is', sn.get_note_bookname()
n, created = getNote(request.user.username, sn.get_note_bookname()).objects.get_or_create(url=sn.social_bookmark.url, desc=sn.desc, title=sn.title)
elif sn.get_note_bookname()=='scrapbook':
#print 'note is', sn.get_note_bookname()
n, created = getNote(request.user.username, sn.get_note_bookname()).objects.get_or_create(url=sn.social_scrap.url, desc=sn.desc, title=sn.title)
else:
#print 'note is', sn.get_note_bookname()
n, created = getNote(request.user.username, sn.get_note_bookname()).objects.get_or_create(desc=sn.desc, title=sn.title)
#print 'created is', created
except MultipleObjectsReturned:
created = False
#print 'created', created
if created:
n.owner_name = request.user.username
n.tags.add(t)
n.save()
return HttpResponse(created, mimetype="text/plain")
@login_required
def add_comment(request):
note_id = request.POST.get('id')
#N = getN(username)
note = SN.objects.get(id=note_id)
content = request.POST.get('content')
#NC = getNC(username)
nc = Social_Note_Comment(note=note, commenter=request.user.member, desc=content)
nc.save()
#send notice to the user
#print 'sending notice'
if notification:
if note.owner != request.user.member:
notification.send([note.owner], "comment_receive", {"from_user": request.user})
#sent notification to other commenters who have commented on this note
other_commenters_ids = note.social_note_comment_set.exclude(commenter=request.user.member).exclude(commenter=note.owner).values_list('commenter', flat=True)
other_commenters_ids = list(set(other_commenters_ids))
other_commenters = Member.objects.filter(id__in=other_commenters_ids)
notification.send(other_commenters, "comment_receive", {"from_user": request.user})
#print 'notices sent'
if '@' in nc.desc:
notebook.notes.util.processAtAndSendNotice(request, nc.desc)
return HttpResponse(simplejson.dumps({'note_id':note_id, 'comment_id':nc.id, 'comment_desc':nc.desc, 'commenter':nc.commenter.username}),
"application/json")
@login_required
def add_course(request):
note_id = request.POST.get('id')
note = SN.objects.get(id=note_id)
url = request.POST.get('url')
nc = Social_Note_Course(note=note, submitter=request.user.member, url=url)
nc.save()
return HttpResponse(simplejson.dumps({'note_id':note_id, 'course_id':nc.id, 'course_url':nc.url, 'submitter':nc.submitter.username}),
"application/json")
from notification.models import Notice
#TODO:check if profile_member is the same as requesting user. If not, don't allow viewing (Currently already hidden in html)
def comments_4_user(request, username):
profile_member = Member.objects.get(username=username)
current_commenter = request.GET.get('commenter', 'all')
comments1 = Social_Note_Comment.objects.filter(note__owner=profile_member).exclude(commenter=profile_member).order_by('-init_date')
comments2 = Social_Note_Comment.objects.filter(note__social_note_comment__commenter=profile_member).exclude(commenter=profile_member).order_by('-init_date')
#print 'comments2:', comments2
comments = comments1 | comments2
comments = comments.distinct()
#try use aggregation to get the count of comments for each commenter TODO:
commenters = list(set(comments.values_list('commenter__username', flat=True)))
commenters.sort()
#print 'commenters', commenters
if current_commenter != 'all':
comments = comments.filter(commenter__username=current_commenter)
#clear notifications related to comment receive
if (not request.user.is_anonymous()) and request.user.username == username:
Notice.objects.filter(notice_type__label='comment_receive', recipient=request.user).update(unseen=False)
return render_to_response('social/commentsfor.html', {'comments':comments,'profile_username':username, 'commenters':commenters, 'current_commenter': current_commenter},\
context_instance=RequestContext(request))
def comments_by_user(request, username):
profile_member = Member.objects.get(username=username)
comments = Social_Note_Comment.objects.filter(commenter=profile_member).order_by('-init_date')
return render_to_response('social/commentsby.html', {'comments':comments, 'profile_username':username},\
context_instance=RequestContext(request))
@login_required
def mentioned(request, username):
#clear notifications related to mention
if (not request.user.is_anonymous()) and request.user.username == username:
Notice.objects.filter(notice_type__label='mentioned', recipient=request.user).update(unseen=False)
return render_to_response('social/mentioned.html', {'profile_username':username},\
context_instance=RequestContext(request))
@login_required
def mentioned_in_note(request, username):
notes = Social_Note.objects.filter(desc__contains=u'@'+username+' ').order_by('-init_date')
return render_to_response('social/mentioned_in_note.html', {'notes':notes,'profile_username':username},\
context_instance=RequestContext(request))
@login_required
def mentioned_in_comment(request, username):
comments = Social_Note_Comment.objects.filter(desc__contains='@'+username+' ').order_by('-init_date')
return render_to_response('social/mentioned_in_comment.html', {'comments':comments, 'profile_username':username},\
context_instance=RequestContext(request))
@login_required
def delete_comment(request):
#note_id = request.POST.get('id')
comment_id = request.POST.get('comment_id')
nc = Social_Note_Comment.objects.get(id=comment_id)
#check permission
if nc.commenter.username == request.user.username:
nc.delete()
else:
pass
return HttpResponse('successful', mimetype="text/plain")
@login_required
def delete_outer_link(request):
#note_id = request.POST.get('id')
outer_link_id = request.POST.get('outer_link_id')
snb = Social_Note_Backlink.objects.get(id=outer_link_id)
#check permission
if snb.note.owner.username == request.user.username:
snb.delete()
else:
pass
return HttpResponse('successful', mimetype="text/plain")
@login_required
def delete_course(request):
#note_id = request.POST.get('id')
course_id = request.POST.get('course_id')
nc = Social_Note_Course.objects.get(id=course_id)
#check permission
if nc.submitter.username == request.user.username or nc.note.owner.username == request.user.username:
nc.delete()
else:
pass
return HttpResponse('successful', mimetype="text/plain")
#bookname should be framebook
@login_required
def get_related_frames(request, bookname, username, note_id):
frame = Social_Frame.objects.get(id=note_id)
return HttpResponse(simplejson.dumps(frame.get_related_frames()), "application/json")
def suggest(request):
if request.method == 'POST':
content = request.POST.get('content')
content = "A suggestion to the site is sent to you by "+request.user.username+":\n"+content
send_mail('suggestions to the site', content.encode('utf8'), u'sys@opensourcelearning.org', [u'sys@opensourcelearning.org'])
messages.success(request, _("Your suggestion is successfully sent to the sys admin! Thank you for your valuable suggestion!"))
return render_to_response('doc/suggest.html', {},\
context_instance=RequestContext(request))
def __get_notes_context(request, note_list):
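    #Apply the view theme and request filters to note_list, order it (by a model
    #field, by usefulness votes, or by relevance to the current tag path) and
    #paginate it. Returns (order_field, order_type, paged_notes, cl).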
    log.debug('__get_notes_context called')
theme = __get_view_theme(request)
#view_mode = theme['view_mode']
order_field = theme['sort']
#delete = theme['delete']
#private = theme['private']
#date_range = theme['date_range']
#with_attachment = theme['with_attachment']
#
# if delete in true_words:
# note_list = note_list.filter(deleted=True)
# else:
# note_list = note_list.filter(deleted=False)
#
# if private in ['All', 'all']:
# pass
# else:
# if private in true_words:
# note_list = note_list.filter(private=True)
# else:
# note_list = note_list.filter(private=False)
# now = date.today()
#
# )
# if date_range in ['All', 'all']:
# pass
# elif date_range == 'today':
# note_list = note_list.filter(init_date__day= now.day, init_date__month=now.month, init_date__year= now.year)
# elif date_range == 'past7days':
# one_week_ago = now - datetime.timedelta(days=7)
# note_list = note_list.filter(init_date__gte=one_week_ago.strftime('%Y-%m-%d'), init_date__lte=now.strftime('%Y-%m-%d 23:59:59'))
# elif date_range == 'this_month':
# note_list = note_list.filter(init_date__month=now.month, init_date__year= now.year)
# elif date_range == 'this_year':
# note_list = note_list.filter(init_date__year= now.year)
#
#
# if with_attachment in ['All', 'all']:
# pass
# elif with_attachment in true_words:
# try:
# note_list = note_list.filter(attachment__startswith='noteattachments/') #TODO: hard coded, change
# except FieldError:
# pass
pick_empty = 'all'
#if bookname == 'framebook':
pick_empty = request.GET.get('pick_empty', 'all')
if pick_empty in true_words:
#below won't cause any trouble for bookname other than framebook, since it will not be called
note_list = note_list.filter(notes=None)
#print 'empty frmame list:', [(n.id, n.private, n.deleted) for n in note_list]
elif pick_empty in false_words:
note_list = note_list.exclude(notes=None)
pick_plan = request.GET.get('pick_plan', 'all')
if pick_plan == 'w':
note_list = note_list.filter(title__startswith='Weekly Plan:')
#print 'empty frmame list:', [(n.id, n.private, n.deleted) for n in note_list]
elif pick_plan == 'm':
note_list = note_list.filter(title__startswith='Monthly Plan:')
order_type = request.GET.get('order_type','desc')
#===============================================================================
# if order_field == 'relevance':
# order_field = 'init_date'
#===============================================================================
    log.debug('order_field: %s' % order_field)
#order_field that was set when triggering notes list view for tagframe app need to be reset
if order_field == 'relevance' and not (hasattr(request,'appname') and request.appname == 'tagframe'):
order_field = 'init_date'
sorted_note_list = note_list
if order_field not in ['usefulness', 'relevance'] :
sorted_note_list = note_list.order_by('%s%s' % ((order_type == 'desc' and '-' or ''), order_field),'-init_date','desc')
elif order_field == 'relevance':
#sort by relevance
#for ordering by relevance, order by vote first so that vote can be the secondary ordering after relevance :
note_list = note_list.order_by('-vote')
#it is too much computation to compute the relevance for each note. So cut the size if there are more than 100 notes.
#This is not right! It still need to sort by relevance first to get the first 100 more relevant notes.
#TODO: create another table to store the relevance values for notes that are in the tag tree and do the computation separately every day.
if len(note_list) > 100:
note_list = note_list[:100]
request.limited = True
sorted_notes = [(note, note.get_relevance(request.tag_path)) for note in note_list]
sorted_notes.sort(key=lambda r: r[1],reverse = True)
for item in sorted_notes:
item[0].relevance = item[1]
            log.debug('relevance is %s' % item[1])
sorted_note_list = [r[0] for r in sorted_notes]
else:
#Social_Note has usefulness
sorted_notes = [(note, note.get_usefulness()) for note in note_list]
sorted_notes.sort(key=lambda r: r[1],reverse = (order_type == 'desc' and True or False))
sorted_note_list = [r[0] for r in sorted_notes]
#for social pages, only show 20 notes per page since it is not edited much
list_per_page = 20
paginator = Paginator(sorted_note_list, list_per_page)
#using code from the admin module
cl = Pl(request)
from django.contrib.admin.views.main import MAX_SHOW_ALL_ALLOWED
cl.show_all = ALL_VAR in request.GET
result_count = paginator.count
cl.can_show_all = result_count <= MAX_SHOW_ALL_ALLOWED
cl.multi_page = result_count > list_per_page
if (cl.show_all and cl.can_show_all):
paginator = Paginator(sorted_note_list, MAX_SHOW_ALL_ALLOWED )
# Make sure page request is an int. If not, deliver first page.
try:
page = int(request.GET.get('p', '1'))
except ValueError:
page = 1
# If page request (9999) is out of range, deliver last page of results.
try:
paged_notes = paginator.page(page)
except (EmptyPage, InvalidPage):
paged_notes = paginator.page(paginator.num_pages)
cl.paginator = paginator
cl.page_num = page
cl.result_count = len(sorted_note_list)
return order_field, order_type, paged_notes, cl
|
yejia/osl_notebook
|
social/views.py
|
Python
|
mit
| 62,150 | 0.02148 |
import os.path
import pygame
from pygame.locals import *
from tecles import *
pygame.init()
class Joc (object):
WIDTH = 600
HEIGHT = 400
screen = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)
background = pygame.image.load(os.path.join("Imatges","fons.jpg"))
clock = pygame.time.Clock()
dt = 0.05
puntsR = 0
puntsL = 0
font = pygame.font.SysFont("Arial", 20)
quit = False
palaL = None
palaR = None
pilota = None
pales = pygame.sprite.Group()
pilotes = pygame.sprite.Group()
    @staticmethod
    def toggle_quit():
Joc.quit = not Joc.quit
    @staticmethod
    def gol():
for pilota in Joc.pilotes.sprites():
if pilota.posicio[0] > Joc.WIDTH:
Joc.puntsR += 1
print(Joc.puntsL, Joc.puntsR)
pilota.restart()
elif pilota.posicio[0] < 0:
Joc.puntsL += 1
print(Joc.puntsL, Joc.puntsR)
pilota.restart()
    @staticmethod
    def main_loop():
while not Joc.quit:
for event in pygame.event.get():
if event.type == KEYUP or event.type == KEYDOWN:
handle_keys(event,Joc)
elif event.type == QUIT:
Joc.quit = True
Joc.pales.update()
Joc.pilotes.update()
Joc.screen.blit(Joc.background,(0,0))
Joc.pilotes.draw(Joc.screen)
Joc.pales.draw(Joc.screen)
Joc.gol()
pygame.display.update()
Joc.dt = Joc.clock.tick() / 10
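# Hypothetical usage (not part of the original file): the class acts as a plain
# namespace of static helpers, so a caller would presumably start the game with
# Joc.main_loop().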
|
heepaz/rePong
|
joc.py
|
Python
|
gpl-3.0
| 1,394 | 0.031564 |
"""Produces the catalogue page for the VPHAS website."""
import os
import json
import datetime
#import httplib
from astropy import log
from jinja2 import Environment, FileSystemLoader
from surveytools import SURVEYTOOLS_DATA
# Load the column definitions from the JSON file
filename_columns = os.path.join(SURVEYTOOLS_DATA, 'vphas-columns.json')
columns = json.loads(open(filename_columns).read())
def get_filesize(host, url):
"""Returns the filesize of a remote document."""
return 0
"""
conn = httplib.HTTPConnection(host)
conn.request("HEAD", url)
res = conn.getresponse()
try:
size = float(res.getheader('content-length')) / (1024.*1024.)
except Exception, e:
print 'Failed {0}'.format(url)
print e
if size < 950:
result = '{0:.0f} MB'.format(size)
else:
result = '{0:.01f} GB'.format(size/1024.)
log.info('Filesize of {0} = {1}'.format(url, result))
return result
"""
args = {'host': 'www.vphas.org',
'dir_light': '/data/dr2/light',
'dir_full': '/data/dr2/full',
'last_update': datetime.datetime.utcnow().strftime("%Y-%m-%d"),
'columns': columns}
if __name__ == '__main__':
env = Environment(loader=FileSystemLoader('.'),
trim_blocks=True,
lstrip_blocks=True)
env.globals['get_filesize'] = get_filesize
template = env.get_template('dr2-template.html')
with open("vphas-dr2.shtml", "w") as f:
f.write(template.render(args))
|
barentsen/surveytools
|
scripts/website/create-vphas-dr2-page.py
|
Python
|
mit
| 1,542 | 0.000649 |
import sys
from indra import reach
from indra.assemblers import GraphAssembler
orig_txt = [ln.strip() for ln in open('ras_pathway.txt', 'rt').readlines()]
correct_txt = [ln.strip() for ln in open('correction.txt', 'rt').readlines()]
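# correction.txt is a diff-style list: lines starting with "< " name lines to remove
# from ras_pathway.txt, and lines starting with "> " name lines to append to it.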
for ln in correct_txt:
if ln.startswith('<'):
remove_line = ln[2:]
orig_txt.remove(remove_line)
elif ln.startswith('>'):
add_line = ln[2:]
orig_txt.append(add_line)
txt = ' '.join(orig_txt)
rp = reach.process_text(txt, offline=True)
st = rp.statements
for s in st:
print '%s\t%s' % (s, s.evidence[0].text)
graphpr = {'rankdir': 'TD'}
nodepr = {'fontsize': 12, 'shape': 'plaintext', 'margin': '0,0', 'pad': 0}
ga = GraphAssembler(st, graph_properties=graphpr, node_properties=nodepr)
ga.make_model()
ga.save_dot('rps6ka_correction.dot')
ga.save_pdf('rps6k1_correction.pdf')
|
jmuhlich/indra
|
models/ras_pathway/run_correction.py
|
Python
|
bsd-2-clause
| 855 | 0 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import multiprocessing
import os
import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
from ansible.playbook.block import Block
from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader, module_loader
from ansible.template import Templar
from ansible.vars.hostvars import HostVars
from ansible.plugins.callback import CallbackBase
from ansible.utils.unicode import to_unicode
from ansible.compat.six import string_types
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['TaskQueueManager']
class TaskQueueManager:
'''
This class handles the multiprocessing requirements of Ansible by
creating a pool of worker forks, a result handler fork, and a
manager object with shared datastructures/queues for coordinating
work between all processes.
The queue manager is responsible for loading the play strategy plugin,
which dispatches the Play's tasks to hosts.
'''
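    # Typical use (a sketch, not part of the original source): build a
    # TaskQueueManager, call run() once per play, then cleanup(), e.g.
    #
    #   tqm = TaskQueueManager(inventory, variable_manager, loader, options, passwords)
    #   try:
    #       for play in playbook.get_plays():
    #           tqm.run(play)
    #   finally:
    #       tqm.cleanup()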
def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False):
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._options = options
self._stats = AggregateStats()
self.passwords = passwords
self._stdout_callback = stdout_callback
self._run_additional_callbacks = run_additional_callbacks
self._run_tree = run_tree
self._callbacks_loaded = False
self._callback_plugins = []
self._start_at_done = False
self._result_prc = None
# make sure the module path (if specified) is parsed and
# added to the module_loader object
if options.module_path is not None:
for path in options.module_path.split(os.pathsep):
module_loader.add_directory(path)
# a special flag to help us exit cleanly
self._terminated = False
# this dictionary is used to keep track of notified handlers
self._notified_handlers = dict()
# dictionaries to keep track of failed/unreachable hosts
self._failed_hosts = dict()
self._unreachable_hosts = dict()
self._final_q = multiprocessing.Queue()
# A temporary file (opened pre-fork) used by connection
# plugins for inter-process locking.
self._connection_lockfile = tempfile.TemporaryFile()
def _initialize_processes(self, num):
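        # Each worker slot is a [process, result_queue] pair; the process element
        # starts out as None and is presumably filled in later by the strategy,
        # while the ResultProcess started below forwards per-worker results to the
        # final queue.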
self._workers = []
for i in range(num):
rslt_q = multiprocessing.Queue()
self._workers.append([None, rslt_q])
self._result_prc = ResultProcess(self._final_q, self._workers)
self._result_prc.start()
def _initialize_notified_handlers(self, handlers):
'''
Clears and initializes the shared notified handlers dict with entries
for each handler in the play, which is an empty array that will contain
inventory hostnames for those hosts triggering the handler.
'''
# Zero the dictionary first by removing any entries there.
# Proxied dicts don't support iteritems, so we have to use keys()
for key in self._notified_handlers.keys():
del self._notified_handlers[key]
def _process_block(b):
temp_list = []
for t in b.block:
if isinstance(t, Block):
temp_list.extend(_process_block(t))
else:
temp_list.append(t)
return temp_list
handler_list = []
for handler_block in handlers:
handler_list.extend(_process_block(handler_block))
# then initialize it with the handler names from the handler list
for handler in handler_list:
self._notified_handlers[handler.get_name()] = []
def load_callbacks(self):
'''
Loads all available callbacks, with the exception of those which
utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
only one such callback plugin will be loaded.
'''
if self._callbacks_loaded:
return
stdout_callback_loaded = False
if self._stdout_callback is None:
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
if isinstance(self._stdout_callback, CallbackBase):
stdout_callback_loaded = True
elif isinstance(self._stdout_callback, string_types):
if self._stdout_callback not in callback_loader:
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
else:
self._stdout_callback = callback_loader.get(self._stdout_callback)
stdout_callback_loaded = True
else:
raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
for callback_plugin in callback_loader.all(class_only=True):
if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
# we only allow one callback of type 'stdout' to be loaded, so check
# the name of the current plugin and type to see if we need to skip
# loading this callback plugin
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
callback_needs_whitelist = getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False)
(callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
if callback_type == 'stdout':
if callback_name != self._stdout_callback or stdout_callback_loaded:
continue
stdout_callback_loaded = True
elif callback_name == 'tree' and self._run_tree:
pass
elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
continue
self._callback_plugins.append(callback_plugin())
self._callbacks_loaded = True
def run(self, play):
'''
Iterates over the roles/tasks in a play, using the given (or default)
strategy for queueing tasks. The default is the linear strategy, which
operates like classic Ansible by keeping all hosts in lock-step with
a given task (meaning no hosts move on to the next task until all hosts
are done with the current task).
'''
if not self._callbacks_loaded:
self.load_callbacks()
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
self.hostvars = HostVars(
inventory=self._inventory,
variable_manager=self._variable_manager,
loader=self._loader,
)
# Fork # of forks, # of hosts or serial, whichever is lowest
contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))]
contenders = [ v for v in contenders if v is not None and v > 0 ]
self._initialize_processes(min(contenders))
play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_play_context'):
callback_plugin.set_play_context(play_context)
self.send_callback('v2_playbook_on_play_start', new_play)
# initialize the shared dictionary containing the notified handlers
self._initialize_notified_handlers(new_play.handlers)
# load the specified strategy (or the default linear one)
strategy = strategy_loader.get(new_play.strategy, self)
if strategy is None:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# build the iterator
iterator = PlayIterator(
inventory=self._inventory,
play=new_play,
play_context=play_context,
variable_manager=self._variable_manager,
all_vars=all_vars,
start_at_done = self._start_at_done,
)
# during initialization, the PlayContext will clear the start_at_task
# field to signal that a matching task was found, so check that here
# and remember it so we don't try to skip tasks on future plays
if getattr(self._options, 'start_at_task', None) is not None and play_context.start_at_task is None:
self._start_at_done = True
# and run the play using the strategy and cleanup on way out
play_return = strategy.run(iterator, play_context)
self._cleanup_processes()
return play_return
def cleanup(self):
display.debug("RUNNING CLEANUP")
self.terminate()
self._final_q.close()
self._cleanup_processes()
def _cleanup_processes(self):
if self._result_prc:
self._result_prc.terminate()
for (worker_prc, rslt_q) in self._workers:
rslt_q.close()
if worker_prc and worker_prc.is_alive():
try:
worker_prc.terminate()
except AttributeError:
pass
def clear_failed_hosts(self):
self._failed_hosts = dict()
def get_inventory(self):
return self._inventory
def get_variable_manager(self):
return self._variable_manager
def get_loader(self):
return self._loader
def get_notified_handlers(self):
return self._notified_handlers
def get_workers(self):
return self._workers[:]
def terminate(self):
self._terminated = True
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in [self._stdout_callback] + self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
continue
# try to find v2 method, fallback to v1 method, ignore callback if no method found
methods = []
for possible in [method_name, 'v2_on_any']:
gotit = getattr(callback_plugin, possible, None)
if gotit is None:
gotit = getattr(callback_plugin, possible.replace('v2_',''), None)
if gotit is not None:
methods.append(gotit)
for method in methods:
try:
# temporary hack, required due to a change in the callback API, so
# we don't break backwards compatibility with callbacks which were
# designed to use the original API
# FIXME: target for removal and revert to the original code here after a year (2017-01-14)
if method_name == 'v2_playbook_on_start':
import inspect
(f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method)
if 'playbook' in f_args:
method(*args, **kwargs)
else:
method()
else:
method(*args, **kwargs)
except Exception as e:
#TODO: add config toggle to make this fatal or not?
display.warning(u"Failure when attempting to use callback plugin (%s): %s" % (to_unicode(callback_plugin), to_unicode(e)))
|
reedloden/ansible
|
lib/ansible/executor/task_queue_manager.py
|
Python
|
gpl-3.0
| 13,130 | 0.003427 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-30 17:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wunderlist', '0011_auto_20151230_1843'),
]
operations = [
migrations.AlterField(
model_name='connection',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='connections', to=settings.AUTH_USER_MODEL),
),
]
|
passuf/WunderHabit
|
wunderlist/migrations/0012_auto_20151230_1853.py
|
Python
|
mit
| 631 | 0.001585 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import errno
from os.path import isdir, isfile, join, dirname
import random
import shutil
import time
import itertools
from six import viewkeys
import six.moves.cPickle as pickle
from swift import gettext_ as _
import eventlet
from eventlet import GreenPool, tpool, Timeout, sleep, hubs
from eventlet.green import subprocess
from eventlet.support.greenlets import GreenletExit
from swift.common.ring.utils import is_local_device
from swift.common.utils import whataremyips, unlink_older_than, \
compute_eta, get_logger, dump_recon_cache, ismount, \
rsync_module_interpolation, mkdirs, config_true_value, list_from_csv, \
get_hub, tpool_reraise, config_auto_int_value, storage_directory
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
from swift.obj import ssync_sender
from swift.obj.diskfile import get_data_dir, get_tmp_dir, DiskFileRouter
from swift.common.storage_policy import POLICIES, REPL_POLICY
DEFAULT_RSYNC_TIMEOUT = 900
hubs.use_hub(get_hub())
def _do_listdir(partition, replication_cycle):
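    # Stagger the more expensive full-directory listings so that any given
    # partition only does one roughly every ten replication cycles.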
return (((partition + replication_cycle) % 10) == 0)
class ObjectReplicator(Daemon):
"""
Replicate objects.
Encapsulates most logic and data needed by the object replication process.
Each call to .replicate() performs one replication pass. It's up to the
caller to do this in a loop.
"""
def __init__(self, conf, logger=None):
"""
:param conf: configuration object obtained from ConfigParser
:param logger: logging object
"""
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-replicator')
self.devices_dir = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.bind_ip = conf.get('bind_ip', '0.0.0.0')
self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
self.port = None if self.servers_per_port else \
int(conf.get('bind_port', 6200))
self.concurrency = int(conf.get('concurrency', 1))
self.stats_interval = int(conf.get('stats_interval', '300'))
self.ring_check_interval = int(conf.get('ring_check_interval', 15))
self.next_check = time.time() + self.ring_check_interval
self.replication_cycle = random.randint(0, 9)
self.partition_times = []
self.interval = int(conf.get('interval') or
conf.get('run_pause') or 30)
self.rsync_timeout = int(conf.get('rsync_timeout',
DEFAULT_RSYNC_TIMEOUT))
self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
self.rsync_compress = config_true_value(
conf.get('rsync_compress', 'no'))
self.rsync_module = conf.get('rsync_module', '').rstrip('/')
if not self.rsync_module:
self.rsync_module = '{replication_ip}::object'
self.http_timeout = int(conf.get('http_timeout', 60))
self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, "object.recon")
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.node_timeout = float(conf.get('node_timeout', 10))
self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.default_headers = {
'Content-Length': '0',
'user-agent': 'object-replicator %s' % os.getpid()}
self.rsync_error_log_line_length = \
int(conf.get('rsync_error_log_line_length', 0))
self.handoffs_first = config_true_value(conf.get('handoffs_first',
False))
self.handoff_delete = config_auto_int_value(
conf.get('handoff_delete', 'auto'), 0)
if any((self.handoff_delete, self.handoffs_first)):
self.logger.warning('Handoff only mode is not intended for normal '
'operation, please disable handoffs_first and '
'handoff_delete before the next '
'normal rebalance')
self._df_router = DiskFileRouter(conf, self.logger)
def _zero_stats(self):
"""Zero out the stats."""
self.stats = {'attempted': 0, 'success': 0, 'failure': 0,
'hashmatch': 0, 'rsync': 0, 'remove': 0,
'start': time.time(), 'failure_nodes': {}}
def _add_failure_stats(self, failure_devs_info):
for node, dev in failure_devs_info:
self.stats['failure'] += 1
failure_devs = self.stats['failure_nodes'].setdefault(node, {})
failure_devs.setdefault(dev, 0)
failure_devs[dev] += 1
def _get_my_replication_ips(self):
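        # Collect the replication IPs of every local device (an IP of this host on
        # this server's replication port) across all storage policies.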
my_replication_ips = set()
ips = whataremyips()
for policy in POLICIES:
self.load_object_ring(policy)
for local_dev in [dev for dev in policy.object_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] == self.port]:
my_replication_ips.add(local_dev['replication_ip'])
return list(my_replication_ips)
# Just exists for doc anchor point
def sync(self, node, job, suffixes, *args, **kwargs):
"""
Synchronize local suffix directories from a partition with a remote
node.
:param node: the "dev" entry for the remote node to sync with
:param job: information about the partition being synced
:param suffixes: a list of suffixes which need to be pushed
:returns: boolean and dictionary, boolean indicating success or failure
"""
return self.sync_method(node, job, suffixes, *args, **kwargs)
def load_object_ring(self, policy):
"""
Make sure the policy's rings are loaded.
:param policy: the StoragePolicy instance
:returns: appropriate ring object
"""
policy.load_ring(self.swift_dir)
return policy.object_ring
def _rsync(self, args):
"""
Execute the rsync binary to replicate a partition.
:returns: return code of rsync process. 0 is successful
"""
start_time = time.time()
ret_val = None
try:
with Timeout(self.rsync_timeout):
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
results = proc.stdout.read()
ret_val = proc.wait()
except Timeout:
self.logger.error(_("Killing long-running rsync: %s"), str(args))
proc.kill()
return 1 # failure response code
total_time = time.time() - start_time
for result in results.split('\n'):
if result == '':
continue
if result.startswith('cd+'):
continue
if not ret_val:
self.logger.info(result)
else:
self.logger.error(result)
if ret_val:
error_line = _('Bad rsync return code: %(ret)d <- %(args)s') % \
{'args': str(args), 'ret': ret_val}
if self.rsync_error_log_line_length:
error_line = error_line[:self.rsync_error_log_line_length]
self.logger.error(error_line)
else:
log_method = self.logger.info if results else self.logger.debug
log_method(
_("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
{'src': args[-2], 'dst': args[-1], 'time': total_time})
return ret_val
def rsync(self, node, job, suffixes):
"""
Uses rsync to implement the sync method. This was the first
sync method in Swift.
"""
if not os.path.exists(job['path']):
return False, {}
args = [
'rsync',
'--recursive',
'--whole-file',
'--human-readable',
'--xattrs',
'--itemize-changes',
'--ignore-existing',
'--timeout=%s' % self.rsync_io_timeout,
'--contimeout=%s' % self.rsync_io_timeout,
'--bwlimit=%s' % self.rsync_bwlimit,
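            # Exclude dot-files ending in a six-character random suffix
            # (".<name>.XXXXXX"), the naming shape rsync uses for partial
            # temp files, so leftovers from interrupted transfers are never
            # replicated as real objects.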
'--exclude=.*.%s' % ''.join('[0-9a-zA-Z]' for i in range(6))
]
if self.rsync_compress and \
job['region'] != node['region']:
# Allow for compression, but only if the remote node is in
# a different region than the local one.
args.append('--compress')
rsync_module = rsync_module_interpolation(self.rsync_module, node)
had_any = False
for suffix in suffixes:
spath = join(job['path'], suffix)
if os.path.exists(spath):
args.append(spath)
had_any = True
if not had_any:
return False, {}
data_dir = get_data_dir(job['policy'])
args.append(join(rsync_module, node['device'],
data_dir, job['partition']))
return self._rsync(args) == 0, {}
def ssync(self, node, job, suffixes, remote_check_objs=None):
return ssync_sender.Sender(
self, node, job, suffixes, remote_check_objs)()
def check_ring(self, object_ring):
"""
Check to see if the ring has been updated
:param object_ring: the ring to check
:returns: boolean indicating whether or not the ring has changed
"""
if time.time() > self.next_check:
self.next_check = time.time() + self.ring_check_interval
if object_ring.has_changed():
return False
return True
def update_deleted(self, job):
"""
High-level method that replicates a single partition that doesn't
belong on this node.
:param job: a dict containing info about the partition to be replicated
"""
def tpool_get_suffixes(path):
return [suff for suff in os.listdir(path)
if len(suff) == 3 and isdir(join(path, suff))]
self.replication_count += 1
self.logger.increment('partition.delete.count.%s' % (job['device'],))
headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
failure_devs_info = set()
begin = time.time()
handoff_partition_deleted = False
try:
responses = []
suffixes = tpool.execute(tpool_get_suffixes, job['path'])
synced_remote_regions = {}
delete_objs = None
if suffixes:
for node in job['nodes']:
self.stats['rsync'] += 1
kwargs = {}
if node['region'] in synced_remote_regions and \
self.conf.get('sync_method', 'rsync') == 'ssync':
kwargs['remote_check_objs'] = \
synced_remote_regions[node['region']]
# candidates is a dict(hash=>timestamp) of objects
# for deletion
success, candidates = self.sync(
node, job, suffixes, **kwargs)
if success:
with Timeout(self.http_timeout):
conn = http_connect(
node['replication_ip'],
node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(suffixes), headers=headers)
conn.getresponse().read()
if node['region'] != job['region']:
synced_remote_regions[node['region']] = viewkeys(
candidates)
else:
failure_devs_info.add((node['replication_ip'],
node['device']))
responses.append(success)
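                # Keep only objects confirmed in every synced remote region
                # as deletion candidates (set intersection below).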
for cand_objs in synced_remote_regions.values():
if delete_objs is None:
delete_objs = cand_objs
else:
delete_objs = delete_objs & cand_objs
if self.handoff_delete:
# delete handoff if we have had handoff_delete successes
delete_handoff = len([resp for resp in responses if resp]) >= \
self.handoff_delete
else:
# delete handoff if all syncs were successful
delete_handoff = len(responses) == len(job['nodes']) and \
all(responses)
if delete_handoff:
self.stats['remove'] += 1
if (self.conf.get('sync_method', 'rsync') == 'ssync' and
delete_objs is not None):
self.logger.info(_("Removing %s objects"),
len(delete_objs))
_junk, error_paths = self.delete_handoff_objs(
job, delete_objs)
                    # If cleanup after replicating from this hand-off device
                    # fails, mark the remote devices that were the targets of
                    # the replication as failures: a failed cleanup means the
                    # replicator needs to run again with the same info.
if error_paths:
failure_devs_info.update(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in job['nodes']])
else:
self.delete_partition(job['path'])
handoff_partition_deleted = True
elif not suffixes:
self.delete_partition(job['path'])
handoff_partition_deleted = True
except (Exception, Timeout):
self.logger.exception(_("Error syncing handoff partition"))
self._add_failure_stats(failure_devs_info)
finally:
target_devs_info = set([(target_dev['replication_ip'],
target_dev['device'])
for target_dev in job['nodes']])
self.stats['success'] += len(target_devs_info - failure_devs_info)
if not handoff_partition_deleted:
self.handoffs_remaining += 1
self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.delete.timing', begin)
def delete_partition(self, path):
self.logger.info(_("Removing partition: %s"), path)
tpool.execute(shutil.rmtree, path)
def delete_handoff_objs(self, job, delete_objs):
success_paths = []
error_paths = []
for object_hash in delete_objs:
object_path = storage_directory(job['obj_path'], job['partition'],
object_hash)
tpool.execute(shutil.rmtree, object_path, ignore_errors=True)
suffix_dir = dirname(object_path)
try:
os.rmdir(suffix_dir)
success_paths.append(object_path)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
error_paths.append(object_path)
self.logger.exception(
"Unexpected error trying to cleanup suffix dir:%r",
suffix_dir)
return success_paths, error_paths
def update(self, job):
"""
High-level method that replicates a single partition.
:param job: a dict containing info about the partition to be replicated
"""
self.replication_count += 1
self.logger.increment('partition.update.count.%s' % (job['device'],))
headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
target_devs_info = set()
failure_devs_info = set()
begin = time.time()
df_mgr = self._df_router[job['policy']]
try:
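            # Fetch the (possibly cached) suffix hashes for this partition;
            # _do_listdir forces a full listdir for a subset of partitions
            # each replication cycle so the cache gets rebuilt over time.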
hashed, local_hash = tpool_reraise(
df_mgr._get_hashes, job['device'],
job['partition'], job['policy'],
do_listdir=_do_listdir(
int(job['partition']),
self.replication_cycle))
self.suffix_hash += hashed
self.logger.update_stats('suffix.hashes', hashed)
attempts_left = len(job['nodes'])
synced_remote_regions = set()
random.shuffle(job['nodes'])
nodes = itertools.chain(
job['nodes'],
job['policy'].object_ring.get_more_nodes(
int(job['partition'])))
while attempts_left > 0:
# If this throws StopIteration it will be caught way below
node = next(nodes)
target_devs_info.add((node['replication_ip'], node['device']))
attempts_left -= 1
# if we have already synced to this remote region,
# don't sync again on this replication pass
if node['region'] in synced_remote_regions:
continue
try:
with Timeout(self.http_timeout):
resp = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'', headers=headers).getresponse()
if resp.status == HTTP_INSUFFICIENT_STORAGE:
self.logger.error(
_('%(replication_ip)s/%(device)s '
'responded as unmounted'), node)
attempts_left += 1
failure_devs_info.add((node['replication_ip'],
node['device']))
continue
if resp.status != HTTP_OK:
self.logger.error(_("Invalid response %(resp)s "
"from %(ip)s"),
{'resp': resp.status,
'ip': node['replication_ip']})
failure_devs_info.add((node['replication_ip'],
node['device']))
continue
remote_hash = pickle.loads(resp.read())
del resp
suffixes = [suffix for suffix in local_hash if
local_hash[suffix] !=
remote_hash.get(suffix, -1)]
if not suffixes:
self.stats['hashmatch'] += 1
continue
hashed, recalc_hash = tpool_reraise(
df_mgr._get_hashes,
job['device'], job['partition'], job['policy'],
recalculate=suffixes)
self.logger.update_stats('suffix.hashes', hashed)
local_hash = recalc_hash
suffixes = [suffix for suffix in local_hash if
local_hash[suffix] !=
remote_hash.get(suffix, -1)]
self.stats['rsync'] += 1
success, _junk = self.sync(node, job, suffixes)
with Timeout(self.http_timeout):
conn = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(suffixes),
headers=headers)
conn.getresponse().read()
if not success:
failure_devs_info.add((node['replication_ip'],
node['device']))
# add only remote region when replicate succeeded
if success and node['region'] != job['region']:
synced_remote_regions.add(node['region'])
self.suffix_sync += len(suffixes)
self.logger.update_stats('suffix.syncs', len(suffixes))
except (Exception, Timeout):
failure_devs_info.add((node['replication_ip'],
node['device']))
self.logger.exception(_("Error syncing with node: %s") %
node)
self.suffix_count += len(local_hash)
except (Exception, Timeout):
failure_devs_info.update(target_devs_info)
self._add_failure_stats(failure_devs_info)
self.logger.exception(_("Error syncing partition"))
finally:
self.stats['success'] += len(target_devs_info - failure_devs_info)
self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.update.timing', begin)
def stats_line(self):
"""
Logs various stats for the currently running replication pass.
"""
if self.replication_count:
elapsed = (time.time() - self.start) or 0.000001
rate = self.replication_count / elapsed
self.logger.info(
_("%(replicated)d/%(total)d (%(percentage).2f%%)"
" partitions replicated in %(time).2fs (%(rate).2f/sec, "
"%(remaining)s remaining)"),
{'replicated': self.replication_count, 'total': self.job_count,
'percentage': self.replication_count * 100.0 / self.job_count,
'time': time.time() - self.start, 'rate': rate,
'remaining': '%d%s' % compute_eta(self.start,
self.replication_count,
self.job_count)})
self.logger.info(_('%(success)s successes, %(failure)s failures')
% self.stats)
if self.suffix_count:
self.logger.info(
_("%(checked)d suffixes checked - "
"%(hashed).2f%% hashed, %(synced).2f%% synced"),
{'checked': self.suffix_count,
'hashed': (self.suffix_hash * 100.0) / self.suffix_count,
'synced': (self.suffix_sync * 100.0) / self.suffix_count})
self.partition_times.sort()
self.logger.info(
_("Partition times: max %(max).4fs, "
"min %(min).4fs, med %(med).4fs"),
{'max': self.partition_times[-1],
'min': self.partition_times[0],
'med': self.partition_times[
len(self.partition_times) // 2]})
else:
self.logger.info(
_("Nothing replicated for %s seconds."),
(time.time() - self.start))
def kill_coros(self):
"""Utility function that kills all coroutines currently running."""
for coro in list(self.run_pool.coroutines_running):
try:
coro.kill(GreenletExit)
except GreenletExit:
pass
def heartbeat(self):
"""
Loop that runs in the background during replication. It periodically
logs progress.
"""
while True:
eventlet.sleep(self.stats_interval)
self.stats_line()
def detect_lockups(self):
"""
In testing, the pool.waitall() call very occasionally failed to return.
        This is an attempt to make sure the replicator eventually finishes its
        replication pass even if that call hangs.
"""
while True:
eventlet.sleep(self.lockup_timeout)
if self.replication_count == self.last_replication_count:
self.logger.error(_("Lockup detected.. killing live coros."))
self.kill_coros()
self.last_replication_count = self.replication_count
def build_replication_jobs(self, policy, ips, override_devices=None,
override_partitions=None):
"""
Helper function for collect_jobs to build jobs for replication
using replication style storage policy
"""
jobs = []
df_mgr = self._df_router[policy]
self.all_devs_info.update(
[(dev['replication_ip'], dev['device'])
for dev in policy.object_ring.devs if dev])
data_dir = get_data_dir(policy)
found_local = False
for local_dev in [dev for dev in policy.object_ring.devs
if (dev
and is_local_device(ips,
self.port,
dev['replication_ip'],
dev['replication_port'])
and (override_devices is None
or dev['device'] in override_devices))]:
found_local = True
dev_path = join(self.devices_dir, local_dev['device'])
obj_path = join(dev_path, data_dir)
tmp_path = join(dev_path, get_tmp_dir(policy))
if self.mount_check and not ismount(dev_path):
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
self.logger.warning(
_('%s is not mounted'), local_dev['device'])
continue
unlink_older_than(tmp_path, time.time() -
df_mgr.reclaim_age)
if not os.path.exists(obj_path):
try:
mkdirs(obj_path)
except Exception:
self.logger.exception('ERROR creating %s' % obj_path)
continue
for partition in os.listdir(obj_path):
if (override_partitions is not None
and partition not in override_partitions):
continue
if (partition.startswith('auditor_status_') and
partition.endswith('.json')):
# ignore auditor status files
continue
part_nodes = None
try:
job_path = join(obj_path, partition)
part_nodes = policy.object_ring.get_part_nodes(
int(partition))
nodes = [node for node in part_nodes
if node['id'] != local_dev['id']]
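                    # If this device is not one of the partition's primary
                    # nodes, "nodes" keeps every entry of part_nodes, so the
                    # delete flag below marks the job as a hand-off to remove
                    # once it has been replicated.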
jobs.append(
dict(path=job_path,
device=local_dev['device'],
obj_path=obj_path,
nodes=nodes,
delete=len(nodes) > len(part_nodes) - 1,
policy=policy,
partition=partition,
region=local_dev['region']))
except ValueError:
if part_nodes:
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in nodes])
else:
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
continue
if not found_local:
self.logger.error("Can't find itself in policy with index %d with"
" ips %s and with port %s in ring file, not"
" replicating",
int(policy), ", ".join(ips), self.port)
return jobs
def collect_jobs(self, override_devices=None, override_partitions=None,
override_policies=None):
"""
Returns a sorted list of jobs (dictionaries) that specify the
partitions, nodes, etc to be rsynced.
:param override_devices: if set, only jobs on these devices
will be returned
:param override_partitions: if set, only jobs on these partitions
will be returned
:param override_policies: if set, only jobs in these storage
policies will be returned
"""
jobs = []
ips = whataremyips(self.bind_ip)
for policy in POLICIES:
# Skip replication if next_part_power is set. In this case
# every object is hard-linked twice, but the replicator can't
# detect them and would create a second copy of the file if not
# yet existing - and this might double the actual transferred
# and stored data
next_part_power = getattr(
policy.object_ring, 'next_part_power', None)
if next_part_power is not None:
self.logger.warning(
_("next_part_power set in policy '%s'. Skipping"),
policy.name)
continue
if policy.policy_type == REPL_POLICY:
if (override_policies is not None and
str(policy.idx) not in override_policies):
continue
# ensure rings are loaded for policy
self.load_object_ring(policy)
jobs += self.build_replication_jobs(
policy, ips, override_devices=override_devices,
override_partitions=override_partitions)
random.shuffle(jobs)
if self.handoffs_first:
# Move the handoff parts to the front of the list
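            # (hand-off jobs have delete=True, so "not job['delete']" is
            # False for them and they sort to the front)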
jobs.sort(key=lambda job: not job['delete'])
self.job_count = len(jobs)
return jobs
def replicate(self, override_devices=None, override_partitions=None,
override_policies=None):
"""Run a replication pass"""
self.start = time.time()
self.suffix_count = 0
self.suffix_sync = 0
self.suffix_hash = 0
self.replication_count = 0
self.last_replication_count = -1
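        # Advance the 0-9 cycle counter fed to _do_listdir so each partition
        # gets a full listdir roughly once every ten passes.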
self.replication_cycle = (self.replication_cycle + 1) % 10
self.partition_times = []
self.my_replication_ips = self._get_my_replication_ips()
self.all_devs_info = set()
self.handoffs_remaining = 0
stats = eventlet.spawn(self.heartbeat)
lockup_detector = eventlet.spawn(self.detect_lockups)
eventlet.sleep() # Give spawns a cycle
current_nodes = None
try:
self.run_pool = GreenPool(size=self.concurrency)
jobs = self.collect_jobs(override_devices=override_devices,
override_partitions=override_partitions,
override_policies=override_policies)
for job in jobs:
current_nodes = job['nodes']
if override_devices and job['device'] not in override_devices:
continue
if override_partitions and \
job['partition'] not in override_partitions:
continue
dev_path = join(self.devices_dir, job['device'])
if self.mount_check and not ismount(dev_path):
self._add_failure_stats([(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in job['nodes']])
self.logger.warning(_('%s is not mounted'), job['device'])
continue
if self.handoffs_first and not job['delete']:
# in handoffs first mode, we won't process primary
# partitions until rebalance was successful!
if self.handoffs_remaining:
self.logger.warning(_(
"Handoffs first mode still has handoffs "
"remaining. Aborting current "
"replication pass."))
break
if not self.check_ring(job['policy'].object_ring):
self.logger.info(_("Ring change detected. Aborting "
"current replication pass."))
return
try:
if isfile(job['path']):
# Clean up any (probably zero-byte) files where a
# partition should be.
self.logger.warning(
'Removing partition directory '
'which was a file: %s', job['path'])
os.remove(job['path'])
continue
except OSError:
continue
if job['delete']:
self.run_pool.spawn(self.update_deleted, job)
else:
self.run_pool.spawn(self.update, job)
current_nodes = None
with Timeout(self.lockup_timeout):
self.run_pool.waitall()
except (Exception, Timeout):
if current_nodes:
self._add_failure_stats([(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in current_nodes])
else:
self._add_failure_stats(self.all_devs_info)
self.logger.exception(_("Exception in top-level replication loop"))
self.kill_coros()
finally:
stats.kill()
lockup_detector.kill()
self.stats_line()
self.stats['attempted'] = self.replication_count
def run_once(self, *args, **kwargs):
self._zero_stats()
self.logger.info(_("Running object replicator in script mode."))
override_devices = list_from_csv(kwargs.get('devices'))
override_partitions = list_from_csv(kwargs.get('partitions'))
override_policies = list_from_csv(kwargs.get('policies'))
if not override_devices:
override_devices = None
if not override_partitions:
override_partitions = None
if not override_policies:
override_policies = None
self.replicate(
override_devices=override_devices,
override_partitions=override_partitions,
override_policies=override_policies)
total = (time.time() - self.stats['start']) / 60
self.logger.info(
_("Object replication complete (once). (%.02f minutes)"), total)
if not (override_partitions or override_devices):
replication_last = time.time()
dump_recon_cache({'replication_stats': self.stats,
'replication_time': total,
'replication_last': replication_last,
'object_replication_time': total,
'object_replication_last': replication_last},
self.rcache, self.logger)
def run_forever(self, *args, **kwargs):
self.logger.info(_("Starting object replicator in daemon mode."))
# Run the replicator continually
while True:
self._zero_stats()
self.logger.info(_("Starting object replication pass."))
# Run the replicator
self.replicate()
total = (time.time() - self.stats['start']) / 60
self.logger.info(
_("Object replication complete. (%.02f minutes)"), total)
replication_last = time.time()
dump_recon_cache({'replication_stats': self.stats,
'replication_time': total,
'replication_last': replication_last,
'object_replication_time': total,
'object_replication_last': replication_last},
self.rcache, self.logger)
self.logger.debug('Replication sleeping for %s seconds.',
self.interval)
sleep(self.interval)
|
psachin/swift
|
swift/obj/replicator.py
|
Python
|
apache-2.0
| 38,417 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._iot_hub_client_enums import *
class CertificateBodyDescription(msrest.serialization.Model):
"""The JSON-serialized X509 Certificate.
:ivar certificate: base-64 representation of the X509 leaf certificate .cer file or just .pem
file content.
:vartype certificate: str
"""
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
*,
certificate: Optional[str] = None,
**kwargs
):
"""
:keyword certificate: base-64 representation of the X509 leaf certificate .cer file or just
.pem file content.
:paramtype certificate: str
"""
super(CertificateBodyDescription, self).__init__(**kwargs)
self.certificate = certificate
class CertificateDescription(msrest.serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The description of an X509 CA Certificate.
:vartype properties: ~azure.mgmt.iothub.v2019_11_04.models.CertificateProperties
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'CertificateProperties'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
properties: Optional["CertificateProperties"] = None,
**kwargs
):
"""
:keyword properties: The description of an X509 CA Certificate.
:paramtype properties: ~azure.mgmt.iothub.v2019_11_04.models.CertificateProperties
"""
super(CertificateDescription, self).__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.etag = None
self.type = None
class CertificateListDescription(msrest.serialization.Model):
"""The JSON-serialized array of Certificate objects.
:ivar value: The array of Certificate objects.
:vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.CertificateDescription]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[CertificateDescription]'},
}
def __init__(
self,
*,
value: Optional[List["CertificateDescription"]] = None,
**kwargs
):
"""
:keyword value: The array of Certificate objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_11_04.models.CertificateDescription]
"""
super(CertificateListDescription, self).__init__(**kwargs)
self.value = value
class CertificateProperties(msrest.serialization.Model):
"""The description of an X509 CA Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
'subject': {'readonly': True},
'expiry': {'readonly': True},
'thumbprint': {'readonly': True},
'is_verified': {'readonly': True},
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'subject': {'key': 'subject', 'type': 'str'},
'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'is_verified': {'key': 'isVerified', 'type': 'bool'},
'created': {'key': 'created', 'type': 'rfc-1123'},
'updated': {'key': 'updated', 'type': 'rfc-1123'},
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
*,
certificate: Optional[str] = None,
**kwargs
):
"""
:keyword certificate: The certificate content.
:paramtype certificate: str
"""
super(CertificateProperties, self).__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.certificate = certificate
class CertificatePropertiesWithNonce(msrest.serialization.Model):
"""The description of an X509 CA Certificate including the challenge nonce issued for the Proof-Of-Possession flow.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar verification_code: The certificate's verification code that will be used for proof of
possession.
:vartype verification_code: str
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
'subject': {'readonly': True},
'expiry': {'readonly': True},
'thumbprint': {'readonly': True},
'is_verified': {'readonly': True},
'created': {'readonly': True},
'updated': {'readonly': True},
'verification_code': {'readonly': True},
'certificate': {'readonly': True},
}
_attribute_map = {
'subject': {'key': 'subject', 'type': 'str'},
'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'is_verified': {'key': 'isVerified', 'type': 'bool'},
'created': {'key': 'created', 'type': 'rfc-1123'},
'updated': {'key': 'updated', 'type': 'rfc-1123'},
'verification_code': {'key': 'verificationCode', 'type': 'str'},
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(CertificatePropertiesWithNonce, self).__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.verification_code = None
self.certificate = None
class CertificateVerificationDescription(msrest.serialization.Model):
"""The JSON-serialized leaf certificate.
:ivar certificate: base-64 representation of X509 certificate .cer file or just .pem file
content.
:vartype certificate: str
"""
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
*,
certificate: Optional[str] = None,
**kwargs
):
"""
:keyword certificate: base-64 representation of X509 certificate .cer file or just .pem file
content.
:paramtype certificate: str
"""
super(CertificateVerificationDescription, self).__init__(**kwargs)
self.certificate = certificate
class CertificateWithNonceDescription(msrest.serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The description of an X509 CA Certificate including the challenge nonce
issued for the Proof-Of-Possession flow.
:vartype properties: ~azure.mgmt.iothub.v2019_11_04.models.CertificatePropertiesWithNonce
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'CertificatePropertiesWithNonce'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
properties: Optional["CertificatePropertiesWithNonce"] = None,
**kwargs
):
"""
:keyword properties: The description of an X509 CA Certificate including the challenge nonce
issued for the Proof-Of-Possession flow.
:paramtype properties: ~azure.mgmt.iothub.v2019_11_04.models.CertificatePropertiesWithNonce
"""
super(CertificateWithNonceDescription, self).__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.etag = None
self.type = None
class CloudToDeviceProperties(msrest.serialization.Model):
"""The IoT hub cloud-to-device messaging properties.
:ivar max_delivery_count: The max delivery count for cloud-to-device messages in the device
queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype max_delivery_count: int
:ivar default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the
device queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype default_ttl_as_iso8601: ~datetime.timedelta
:ivar feedback: The properties of the feedback queue for cloud-to-device messages.
:vartype feedback: ~azure.mgmt.iothub.v2019_11_04.models.FeedbackProperties
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
'default_ttl_as_iso8601': {'key': 'defaultTtlAsIso8601', 'type': 'duration'},
'feedback': {'key': 'feedback', 'type': 'FeedbackProperties'},
}
def __init__(
self,
*,
max_delivery_count: Optional[int] = None,
default_ttl_as_iso8601: Optional[datetime.timedelta] = None,
feedback: Optional["FeedbackProperties"] = None,
**kwargs
):
"""
:keyword max_delivery_count: The max delivery count for cloud-to-device messages in the device
queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype max_delivery_count: int
:keyword default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the
device queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype default_ttl_as_iso8601: ~datetime.timedelta
:keyword feedback: The properties of the feedback queue for cloud-to-device messages.
:paramtype feedback: ~azure.mgmt.iothub.v2019_11_04.models.FeedbackProperties
"""
super(CloudToDeviceProperties, self).__init__(**kwargs)
self.max_delivery_count = max_delivery_count
self.default_ttl_as_iso8601 = default_ttl_as_iso8601
self.feedback = feedback
class EndpointHealthData(msrest.serialization.Model):
"""The health data for an endpoint.
:ivar endpoint_id: Id of the endpoint.
:vartype endpoint_id: str
    :ivar health_status: Health statuses have the following meanings. The 'healthy' status shows that
the endpoint is accepting messages as expected. The 'unhealthy' status shows that the endpoint
is not accepting messages as expected and IoT Hub is retrying to send data to this endpoint.
The status of an unhealthy endpoint will be updated to healthy when IoT Hub has established an
eventually consistent state of health. The 'dead' status shows that the endpoint is not
accepting messages, after IoT Hub retried sending messages for the retrial period. See IoT Hub
metrics to identify errors and monitor issues with endpoints. The 'unknown' status shows that
the IoT Hub has not established a connection with the endpoint. No messages have been delivered
to or rejected from this endpoint. Possible values include: "unknown", "healthy", "unhealthy",
"dead".
:vartype health_status: str or ~azure.mgmt.iothub.v2019_11_04.models.EndpointHealthStatus
"""
_attribute_map = {
'endpoint_id': {'key': 'endpointId', 'type': 'str'},
'health_status': {'key': 'healthStatus', 'type': 'str'},
}
def __init__(
self,
*,
endpoint_id: Optional[str] = None,
health_status: Optional[Union[str, "EndpointHealthStatus"]] = None,
**kwargs
):
"""
:keyword endpoint_id: Id of the endpoint.
:paramtype endpoint_id: str
        :keyword health_status: Health statuses have the following meanings. The 'healthy' status shows
that the endpoint is accepting messages as expected. The 'unhealthy' status shows that the
endpoint is not accepting messages as expected and IoT Hub is retrying to send data to this
endpoint. The status of an unhealthy endpoint will be updated to healthy when IoT Hub has
established an eventually consistent state of health. The 'dead' status shows that the endpoint
is not accepting messages, after IoT Hub retried sending messages for the retrial period. See
IoT Hub metrics to identify errors and monitor issues with endpoints. The 'unknown' status
shows that the IoT Hub has not established a connection with the endpoint. No messages have
been delivered to or rejected from this endpoint. Possible values include: "unknown",
"healthy", "unhealthy", "dead".
:paramtype health_status: str or ~azure.mgmt.iothub.v2019_11_04.models.EndpointHealthStatus
"""
super(EndpointHealthData, self).__init__(**kwargs)
self.endpoint_id = endpoint_id
self.health_status = health_status
class EndpointHealthDataListResult(msrest.serialization.Model):
"""The JSON-serialized array of EndpointHealthData objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: JSON-serialized array of Endpoint health data.
:vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.EndpointHealthData]
:ivar next_link: Link to more results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EndpointHealthData]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["EndpointHealthData"]] = None,
**kwargs
):
"""
:keyword value: JSON-serialized array of Endpoint health data.
:paramtype value: list[~azure.mgmt.iothub.v2019_11_04.models.EndpointHealthData]
"""
super(EndpointHealthDataListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class EnrichmentProperties(msrest.serialization.Model):
"""The properties of an enrichment that your IoT hub applies to messages delivered to endpoints.
All required parameters must be populated in order to send to Azure.
:ivar key: Required. The key or name for the enrichment property.
:vartype key: str
:ivar value: Required. The value for the enrichment property.
:vartype value: str
:ivar endpoint_names: Required. The list of endpoints for which the enrichment is applied to
the message.
:vartype endpoint_names: list[str]
"""
_validation = {
'key': {'required': True},
'value': {'required': True},
'endpoint_names': {'required': True, 'min_items': 1},
}
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
}
def __init__(
self,
*,
key: str,
value: str,
endpoint_names: List[str],
**kwargs
):
"""
:keyword key: Required. The key or name for the enrichment property.
:paramtype key: str
:keyword value: Required. The value for the enrichment property.
:paramtype value: str
:keyword endpoint_names: Required. The list of endpoints for which the enrichment is applied to
the message.
:paramtype endpoint_names: list[str]
"""
super(EnrichmentProperties, self).__init__(**kwargs)
self.key = key
self.value = value
self.endpoint_names = endpoint_names
class ErrorDetails(msrest.serialization.Model):
"""Error details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar http_status_code: The HTTP status code.
:vartype http_status_code: str
:ivar message: The error message.
:vartype message: str
:ivar details: The error details.
:vartype details: str
"""
_validation = {
'code': {'readonly': True},
'http_status_code': {'readonly': True},
'message': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'http_status_code': {'key': 'httpStatusCode', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ErrorDetails, self).__init__(**kwargs)
self.code = None
self.http_status_code = None
self.message = None
self.details = None
class EventHubConsumerGroupInfo(msrest.serialization.Model):
"""The properties of the EventHubConsumerGroupInfo object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The tags.
:vartype properties: dict[str, str]
:ivar id: The Event Hub-compatible consumer group identifier.
:vartype id: str
:ivar name: The Event Hub-compatible consumer group name.
:vartype name: str
:ivar type: the resource type.
:vartype type: str
:ivar etag: The etag.
:vartype etag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': '{str}'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(
self,
*,
properties: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword properties: The tags.
:paramtype properties: dict[str, str]
"""
super(EventHubConsumerGroupInfo, self).__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.type = None
self.etag = None
class EventHubConsumerGroupsListResult(msrest.serialization.Model):
"""The JSON-serialized array of Event Hub-compatible consumer group names with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of consumer groups objects.
:vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.EventHubConsumerGroupInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EventHubConsumerGroupInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["EventHubConsumerGroupInfo"]] = None,
**kwargs
):
"""
:keyword value: List of consumer groups objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_11_04.models.EventHubConsumerGroupInfo]
"""
super(EventHubConsumerGroupsListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class EventHubProperties(msrest.serialization.Model):
"""The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:vartype retention_time_in_days: long
:ivar partition_count: The number of partitions for receiving device-to-cloud messages in the
Event Hub-compatible endpoint. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:vartype partition_count: int
:ivar partition_ids: The partition ids in the Event Hub-compatible endpoint.
:vartype partition_ids: list[str]
:ivar path: The Event Hub-compatible name.
:vartype path: str
:ivar endpoint: The Event Hub-compatible endpoint.
:vartype endpoint: str
"""
_validation = {
'partition_ids': {'readonly': True},
'path': {'readonly': True},
'endpoint': {'readonly': True},
}
_attribute_map = {
'retention_time_in_days': {'key': 'retentionTimeInDays', 'type': 'long'},
'partition_count': {'key': 'partitionCount', 'type': 'int'},
'partition_ids': {'key': 'partitionIds', 'type': '[str]'},
'path': {'key': 'path', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
}
def __init__(
self,
*,
retention_time_in_days: Optional[int] = None,
partition_count: Optional[int] = None,
**kwargs
):
"""
:keyword retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:paramtype retention_time_in_days: long
:keyword partition_count: The number of partitions for receiving device-to-cloud messages in
the Event Hub-compatible endpoint. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:paramtype partition_count: int
"""
super(EventHubProperties, self).__init__(**kwargs)
self.retention_time_in_days = retention_time_in_days
self.partition_count = partition_count
self.partition_ids = None
self.path = None
self.endpoint = None
class ExportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an export of all devices in the IoT hub.
All required parameters must be populated in order to send to Azure.
:ivar export_blob_container_uri: Required. The export blob container URI.
:vartype export_blob_container_uri: str
:ivar exclude_keys: Required. The value indicating whether keys should be excluded during
export.
:vartype exclude_keys: bool
"""
_validation = {
'export_blob_container_uri': {'required': True},
'exclude_keys': {'required': True},
}
_attribute_map = {
'export_blob_container_uri': {'key': 'exportBlobContainerUri', 'type': 'str'},
'exclude_keys': {'key': 'excludeKeys', 'type': 'bool'},
}
def __init__(
self,
*,
export_blob_container_uri: str,
exclude_keys: bool,
**kwargs
):
"""
:keyword export_blob_container_uri: Required. The export blob container URI.
:paramtype export_blob_container_uri: str
:keyword exclude_keys: Required. The value indicating whether keys should be excluded during
export.
:paramtype exclude_keys: bool
"""
super(ExportDevicesRequest, self).__init__(**kwargs)
self.export_blob_container_uri = export_blob_container_uri
self.exclude_keys = exclude_keys
class FailoverInput(msrest.serialization.Model):
"""Use to provide failover region when requesting manual Failover for a hub.
All required parameters must be populated in order to send to Azure.
:ivar failover_region: Required. Region the hub will be failed over to.
:vartype failover_region: str
"""
_validation = {
'failover_region': {'required': True},
}
_attribute_map = {
'failover_region': {'key': 'failoverRegion', 'type': 'str'},
}
def __init__(
self,
*,
failover_region: str,
**kwargs
):
"""
:keyword failover_region: Required. Region the hub will be failed over to.
:paramtype failover_region: str
"""
super(FailoverInput, self).__init__(**kwargs)
self.failover_region = failover_region
class FallbackRouteProperties(msrest.serialization.Model):
"""The properties of the fallback route. IoT Hub uses these properties when it routes messages to the fallback endpoint.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the route. The name can only include alphanumeric characters, periods,
underscores, hyphens, has a maximum length of 64 characters, and must be unique.
:vartype name: str
    :ivar source: Required. The source to which the routing rule is to be applied. For example,
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:vartype source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:ivar condition: The condition which is evaluated in order to apply the fallback route. If the
condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:vartype condition: str
:ivar endpoint_names: Required. The list of endpoints to which the messages that satisfy the
    condition are routed. Currently only 1 endpoint is allowed.
:vartype endpoint_names: list[str]
:ivar is_enabled: Required. Used to specify whether the fallback route is enabled.
:vartype is_enabled: bool
"""
_validation = {
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
source: Union[str, "RoutingSource"],
endpoint_names: List[str],
is_enabled: bool,
name: Optional[str] = None,
condition: Optional[str] = None,
**kwargs
):
"""
:keyword name: The name of the route. The name can only include alphanumeric characters,
periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
:paramtype name: str
    :keyword source: Required. The source to which the routing rule is to be applied. For
example, DeviceMessages. Possible values include: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:paramtype source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:keyword condition: The condition which is evaluated in order to apply the fallback route. If
the condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:paramtype condition: str
:keyword endpoint_names: Required. The list of endpoints to which the messages that satisfy the
    condition are routed. Currently only 1 endpoint is allowed.
:paramtype endpoint_names: list[str]
:keyword is_enabled: Required. Used to specify whether the fallback route is enabled.
:paramtype is_enabled: bool
"""
super(FallbackRouteProperties, self).__init__(**kwargs)
self.name = name
self.source = source
self.condition = condition
self.endpoint_names = endpoint_names
self.is_enabled = is_enabled
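# Illustrative sketch (not part of the generated client): a fallback route
# that forwards all device messages to the built-in "events" endpoint; the
# endpoint name here is only an assumption for the example.
#
#   fallback = FallbackRouteProperties(
#       source="DeviceMessages",
#       endpoint_names=["events"],
#       is_enabled=True,
#   )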
class FeedbackProperties(msrest.serialization.Model):
"""The properties of the feedback queue for cloud-to-device messages.
:ivar lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype lock_duration_as_iso8601: ~datetime.timedelta
:ivar ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype ttl_as_iso8601: ~datetime.timedelta
:ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message on the
feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype max_delivery_count: int
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'},
'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'},
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
}
def __init__(
self,
*,
lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
ttl_as_iso8601: Optional[datetime.timedelta] = None,
max_delivery_count: Optional[int] = None,
**kwargs
):
"""
:keyword lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype lock_duration_as_iso8601: ~datetime.timedelta
:keyword ttl_as_iso8601: The period of time for which a message is available to consume before
it is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype ttl_as_iso8601: ~datetime.timedelta
:keyword max_delivery_count: The number of times the IoT hub attempts to deliver a message on
the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype max_delivery_count: int
"""
super(FeedbackProperties, self).__init__(**kwargs)
self.lock_duration_as_iso8601 = lock_duration_as_iso8601
self.ttl_as_iso8601 = ttl_as_iso8601
self.max_delivery_count = max_delivery_count
class ImportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an import of all devices in the hub.
All required parameters must be populated in order to send to Azure.
:ivar input_blob_container_uri: Required. The input blob container URI.
:vartype input_blob_container_uri: str
:ivar output_blob_container_uri: Required. The output blob container URI.
:vartype output_blob_container_uri: str
"""
_validation = {
'input_blob_container_uri': {'required': True},
'output_blob_container_uri': {'required': True},
}
_attribute_map = {
'input_blob_container_uri': {'key': 'inputBlobContainerUri', 'type': 'str'},
'output_blob_container_uri': {'key': 'outputBlobContainerUri', 'type': 'str'},
}
def __init__(
self,
*,
input_blob_container_uri: str,
output_blob_container_uri: str,
**kwargs
):
"""
:keyword input_blob_container_uri: Required. The input blob container URI.
:paramtype input_blob_container_uri: str
:keyword output_blob_container_uri: Required. The output blob container URI.
:paramtype output_blob_container_uri: str
"""
super(ImportDevicesRequest, self).__init__(**kwargs)
self.input_blob_container_uri = input_blob_container_uri
self.output_blob_container_uri = output_blob_container_uri
class IotHubCapacity(msrest.serialization.Model):
"""IoT Hub capacity information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar minimum: The minimum number of units.
:vartype minimum: long
:ivar maximum: The maximum number of units.
:vartype maximum: long
:ivar default: The default number of units.
:vartype default: long
:ivar scale_type: The type of the scaling enabled. Possible values include: "Automatic",
"Manual", "None".
:vartype scale_type: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubScaleType
"""
_validation = {
'minimum': {'readonly': True, 'maximum': 1, 'minimum': 1},
'maximum': {'readonly': True},
'default': {'readonly': True},
'scale_type': {'readonly': True},
}
_attribute_map = {
'minimum': {'key': 'minimum', 'type': 'long'},
'maximum': {'key': 'maximum', 'type': 'long'},
'default': {'key': 'default', 'type': 'long'},
'scale_type': {'key': 'scaleType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(IotHubCapacity, self).__init__(**kwargs)
self.minimum = None
self.maximum = None
self.default = None
self.scale_type = None
class Resource(msrest.serialization.Model):
"""The common properties of an Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: Required. The resource location.
:vartype location: str
:ivar tags: A set of tags. The resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
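        # The name pattern allows 3-50 characters of letters, digits and
        # hyphens, rejects all-digit names and a leading hyphen, and requires
        # the name to end with a letter or digit.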
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword location: Required. The resource location.
:paramtype location: str
:keyword tags: A set of tags. The resource tags.
:paramtype tags: dict[str, str]
"""
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class IotHubDescription(Resource):
"""The description of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: Required. The resource location.
:vartype location: str
:ivar tags: A set of tags. The resource tags.
:vartype tags: dict[str, str]
:ivar etag: The Etag field is *not* required. If it is provided in the response body, it must
also be provided as a header per the normal ETag convention.
:vartype etag: str
:ivar properties: IotHub properties.
:vartype properties: ~azure.mgmt.iothub.v2019_11_04.models.IotHubProperties
:ivar sku: Required. IotHub SKU info.
:vartype sku: ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuInfo
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'location': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'IotHubProperties'},
'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'},
}
def __init__(
self,
*,
location: str,
sku: "IotHubSkuInfo",
tags: Optional[Dict[str, str]] = None,
etag: Optional[str] = None,
properties: Optional["IotHubProperties"] = None,
**kwargs
):
"""
:keyword location: Required. The resource location.
:paramtype location: str
:keyword tags: A set of tags. The resource tags.
:paramtype tags: dict[str, str]
:keyword etag: The Etag field is *not* required. If it is provided in the response body, it
must also be provided as a header per the normal ETag convention.
:paramtype etag: str
:keyword properties: IotHub properties.
:paramtype properties: ~azure.mgmt.iothub.v2019_11_04.models.IotHubProperties
:keyword sku: Required. IotHub SKU info.
:paramtype sku: ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuInfo
"""
super(IotHubDescription, self).__init__(location=location, tags=tags, **kwargs)
self.etag = etag
self.properties = properties
self.sku = sku
class IotHubDescriptionListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of IotHubDescription objects.
:vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubDescription]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["IotHubDescription"]] = None,
**kwargs
):
"""
:keyword value: The array of IotHubDescription objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubDescription]
"""
super(IotHubDescriptionListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubLocationDescription(msrest.serialization.Model):
"""Public representation of one of the locations where a resource is provisioned.
:ivar location: The name of the Azure region.
:vartype location: str
:ivar role: The role of the region, can be either primary or secondary. The primary region is
where the IoT hub is currently provisioned. The secondary region is the Azure disaster recovery
    (DR) paired region and also the region to which the IoT hub can fail over. Possible values
include: "primary", "secondary".
:vartype role: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubReplicaRoleType
"""
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'role': {'key': 'role', 'type': 'str'},
}
def __init__(
self,
*,
location: Optional[str] = None,
role: Optional[Union[str, "IotHubReplicaRoleType"]] = None,
**kwargs
):
"""
:keyword location: The name of the Azure region.
:paramtype location: str
:keyword role: The role of the region, which can be either primary or secondary. The primary
region is where the IoT hub is currently provisioned. The secondary region is the Azure disaster
recovery (DR) paired region and also the region where the IoT hub can fail over to. Possible
values include: "primary", "secondary".
:paramtype role: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubReplicaRoleType
"""
super(IotHubLocationDescription, self).__init__(**kwargs)
self.location = location
self.role = role
class IotHubNameAvailabilityInfo(msrest.serialization.Model):
"""The properties indicating whether a given IoT hub name is available.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name_available: The value which indicates whether the provided name is available.
:vartype name_available: bool
:ivar reason: The reason for unavailability. Possible values include: "Invalid",
"AlreadyExists".
:vartype reason: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubNameUnavailabilityReason
:ivar message: The detailed reason message.
:vartype message: str
"""
_validation = {
'name_available': {'readonly': True},
'reason': {'readonly': True},
}
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
message: Optional[str] = None,
**kwargs
):
"""
:keyword message: The detailed reason message.
:paramtype message: str
"""
super(IotHubNameAvailabilityInfo, self).__init__(**kwargs)
self.name_available = None
self.reason = None
self.message = message
class IotHubProperties(msrest.serialization.Model):
"""The properties of an IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar authorization_policies: The shared access policies you can use to secure a connection to
the IoT hub.
:vartype authorization_policies:
list[~azure.mgmt.iothub.v2019_11_04.models.SharedAccessSignatureAuthorizationRule]
:ivar ip_filter_rules: The IP filter rules.
:vartype ip_filter_rules: list[~azure.mgmt.iothub.v2019_11_04.models.IpFilterRule]
:ivar provisioning_state: The provisioning state.
:vartype provisioning_state: str
:ivar state: The hub state.
:vartype state: str
:ivar host_name: The name of the host.
:vartype host_name: str
:ivar event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible key
for this dictionary is events. This key has to be present in the dictionary while making create
or update calls for the IoT hub.
:vartype event_hub_endpoints: dict[str,
~azure.mgmt.iothub.v2019_11_04.models.EventHubProperties]
:ivar routing: The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:vartype routing: ~azure.mgmt.iothub.v2019_11_04.models.RoutingProperties
:ivar storage_endpoints: The list of Azure Storage endpoints where you can upload files.
Currently you can configure only one Azure Storage account and that MUST have its key as
$default. Specifying more than one storage account causes an error to be thrown. Not specifying
a value for this property when the enableFileUploadNotifications property is set to True causes
an error to be thrown.
:vartype storage_endpoints: dict[str,
~azure.mgmt.iothub.v2019_11_04.models.StorageEndpointProperties]
:ivar messaging_endpoints: The messaging endpoint properties for the file upload notification
queue.
:vartype messaging_endpoints: dict[str,
~azure.mgmt.iothub.v2019_11_04.models.MessagingEndpointProperties]
:ivar enable_file_upload_notifications: If True, file upload notifications are enabled.
:vartype enable_file_upload_notifications: bool
:ivar cloud_to_device: The IoT hub cloud-to-device messaging properties.
:vartype cloud_to_device: ~azure.mgmt.iothub.v2019_11_04.models.CloudToDeviceProperties
:ivar comments: IoT hub comments.
:vartype comments: str
:ivar features: The capabilities and features enabled for the IoT hub. Possible values include:
"None", "DeviceManagement".
:vartype features: str or ~azure.mgmt.iothub.v2019_11_04.models.Capabilities
:ivar locations: Primary and secondary location for iot hub.
:vartype locations: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubLocationDescription]
"""
_validation = {
'provisioning_state': {'readonly': True},
'state': {'readonly': True},
'host_name': {'readonly': True},
'locations': {'readonly': True},
}
_attribute_map = {
'authorization_policies': {'key': 'authorizationPolicies', 'type': '[SharedAccessSignatureAuthorizationRule]'},
'ip_filter_rules': {'key': 'ipFilterRules', 'type': '[IpFilterRule]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'host_name': {'key': 'hostName', 'type': 'str'},
'event_hub_endpoints': {'key': 'eventHubEndpoints', 'type': '{EventHubProperties}'},
'routing': {'key': 'routing', 'type': 'RoutingProperties'},
'storage_endpoints': {'key': 'storageEndpoints', 'type': '{StorageEndpointProperties}'},
'messaging_endpoints': {'key': 'messagingEndpoints', 'type': '{MessagingEndpointProperties}'},
'enable_file_upload_notifications': {'key': 'enableFileUploadNotifications', 'type': 'bool'},
'cloud_to_device': {'key': 'cloudToDevice', 'type': 'CloudToDeviceProperties'},
'comments': {'key': 'comments', 'type': 'str'},
'features': {'key': 'features', 'type': 'str'},
'locations': {'key': 'locations', 'type': '[IotHubLocationDescription]'},
}
def __init__(
self,
*,
authorization_policies: Optional[List["SharedAccessSignatureAuthorizationRule"]] = None,
ip_filter_rules: Optional[List["IpFilterRule"]] = None,
event_hub_endpoints: Optional[Dict[str, "EventHubProperties"]] = None,
routing: Optional["RoutingProperties"] = None,
storage_endpoints: Optional[Dict[str, "StorageEndpointProperties"]] = None,
messaging_endpoints: Optional[Dict[str, "MessagingEndpointProperties"]] = None,
enable_file_upload_notifications: Optional[bool] = None,
cloud_to_device: Optional["CloudToDeviceProperties"] = None,
comments: Optional[str] = None,
features: Optional[Union[str, "Capabilities"]] = None,
**kwargs
):
"""
:keyword authorization_policies: The shared access policies you can use to secure a connection
to the IoT hub.
:paramtype authorization_policies:
list[~azure.mgmt.iothub.v2019_11_04.models.SharedAccessSignatureAuthorizationRule]
:keyword ip_filter_rules: The IP filter rules.
:paramtype ip_filter_rules: list[~azure.mgmt.iothub.v2019_11_04.models.IpFilterRule]
:keyword event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible
key for this dictionary is events. This key has to be present in the dictionary while making
create or update calls for the IoT hub.
:paramtype event_hub_endpoints: dict[str,
~azure.mgmt.iothub.v2019_11_04.models.EventHubProperties]
:keyword routing: The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:paramtype routing: ~azure.mgmt.iothub.v2019_11_04.models.RoutingProperties
:keyword storage_endpoints: The list of Azure Storage endpoints where you can upload files.
Currently you can configure only one Azure Storage account and that MUST have its key as
$default. Specifying more than one storage account causes an error to be thrown. Not specifying
a value for this property when the enableFileUploadNotifications property is set to True causes
an error to be thrown.
:paramtype storage_endpoints: dict[str,
~azure.mgmt.iothub.v2019_11_04.models.StorageEndpointProperties]
:keyword messaging_endpoints: The messaging endpoint properties for the file upload
notification queue.
:paramtype messaging_endpoints: dict[str,
~azure.mgmt.iothub.v2019_11_04.models.MessagingEndpointProperties]
:keyword enable_file_upload_notifications: If True, file upload notifications are enabled.
:paramtype enable_file_upload_notifications: bool
:keyword cloud_to_device: The IoT hub cloud-to-device messaging properties.
:paramtype cloud_to_device: ~azure.mgmt.iothub.v2019_11_04.models.CloudToDeviceProperties
:keyword comments: IoT hub comments.
:paramtype comments: str
:keyword features: The capabilities and features enabled for the IoT hub. Possible values
include: "None", "DeviceManagement".
:paramtype features: str or ~azure.mgmt.iothub.v2019_11_04.models.Capabilities
"""
super(IotHubProperties, self).__init__(**kwargs)
self.authorization_policies = authorization_policies
self.ip_filter_rules = ip_filter_rules
self.provisioning_state = None
self.state = None
self.host_name = None
self.event_hub_endpoints = event_hub_endpoints
self.routing = routing
self.storage_endpoints = storage_endpoints
self.messaging_endpoints = messaging_endpoints
self.enable_file_upload_notifications = enable_file_upload_notifications
self.cloud_to_device = cloud_to_device
self.comments = comments
self.features = features
self.locations = None
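# Illustrative sketch, not part of the generated client: a minimal IotHubProperties
# payload that enables file upload notifications and adds one IP filter rule. All
# values are hypothetical; IpFilterRule is defined later in this module and is
# resolved when the function is called.
def _example_iot_hub_properties():
    return IotHubProperties(
        ip_filter_rules=[
            IpFilterRule(filter_name="allow-onprem", action="Accept", ip_mask="10.0.0.0/24"),
        ],
        enable_file_upload_notifications=True,
        comments="Example hub properties",
    )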
class IotHubQuotaMetricInfo(msrest.serialization.Model):
"""Quota metrics properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the quota metric.
:vartype name: str
:ivar current_value: The current value for the quota metric.
:vartype current_value: long
:ivar max_value: The maximum value of the quota metric.
:vartype max_value: long
"""
_validation = {
'name': {'readonly': True},
'current_value': {'readonly': True},
'max_value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'max_value': {'key': 'maxValue', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(IotHubQuotaMetricInfo, self).__init__(**kwargs)
self.name = None
self.current_value = None
self.max_value = None
class IotHubQuotaMetricInfoListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubQuotaMetricInfo objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of quota metrics objects.
:vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubQuotaMetricInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubQuotaMetricInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["IotHubQuotaMetricInfo"]] = None,
**kwargs
):
"""
:keyword value: The array of quota metrics objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubQuotaMetricInfo]
"""
super(IotHubQuotaMetricInfoListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubSkuDescription(msrest.serialization.Model):
"""SKU properties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar resource_type: The type of the resource.
:vartype resource_type: str
:ivar sku: Required. The SKU info of the resource.
:vartype sku: ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuInfo
:ivar capacity: Required. IotHub capacity.
:vartype capacity: ~azure.mgmt.iothub.v2019_11_04.models.IotHubCapacity
"""
_validation = {
'resource_type': {'readonly': True},
'sku': {'required': True},
'capacity': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'},
'capacity': {'key': 'capacity', 'type': 'IotHubCapacity'},
}
def __init__(
self,
*,
sku: "IotHubSkuInfo",
capacity: "IotHubCapacity",
**kwargs
):
"""
:keyword sku: Required. The SKU info of the resource.
:paramtype sku: ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuInfo
:keyword capacity: Required. IotHub capacity.
:paramtype capacity: ~azure.mgmt.iothub.v2019_11_04.models.IotHubCapacity
"""
super(IotHubSkuDescription, self).__init__(**kwargs)
self.resource_type = None
self.sku = sku
self.capacity = capacity
class IotHubSkuDescriptionListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubSkuDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of IotHubSkuDescription.
:vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubSkuDescription]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["IotHubSkuDescription"]] = None,
**kwargs
):
"""
:keyword value: The array of IotHubSkuDescription.
:paramtype value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuDescription]
"""
super(IotHubSkuDescriptionListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubSkuInfo(msrest.serialization.Model):
"""Information about the SKU of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the SKU. Possible values include: "F1", "S1", "S2", "S3",
"B1", "B2", "B3".
:vartype name: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubSku
:ivar tier: The billing tier for the IoT hub. Possible values include: "Free", "Standard",
"Basic".
:vartype tier: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuTier
:ivar capacity: The number of provisioned IoT Hub units. See:
https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:vartype capacity: long
"""
_validation = {
'name': {'required': True},
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'long'},
}
def __init__(
self,
*,
name: Union[str, "IotHubSku"],
capacity: Optional[int] = None,
**kwargs
):
"""
:keyword name: Required. The name of the SKU. Possible values include: "F1", "S1", "S2", "S3",
"B1", "B2", "B3".
:paramtype name: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubSku
:keyword capacity: The number of provisioned IoT Hub units. See:
https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:paramtype capacity: long
"""
super(IotHubSkuInfo, self).__init__(**kwargs)
self.name = name
self.tier = None
self.capacity = capacity
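# Illustrative sketch, not part of the generated client: an IotHubSkuInfo for one
# unit of the Standard S1 tier. The tier attribute is read-only and populated by
# the service, so only name and capacity are supplied here.
def _example_iot_hub_sku_info():
    return IotHubSkuInfo(name="S1", capacity=1)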
class IpFilterRule(msrest.serialization.Model):
"""The IP filter rules for the IoT hub.
All required parameters must be populated in order to send to Azure.
:ivar filter_name: Required. The name of the IP filter rule.
:vartype filter_name: str
:ivar action: Required. The desired action for requests captured by this rule. Possible values
include: "Accept", "Reject".
:vartype action: str or ~azure.mgmt.iothub.v2019_11_04.models.IpFilterActionType
:ivar ip_mask: Required. A string that contains the IP address range in CIDR notation for the
rule.
:vartype ip_mask: str
"""
_validation = {
'filter_name': {'required': True},
'action': {'required': True},
'ip_mask': {'required': True},
}
_attribute_map = {
'filter_name': {'key': 'filterName', 'type': 'str'},
'action': {'key': 'action', 'type': 'str'},
'ip_mask': {'key': 'ipMask', 'type': 'str'},
}
def __init__(
self,
*,
filter_name: str,
action: Union[str, "IpFilterActionType"],
ip_mask: str,
**kwargs
):
"""
:keyword filter_name: Required. The name of the IP filter rule.
:paramtype filter_name: str
:keyword action: Required. The desired action for requests captured by this rule. Possible
values include: "Accept", "Reject".
:paramtype action: str or ~azure.mgmt.iothub.v2019_11_04.models.IpFilterActionType
:keyword ip_mask: Required. A string that contains the IP address range in CIDR notation for
the rule.
:paramtype ip_mask: str
"""
super(IpFilterRule, self).__init__(**kwargs)
self.filter_name = filter_name
self.action = action
self.ip_mask = ip_mask
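# Illustrative sketch, not part of the generated client: an IpFilterRule accepting
# traffic from a hypothetical on-premises CIDR range. Name and mask are examples only.
def _example_ip_filter_rule():
    return IpFilterRule(
        filter_name="allow-onprem",
        action="Accept",
        ip_mask="192.168.100.0/22",
    )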
class JobResponse(msrest.serialization.Model):
"""The properties of the Job Response object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar job_id: The job identifier.
:vartype job_id: str
:ivar start_time_utc: The start time of the job.
:vartype start_time_utc: ~datetime.datetime
:ivar end_time_utc: The time the job stopped processing.
:vartype end_time_utc: ~datetime.datetime
:ivar type: The type of the job. Possible values include: "unknown", "export", "import",
"backup", "readDeviceProperties", "writeDeviceProperties", "updateDeviceConfiguration",
"rebootDevice", "factoryResetDevice", "firmwareUpdate".
:vartype type: str or ~azure.mgmt.iothub.v2019_11_04.models.JobType
:ivar status: The status of the job. Possible values include: "unknown", "enqueued", "running",
"completed", "failed", "cancelled".
:vartype status: str or ~azure.mgmt.iothub.v2019_11_04.models.JobStatus
:ivar failure_reason: If status == failed, this string contains the reason for the failure.
:vartype failure_reason: str
:ivar status_message: The status message for the job.
:vartype status_message: str
:ivar parent_job_id: The job identifier of the parent job, if any.
:vartype parent_job_id: str
"""
_validation = {
'job_id': {'readonly': True},
'start_time_utc': {'readonly': True},
'end_time_utc': {'readonly': True},
'type': {'readonly': True},
'status': {'readonly': True},
'failure_reason': {'readonly': True},
'status_message': {'readonly': True},
'parent_job_id': {'readonly': True},
}
_attribute_map = {
'job_id': {'key': 'jobId', 'type': 'str'},
'start_time_utc': {'key': 'startTimeUtc', 'type': 'rfc-1123'},
'end_time_utc': {'key': 'endTimeUtc', 'type': 'rfc-1123'},
'type': {'key': 'type', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'failure_reason': {'key': 'failureReason', 'type': 'str'},
'status_message': {'key': 'statusMessage', 'type': 'str'},
'parent_job_id': {'key': 'parentJobId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(JobResponse, self).__init__(**kwargs)
self.job_id = None
self.start_time_utc = None
self.end_time_utc = None
self.type = None
self.status = None
self.failure_reason = None
self.status_message = None
self.parent_job_id = None
class JobResponseListResult(msrest.serialization.Model):
"""The JSON-serialized array of JobResponse objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of JobResponse objects.
:vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.JobResponse]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[JobResponse]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["JobResponse"]] = None,
**kwargs
):
"""
:keyword value: The array of JobResponse objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_11_04.models.JobResponse]
"""
super(JobResponseListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class MatchedRoute(msrest.serialization.Model):
"""Routes that matched.
:ivar properties: Properties of routes that matched.
:vartype properties: ~azure.mgmt.iothub.v2019_11_04.models.RouteProperties
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'RouteProperties'},
}
def __init__(
self,
*,
properties: Optional["RouteProperties"] = None,
**kwargs
):
"""
:keyword properties: Properties of routes that matched.
:paramtype properties: ~azure.mgmt.iothub.v2019_11_04.models.RouteProperties
"""
super(MatchedRoute, self).__init__(**kwargs)
self.properties = properties
class MessagingEndpointProperties(msrest.serialization.Model):
"""The properties of the messaging endpoints used by this IoT hub.
:ivar lock_duration_as_iso8601: The lock duration. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype lock_duration_as_iso8601: ~datetime.timedelta
:ivar ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype ttl_as_iso8601: ~datetime.timedelta
:ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype max_delivery_count: int
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'},
'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'},
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
}
def __init__(
self,
*,
lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
ttl_as_iso8601: Optional[datetime.timedelta] = None,
max_delivery_count: Optional[int] = None,
**kwargs
):
"""
:keyword lock_duration_as_iso8601: The lock duration. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype lock_duration_as_iso8601: ~datetime.timedelta
:keyword ttl_as_iso8601: The period of time for which a message is available to consume before
it is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype ttl_as_iso8601: ~datetime.timedelta
:keyword max_delivery_count: The number of times the IoT hub attempts to deliver a message.
See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype max_delivery_count: int
"""
super(MessagingEndpointProperties, self).__init__(**kwargs)
self.lock_duration_as_iso8601 = lock_duration_as_iso8601
self.ttl_as_iso8601 = ttl_as_iso8601
self.max_delivery_count = max_delivery_count
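# Illustrative sketch, not part of the generated client: messaging endpoint settings
# for the file upload notification queue, assuming the module-level "import datetime"
# present in this file. The durations and delivery count are hypothetical and stay
# within the validation bounds declared above (max_delivery_count between 1 and 100).
def _example_messaging_endpoint_properties():
    return MessagingEndpointProperties(
        lock_duration_as_iso8601=datetime.timedelta(minutes=1),
        ttl_as_iso8601=datetime.timedelta(hours=1),
        max_delivery_count=10,
    )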
class Name(msrest.serialization.Model):
"""Name of Iot Hub type.
:ivar value: IotHub type.
:vartype value: str
:ivar localized_value: Localized value of name.
:vartype localized_value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[str] = None,
localized_value: Optional[str] = None,
**kwargs
):
"""
:keyword value: IotHub type.
:paramtype value: str
:keyword localized_value: Localized value of name.
:paramtype localized_value: str
"""
super(Name, self).__init__(**kwargs)
self.value = value
self.localized_value = localized_value
class Operation(msrest.serialization.Model):
"""IoT Hub REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Operation name: {provider}/{resource}/{read | write | action | delete}.
:vartype name: str
:ivar display: The object that represents the operation.
:vartype display: ~azure.mgmt.iothub.v2019_11_04.models.OperationDisplay
"""
_validation = {
'name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
*,
display: Optional["OperationDisplay"] = None,
**kwargs
):
"""
:keyword display: The object that represents the operation.
:paramtype display: ~azure.mgmt.iothub.v2019_11_04.models.OperationDisplay
"""
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = display
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: Service provider: Microsoft Devices.
:vartype provider: str
:ivar resource: Resource Type: IotHubs.
:vartype resource: str
:ivar operation: Name of the operation.
:vartype operation: str
:ivar description: Description of the operation.
:vartype description: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationInputs(msrest.serialization.Model):
"""Input values.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the IoT hub to check.
:vartype name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
**kwargs
):
"""
:keyword name: Required. The name of the IoT hub to check.
:paramtype name: str
"""
super(OperationInputs, self).__init__(**kwargs)
self.name = name
class OperationListResult(msrest.serialization.Model):
"""Result of the request to list IoT Hub operations. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of IoT Hub operations supported by the Microsoft.Devices resource provider.
:vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class RegistryStatistics(msrest.serialization.Model):
"""Identity registry statistics.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar total_device_count: The total count of devices in the identity registry.
:vartype total_device_count: long
:ivar enabled_device_count: The count of enabled devices in the identity registry.
:vartype enabled_device_count: long
:ivar disabled_device_count: The count of disabled devices in the identity registry.
:vartype disabled_device_count: long
"""
_validation = {
'total_device_count': {'readonly': True},
'enabled_device_count': {'readonly': True},
'disabled_device_count': {'readonly': True},
}
_attribute_map = {
'total_device_count': {'key': 'totalDeviceCount', 'type': 'long'},
'enabled_device_count': {'key': 'enabledDeviceCount', 'type': 'long'},
'disabled_device_count': {'key': 'disabledDeviceCount', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(RegistryStatistics, self).__init__(**kwargs)
self.total_device_count = None
self.enabled_device_count = None
self.disabled_device_count = None
class RouteCompilationError(msrest.serialization.Model):
"""Compilation error when evaluating route.
:ivar message: Route error message.
:vartype message: str
:ivar severity: Severity of the route error. Possible values include: "error", "warning".
:vartype severity: str or ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorSeverity
:ivar location: Location where the route error happened.
:vartype location: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorRange
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'severity': {'key': 'severity', 'type': 'str'},
'location': {'key': 'location', 'type': 'RouteErrorRange'},
}
def __init__(
self,
*,
message: Optional[str] = None,
severity: Optional[Union[str, "RouteErrorSeverity"]] = None,
location: Optional["RouteErrorRange"] = None,
**kwargs
):
"""
:keyword message: Route error message.
:paramtype message: str
:keyword severity: Severity of the route error. Possible values include: "error", "warning".
:paramtype severity: str or ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorSeverity
:keyword location: Location where the route error happened.
:paramtype location: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorRange
"""
super(RouteCompilationError, self).__init__(**kwargs)
self.message = message
self.severity = severity
self.location = location
class RouteErrorPosition(msrest.serialization.Model):
"""Position where the route error happened.
:ivar line: Line where the route error happened.
:vartype line: int
:ivar column: Column where the route error happened.
:vartype column: int
"""
_attribute_map = {
'line': {'key': 'line', 'type': 'int'},
'column': {'key': 'column', 'type': 'int'},
}
def __init__(
self,
*,
line: Optional[int] = None,
column: Optional[int] = None,
**kwargs
):
"""
:keyword line: Line where the route error happened.
:paramtype line: int
:keyword column: Column where the route error happened.
:paramtype column: int
"""
super(RouteErrorPosition, self).__init__(**kwargs)
self.line = line
self.column = column
class RouteErrorRange(msrest.serialization.Model):
"""Range of route errors.
:ivar start: Start where the route error happened.
:vartype start: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorPosition
:ivar end: End where the route error happened.
:vartype end: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorPosition
"""
_attribute_map = {
'start': {'key': 'start', 'type': 'RouteErrorPosition'},
'end': {'key': 'end', 'type': 'RouteErrorPosition'},
}
def __init__(
self,
*,
start: Optional["RouteErrorPosition"] = None,
end: Optional["RouteErrorPosition"] = None,
**kwargs
):
"""
:keyword start: Start where the route error happened.
:paramtype start: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorPosition
:keyword end: End where the route error happened.
:paramtype end: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorPosition
"""
super(RouteErrorRange, self).__init__(**kwargs)
self.start = start
self.end = end
class RouteProperties(msrest.serialization.Model):
"""The properties of a routing rule that your IoT hub uses to route messages to endpoints.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the route. The name can only include alphanumeric characters,
periods, underscores, and hyphens; it has a maximum length of 64 characters and must be unique.
:vartype name: str
:ivar source: Required. The source that the routing rule is to be applied to, such as
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:vartype source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:ivar condition: The condition that is evaluated to apply the routing rule. If no condition is
provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:vartype condition: str
:ivar endpoint_names: Required. The list of endpoints to which messages that satisfy the
condition are routed. Currently only one endpoint is allowed.
:vartype endpoint_names: list[str]
:ivar is_enabled: Required. Used to specify whether a route is enabled.
:vartype is_enabled: bool
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
name: str,
source: Union[str, "RoutingSource"],
endpoint_names: List[str],
is_enabled: bool,
condition: Optional[str] = None,
**kwargs
):
"""
:keyword name: Required. The name of the route. The name can only include alphanumeric
characters, periods, underscores, and hyphens; it has a maximum length of 64 characters and must
be unique.
:paramtype name: str
:keyword source: Required. The source that the routing rule is to be applied to, such as
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:paramtype source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:keyword condition: The condition that is evaluated to apply the routing rule. If no condition
is provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:paramtype condition: str
:keyword endpoint_names: Required. The list of endpoints to which messages that satisfy the
condition are routed. Currently only one endpoint is allowed.
:paramtype endpoint_names: list[str]
:keyword is_enabled: Required. Used to specify whether a route is enabled.
:paramtype is_enabled: bool
"""
super(RouteProperties, self).__init__(**kwargs)
self.name = name
self.source = source
self.condition = condition
self.endpoint_names = endpoint_names
self.is_enabled = is_enabled
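# Illustrative sketch, not part of the generated client: a route sending all device
# telemetry to a custom endpoint named "telemetry-events". The endpoint name and
# condition are hypothetical examples only.
def _example_route_properties():
    return RouteProperties(
        name="DeviceTelemetryRoute",
        source="DeviceMessages",
        condition="true",
        endpoint_names=["telemetry-events"],
        is_enabled=True,
    )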
class RoutingEndpoints(msrest.serialization.Model):
"""The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
:ivar service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:vartype service_bus_queues:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingServiceBusQueueEndpointProperties]
:ivar service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes the
messages to, based on the routing rules.
:vartype service_bus_topics:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingServiceBusTopicEndpointProperties]
:ivar event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:vartype event_hubs: list[~azure.mgmt.iothub.v2019_11_04.models.RoutingEventHubProperties]
:ivar storage_containers: The list of storage container endpoints that IoT hub routes messages
to, based on the routing rules.
:vartype storage_containers:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingStorageContainerProperties]
"""
_attribute_map = {
'service_bus_queues': {'key': 'serviceBusQueues', 'type': '[RoutingServiceBusQueueEndpointProperties]'},
'service_bus_topics': {'key': 'serviceBusTopics', 'type': '[RoutingServiceBusTopicEndpointProperties]'},
'event_hubs': {'key': 'eventHubs', 'type': '[RoutingEventHubProperties]'},
'storage_containers': {'key': 'storageContainers', 'type': '[RoutingStorageContainerProperties]'},
}
def __init__(
self,
*,
service_bus_queues: Optional[List["RoutingServiceBusQueueEndpointProperties"]] = None,
service_bus_topics: Optional[List["RoutingServiceBusTopicEndpointProperties"]] = None,
event_hubs: Optional[List["RoutingEventHubProperties"]] = None,
storage_containers: Optional[List["RoutingStorageContainerProperties"]] = None,
**kwargs
):
"""
:keyword service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:paramtype service_bus_queues:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingServiceBusQueueEndpointProperties]
:keyword service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes
the messages to, based on the routing rules.
:paramtype service_bus_topics:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingServiceBusTopicEndpointProperties]
:keyword event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:paramtype event_hubs: list[~azure.mgmt.iothub.v2019_11_04.models.RoutingEventHubProperties]
:keyword storage_containers: The list of storage container endpoints that IoT hub routes
messages to, based on the routing rules.
:paramtype storage_containers:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingStorageContainerProperties]
"""
super(RoutingEndpoints, self).__init__(**kwargs)
self.service_bus_queues = service_bus_queues
self.service_bus_topics = service_bus_topics
self.event_hubs = event_hubs
self.storage_containers = storage_containers
class RoutingEventHubProperties(msrest.serialization.Model):
"""The properties related to an event hub endpoint.
All required parameters must be populated in order to send to Azure.
:ivar connection_string: Required. The connection string of the event hub endpoint.
:vartype connection_string: str
:ivar name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:vartype name: str
:ivar subscription_id: The subscription identifier of the event hub endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the event hub endpoint.
:vartype resource_group: str
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
*,
connection_string: str,
name: str,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword connection_string: Required. The connection string of the event hub endpoint.
:paramtype connection_string: str
:keyword name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the event hub endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the event hub endpoint.
:paramtype resource_group: str
"""
super(RoutingEventHubProperties, self).__init__(**kwargs)
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
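# Illustrative sketch, not part of the generated client: a custom Event Hub endpoint
# definition. The connection string, subscription, and resource group are placeholder
# values and must not be treated as real or valid.
def _example_routing_event_hub_properties():
    return RoutingEventHubProperties(
        connection_string="Endpoint=sb://example.servicebus.windows.net/;SharedAccessKeyName=example;SharedAccessKey=placeholder",
        name="telemetry-events",
        subscription_id="00000000-0000-0000-0000-000000000000",
        resource_group="example-rg",
    )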
class RoutingMessage(msrest.serialization.Model):
"""Routing message.
:ivar body: Body of routing message.
:vartype body: str
:ivar app_properties: App properties.
:vartype app_properties: dict[str, str]
:ivar system_properties: System properties.
:vartype system_properties: dict[str, str]
"""
_attribute_map = {
'body': {'key': 'body', 'type': 'str'},
'app_properties': {'key': 'appProperties', 'type': '{str}'},
'system_properties': {'key': 'systemProperties', 'type': '{str}'},
}
def __init__(
self,
*,
body: Optional[str] = None,
app_properties: Optional[Dict[str, str]] = None,
system_properties: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword body: Body of routing message.
:paramtype body: str
:keyword app_properties: App properties.
:paramtype app_properties: dict[str, str]
:keyword system_properties: System properties.
:paramtype system_properties: dict[str, str]
"""
super(RoutingMessage, self).__init__(**kwargs)
self.body = body
self.app_properties = app_properties
self.system_properties = system_properties
class RoutingProperties(msrest.serialization.Model):
"""The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:ivar endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:vartype endpoints: ~azure.mgmt.iothub.v2019_11_04.models.RoutingEndpoints
:ivar routes: The list of user-provided routing rules that the IoT hub uses to route messages
to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and
a maximum of 5 routing rules are allowed for free hubs.
:vartype routes: list[~azure.mgmt.iothub.v2019_11_04.models.RouteProperties]
:ivar fallback_route: The properties of the route that is used as a fall-back route when none
of the conditions specified in the 'routes' section are met. This is an optional parameter.
When this property is not set, the messages which do not meet any of the conditions specified
in the 'routes' section get routed to the built-in eventhub endpoint.
:vartype fallback_route: ~azure.mgmt.iothub.v2019_11_04.models.FallbackRouteProperties
:ivar enrichments: The list of user-provided enrichments that the IoT hub applies to messages
to be delivered to built-in and custom endpoints. See: https://aka.ms/telemetryoneventgrid.
:vartype enrichments: list[~azure.mgmt.iothub.v2019_11_04.models.EnrichmentProperties]
"""
_attribute_map = {
'endpoints': {'key': 'endpoints', 'type': 'RoutingEndpoints'},
'routes': {'key': 'routes', 'type': '[RouteProperties]'},
'fallback_route': {'key': 'fallbackRoute', 'type': 'FallbackRouteProperties'},
'enrichments': {'key': 'enrichments', 'type': '[EnrichmentProperties]'},
}
def __init__(
self,
*,
endpoints: Optional["RoutingEndpoints"] = None,
routes: Optional[List["RouteProperties"]] = None,
fallback_route: Optional["FallbackRouteProperties"] = None,
enrichments: Optional[List["EnrichmentProperties"]] = None,
**kwargs
):
"""
:keyword endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:paramtype endpoints: ~azure.mgmt.iothub.v2019_11_04.models.RoutingEndpoints
:keyword routes: The list of user-provided routing rules that the IoT hub uses to route
messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid
hubs and a maximum of 5 routing rules are allowed for free hubs.
:paramtype routes: list[~azure.mgmt.iothub.v2019_11_04.models.RouteProperties]
:keyword fallback_route: The properties of the route that is used as a fall-back route when
none of the conditions specified in the 'routes' section are met. This is an optional
parameter. When this property is not set, the messages which do not meet any of the conditions
specified in the 'routes' section get routed to the built-in eventhub endpoint.
:paramtype fallback_route: ~azure.mgmt.iothub.v2019_11_04.models.FallbackRouteProperties
:keyword enrichments: The list of user-provided enrichments that the IoT hub applies to
messages to be delivered to built-in and custom endpoints. See:
https://aka.ms/telemetryoneventgrid.
:paramtype enrichments: list[~azure.mgmt.iothub.v2019_11_04.models.EnrichmentProperties]
"""
super(RoutingProperties, self).__init__(**kwargs)
self.endpoints = endpoints
self.routes = routes
self.fallback_route = fallback_route
self.enrichments = enrichments
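# Illustrative sketch, not part of the generated client: wiring the pieces above into
# a RoutingProperties payload. The helper functions used here are the illustrative
# examples defined earlier in this file, not SDK APIs.
def _example_routing_properties():
    return RoutingProperties(
        endpoints=RoutingEndpoints(event_hubs=[_example_routing_event_hub_properties()]),
        routes=[_example_route_properties()],
    )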
class RoutingServiceBusQueueEndpointProperties(msrest.serialization.Model):
"""The properties related to service bus queue endpoint types.
All required parameters must be populated in order to send to Azure.
:ivar connection_string: Required. The connection string of the service bus queue endpoint.
:vartype connection_string: str
:ivar name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual queue
name.
:vartype name: str
:ivar subscription_id: The subscription identifier of the service bus queue endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the service bus queue endpoint.
:vartype resource_group: str
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
*,
connection_string: str,
name: str,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword connection_string: Required. The connection string of the service bus queue endpoint.
:paramtype connection_string: str
:keyword name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual queue
name.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the service bus queue endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the service bus queue endpoint.
:paramtype resource_group: str
"""
super(RoutingServiceBusQueueEndpointProperties, self).__init__(**kwargs)
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingServiceBusTopicEndpointProperties(msrest.serialization.Model):
"""The properties related to service bus topic endpoint types.
All required parameters must be populated in order to send to Azure.
:ivar connection_string: Required. The connection string of the service bus topic endpoint.
:vartype connection_string: str
:ivar name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual topic
name.
:vartype name: str
:ivar subscription_id: The subscription identifier of the service bus topic endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the service bus topic endpoint.
:vartype resource_group: str
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
*,
connection_string: str,
name: str,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword connection_string: Required. The connection string of the service bus topic endpoint.
:paramtype connection_string: str
:keyword name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual topic
name.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the service bus topic endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the service bus topic endpoint.
:paramtype resource_group: str
"""
super(RoutingServiceBusTopicEndpointProperties, self).__init__(**kwargs)
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingStorageContainerProperties(msrest.serialization.Model):
"""The properties related to a storage container endpoint.
All required parameters must be populated in order to send to Azure.
:ivar connection_string: Required. The connection string of the storage account.
:vartype connection_string: str
:ivar name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:vartype name: str
:ivar subscription_id: The subscription identifier of the storage account.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the storage account.
:vartype resource_group: str
:ivar container_name: Required. The name of storage container in the storage account.
:vartype container_name: str
:ivar file_name_format: File name format for the blob. Default format is
{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be
reordered.
:vartype file_name_format: str
:ivar batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value
should be between 60 and 720 seconds. Default value is 300 seconds.
:vartype batch_frequency_in_seconds: int
:ivar max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value
should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:vartype max_chunk_size_in_bytes: int
:ivar encoding: Encoding that is used to serialize messages to blobs. Supported values are
'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Possible values include: "Avro",
"AvroDeflate", "JSON".
:vartype encoding: str or
~azure.mgmt.iothub.v2019_11_04.models.RoutingStorageContainerPropertiesEncoding
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
'container_name': {'required': True},
'batch_frequency_in_seconds': {'maximum': 720, 'minimum': 60},
'max_chunk_size_in_bytes': {'maximum': 524288000, 'minimum': 10485760},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'file_name_format': {'key': 'fileNameFormat', 'type': 'str'},
'batch_frequency_in_seconds': {'key': 'batchFrequencyInSeconds', 'type': 'int'},
'max_chunk_size_in_bytes': {'key': 'maxChunkSizeInBytes', 'type': 'int'},
'encoding': {'key': 'encoding', 'type': 'str'},
}
def __init__(
self,
*,
connection_string: str,
name: str,
container_name: str,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
file_name_format: Optional[str] = None,
batch_frequency_in_seconds: Optional[int] = None,
max_chunk_size_in_bytes: Optional[int] = None,
encoding: Optional[Union[str, "RoutingStorageContainerPropertiesEncoding"]] = None,
**kwargs
):
"""
:keyword connection_string: Required. The connection string of the storage account.
:paramtype connection_string: str
:keyword name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the storage account.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the storage account.
:paramtype resource_group: str
:keyword container_name: Required. The name of storage container in the storage account.
:paramtype container_name: str
:keyword file_name_format: File name format for the blob. Default format is
{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be
reordered.
:paramtype file_name_format: str
:keyword batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value
should be between 60 and 720 seconds. Default value is 300 seconds.
:paramtype batch_frequency_in_seconds: int
:keyword max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage.
Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:paramtype max_chunk_size_in_bytes: int
:keyword encoding: Encoding that is used to serialize messages to blobs. Supported values are
'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Possible values include: "Avro",
"AvroDeflate", "JSON".
:paramtype encoding: str or
~azure.mgmt.iothub.v2019_11_04.models.RoutingStorageContainerPropertiesEncoding
"""
super(RoutingStorageContainerProperties, self).__init__(**kwargs)
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
self.container_name = container_name
self.file_name_format = file_name_format
self.batch_frequency_in_seconds = batch_frequency_in_seconds
self.max_chunk_size_in_bytes = max_chunk_size_in_bytes
self.encoding = encoding
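# Illustrative sketch, not part of the generated client: a storage container endpoint
# writing Avro blobs every five minutes. The connection string is a placeholder, and
# batch frequency and chunk size stay within the validation bounds declared above.
def _example_routing_storage_container_properties():
    return RoutingStorageContainerProperties(
        connection_string="DefaultEndpointsProtocol=https;AccountName=example;AccountKey=placeholder",
        name="cold-storage",
        container_name="telemetry",
        batch_frequency_in_seconds=300,
        max_chunk_size_in_bytes=314572800,
        encoding="Avro",
    )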
class RoutingTwin(msrest.serialization.Model):
"""Twin reference input parameter. This is an optional parameter.
:ivar tags: A set of tags. Twin Tags.
:vartype tags: any
:ivar properties:
:vartype properties: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwinProperties
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': 'object'},
'properties': {'key': 'properties', 'type': 'RoutingTwinProperties'},
}
def __init__(
self,
*,
tags: Optional[Any] = None,
properties: Optional["RoutingTwinProperties"] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Twin Tags.
:paramtype tags: any
:keyword properties:
:paramtype properties: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwinProperties
"""
super(RoutingTwin, self).__init__(**kwargs)
self.tags = tags
self.properties = properties
class RoutingTwinProperties(msrest.serialization.Model):
"""RoutingTwinProperties.
:ivar desired: Twin desired properties.
:vartype desired: any
:ivar reported: Twin reported properties.
:vartype reported: any
"""
_attribute_map = {
'desired': {'key': 'desired', 'type': 'object'},
'reported': {'key': 'reported', 'type': 'object'},
}
def __init__(
self,
*,
desired: Optional[Any] = None,
reported: Optional[Any] = None,
**kwargs
):
"""
:keyword desired: Twin desired properties.
:paramtype desired: any
:keyword reported: Twin reported properties.
:paramtype reported: any
"""
super(RoutingTwinProperties, self).__init__(**kwargs)
self.desired = desired
self.reported = reported
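# Illustrative sketch, not part of the generated client: a RoutingTwin used as the
# optional twin input when testing routes. Tag and property values are hypothetical.
def _example_routing_twin():
    return RoutingTwin(
        tags={"deploymentZone": "west"},
        properties=RoutingTwinProperties(
            desired={"telemetryInterval": 30},
            reported={"telemetryInterval": 30},
        ),
    )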
class SharedAccessSignatureAuthorizationRule(msrest.serialization.Model):
"""The properties of an IoT hub shared access policy.
All required parameters must be populated in order to send to Azure.
:ivar key_name: Required. The name of the shared access policy.
:vartype key_name: str
:ivar primary_key: The primary key.
:vartype primary_key: str
:ivar secondary_key: The secondary key.
:vartype secondary_key: str
:ivar rights: Required. The permissions assigned to the shared access policy. Possible values
include: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect", "RegistryRead,
RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect", "RegistryWrite,
ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite, DeviceConnect",
"RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect".
:vartype rights: str or ~azure.mgmt.iothub.v2019_11_04.models.AccessRights
"""
_validation = {
'key_name': {'required': True},
'rights': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
'rights': {'key': 'rights', 'type': 'str'},
}
def __init__(
self,
*,
key_name: str,
rights: Union[str, "AccessRights"],
primary_key: Optional[str] = None,
secondary_key: Optional[str] = None,
**kwargs
):
"""
:keyword key_name: Required. The name of the shared access policy.
:paramtype key_name: str
:keyword primary_key: The primary key.
:paramtype primary_key: str
:keyword secondary_key: The secondary key.
:paramtype secondary_key: str
:keyword rights: Required. The permissions assigned to the shared access policy. Possible
values include: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect",
"RegistryRead, RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect",
"RegistryWrite, ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect,
DeviceConnect", "RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite,
DeviceConnect", "RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect,
DeviceConnect", "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect".
:paramtype rights: str or ~azure.mgmt.iothub.v2019_11_04.models.AccessRights
"""
super(SharedAccessSignatureAuthorizationRule, self).__init__(**kwargs)
self.key_name = key_name
self.primary_key = primary_key
self.secondary_key = secondary_key
self.rights = rights
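# Illustrative sketch (not part of the generated SDK): building an access policy from the
# model defined above. The key value is a placeholder, not a real secret.
def _example_shared_access_policy():
    return SharedAccessSignatureAuthorizationRule(
        key_name="iothubowner",
        rights="RegistryWrite, ServiceConnect, DeviceConnect",
        primary_key="<primary-key-placeholder>",
    )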
class SharedAccessSignatureAuthorizationRuleListResult(msrest.serialization.Model):
"""The list of shared access policies with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of shared access policies.
:vartype value:
list[~azure.mgmt.iothub.v2019_11_04.models.SharedAccessSignatureAuthorizationRule]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SharedAccessSignatureAuthorizationRule]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["SharedAccessSignatureAuthorizationRule"]] = None,
**kwargs
):
"""
:keyword value: The list of shared access policies.
:paramtype value:
list[~azure.mgmt.iothub.v2019_11_04.models.SharedAccessSignatureAuthorizationRule]
"""
super(SharedAccessSignatureAuthorizationRuleListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class StorageEndpointProperties(msrest.serialization.Model):
"""The properties of the Azure Storage endpoint for file upload.
All required parameters must be populated in order to send to Azure.
:ivar sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
file upload is valid. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
:vartype sas_ttl_as_iso8601: ~datetime.timedelta
:ivar connection_string: Required. The connection string for the Azure Storage account to which
files are uploaded.
:vartype connection_string: str
:ivar container_name: Required. The name of the root container where you upload files. The
container need not exist but should be creatable using the connectionString specified.
:vartype container_name: str
"""
_validation = {
'connection_string': {'required': True},
'container_name': {'required': True},
}
_attribute_map = {
'sas_ttl_as_iso8601': {'key': 'sasTtlAsIso8601', 'type': 'duration'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
}
def __init__(
self,
*,
connection_string: str,
container_name: str,
sas_ttl_as_iso8601: Optional[datetime.timedelta] = None,
**kwargs
):
"""
:keyword sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
file upload is valid. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
:paramtype sas_ttl_as_iso8601: ~datetime.timedelta
:keyword connection_string: Required. The connection string for the Azure Storage account to
which files are uploaded.
:paramtype connection_string: str
:keyword container_name: Required. The name of the root container where you upload files. The
container need not exist but should be creatable using the connectionString specified.
:paramtype container_name: str
"""
super(StorageEndpointProperties, self).__init__(**kwargs)
self.sas_ttl_as_iso8601 = sas_ttl_as_iso8601
self.connection_string = connection_string
self.container_name = container_name
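# Illustrative sketch (not part of the generated SDK): a file-upload storage endpoint with a
# one-hour SAS TTL. The connection string is a placeholder.
def _example_storage_endpoint():
    return StorageEndpointProperties(
        connection_string="DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>",
        container_name="fileupload",
        sas_ttl_as_iso8601=datetime.timedelta(hours=1),
    )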
class TagsResource(msrest.serialization.Model):
"""A container holding only the Tags for a resource, allowing the user to update the tags on an IoT Hub instance.
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
"""
super(TagsResource, self).__init__(**kwargs)
self.tags = tags
class TestAllRoutesInput(msrest.serialization.Model):
"""Input for testing all routes.
:ivar routing_source: Routing source. Possible values include: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:vartype routing_source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:ivar message: Routing message.
:vartype message: ~azure.mgmt.iothub.v2019_11_04.models.RoutingMessage
:ivar twin: Routing Twin Reference.
:vartype twin: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwin
"""
_attribute_map = {
'routing_source': {'key': 'routingSource', 'type': 'str'},
'message': {'key': 'message', 'type': 'RoutingMessage'},
'twin': {'key': 'twin', 'type': 'RoutingTwin'},
}
def __init__(
self,
*,
routing_source: Optional[Union[str, "RoutingSource"]] = None,
message: Optional["RoutingMessage"] = None,
twin: Optional["RoutingTwin"] = None,
**kwargs
):
"""
:keyword routing_source: Routing source. Possible values include: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:paramtype routing_source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:keyword message: Routing message.
:paramtype message: ~azure.mgmt.iothub.v2019_11_04.models.RoutingMessage
:keyword twin: Routing Twin Reference.
:paramtype twin: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwin
"""
super(TestAllRoutesInput, self).__init__(**kwargs)
self.routing_source = routing_source
self.message = message
self.twin = twin
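# Illustrative sketch (not part of the generated SDK): testing all routes against the
# device-message source with twin data, using only models defined in this module.
def _example_test_all_routes_input():
    twin = RoutingTwin(
        tags={"deviceType": "sensor"},
        properties=RoutingTwinProperties(desired={"interval": 30}, reported={"interval": 30}),
    )
    return TestAllRoutesInput(routing_source="DeviceMessages", twin=twin)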
class TestAllRoutesResult(msrest.serialization.Model):
"""Result of testing all routes.
:ivar routes: JSON-serialized array of matched routes.
:vartype routes: list[~azure.mgmt.iothub.v2019_11_04.models.MatchedRoute]
"""
_attribute_map = {
'routes': {'key': 'routes', 'type': '[MatchedRoute]'},
}
def __init__(
self,
*,
routes: Optional[List["MatchedRoute"]] = None,
**kwargs
):
"""
:keyword routes: JSON-serialized array of matched routes.
:paramtype routes: list[~azure.mgmt.iothub.v2019_11_04.models.MatchedRoute]
"""
super(TestAllRoutesResult, self).__init__(**kwargs)
self.routes = routes
class TestRouteInput(msrest.serialization.Model):
"""Input for testing route.
All required parameters must be populated in order to send to Azure.
:ivar message: Routing message.
:vartype message: ~azure.mgmt.iothub.v2019_11_04.models.RoutingMessage
:ivar route: Required. Route properties.
:vartype route: ~azure.mgmt.iothub.v2019_11_04.models.RouteProperties
:ivar twin: Routing Twin Reference.
:vartype twin: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwin
"""
_validation = {
'route': {'required': True},
}
_attribute_map = {
'message': {'key': 'message', 'type': 'RoutingMessage'},
'route': {'key': 'route', 'type': 'RouteProperties'},
'twin': {'key': 'twin', 'type': 'RoutingTwin'},
}
def __init__(
self,
*,
route: "RouteProperties",
message: Optional["RoutingMessage"] = None,
twin: Optional["RoutingTwin"] = None,
**kwargs
):
"""
:keyword message: Routing message.
:paramtype message: ~azure.mgmt.iothub.v2019_11_04.models.RoutingMessage
:keyword route: Required. Route properties.
:paramtype route: ~azure.mgmt.iothub.v2019_11_04.models.RouteProperties
:keyword twin: Routing Twin Reference.
:paramtype twin: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwin
"""
super(TestRouteInput, self).__init__(**kwargs)
self.message = message
self.route = route
self.twin = twin
class TestRouteResult(msrest.serialization.Model):
"""Result of testing one route.
:ivar result: Result of testing route. Possible values include: "undefined", "false", "true".
:vartype result: str or ~azure.mgmt.iothub.v2019_11_04.models.TestResultStatus
:ivar details: Detailed result of testing route.
:vartype details: ~azure.mgmt.iothub.v2019_11_04.models.TestRouteResultDetails
"""
_attribute_map = {
'result': {'key': 'result', 'type': 'str'},
'details': {'key': 'details', 'type': 'TestRouteResultDetails'},
}
def __init__(
self,
*,
result: Optional[Union[str, "TestResultStatus"]] = None,
details: Optional["TestRouteResultDetails"] = None,
**kwargs
):
"""
:keyword result: Result of testing route. Possible values include: "undefined", "false",
"true".
:paramtype result: str or ~azure.mgmt.iothub.v2019_11_04.models.TestResultStatus
:keyword details: Detailed result of testing route.
:paramtype details: ~azure.mgmt.iothub.v2019_11_04.models.TestRouteResultDetails
"""
super(TestRouteResult, self).__init__(**kwargs)
self.result = result
self.details = details
class TestRouteResultDetails(msrest.serialization.Model):
"""Detailed result of testing a route.
:ivar compilation_errors: JSON-serialized list of route compilation errors.
:vartype compilation_errors: list[~azure.mgmt.iothub.v2019_11_04.models.RouteCompilationError]
"""
_attribute_map = {
'compilation_errors': {'key': 'compilationErrors', 'type': '[RouteCompilationError]'},
}
def __init__(
self,
*,
compilation_errors: Optional[List["RouteCompilationError"]] = None,
**kwargs
):
"""
:keyword compilation_errors: JSON-serialized list of route compilation errors.
:paramtype compilation_errors:
list[~azure.mgmt.iothub.v2019_11_04.models.RouteCompilationError]
"""
super(TestRouteResultDetails, self).__init__(**kwargs)
self.compilation_errors = compilation_errors
class UserSubscriptionQuota(msrest.serialization.Model):
"""User subscription quota response.
:ivar id: IotHub type id.
:vartype id: str
:ivar type: Response type.
:vartype type: str
:ivar unit: Unit of IotHub type.
:vartype unit: str
:ivar current_value: Current number of IotHub type.
:vartype current_value: int
:ivar limit: Numerical limit on IotHub type.
:vartype limit: int
:ivar name: IotHub type.
:vartype name: ~azure.mgmt.iothub.v2019_11_04.models.Name
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'Name'},
}
def __init__(
self,
*,
id: Optional[str] = None,
type: Optional[str] = None,
unit: Optional[str] = None,
current_value: Optional[int] = None,
limit: Optional[int] = None,
name: Optional["Name"] = None,
**kwargs
):
"""
:keyword id: IotHub type id.
:paramtype id: str
:keyword type: Response type.
:paramtype type: str
:keyword unit: Unit of IotHub type.
:paramtype unit: str
:keyword current_value: Current number of IotHub type.
:paramtype current_value: int
:keyword limit: Numerical limit on IotHub type.
:paramtype limit: int
:keyword name: IotHub type.
:paramtype name: ~azure.mgmt.iothub.v2019_11_04.models.Name
"""
super(UserSubscriptionQuota, self).__init__(**kwargs)
self.id = id
self.type = type
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name
class UserSubscriptionQuotaListResult(msrest.serialization.Model):
"""Json-serialized array of User subscription quota response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value:
:vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.UserSubscriptionQuota]
:ivar next_link:
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[UserSubscriptionQuota]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["UserSubscriptionQuota"]] = None,
**kwargs
):
"""
:keyword value:
:paramtype value: list[~azure.mgmt.iothub.v2019_11_04.models.UserSubscriptionQuota]
"""
super(UserSubscriptionQuotaListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
|
Azure/azure-sdk-for-python
|
sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_11_04/models/_models_py3.py
|
Python
|
mit
| 123,002 | 0.003382 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import signal
import subprocess
import time
import traceback
from contextlib import contextmanager
import psutil
from pants.base.build_environment import get_buildroot
from pants.util.dirutil import read_file, rm_rf, safe_file_dump, safe_mkdir
logger = logging.getLogger(__name__)
@contextmanager
def swallow_psutil_exceptions():
"""A contextmanager that swallows standard psutil access exceptions."""
try:
yield
except (psutil.AccessDenied, psutil.NoSuchProcess):
# This masks common, but usually benign psutil process access exceptions that might be seen
# when accessing attributes/methods on psutil.Process objects.
pass
class ProcessGroup(object):
"""Wraps a logical group of processes and provides convenient access to ProcessManager objects."""
def __init__(self, name):
self._name = name
def _instance_from_process(self, process):
"""Default converter from psutil.Process to process instance classes for subclassing."""
return ProcessManager(name=process.name(), pid=process.pid, process_name=process.name())
def iter_processes(self, proc_filter=None):
proc_filter = proc_filter or (lambda x: True)
with swallow_psutil_exceptions():
for proc in (x for x in psutil.process_iter() if proc_filter(x)):
yield proc
def iter_instances(self, *args, **kwargs):
for item in self.iter_processes(*args, **kwargs):
yield self._instance_from_process(item)
class ProcessMetadataManager(object):
""""Manages contextual, on-disk process metadata."""
FILE_WAIT_SEC = 10
WAIT_INTERVAL_SEC = .1
PID_DIR_NAME = '.pids' # TODO(kwlzn): Make this configurable.
class MetadataError(Exception): pass
class Timeout(Exception): pass
@staticmethod
def _maybe_cast(item, caster):
"""Given a casting function, attempt to cast to that type while masking common cast exceptions.
N.B. This is mostly suitable for casting string types to numeric types - e.g. a port number
read from disk into an int.
:param func caster: A casting callable (e.g. `int`).
:returns: The result of caster(item) or item if TypeError or ValueError are raised during cast.
"""
try:
return caster(item)
except (TypeError, ValueError):
# N.B. the TypeError catch here (already) protects against the case that caster is None.
return item
@classmethod
def _get_metadata_dir_by_name(cls, name):
"""Retrieve the metadata dir by name.
This should always live outside of the workdir to survive a clean-all.
"""
return os.path.join(get_buildroot(), cls.PID_DIR_NAME, name)
@classmethod
def _maybe_init_metadata_dir_by_name(cls, name):
"""Initialize the metadata directory for a named identity if it doesn't exist."""
safe_mkdir(cls._get_metadata_dir_by_name(name))
@classmethod
def read_metadata_by_name(cls, name, metadata_key, caster=None):
"""Read process metadata using a named identity.
:param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
:param string metadata_key: The metadata key (e.g. 'pid').
:param func caster: A casting callable to apply to the read value (e.g. `int`).
"""
try:
file_path = os.path.join(cls._get_metadata_dir_by_name(name), metadata_key)
return cls._maybe_cast(read_file(file_path).strip(), caster)
except (IOError, OSError):
return None
@classmethod
def write_metadata_by_name(cls, name, metadata_key, metadata_value):
"""Write process metadata using a named identity.
:param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
:param string metadata_key: The metadata key (e.g. 'pid').
:param string metadata_value: The metadata value (e.g. '1729').
"""
cls._maybe_init_metadata_dir_by_name(name)
file_path = os.path.join(cls._get_metadata_dir_by_name(name), metadata_key)
safe_file_dump(file_path, metadata_value)
@classmethod
def _deadline_until(cls, closure, timeout, wait_interval=WAIT_INTERVAL_SEC):
"""Execute a function/closure repeatedly until a True condition or timeout is met.
:param func closure: the function/closure to execute (should not block for long periods of time
and must return True on success).
:param float timeout: the maximum amount of time to wait for a true result from the closure in
seconds. N.B. this is timing based, so won't be exact if the runtime of
the closure exceeds the timeout.
:param float wait_interval: the amount of time to sleep between closure invocations.
:raises: :class:`ProcessManager.Timeout` on execution timeout.
"""
deadline = time.time() + timeout
while 1:
if closure():
return True
elif time.time() > deadline:
raise cls.Timeout('exceeded timeout of {} seconds for {}'.format(timeout, closure))
elif wait_interval:
time.sleep(wait_interval)
@classmethod
def _wait_for_file(cls, filename, timeout=FILE_WAIT_SEC, want_content=True):
"""Wait up to timeout seconds for filename to appear with a non-zero size or raise Timeout()."""
def file_waiter():
return os.path.exists(filename) and (not want_content or os.path.getsize(filename))
try:
return cls._deadline_until(file_waiter, timeout)
except cls.Timeout:
# Re-raise with a more helpful exception message.
raise cls.Timeout('exceeded timeout of {} seconds while waiting for file {} to appear'
.format(timeout, filename))
@classmethod
def await_metadata_by_name(cls, name, metadata_key, timeout, caster=None):
"""Block up to a timeout for process metadata to arrive on disk.
:param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
:param string metadata_key: The metadata key (e.g. 'pid').
:param int timeout: The deadline to write metadata.
:param type caster: A type-casting callable to apply to the read value (e.g. int, str).
:returns: The value of the metadata key (read from disk post-write).
:raises: :class:`ProcessMetadataManager.Timeout` on timeout.
"""
file_path = os.path.join(cls._get_metadata_dir_by_name(name), metadata_key)
cls._wait_for_file(file_path, timeout=timeout)
return cls.read_metadata_by_name(name, metadata_key, caster)
@classmethod
def purge_metadata_by_name(cls, name):
"""Purge a processes metadata directory.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
"""
meta_dir = cls._get_metadata_dir_by_name(name)
logging.debug('purging metadata directory: {}'.format(meta_dir))
try:
rm_rf(meta_dir)
except OSError as e:
raise cls.MetadataError('failed to purge metadata directory {}: {!r}'.format(meta_dir, e))
class ProcessManager(ProcessMetadataManager):
"""Subprocess/daemon management mixin/superclass. Not intended to be thread-safe."""
class InvalidCommandOutput(Exception): pass
class NonResponsiveProcess(Exception): pass
class ExecutionError(Exception):
def __init__(self, message, output=None):
super(ProcessManager.ExecutionError, self).__init__(message)
self.message = message
self.output = output
def __repr__(self):
return '{}(message={!r}, output={!r})'.format(type(self).__name__, self.message, self.output)
KILL_WAIT_SEC = 5
KILL_CHAIN = (signal.SIGTERM, signal.SIGKILL)
def __init__(self, name, pid=None, socket=None, process_name=None, socket_type=int):
"""
:param string name: The process identity/name (e.g. 'pantsd' or 'ng_Zinc').
:param int pid: The process pid. Overrides fetching of the self.pid @property.
:param string socket: The socket metadata. Overrides fetching of the self.socket @property.
:param string process_name: The process name for cmdline executable name matching.
:param type socket_type: The type to be used for socket type casting (e.g. int).
"""
self._name = name
self._pid = pid
self._socket = socket
self._socket_type = socket_type
self._process_name = process_name
self._buildroot = get_buildroot()
self._process = None
@property
def name(self):
"""The logical name/label of the process."""
return self._name
@property
def process_name(self):
"""The logical process name. If defined, this is compared to exe_name for stale pid checking."""
return self._process_name
@property
def cmdline(self):
"""The process commandline. e.g. ['/usr/bin/python2.7', 'pants.pex'].
:returns: The command line or else `None` if the underlying process has died.
"""
with swallow_psutil_exceptions():
process = self._as_process()
if process:
return process.cmdline()
return None
@property
def cmd(self):
"""The first element of the process commandline e.g. '/usr/bin/python2.7'.
:returns: The first element of the process command line or else `None` if the underlying
process has died.
"""
return (self.cmdline or [None])[0]
@property
def pid(self):
"""The running processes pid (or None)."""
return self._pid or self.read_metadata_by_name(self._name, 'pid', int)
@property
def socket(self):
"""The running processes socket/port information (or None)."""
return self._socket or self.read_metadata_by_name(self._name, 'socket', self._socket_type)
@classmethod
def get_subprocess_output(cls, *args):
"""Get the output of an executed command.
:param *args: An iterable representing the command to execute (e.g. ['ls', '-al']).
:raises: `ProcessManager.ExecutionError` on `OSError` or `CalledProcessError`.
:returns: The output of the command.
"""
try:
return subprocess.check_output(*args, stderr=subprocess.STDOUT)
except (OSError, subprocess.CalledProcessError) as e:
subprocess_output = getattr(e, 'output', '').strip()
raise cls.ExecutionError(str(e), subprocess_output)
def await_pid(self, timeout):
"""Wait up to a given timeout for a process to write pid metadata."""
return self.await_metadata_by_name(self._name, 'pid', timeout, int)
def await_socket(self, timeout):
"""Wait up to a given timeout for a process to write socket info."""
return self.await_metadata_by_name(self._name, 'socket', timeout, self._socket_type)
def write_pid(self, pid):
"""Write the current processes PID to the pidfile location"""
self.write_metadata_by_name(self._name, 'pid', str(pid))
def write_socket(self, socket_info):
"""Write the local processes socket information (TCP port or UNIX socket)."""
self.write_metadata_by_name(self._name, 'socket', str(socket_info))
def write_named_socket(self, socket_name, socket_info):
"""A multi-tenant, named alternative to ProcessManager.write_socket()."""
self.write_metadata_by_name(self._name, 'socket_{}'.format(socket_name), str(socket_info))
def _as_process(self):
"""Returns a psutil `Process` object wrapping our pid.
NB: Even with a process object in hand, subsequent method calls against it can always raise
`NoSuchProcess`. Care is needed to document the raises in the public API or else trap them and
do something sensible for the API.
:returns: a psutil Process object or else None if we have no pid.
:rtype: :class:`psutil.Process`
:raises: :class:`psutil.NoSuchProcess` if the process identified by our pid has died.
"""
if self._process is None and self.pid:
self._process = psutil.Process(self.pid)
return self._process
def is_dead(self):
"""Return a boolean indicating whether the process is dead or not."""
return not self.is_alive()
def is_alive(self, extended_check=None):
"""Return a boolean indicating whether the process is running or not.
:param func extended_check: An additional callable that will be invoked to perform an extended
liveness check. This callable should take a single argument of a
`psutil.Process` instance representing the context-local process
and return a boolean True/False to indicate alive vs not alive.
"""
try:
process = self._as_process()
return not (
# Can happen if we don't find our pid.
(not process) or
# Check for walkers.
(process.status() == psutil.STATUS_ZOMBIE) or
# Check for stale pids.
(self.process_name and self.process_name != process.name()) or
# Extended checking.
(extended_check and not extended_check(process))
)
except (psutil.NoSuchProcess, psutil.AccessDenied):
# On some platforms, accessing attributes of a zombie'd Process results in NoSuchProcess.
return False
def purge_metadata(self, force=False):
"""Instance-based version of ProcessMetadataManager.purge_metadata_by_name() that checks
for process liveness before purging metadata.
:param bool force: If True, skip process liveness check before purging metadata.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
"""
if not force and self.is_alive():
raise self.MetadataError('cannot purge metadata for a running process!')
super(ProcessManager, self).purge_metadata_by_name(self._name)
def _kill(self, kill_sig):
"""Send a signal to the current process."""
if self.pid:
os.kill(self.pid, kill_sig)
def terminate(self, signal_chain=KILL_CHAIN, kill_wait=KILL_WAIT_SEC, purge=True):
"""Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL)."""
alive = self.is_alive()
if alive:
logger.debug('terminating {}'.format(self._name))
for signal_type in signal_chain:
pid = self.pid
try:
logger.debug('sending signal {} to pid {}'.format(signal_type, pid))
self._kill(signal_type)
except OSError as e:
logger.warning('caught OSError({e!s}) during attempt to kill -{signal} {pid}!'
.format(e=e, signal=signal_type, pid=pid))
# Wait up to kill_wait seconds to terminate or move onto the next signal.
try:
if self._deadline_until(self.is_dead, kill_wait):
alive = False
logger.debug('successfully terminated pid {}'.format(pid))
break
except self.Timeout:
# Loop to the next kill signal on timeout.
pass
if alive:
raise self.NonResponsiveProcess('failed to kill pid {pid} with signals {chain}'
.format(pid=self.pid, chain=signal_chain))
if purge:
self.purge_metadata(force=True)
def daemonize(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None,
write_pid=True):
"""Perform a double-fork, execute callbacks and write the child pid file.
The double-fork here is necessary to truly daemonize the subprocess such that it can never
take control of a tty. The initial fork and setsid() creates a new, isolated process group
and also makes the first child a session leader (which can still acquire a tty). By forking a
second time, we ensure that the second child can never acquire a controlling terminal because
it's no longer a session leader - but it now has its own separate process group.
Additionally, a normal daemon implementation would typically perform an os.umask(0) to reset
the processes file mode creation mask post-fork. We do not do this here (and in daemon_spawn
below) due to the fact that the daemons that pants would run are typically personal user
daemons. Having a disparate umask from pre-vs-post fork causes files written in each phase to
differ in their permissions without good reason - in this case, we want to inherit the umask.
"""
self.purge_metadata()
self.pre_fork(**pre_fork_opts or {})
pid = os.fork()
if pid == 0:
os.setsid()
second_pid = os.fork()
if second_pid == 0:
try:
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
logging.critical(traceback.format_exc())
os._exit(0)
else:
try:
if write_pid: self.write_pid(second_pid)
self.post_fork_parent(**post_fork_parent_opts or {})
except Exception:
logging.critical(traceback.format_exc())
os._exit(0)
else:
# This prevents un-reaped, throw-away parent processes from lingering in the process table.
os.waitpid(pid, 0)
def daemon_spawn(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None):
"""Perform a single-fork to run a subprocess and write the child pid file.
Use this if your post_fork_child block invokes a subprocess via subprocess.Popen(). In this
case, a second fork such as used in daemonize() is extraneous given that Popen() also forks.
Using this daemonization method vs daemonize() leaves the responsibility of writing the pid
to the caller to allow for library-agnostic flexibility in subprocess execution.
"""
self.purge_metadata()
self.pre_fork(**pre_fork_opts or {})
pid = os.fork()
if pid == 0:
try:
os.setsid()
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
logging.critical(traceback.format_exc())
os._exit(0)
else:
try:
self.post_fork_parent(**post_fork_parent_opts or {})
except Exception:
logging.critical(traceback.format_exc())
def pre_fork(self):
"""Pre-fork callback for subclasses."""
pass
def post_fork_child(self):
"""Pre-fork child callback for subclasses."""
pass
def post_fork_parent(self):
"""Post-fork parent callback for subclasses."""
pass
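# Illustrative sketch (not part of pants): a minimal ProcessManager subclass wired up for
# daemonize(). The hook names are the real ones defined above; each body is placeholder
# behaviour. Usage sketch: _ExampleDaemon('example').daemonize(), after which the original
# process can call _ExampleDaemon('example').await_pid(timeout=10) to read the pid file.
class _ExampleDaemon(ProcessManager):
  """Example only: shows where the fork hooks fit around daemonize()."""
  def pre_fork(self):
    # Runs in the original process before any fork, e.g. configuration validation.
    pass
  def post_fork_child(self):
    # Runs in the double-forked grandchild; a real daemon would enter its main loop here.
    pass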
|
dbentley/pants
|
src/python/pants/pantsd/process_manager.py
|
Python
|
apache-2.0
| 18,348 | 0.011609 |
#
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio SAT_OBSERVER module. It collects the Python-level pieces of the
sat_observer out-of-tree module and re-exports the SWIG-generated blocks (python/__init__.py).
'''
# import swig generated symbols into the sat_observer namespace
try:
# this might fail if the module is python-only
from sat_observer_swig import *
except ImportError:
pass
# import any pure python here
#
|
MLAB-project/satellite-observer
|
gr-sat_observer/python/__init__.py
|
Python
|
gpl-3.0
| 1,155 | 0.002597 |
from datetime import datetime
import subprocess
import logging
import os
from flask import Flask, current_app
from flask.ext.sqlalchemy import SQLAlchemy
from jinja2 import FileSystemLoader
from werkzeug.local import LocalProxy
import yaml
from flask.ext.cache import Cache
from bitcoinrpc import AuthServiceProxy
root = os.path.abspath(os.path.dirname(__file__) + '/../')
db = SQLAlchemy()
cache = Cache()
coinserv = LocalProxy(
lambda: getattr(current_app, 'rpc_connection', None))
def create_app(config='/config.yml', celery=False):
# initialize our flask application
app = Flask(__name__, static_folder='../static', static_url_path='/static')
# set our template path and configs
app.jinja_loader = FileSystemLoader(os.path.join(root, 'templates'))
config_vars = yaml.load(open(root + config))
# inject all the yaml configs
app.config.update(config_vars)
app.logger.info(app.config)
    app.rpc_connection = AuthServiceProxy(
        "http://{0}:{1}@{2}:{3}/"
        .format(app.config['coinserv']['username'],
                app.config['coinserv']['password'],
                app.config['coinserv']['address'],
                app.config['coinserv']['port']),
        pool_kwargs=dict(maxsize=app.config.get('maxsize', 10)))
# add the debug toolbar if we're in debug mode...
if app.config['DEBUG']:
from flask_debugtoolbar import DebugToolbarExtension
DebugToolbarExtension(app)
app.logger.handlers[0].setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(filename)s:%(lineno)d]'))
# register all our plugins
db.init_app(app)
cache_config = {'CACHE_TYPE': 'redis'}
cache_config.update(app.config.get('main_cache', {}))
cache.init_app(app, config=cache_config)
if not celery:
hdlr = logging.FileHandler(app.config.get('log_file', 'webserver.log'))
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
app.logger.addHandler(hdlr)
app.logger.setLevel(logging.INFO)
# try and fetch the git version information
try:
output = subprocess.check_output("git show -s --format='%ci %h'",
shell=True).strip().rsplit(" ", 1)
app.config['hash'] = output[1]
app.config['revdate'] = output[0]
# celery won't work with this, so set some default
except Exception:
app.config['hash'] = ''
app.config['revdate'] = ''
# filters for jinja
@app.template_filter('time_ago')
def pretty_date(time=False):
"""
Get a datetime object or a int() Epoch timestamp and return a
pretty string like 'an hour ago', 'Yesterday', '3 months ago',
'just now', etc
"""
now = datetime.utcnow()
if type(time) is int:
diff = now - datetime.fromtimestamp(time)
elif isinstance(time, datetime):
diff = now - time
elif not time:
diff = now - now
second_diff = diff.seconds
day_diff = diff.days
if day_diff < 0:
return ''
if day_diff == 0:
if second_diff < 60:
return str(second_diff) + " seconds ago"
if second_diff < 120:
return "a minute ago"
if second_diff < 3600:
return str(second_diff / 60) + " minutes ago"
if second_diff < 7200:
return "an hour ago"
if second_diff < 86400:
return str(second_diff / 3600) + " hours ago"
if day_diff == 1:
return "Yesterday"
if day_diff < 7:
return str(day_diff) + " days ago"
if day_diff < 31:
return str(day_diff/7) + " weeks ago"
if day_diff < 365:
return str(day_diff/30) + " months ago"
return str(day_diff/365) + " years ago"
from .tasks import celery
celery.conf.update(app.config)
# Route registration
# =========================================================================
from . import views, models, api, rpc_views
app.register_blueprint(views.main)
app.register_blueprint(api.api, url_prefix='/api')
return app
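# Illustrative usage sketch (not part of the original module); assumes a valid config.yml at
# the repository root, as read by create_app() above:
#     from simplecoin import create_app
#     app = create_app()
#     app.run(debug=app.config.get('DEBUG', False))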
|
fscook/simplevert
|
simplecoin/__init__.py
|
Python
|
mit
| 4,347 | 0.00046 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (http://tiny.be). All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import avanzosc_french_amortization
import wizard
|
avanzosc/avanzosc6.1
|
avanzosc_french_amortization/__init__.py
|
Python
|
agpl-3.0
| 1,037 | 0.001929 |
#!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RankedChoiceRestaurants.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
kc0bfv/RankedChoiceRestaurants
|
manage.py
|
Python
|
gpl-3.0
| 822 | 0.001217 |
#!/usr/bin/env python
##########################################################################
# run/ec2-setup/spot.py
#
# Part of Project Thrill - http://project-thrill.org
#
# Copyright (C) 2015 Matthias Stumpp <mstumpp@gmail.com>
#
# All rights reserved. Published under the BSD-2 license in the LICENSE file.
##########################################################################
import boto3
import time
import json
import datetime
import sys
with open('config.json') as data_file:
data = json.load(data_file)
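# The expected shape of config.json is inferred from the keys used below; all values are
# placeholders, adjust them to your own account/region:
# {
#   "SPOT_PRICE": "0.10", "COUNT": 4, "TYPE": "one-time",
#   "AMI_ID": "ami-xxxxxxxx", "EC2_KEY_HANDLE": "my-keypair",
#   "INSTANCE_TYPE": "m3.large", "SECGROUP_HANDLE": "default",
#   "ZONE": "us-east-1a", "VOL_SNAPSHOT_ID": "", "DEVICE": "/dev/sdb"
# }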
client = boto3.client('ec2')
ec2 = boto3.resource('ec2')
job_id = int(time.time())
blockMappings = [{'DeviceName': '/dev/sda1',
'Ebs': {
'VolumeSize': 8,
'DeleteOnTermination': True,
'VolumeType': 'gp2'
}
}]
if data["VOL_SNAPSHOT_ID"]:
blockMappings.append(
{
'DeviceName': data["DEVICE"],
'Ebs': {
'SnapshotId': data["VOL_SNAPSHOT_ID"],
'DeleteOnTermination': True,
'VolumeType': 'gp2'
}
})
response = client.request_spot_instances(SpotPrice=data["SPOT_PRICE"],
InstanceCount=data["COUNT"],
Type=data["TYPE"],
#ValidFrom=datetime.datetime(2015, 10, 11, 18, 10, 00),
ValidUntil=datetime.datetime(2015, 10, 11, 19, 37, 00),
LaunchSpecification={
'ImageId' : data["AMI_ID"],
'KeyName' : data["EC2_KEY_HANDLE"],
'InstanceType' : data["INSTANCE_TYPE"],
'SecurityGroups' : [ data["SECGROUP_HANDLE"] ],
                                             'Placement' : { 'AvailabilityZone': data["ZONE"] },
                                             'BlockDeviceMappings' : blockMappings
})
request_ids = []
for request in response['SpotInstanceRequests']:
request_ids.append(request['SpotInstanceRequestId'])
fulfilled_instances = []
loop = True;
print "waiting for instances to get fulfilled..."
while loop:
requests = client.describe_spot_instance_requests(SpotInstanceRequestIds=request_ids)
for request in requests['SpotInstanceRequests']:
if request['State'] in ['closed', 'cancelled', 'failed']:
print request['SpotInstanceRequestId'] + " " + request['State']
loop = False
break; # TODO(ms) ensure running instances are terminated
        if 'InstanceId' in request and request['InstanceId'] not in fulfilled_instances:
fulfilled_instances.append(request['InstanceId'])
print request['InstanceId'] + " running..."
if len(fulfilled_instances) == int(data["COUNT"]):
print 'all requested instances are fulfilled'
break;
time.sleep(5)
if loop == False:
print "unable to fulfill all requested instances... aborting..."
sys.exit();
# add tag to each instance
for instance_id in fulfilled_instances:
    ec2.Instance(instance_id).create_tags(Tags=[{'Key': 'JobId', 'Value': str(job_id)}])
# ensure all instances are running
loop = True;
while loop:
loop = False
    response = client.describe_instance_status(InstanceIds=fulfilled_instances, IncludeAllInstances=True)
for status in response['InstanceStatuses']:
if status['InstanceState']['Name'] != 'running':
loop = True
print "all instances are running..."
print str(data["COUNT"]) + " instances up and running! JobId: " + str(job_id)
##########################################################################
|
manpen/thrill
|
run/ec2-setup/spot.py
|
Python
|
bsd-2-clause
| 3,829 | 0.00888 |
import logging
from unittest import TestCase
try:
    # Python 3+
from unittest.mock import patch # noqa: F401
except ImportError:
from mock import patch # noqa: F401
try:
    # Python 3.3+
from unittest.mock import MagicMock, ANY # noqa: F401
except ImportError:
from mock import MagicMock, ANY # noqa: F401
import requests
from reviewrot.pagurestack import PagureService
from . import mock_pagure
PATH = 'reviewrot.pagurestack.'
# Disable logging to avoid messing up test output
logging.disable(logging.CRITICAL)
class PagureTest(TestCase):
"""
This class represents the Pagure test cases
"""
def setUp(self):
"""
Setting up the testing environment
"""
# Mock Last Comment
self.mock_last_comment = MagicMock()
# Mock Datetime
self.mock_utcfromtimestamp = MagicMock()
self.mock_utcfromtimestamp.strftime.return_value = 'mock_date'
# Mock Datetime ValueError
self.mock_utcfromtimestamp_error = MagicMock()
self.mock_utcfromtimestamp_error.strftime.return_value = '2019-01-05 12:12:12'
# Mock Age
self.mock_age = MagicMock()
self.mock_age.state = 'mock_age_state'
@patch(PATH + 'urllib')
@patch(PATH + 'hashlib')
@patch(PATH + 'PagureService._call_api')
def test_avatar_with_url(self, mock_call_api, mock_hashlib, mock_urllib):
"""
Tests '_avatar' function where we have an avatar url
"""
# Set up mock return values and side effects
mock_response = {'user': {'avatar_url': 'dummy_avatar_url'}}
mock_url_parts = MagicMock()
mock_url_parts.return_value = 'mock_url_parts'
mock_url_parts.query = 'mock_url_parts_query'
mock_url_query = MagicMock()
mock_url_query.return_value = 'mock_url_query'
mock_urllib.parse.urlparse.return_value = mock_url_parts
mock_urllib.parse.parse_qs.return_value = mock_url_query
mock_urllib.parse.urlencode.return_value = mock_url_query
mock_urllib.parse.urlunparse.return_value = "dummy_avatar_url"
mock_call_api.return_value = mock_response
# Call function
response = PagureService()._avatar(
username='dummy_user'
)
# Validate function calls and response
self.assertEqual(response, 'dummy_avatar_url')
mock_urllib.parse.urlparse.assert_called_with('dummy_avatar_url')
mock_urllib.parse.parse_qs.assert_called_with('mock_url_parts_query')
mock_call_api.assert_called_with(
url='https://pagure.io/api/0/user/dummy_user',
ssl_verify=True)
mock_url_query.update.assert_called_with({'s': 64, 'd': 'retro'})
mock_urllib.parse.urlencode.assert_called_with(mock_url_query, doseq=True)
mock_urllib.parse.urlunparse.assert_called_with(
mock_url_parts[:4] + (mock_url_query,) + mock_url_parts[5:]
)
mock_hashlib.assert_not_called()
@patch(PATH + 'urllib')
@patch(PATH + 'hashlib')
@patch(PATH + 'PagureService._call_api')
def test_avatar_without_url(self, mock_call_api, mock_hashlib, mock_urllib):
"""
Tests '_avatar' function where we don't have an avatar url
"""
# Set up mock return values and side effects
mock_hash = MagicMock()
mock_hash.hexdigest.return_value = 'mock_idx'
mock_call_api.return_value = {}
mock_urllib.parse.urlencode.return_value = "mock_query"
mock_hashlib.sha256.return_value = mock_hash
# Call function
response = PagureService()._avatar(
username='dummy_user'
)
# Validate function calls and response
mock_urllib.parse.urlencode.assert_called_with({'s': 64, 'd': 'retro'})
mock_call_api.assert_called_with(
url='https://pagure.io/api/0/user/dummy_user',
ssl_verify=True
)
mock_hashlib.sha256.assert_called_once_with(
b'http://dummy_user.id.fedoraproject.org/')
self.assertEqual(
response,
'https://seccdn.libravatar.org/avatar/mock_idx?mock_query'
)
mock_urllib.parse.urlparse.assert_not_called()
mock_urllib.parse.parse_qs.assert_not_called()
mock_urllib.parse.urlunparse.assert_not_called()
@patch(PATH + 'urllib')
@patch(PATH + 'hashlib')
@patch(PATH + 'PagureService._call_api')
def test_avatar_ValueError(self, mock_call_api, mock_hashlib, mock_urllib):
"""
Tests '_avatar' function where we get a HTTPError and raise a ValueError
"""
# Set up mock return values and side effects
mock_call_api.side_effect = requests.exceptions.HTTPError
# Call function
with self.assertRaises(ValueError):
PagureService()._avatar(
username='dummy_user'
)
# Validate function calls and response
mock_call_api.assert_called_with(
url='https://pagure.io/api/0/user/dummy_user',
ssl_verify=True
)
mock_urllib.parse.urlencode.assert_not_called()
mock_hashlib.sha256.assert_not_called()
mock_urllib.parse.parse_qs.assert_not_called()
mock_urllib.parse.urlunparse.assert_not_called()
@patch(PATH + 'LastComment')
@patch(PATH + 'datetime')
def test_last_comment_no_comments(self, mock_datetime, mock_LastComment):
"""
Tests 'get_last_comment' with no comments
"""
response = PagureService().get_last_comment(
res={'comments': {}}
)
mock_datetime.utcfromtimestamp.assert_not_called()
mock_LastComment.assert_not_called()
self.assertEqual(None, response)
@patch(PATH + 'LastComment')
@patch(PATH + 'datetime')
def test_last_comment(self, mock_datetime, mock_LastComment):
"""
Tests 'get_last_comment'
"""
# Set up mock return values and side effects
mock_datetime.utcfromtimestamp.return_value = 'mock_date'
mock_LastComment.return_value = 'mock_return_value'
# Call function
response = PagureService().get_last_comment(
res={'comments': [{
'date_created': '1',
'comment': 'mock_comment',
'user': {'name': 'mock_name'}
}]}
)
# Validate function calls and response
mock_datetime.utcfromtimestamp.assert_called_with(1)
mock_LastComment.assert_called_with(
author='mock_name',
body='mock_comment',
created_at='mock_date'
)
self.assertEqual('mock_return_value', response)
@patch(PATH + 'PagureService._call_api')
@patch(PATH + 'PagureService.get_last_comment')
@patch(PATH + 'datetime')
@patch(PATH + 'PagureService.check_request_state')
@patch(PATH + 'PagureService.has_new_comments')
@patch(PATH + 'PagureReview')
@patch(PATH + 'PagureService._avatar')
def test_request_reviews_with_repo(self,
mock_avatar,
mock_PagureReview,
mock_has_new_comments,
mock_check_request_state,
mock_datetime,
mock_get_last_comment,
mock_call_api):
"""
Tests 'request_reviews' function with repos and:
* no last comment,
* check_request_state returns True
* no errors,
* no namespace
"""
# Set up mock return values and side effects
mock_check_request_state.return_value = True
mock_avatar.return_value = 'dummy_avatar'
mock_get_last_comment.return_value = 'dummy_last_comment'
mock_datetime.utcfromtimestamp.return_value = self.mock_utcfromtimestamp
mock_datetime.strptime.return_value = 'mock_strptime_date'
mock_PagureReview.return_value = '1'
mock_call_api.return_value = mock_pagure.mock_api_call_return_value()
# Call function
response = PagureService().request_reviews(
user_name='dummy_user',
repo_name='dummy_repo'
)
# Validate function calls and response
mock_call_api.assert_called_with(
url='https://pagure.io/api/0/dummy_user/dummy_repo/pull-requests',
ssl_verify=True)
mock_get_last_comment.assert_called_with(
mock_call_api.return_value['requests'][0]
)
mock_datetime.strptime.assert_called_with('mock_date', '%Y-%m-%d %H:%M:%S.%f')
mock_has_new_comments.assert_not_called()
mock_check_request_state.assert_called_with('mock_strptime_date', None)
mock_avatar.assert_called_with(
'dummy_user', ssl_verify=True
)
mock_PagureReview.assert_called_with(
user='dummy_user',
title='dummy_title',
url='https://pagure.io/mock_repo_reference/pull-request/mock_id',
time='mock_strptime_date',
updated_time='mock_strptime_date',
comments=3,
image='dummy_avatar',
last_comment='dummy_last_comment',
project_name='mock_repo_reference',
project_url='https://pagure.io/mock_repo_reference'
)
self.assertEqual(response, ['1'])
@patch(PATH + 'PagureService._call_api')
@patch(PATH + 'PagureService.get_last_comment')
@patch(PATH + 'datetime')
@patch(PATH + 'PagureService.check_request_state')
@patch(PATH + 'PagureService.has_new_comments')
@patch(PATH + 'PagureReview')
@patch(PATH + 'PagureService._avatar')
def test_request_reviews_no_repo(self,
mock_avatar,
mock_PagureReview,
mock_has_new_comments,
mock_check_request_state,
mock_datetime,
mock_get_last_comment,
mock_call_api):
"""
Tests 'request_reviews' function without repos and:
* no last comment,
* check_request_state returns True
* _call_api raises a HTTPError,
* no namespace
"""
# Set up mock return values and side effects
mock_call_api.side_effect = requests.exceptions.HTTPError
# Call function
with self.assertRaises(Exception):
PagureService().request_reviews(
user_name='dummy_user'
)
# Validate function calls and response
mock_call_api.assert_called_with(
url='https://pagure.io/api/0/dummy_user/pull-requests',
ssl_verify=True
)
mock_get_last_comment.assert_not_called()
mock_datetime.strptime.assert_not_called()
mock_has_new_comments.assert_not_called()
mock_check_request_state.assert_not_called()
mock_avatar.assert_not_called()
mock_PagureReview.assert_not_called()
@patch(PATH + 'PagureService._call_api')
@patch(PATH + 'PagureService.get_last_comment')
@patch(PATH + 'datetime')
@patch(PATH + 'PagureService.check_request_state')
@patch(PATH + 'PagureService.has_new_comments')
@patch(PATH + 'PagureReview')
@patch(PATH + 'PagureService._avatar')
def test_request_reviews_with_repo_last_comment(self,
mock_avatar,
mock_PagureReview,
mock_has_new_comments,
mock_check_request_state,
mock_datetime,
mock_get_last_comment,
mock_call_api):
"""
Tests 'request_reviews' function with repos and:
* with last comment,
* check_request_state returns True
* no errors,
* no namespace
"""
# Set up mock return values and side effects
mock_check_request_state.return_value = True
mock_avatar.return_value = 'dummy_avatar'
self.mock_last_comment.created_at = 'dummy_date'
mock_get_last_comment.return_value = self.mock_last_comment
mock_datetime.utcfromtimestamp.return_value = self.mock_utcfromtimestamp
mock_datetime.strptime.return_value = 'mock_strptime_date'
mock_PagureReview.return_value = '1'
mock_call_api.return_value = mock_pagure.mock_api_call_return_value()
# Call function
response = PagureService().request_reviews(
user_name='dummy_user',
repo_name='dummy_repo',
show_last_comment=True
)
# Validate function calls and response
mock_call_api.assert_called_with(
url='https://pagure.io/api/0/dummy_user/dummy_repo/pull-requests',
ssl_verify=True
)
mock_get_last_comment.assert_called_with(
mock_call_api.return_value['requests'][0]
)
mock_datetime.strptime.assert_called_with('mock_date', '%Y-%m-%d %H:%M:%S.%f')
mock_has_new_comments.assert_called_with(
'dummy_date', True
)
mock_check_request_state.assert_called_with('mock_strptime_date', None)
mock_avatar.assert_not_called()
mock_PagureReview.assert_not_called()
self.assertEqual(response, [])
@patch(PATH + 'PagureService._call_api')
@patch(PATH + 'PagureService.get_last_comment')
@patch(PATH + 'datetime')
@patch(PATH + 'PagureService.check_request_state')
@patch(PATH + 'PagureService.has_new_comments')
@patch(PATH + 'PagureReview')
@patch(PATH + 'PagureService._avatar')
def test_request_reviews_with_repo_with_age(self,
mock_avatar,
mock_PagureReview,
mock_has_new_comments,
mock_check_request_state,
mock_datetime,
mock_get_last_comment,
mock_call_api):
"""
Tests 'request_reviews' function with repos and:
* no last comment,
* check_request_state returns False
* no errors,
* no namespace
"""
# Set up mock return values and side effects
mock_check_request_state.return_value = False
mock_get_last_comment.return_value = 'dummy_last_comment'
mock_datetime.utcfromtimestamp.return_value = self.mock_utcfromtimestamp_error
mock_datetime.strptime.side_effect = [ValueError, 'mock_date', 'mock_date']
mock_PagureReview.return_value = '1'
mock_call_api.return_value = mock_pagure.mock_api_call_return_value_age()
# Call function
response = PagureService().request_reviews(
user_name='dummy_user',
repo_name='dummy_repo',
age=self.mock_age
)
# Validate function calls and response
mock_call_api.assert_called_with(
url='https://pagure.io/api/0/dummy_user/dummy_repo/pull-requests',
ssl_verify=True
)
mock_get_last_comment.assert_called_with(
mock_call_api.return_value['requests'][0]
)
mock_datetime.strptime.assert_any_call(
'2019-01-05 12:12:12', '%Y-%m-%d %H:%M:%S')
mock_has_new_comments.assert_not_called()
mock_check_request_state.assert_called_with('mock_date', self.mock_age)
mock_avatar.assert_not_called()
mock_PagureReview.assert_not_called()
self.assertEqual(response, [])
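# Illustrative note (not part of the original tests): everything above relies on mocks only,
# so the module can be run standalone, e.g. `python -m pytest test/pagure_tests/test_pagure.py`.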
|
nirzari/review-rot
|
test/pagure_tests/test_pagure.py
|
Python
|
gpl-3.0
| 16,196 | 0.000679 |
"""Handle nice json response for error."""
from flask import jsonify
def not_found(e):
"""Send a correct json for 404."""
response = jsonify({'status': 404, 'error': 'Not found',
'message': 'Invalid resource URI'})
response.status_code = 404
return response
def method_not_supported(e):
"""Send a correct json for 405."""
response = jsonify({'status': 405, 'error': 'Method not supported',
'message': 'This method is not supported'})
response.status_code = 405
return response
def internal_server_error(e):
"""Send a correct json for 500."""
response = jsonify({'status': 500, 'error': 'Internal server error',
'message': e.args[0]})
response.status_code = 500
return response
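# Illustrative sketch (not part of the original module): wiring these handlers into a Flask
# app; the import path assumes the package layout of this repository.
#     from flask import Flask
#     from nefario import errors
#     app = Flask(__name__)
#     app.register_error_handler(404, errors.not_found)
#     app.register_error_handler(405, errors.method_not_supported)
#     app.register_error_handler(500, errors.internal_server_error)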
|
alkivi-sas/nefario-api
|
nefario/errors.py
|
Python
|
lgpl-3.0
| 804 | 0 |
"""
Method agnostic utility functions for linear progamming
"""
import numpy as np
import scipy.sparse as sps
from warnings import warn
from .optimize import OptimizeWarning
from scipy.optimize._remove_redundancy import (
_remove_redundancy, _remove_redundancy_sparse, _remove_redundancy_dense
)
from collections import namedtuple
_LPProblem = namedtuple('_LPProblem', 'c A_ub b_ub A_eq b_eq bounds x0')
_LPProblem.__new__.__defaults__ = (None,) * 6 # make c the only required arg
_LPProblem.__doc__ = \
""" Represents a linear-programming problem.
Attributes
----------
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
Notes
-----
This namedtuple supports 2 ways of initialization:
>>> lp1 = _LPProblem(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4])
>>> lp2 = _LPProblem([-1, 4], [[-3, 1], [1, 2]], [6, 4])
Note that only ``c`` is a required argument here, whereas all other arguments
``A_ub``, ``b_ub``, ``A_eq``, ``b_eq``, ``bounds``, ``x0`` are optional with
default values of None.
For example, ``A_eq`` and ``b_eq`` can be set without ``A_ub`` or ``b_ub``:
>>> lp3 = _LPProblem(c=[-1, 4], A_eq=[[2, 1]], b_eq=[10])
"""
def _check_sparse_inputs(options, A_ub, A_eq):
"""
Check the provided ``A_ub`` and ``A_eq`` matrices conform to the specified
optional sparsity variables.
Parameters
----------
A_ub : 2-D array, optional
2-D array such that ``A_ub @ x`` gives the values of the upper-bound
inequality constraints at ``x``.
A_eq : 2-D array, optional
2-D array such that ``A_eq @ x`` gives the values of the equality
constraints at ``x``.
options : dict
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options('linprog')`.
Returns
-------
A_ub : 2-D array, optional
2-D array such that ``A_ub @ x`` gives the values of the upper-bound
inequality constraints at ``x``.
A_eq : 2-D array, optional
2-D array such that ``A_eq @ x`` gives the values of the equality
constraints at ``x``.
options : dict
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options('linprog')`.
"""
# This is an undocumented option for unit testing sparse presolve
_sparse_presolve = options.pop('_sparse_presolve', False)
if _sparse_presolve and A_eq is not None:
A_eq = sps.coo_matrix(A_eq)
if _sparse_presolve and A_ub is not None:
A_ub = sps.coo_matrix(A_ub)
sparse = options.get('sparse', False)
if not sparse and (sps.issparse(A_eq) or sps.issparse(A_ub)):
options['sparse'] = True
warn("Sparse constraint matrix detected; setting 'sparse':True.",
OptimizeWarning, stacklevel=4)
return options, A_ub, A_eq
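# Illustrative sketch (not part of SciPy): a sparse equality-constraint matrix flips the
# 'sparse' option on (emitting an OptimizeWarning), as implemented above.
def _example_sparse_detection():
    opts, A_ub, A_eq = _check_sparse_inputs({}, A_ub=None, A_eq=sps.coo_matrix([[1.0, 2.0]]))
    return opts  # {'sparse': True}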
def _format_A_constraints(A, n_x, sparse_lhs=False):
"""Format the left hand side of the constraints to a 2-D array
Parameters
----------
A : 2-D array
2-D array such that ``A @ x`` gives the values of the upper-bound
(in)equality constraints at ``x``.
n_x : int
The number of variables in the linear programming problem.
sparse_lhs : bool
Whether either of `A_ub` or `A_eq` are sparse. If true return a
coo_matrix instead of a numpy array.
Returns
-------
np.ndarray or sparse.coo_matrix
2-D array such that ``A @ x`` gives the values of the upper-bound
(in)equality constraints at ``x``.
"""
if sparse_lhs:
return sps.coo_matrix(
(0, n_x) if A is None else A, dtype=float, copy=True
)
elif A is None:
return np.zeros((0, n_x), dtype=float)
else:
return np.array(A, dtype=float, copy=True)
def _format_b_constraints(b):
"""Format the upper bounds of the constraints to a 1-D array
Parameters
----------
b : 1-D array
1-D array of values representing the upper-bound of each (in)equality
constraint (row) in ``A``.
Returns
-------
1-D np.array
1-D array of values representing the upper-bound of each (in)equality
constraint (row) in ``A``.
"""
if b is None:
return np.array([], dtype=float)
b = np.array(b, dtype=float, copy=True).squeeze()
return b if b.size != 1 else b.reshape((-1))
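# A minimal usage sketch of the two formatting helpers above (illustrative only;
# the function name is hypothetical). It demonstrates the shapes these helpers
# are expected to produce for missing, scalar, and list inputs.
def _example_format_constraints():
    # a missing constraint matrix becomes an empty (0, n_x) float array
    assert _format_A_constraints(None, n_x=3).shape == (0, 3)
    # a list of lists is copied into a float ndarray
    assert _format_A_constraints([[1, 2, 3]], n_x=3).shape == (1, 3)
    # missing, scalar, and list right-hand sides all become 1-D float arrays
    assert _format_b_constraints(None).shape == (0,)
    assert _format_b_constraints(5).shape == (1,)
    assert _format_b_constraints([6, 4]).shape == (2,)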
def _clean_inputs(lp):
"""
Given user inputs for a linear programming problem, return the
objective vector, upper bound constraints, equality constraints,
and simple bounds in a preferred format.
Parameters
----------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
Returns
-------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
"""
c, A_ub, b_ub, A_eq, b_eq, bounds, x0 = lp
if c is None:
raise TypeError
try:
        c = np.array(c, dtype=float, copy=True).squeeze()
except ValueError:
raise TypeError(
"Invalid input for linprog: c must be a 1-D array of numerical "
"coefficients")
else:
# If c is a single value, convert it to a 1-D array.
if c.size == 1:
c = c.reshape((-1))
n_x = len(c)
if n_x == 0 or len(c.shape) != 1:
raise ValueError(
"Invalid input for linprog: c must be a 1-D array and must "
"not have more than one non-singleton dimension")
if not(np.isfinite(c).all()):
raise ValueError(
"Invalid input for linprog: c must not contain values "
"inf, nan, or None")
sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub)
try:
A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs)
except ValueError:
raise TypeError(
"Invalid input for linprog: A_ub must be a 2-D array "
"of numerical values")
else:
n_ub = A_ub.shape[0]
if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x:
raise ValueError(
"Invalid input for linprog: A_ub must have exactly two "
"dimensions, and the number of columns in A_ub must be "
"equal to the size of c")
if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all()
or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()):
raise ValueError(
"Invalid input for linprog: A_ub must not contain values "
"inf, nan, or None")
try:
b_ub = _format_b_constraints(b_ub)
except ValueError:
raise TypeError(
"Invalid input for linprog: b_ub must be a 1-D array of "
"numerical values, each representing the upper bound of an "
"inequality constraint (row) in A_ub")
else:
if b_ub.shape != (n_ub,):
raise ValueError(
"Invalid input for linprog: b_ub must be a 1-D array; b_ub "
"must not have more than one non-singleton dimension and "
"the number of rows in A_ub must equal the number of values "
"in b_ub")
if not(np.isfinite(b_ub).all()):
raise ValueError(
"Invalid input for linprog: b_ub must not contain values "
"inf, nan, or None")
try:
A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs)
except ValueError:
raise TypeError(
"Invalid input for linprog: A_eq must be a 2-D array "
"of numerical values")
else:
n_eq = A_eq.shape[0]
if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x:
raise ValueError(
"Invalid input for linprog: A_eq must have exactly two "
"dimensions, and the number of columns in A_eq must be "
"equal to the size of c")
if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all()
or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()):
raise ValueError(
"Invalid input for linprog: A_eq must not contain values "
"inf, nan, or None")
try:
b_eq = _format_b_constraints(b_eq)
except ValueError:
raise TypeError(
"Invalid input for linprog: b_eq must be a 1-D array of "
"numerical values, each representing the upper bound of an "
"inequality constraint (row) in A_eq")
else:
if b_eq.shape != (n_eq,):
raise ValueError(
"Invalid input for linprog: b_eq must be a 1-D array; b_eq "
"must not have more than one non-singleton dimension and "
"the number of rows in A_eq must equal the number of values "
"in b_eq")
if not(np.isfinite(b_eq).all()):
raise ValueError(
"Invalid input for linprog: b_eq must not contain values "
"inf, nan, or None")
# x0 gives a (optional) starting solution to the solver. If x0 is None,
# skip the checks. Initial solution will be generated automatically.
if x0 is not None:
try:
x0 = np.array(x0, dtype=float, copy=True).squeeze()
except ValueError:
raise TypeError(
"Invalid input for linprog: x0 must be a 1-D array of "
"numerical coefficients")
if x0.ndim == 0:
x0 = x0.reshape((-1))
if len(x0) == 0 or x0.ndim != 1:
raise ValueError(
"Invalid input for linprog: x0 should be a 1-D array; it "
"must not have more than one non-singleton dimension")
if not x0.size == c.size:
raise ValueError(
"Invalid input for linprog: x0 and c should contain the "
"same number of elements")
if not np.isfinite(x0).all():
raise ValueError(
"Invalid input for linprog: x0 must not contain values "
"inf, nan, or None")
# "If a sequence containing a single tuple is provided, then min and max
# will be applied to all variables in the problem."
    # Earlier versions of linprog did not handle this correctly: a list
    # containing a single tuple was rejected, so that case is handled here.
try:
if isinstance(bounds, str):
raise TypeError
if bounds is None or len(bounds) == 0:
bounds = [(0, None)] * n_x
elif len(bounds) == 1:
b = bounds[0]
if len(b) != 2:
raise ValueError(
"Invalid input for linprog: exactly one lower bound and "
"one upper bound must be specified for each element of x")
bounds = [b] * n_x
elif len(bounds) == n_x:
try:
len(bounds[0])
except BaseException:
bounds = [(bounds[0], bounds[1])] * n_x
for i, b in enumerate(bounds):
if len(b) != 2:
raise ValueError(
"Invalid input for linprog, bound " +
str(i) +
" " +
str(b) +
": exactly one lower bound and one upper bound must "
"be specified for each element of x")
elif (len(bounds) == 2 and np.isreal(bounds[0])
and np.isreal(bounds[1])):
bounds = [(bounds[0], bounds[1])] * n_x
else:
raise ValueError(
"Invalid input for linprog: exactly one lower bound and one "
"upper bound must be specified for each element of x")
clean_bounds = [] # also creates a copy so user's object isn't changed
for i, b in enumerate(bounds):
if b[0] is not None and b[1] is not None and b[0] > b[1]:
raise ValueError(
"Invalid input for linprog, bound " +
str(i) +
" " +
str(b) +
": a lower bound must be less than or equal to the "
"corresponding upper bound")
if b[0] == np.inf:
raise ValueError(
"Invalid input for linprog, bound " +
str(i) +
" " +
str(b) +
": infinity is not a valid lower bound")
if b[1] == -np.inf:
raise ValueError(
"Invalid input for linprog, bound " +
str(i) +
" " +
str(b) +
": negative infinity is not a valid upper bound")
lb = float(b[0]) if b[0] is not None and b[0] != -np.inf else None
ub = float(b[1]) if b[1] is not None and b[1] != np.inf else None
clean_bounds.append((lb, ub))
bounds = clean_bounds
except ValueError as e:
if "could not convert string to float" in e.args[0]:
raise TypeError
else:
raise e
    except TypeError:
raise TypeError(
"Invalid input for linprog: bounds must be a sequence of "
"(min,max) pairs, each defining bounds on an element of x ")
return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0)
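# A minimal usage sketch (illustrative only; the function name is hypothetical).
# It shows the bounds handling of _clean_inputs: a single (min, max) pair is
# broadcast to every variable, and omitted bounds default to (0, None).
def _example_clean_inputs_bounds():
    lp = _clean_inputs(_LPProblem(c=[1, 2, 3], bounds=(0, 10)))
    assert lp.bounds == [(0.0, 10.0)] * 3
    lp_default = _clean_inputs(_LPProblem(c=[1, 2, 3]))
    assert lp_default.bounds == [(0.0, None)] * 3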
def _presolve(lp, rr, tol=1e-9):
"""
Given inputs for a linear programming problem in preferred format,
presolve the problem: identify trivial infeasibilities, redundancies,
and unboundedness, tighten bounds where possible, and eliminate fixed
variables.
Parameters
----------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
rr : bool
If ``True`` attempts to eliminate any redundant rows in ``A_eq``.
Set False if ``A_eq`` is known to be of full row rank, or if you are
looking for a potential speedup (at the expense of reliability).
tol : float
The tolerance which determines when a solution is "close enough" to
zero in Phase 1 to be considered a basic feasible solution or close
enough to positive to serve as an optimal solution.
Returns
-------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
c0 : 1D array
Constant term in objective function due to fixed (and eliminated)
variables.
x : 1D array
Solution vector (when the solution is trivial and can be determined
in presolve)
undo: list of tuples
(index, value) pairs that record the original index and fixed value
for each variable removed from the problem
complete: bool
Whether the solution is complete (solved or determined to be infeasible
or unbounded in presolve)
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
message : str
A string descriptor of the exit status of the optimization.
References
----------
.. [5] Andersen, Erling D. "Finding all linearly dependent rows in
large-scale linear programming." Optimization Methods and Software
6.3 (1995): 219-227.
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
programming." Mathematical Programming 71.2 (1995): 221-245.
"""
# ideas from Reference [5] by Andersen and Andersen
# however, unlike the reference, this is performed before converting
# problem to standard form
# There are a few advantages:
# * artificial variables have not been added, so matrices are smaller
# * bounds have not been converted to constraints yet. (It is better to
# do that after presolve because presolve may adjust the simple bounds.)
# There are many improvements that can be made, namely:
# * implement remaining checks from [5]
# * loop presolve until no additional changes are made
# * implement additional efficiency improvements in redundancy removal [2]
c, A_ub, b_ub, A_eq, b_eq, bounds, x0 = lp
undo = [] # record of variables eliminated from problem
# constant term in cost function may be added if variables are eliminated
c0 = 0
complete = False # complete is True if detected infeasible/unbounded
x = np.zeros(c.shape) # this is solution vector if completed in presolve
status = 0 # all OK unless determined otherwise
message = ""
# Standard form for bounds (from _clean_inputs) is list of tuples
# but NumPy array is more convenient here
# In retrospect, numpy array should have been the standard
bounds = np.array(bounds)
lb = bounds[:, 0]
ub = bounds[:, 1]
lb[np.equal(lb, None)] = -np.inf
ub[np.equal(ub, None)] = np.inf
bounds = bounds.astype(float)
lb = lb.astype(float)
ub = ub.astype(float)
m_eq, n = A_eq.shape
m_ub, n = A_ub.shape
if (sps.issparse(A_eq)):
A_eq = A_eq.tolil()
A_ub = A_ub.tolil()
def where(A):
return A.nonzero()
vstack = sps.vstack
else:
where = np.where
vstack = np.vstack
# zero row in equality constraints
zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten()
if np.any(zero_row):
if np.any(
np.logical_and(
zero_row,
np.abs(b_eq) > tol)): # test_zero_row_1
# infeasible if RHS is not zero
status = 2
message = ("The problem is (trivially) infeasible due to a row "
"of zeros in the equality constraint matrix with a "
"nonzero corresponding constraint value.")
complete = True
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
c0, x, undo, complete, status, message)
else: # test_zero_row_2
# if RHS is zero, we can eliminate this equation entirely
A_eq = A_eq[np.logical_not(zero_row), :]
b_eq = b_eq[np.logical_not(zero_row)]
# zero row in inequality constraints
zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten()
if np.any(zero_row):
if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1
# infeasible if RHS is less than zero (because LHS is zero)
status = 2
message = ("The problem is (trivially) infeasible due to a row "
"of zeros in the equality constraint matrix with a "
"nonzero corresponding constraint value.")
complete = True
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
c0, x, undo, complete, status, message)
else: # test_zero_row_2
# if LHS is >= 0, we can eliminate this constraint entirely
A_ub = A_ub[np.logical_not(zero_row), :]
b_ub = b_ub[np.logical_not(zero_row)]
# zero column in (both) constraints
# this indicates that a variable isn't constrained and can be removed
A = vstack((A_eq, A_ub))
if A.shape[0] > 0:
zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten()
# variable will be at upper or lower bound, depending on objective
x[np.logical_and(zero_col, c < 0)] = ub[
np.logical_and(zero_col, c < 0)]
x[np.logical_and(zero_col, c > 0)] = lb[
np.logical_and(zero_col, c > 0)]
if np.any(np.isinf(x)): # if an unconstrained variable has no bound
status = 3
message = ("If feasible, the problem is (trivially) unbounded "
"due to a zero column in the constraint matrices. If "
"you wish to check whether the problem is infeasible, "
"turn presolve off.")
complete = True
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
c0, x, undo, complete, status, message)
        # variables that will equal their upper/lower bounds will be removed later
lb[np.logical_and(zero_col, c < 0)] = ub[
np.logical_and(zero_col, c < 0)]
ub[np.logical_and(zero_col, c > 0)] = lb[
np.logical_and(zero_col, c > 0)]
# row singleton in equality constraints
# this fixes a variable and removes the constraint
singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten()
rows = where(singleton_row)[0]
cols = where(A_eq[rows, :])[1]
if len(rows) > 0:
for row, col in zip(rows, cols):
val = b_eq[row] / A_eq[row, col]
if not lb[col] - tol <= val <= ub[col] + tol:
# infeasible if fixed value is not within bounds
status = 2
message = ("The problem is (trivially) infeasible because a "
"singleton row in the equality constraints is "
"inconsistent with the bounds.")
complete = True
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
c0, x, undo, complete, status, message)
else:
# sets upper and lower bounds at that fixed value - variable
# will be removed later
lb[col] = val
ub[col] = val
A_eq = A_eq[np.logical_not(singleton_row), :]
b_eq = b_eq[np.logical_not(singleton_row)]
# row singleton in inequality constraints
# this indicates a simple bound and the constraint can be removed
# simple bounds may be adjusted here
# After all of the simple bound information is combined here, get_Abc will
# turn the simple bounds into constraints
singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten()
cols = where(A_ub[singleton_row, :])[1]
rows = where(singleton_row)[0]
if len(rows) > 0:
for row, col in zip(rows, cols):
val = b_ub[row] / A_ub[row, col]
if A_ub[row, col] > 0: # upper bound
if val < lb[col] - tol: # infeasible
complete = True
elif val < ub[col]: # new upper bound
ub[col] = val
else: # lower bound
if val > ub[col] + tol: # infeasible
complete = True
elif val > lb[col]: # new lower bound
lb[col] = val
if complete:
status = 2
message = ("The problem is (trivially) infeasible because a "
"singleton row in the upper bound constraints is "
"inconsistent with the bounds.")
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
c0, x, undo, complete, status, message)
A_ub = A_ub[np.logical_not(singleton_row), :]
b_ub = b_ub[np.logical_not(singleton_row)]
# identical bounds indicate that variable can be removed
i_f = np.abs(lb - ub) < tol # indices of "fixed" variables
i_nf = np.logical_not(i_f) # indices of "not fixed" variables
# test_bounds_equal_but_infeasible
if np.all(i_f): # if bounds define solution, check for consistency
residual = b_eq - A_eq.dot(lb)
slack = b_ub - A_ub.dot(lb)
if ((A_ub.size > 0 and np.any(slack < 0)) or
(A_eq.size > 0 and not np.allclose(residual, 0))):
status = 2
message = ("The problem is (trivially) infeasible because the "
"bounds fix all variables to values inconsistent with "
"the constraints")
complete = True
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
c0, x, undo, complete, status, message)
ub_mod = ub
lb_mod = lb
if np.any(i_f):
c0 += c[i_f].dot(lb[i_f])
b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f])
b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f])
c = c[i_nf]
x = x[i_nf]
# user guess x0 stays separate from presolve solution x
if x0 is not None:
x0 = x0[i_nf]
A_eq = A_eq[:, i_nf]
A_ub = A_ub[:, i_nf]
# record of variables to be added back in
undo = [np.nonzero(i_f)[0], lb[i_f]]
# don't remove these entries from bounds; they'll be used later.
# but we _also_ need a version of the bounds with these removed
lb_mod = lb[i_nf]
ub_mod = ub[i_nf]
# no constraints indicates that problem is trivial
if A_eq.size == 0 and A_ub.size == 0:
b_eq = np.array([])
b_ub = np.array([])
# test_empty_constraint_1
if c.size == 0:
status = 0
message = ("The solution was determined in presolve as there are "
"no non-trivial constraints.")
elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or
np.any(np.logical_and(c > 0, lb_mod == -np.inf))):
# test_no_constraints()
# test_unbounded_no_nontrivial_constraints_1
# test_unbounded_no_nontrivial_constraints_2
status = 3
message = ("The problem is (trivially) unbounded "
"because there are no non-trivial constraints and "
"a) at least one decision variable is unbounded "
"above and its corresponding cost is negative, or "
"b) at least one decision variable is unbounded below "
"and its corresponding cost is positive. ")
else: # test_empty_constraint_2
status = 0
message = ("The solution was determined in presolve as there are "
"no non-trivial constraints.")
complete = True
x[c < 0] = ub_mod[c < 0]
x[c > 0] = lb_mod[c > 0]
# where c is zero, set x to a finite bound or zero
x_zero_c = ub_mod[c == 0]
        x_zero_c[np.isinf(x_zero_c)] = lb_mod[c == 0][np.isinf(x_zero_c)]
x_zero_c[np.isinf(x_zero_c)] = 0
x[c == 0] = x_zero_c
# if this is not the last step of presolve, should convert bounds back
# to array and return here
# *sigh* - convert bounds back to their standard form (list of tuples)
# again, in retrospect, numpy array would be standard form
lb[np.equal(lb, -np.inf)] = None
ub[np.equal(ub, np.inf)] = None
bounds = np.hstack((lb[:, np.newaxis], ub[:, np.newaxis]))
bounds = bounds.tolist()
for i, row in enumerate(bounds):
for j, col in enumerate(row):
if str(col) == "nan":
# comparing col to float("nan") and np.nan doesn't work.
# should use np.isnan
bounds[i][j] = None
# remove redundant (linearly dependent) rows from equality constraints
n_rows_A = A_eq.shape[0]
redundancy_warning = ("A_eq does not appear to be of full row rank. To "
"improve performance, check the problem formulation "
"for redundant equality constraints.")
if (sps.issparse(A_eq)):
if rr and A_eq.size > 0: # TODO: Fast sparse rank check?
A_eq, b_eq, status, message = _remove_redundancy_sparse(A_eq, b_eq)
if A_eq.shape[0] < n_rows_A:
warn(redundancy_warning, OptimizeWarning, stacklevel=1)
if status != 0:
complete = True
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
c0, x, undo, complete, status, message)
# This is a wild guess for which redundancy removal algorithm will be
# faster. More testing would be good.
small_nullspace = 5
if rr and A_eq.size > 0:
try: # TODO: instead use results of first SVD in _remove_redundancy
rank = np.linalg.matrix_rank(A_eq)
except Exception: # oh well, we'll have to go with _remove_redundancy_dense
rank = 0
if rr and A_eq.size > 0 and rank < A_eq.shape[0]:
warn(redundancy_warning, OptimizeWarning, stacklevel=3)
dim_row_nullspace = A_eq.shape[0]-rank
if dim_row_nullspace <= small_nullspace:
A_eq, b_eq, status, message = _remove_redundancy(A_eq, b_eq)
if dim_row_nullspace > small_nullspace or status == 4:
A_eq, b_eq, status, message = _remove_redundancy_dense(A_eq, b_eq)
if A_eq.shape[0] < rank:
message = ("Due to numerical issues, redundant equality "
"constraints could not be removed automatically. "
"Try providing your constraint matrices as sparse "
"matrices to activate sparse presolve, try turning "
"off redundancy removal, or try turning off presolve "
"altogether.")
status = 4
if status != 0:
complete = True
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
c0, x, undo, complete, status, message)
def _parse_linprog(lp, options):
"""
Parse the provided linear programming problem
    ``_parse_linprog`` employs two main steps: ``_check_sparse_inputs`` and
    ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the
    provided constraints (``A_ub`` and ``A_eq``) and whether this is consistent
    with the sparsity-related solver options.
    ``_clean_inputs`` checks the validity of the provided inputs. If no
    violations are identified, the objective vector, upper bound constraints,
    equality constraints, and simple bounds are returned in the expected format.
Parameters
----------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
options : dict
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options('linprog')`.
Returns
-------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options('linprog')`.
"""
if options is None:
options = {}
solver_options = {k: v for k, v in options.items()}
solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, lp.A_ub, lp.A_eq)
# Convert lists to numpy arrays, etc...
lp = _clean_inputs(lp._replace(A_ub=A_ub, A_eq=A_eq))
return lp, solver_options
def _get_Abc(lp, c0, undo=[]):
"""
Given a linear programming problem of the form:
Minimize::
c @ x
Subject to::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
Return the problem in standard form:
Minimize::
c @ x
Subject to::
A @ x == b
x >= 0
by adding slack variables and making variable substitutions as necessary.
Parameters
----------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
c0 : float
Constant term in objective function due to fixed (and eliminated)
variables.
undo: list of tuples
(`index`, `value`) pairs that record the original index and fixed value
for each variable removed from the problem
Returns
-------
A : 2-D array
        2-D array such that ``A @ x`` gives the values of the equality
constraints at ``x``.
b : 1-D array
1-D array of values representing the RHS of each equality constraint
(row) in A (for standard form problem).
c : 1-D array
Coefficients of the linear objective function to be minimized (for
standard form problem).
c0 : float
Constant term in objective function due to fixed (and eliminated)
variables.
x0 : 1-D array
Starting values of the independent variables, which will be refined by
the optimization algorithm
References
----------
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
"""
c, A_ub, b_ub, A_eq, b_eq, bounds, x0 = lp
if sps.issparse(A_eq):
sparse = True
A_eq = sps.lil_matrix(A_eq)
A_ub = sps.lil_matrix(A_ub)
def hstack(blocks):
return sps.hstack(blocks, format="lil")
def vstack(blocks):
return sps.vstack(blocks, format="lil")
zeros = sps.lil_matrix
eye = sps.eye
else:
sparse = False
hstack = np.hstack
vstack = np.vstack
zeros = np.zeros
eye = np.eye
fixed_x = set()
if len(undo) > 0:
# these are indices of variables removed from the problem
# however, their bounds are still part of the bounds list
fixed_x = set(undo[0])
# they are needed elsewhere, but not here
bounds = [bounds[i] for i in range(len(bounds)) if i not in fixed_x]
# in retrospect, the standard form of bounds should have been an n x 2
# array. maybe change it someday.
# modify problem such that all variables have only non-negativity bounds
bounds = np.array(bounds)
lbs = bounds[:, 0]
ubs = bounds[:, 1]
m_ub, n_ub = A_ub.shape
lb_none = np.equal(lbs, None)
ub_none = np.equal(ubs, None)
lb_some = np.logical_not(lb_none)
ub_some = np.logical_not(ub_none)
# if preprocessing is on, lb == ub can't happen
# if preprocessing is off, then it would be best to convert that
# to an equality constraint, but it's tricky to make the other
# required modifications from inside here.
# unbounded below: substitute xi = -xi' (unbounded above)
l_nolb_someub = np.logical_and(lb_none, ub_some)
i_nolb = np.nonzero(l_nolb_someub)[0]
lbs[l_nolb_someub], ubs[l_nolb_someub] = (
-ubs[l_nolb_someub], lbs[l_nolb_someub])
lb_none = np.equal(lbs, None)
ub_none = np.equal(ubs, None)
lb_some = np.logical_not(lb_none)
ub_some = np.logical_not(ub_none)
c[i_nolb] *= -1
if x0 is not None:
x0[i_nolb] *= -1
if len(i_nolb) > 0:
if A_ub.shape[0] > 0: # sometimes needed for sparse arrays... weird
A_ub[:, i_nolb] *= -1
if A_eq.shape[0] > 0:
A_eq[:, i_nolb] *= -1
# upper bound: add inequality constraint
i_newub = np.nonzero(ub_some)[0]
ub_newub = ubs[ub_some]
n_bounds = np.count_nonzero(ub_some)
A_ub = vstack((A_ub, zeros((n_bounds, A_ub.shape[1]))))
b_ub = np.concatenate((b_ub, np.zeros(n_bounds)))
A_ub[range(m_ub, A_ub.shape[0]), i_newub] = 1
b_ub[m_ub:] = ub_newub
A1 = vstack((A_ub, A_eq))
b = np.concatenate((b_ub, b_eq))
c = np.concatenate((c, np.zeros((A_ub.shape[0],))))
if x0 is not None:
x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],))))
# unbounded: substitute xi = xi+ + xi-
l_free = np.logical_and(lb_none, ub_none)
i_free = np.nonzero(l_free)[0]
n_free = len(i_free)
A1 = hstack((A1, zeros((A1.shape[0], n_free))))
c = np.concatenate((c, np.zeros(n_free)))
if x0 is not None:
x0 = np.concatenate((x0, np.zeros(n_free)))
A1[:, range(n_ub, A1.shape[1])] = -A1[:, i_free]
c[np.arange(n_ub, A1.shape[1])] = -c[i_free]
if x0 is not None:
i_free_neg = x0[i_free] < 0
x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]]
x0[i_free[i_free_neg]] = 0
# add slack variables
A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))])
A = hstack([A1, A2])
# lower bound: substitute xi = xi' + lb
# now there is a constant term in objective
i_shift = np.nonzero(lb_some)[0]
lb_shift = lbs[lb_some].astype(float)
c0 += np.sum(lb_shift * c[i_shift])
if sparse:
b = b.reshape(-1, 1)
A = A.tocsc()
b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1)
b = b.ravel()
else:
b -= (A[:, i_shift] * lb_shift).sum(axis=1)
if x0 is not None:
x0[i_shift] -= lb_shift
return A, b, c, c0, x0
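# A minimal end-to-end sketch of the conversion to standard form (illustrative
# only; the function name is hypothetical). One inequality constraint on two
# non-negative variables gains one slack variable, so ``A`` has three columns.
def _example_get_Abc():
    lp = _clean_inputs(_LPProblem(c=[-1, 4], A_ub=[[-3, 1]], b_ub=[6]))
    A, b, c, c0, x0 = _get_Abc(lp, c0=0)
    assert A.shape == (1, 3)  # one constraint row; two original + one slack column
    assert b.shape == (1,) and c.shape == (3,)
    assert c0 == 0 and x0 is None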
def _round_to_power_of_two(x):
"""
Round elements of the array to the nearest power of two.
"""
return 2**np.around(np.log2(x))
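# Rounding scale factors to powers of two keeps subsequent rescaling exact in
# binary floating point (only the exponent changes). A tiny sketch (the function
# name is hypothetical):
def _example_round_to_power_of_two():
    out = _round_to_power_of_two(np.array([3.0, 0.7]))
    assert np.allclose(out, [4.0, 0.5])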
def _autoscale(A, b, c, x0):
"""
Scales the problem according to equilibration from [12].
Also normalizes the right hand side vector by its maximum element.
"""
m, n = A.shape
C = 1
R = 1
if A.size > 0:
R = np.max(np.abs(A), axis=1)
if sps.issparse(A):
R = R.toarray().flatten()
R[R == 0] = 1
R = 1/_round_to_power_of_two(R)
A = sps.diags(R)*A if sps.issparse(A) else A*R.reshape(m, 1)
b = b*R
C = np.max(np.abs(A), axis=0)
if sps.issparse(A):
C = C.toarray().flatten()
C[C == 0] = 1
C = 1/_round_to_power_of_two(C)
A = A*sps.diags(C) if sps.issparse(A) else A*C
c = c*C
b_scale = np.max(np.abs(b)) if b.size > 0 else 1
if b_scale == 0:
b_scale = 1.
b = b/b_scale
if x0 is not None:
x0 = x0/b_scale*(1/C)
return A, b, c, x0, C, b_scale
def _unscale(x, C, b_scale):
"""
Converts solution to _autoscale problem -> solution to original problem.
"""
    try:
        n = len(C)
    except TypeError:
        # fails if C is sparse or scalar; that's OK.
        # this is only needed for the original simplex method (never sparse)
        n = len(x)
return x[:n]*b_scale*C
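# A minimal sketch of the scaling round trip (illustrative only; the function
# name is hypothetical): a solution of the autoscaled problem maps back to the
# original variables via ``x * b_scale * C``.
def _example_autoscale_roundtrip():
    A = np.array([[2.0, 4.0], [1.0, 3.0]])
    b = np.array([6.0, 2.0])
    c = np.array([1.0, 1.0])
    A_s, b_s, c_s, x0_s, C, b_scale = _autoscale(A, b, c, None)
    # equilibrated entries stay within a factor of two of unity
    assert np.max(np.abs(A_s)) <= 2.0
    # a vector of ones in scaled variables corresponds to b_scale * C originally
    assert np.allclose(_unscale(np.ones(2), C, b_scale), b_scale * C)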
def _display_summary(message, status, fun, iteration):
"""
Print the termination summary of the linear program
Parameters
----------
message : str
A string descriptor of the exit status of the optimization.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
fun : float
Value of the objective function.
    iteration : int
The number of iterations performed.
"""
print(message)
if status in (0, 1):
print(" Current function value: {0: <12.6f}".format(fun))
print(" Iterations: {0:d}".format(iteration))
def _postsolve(x, postsolve_args, complete=False, tol=1e-8, copy=False):
"""
Given solution x to presolved, standard form linear program x, add
fixed variables back into the problem and undo the variable substitutions
to get solution to original linear program. Also, calculate the objective
function value, slack in original upper bound constraints, and residuals
in original equality constraints.
Parameters
----------
x : 1-D array
Solution vector to the standard-form problem.
postsolve_args : tuple
Data needed by _postsolve to convert the solution to the standard-form
problem into the solution to the original problem, including:
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
undo: list of tuples
(`index`, `value`) pairs that record the original index and fixed value
for each variable removed from the problem
complete : bool
        Whether the solution was determined in presolve (``True`` if so)
tol : float
Termination tolerance; see [1]_ Section 4.5.
Returns
-------
x : 1-D array
Solution vector to original linear programming problem
fun: float
optimal objective value for original problem
slack : 1-D array
The (non-negative) slack in the upper bound constraints, that is,
``b_ub - A_ub @ x``
con : 1-D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``
lb : 1-D array
The lower bound constraints on the original variables
ub: 1-D array
The upper bound constraints on the original variables
"""
# note that all the inputs are the ORIGINAL, unmodified versions
# no rows, columns have been removed
# the only exception is bounds; it has been modified
# we need these modified values to undo the variable substitutions
# in retrospect, perhaps this could have been simplified if the "undo"
# variable also contained information for undoing variable substitutions
(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), undo, C, b_scale = postsolve_args
x = _unscale(x, C, b_scale)
n_x = len(c)
# we don't have to undo variable substitutions for fixed variables that
# were removed from the problem
no_adjust = set()
# if there were variables removed from the problem, add them back into the
# solution vector
if len(undo) > 0:
no_adjust = set(undo[0])
x = x.tolist()
for i, val in zip(undo[0], undo[1]):
x.insert(i, val)
copy = True
if copy:
x = np.array(x, copy=True)
# now undo variable substitutions
# if "complete", problem was solved in presolve; don't do anything here
if not complete and bounds is not None: # bounds are never none, probably
n_unbounded = 0
for i, b in enumerate(bounds):
if i in no_adjust:
continue
lb, ub = b
if lb is None and ub is None:
n_unbounded += 1
x[i] = x[i] - x[n_x + n_unbounded - 1]
else:
if lb is None:
x[i] = ub - x[i]
else:
x[i] += lb
n_x = len(c)
x = x[:n_x] # all the rest of the variables were artificial
fun = x.dot(c)
slack = b_ub - A_ub.dot(x) # report slack for ORIGINAL UB constraints
# report residuals of ORIGINAL EQ constraints
con = b_eq - A_eq.dot(x)
# Patch for bug #8664. Detecting this sort of issue earlier
# (via abnormalities in the indicators) would be better.
bounds = np.array(bounds) # again, this should have been the standard form
lb = bounds[:, 0]
ub = bounds[:, 1]
lb[np.equal(lb, None)] = -np.inf
ub[np.equal(ub, None)] = np.inf
return x, fun, slack, con, lb, ub
def _check_result(x, fun, status, slack, con, lb, ub, tol, message):
"""
Check the validity of the provided solution.
A valid (optimal) solution satisfies all bounds, all slack variables are
    non-negative, and all equality constraint residuals are (nominally) zero.
Further, the lower-bounds, upper-bounds, slack and residuals contain
no nan values.
Parameters
----------
x : 1-D array
Solution vector to original linear programming problem
fun: float
optimal objective value for original problem
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
slack : 1-D array
The (non-negative) slack in the upper bound constraints, that is,
``b_ub - A_ub @ x``
con : 1-D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``
lb : 1-D array
The lower bound constraints on the original variables
ub: 1-D array
The upper bound constraints on the original variables
message : str
A string descriptor of the exit status of the optimization.
tol : float
Termination tolerance; see [1]_ Section 4.5.
Returns
-------
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
message : str
A string descriptor of the exit status of the optimization.
"""
    # Inflate the tolerance for these checks; the factor is somewhat arbitrary.
tol = np.sqrt(tol) * 10
contains_nans = (
np.isnan(x).any()
or np.isnan(fun)
or np.isnan(slack).any()
or np.isnan(con).any()
)
if contains_nans:
is_feasible = False
else:
invalid_bounds = (x < lb - tol).any() or (x > ub + tol).any()
invalid_slack = status != 3 and (slack < -tol).any()
invalid_con = status != 3 and (np.abs(con) > tol).any()
is_feasible = not (invalid_bounds or invalid_slack or invalid_con)
if status == 0 and not is_feasible:
status = 4
message = ("The solution does not satisfy the constraints within the "
"required tolerance of " + "{:.2E}".format(tol) + ", yet "
"no errors were raised and there is no certificate of "
"infeasibility or unboundedness. This is known to occur "
"if the `presolve` option is False and the problem is "
"infeasible. This can also occur due to the limited "
"accuracy of the `interior-point` method. Check whether "
"the slack and constraint residuals are acceptable; "
"if not, consider enabling presolve, reducing option "
"`tol`, and/or using method `revised simplex`. "
"If you encounter this message under different "
"circumstances, please submit a bug report.")
elif status == 0 and contains_nans:
status = 4
message = ("Numerical difficulties were encountered but no errors "
"were raised. This is known to occur if the 'presolve' "
"option is False, 'sparse' is True, and A_eq includes "
"redundant rows. If you encounter this under different "
"circumstances, please submit a bug report. Otherwise, "
"remove linearly dependent equations from your equality "
"constraints or enable presolve.")
elif status == 2 and is_feasible:
# Occurs if the simplex method exits after phase one with a very
# nearly basic feasible solution. Postsolving can make the solution
# basic, however, this solution is NOT optimal
raise ValueError(message)
return status, message
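# A minimal sketch (illustrative only; the function name is hypothetical) showing
# how _check_result escalates a nominally successful status to 4 when the
# reported solution contains NaNs.
def _example_check_result_nan():
    status, message = _check_result(
        x=np.array([np.nan]), fun=np.nan, status=0,
        slack=np.array([]), con=np.array([]),
        lb=np.array([0.0]), ub=np.array([np.inf]),
        tol=1e-8, message="")
    assert status == 4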
def _postprocess(x, postsolve_args, complete=False, status=0, message="",
tol=1e-8, iteration=None, disp=False):
"""
Given solution x to presolved, standard form linear program x, add
fixed variables back into the problem and undo the variable substitutions
to get solution to original linear program. Also, calculate the objective
function value, slack in original upper bound constraints, and residuals
in original equality constraints.
Parameters
----------
x : 1-D array
Solution vector to the standard-form problem.
    postsolve_args : tuple
        Data needed by ``_postsolve`` to convert the solution to the
        standard-form problem into the solution to the original problem,
        including the original ``_LPProblem``, the ``undo`` record of
        eliminated variables, and the autoscaling factors.
    complete : bool
        Whether the solution was determined in presolve (``True`` if so)
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
message : str
A string descriptor of the exit status of the optimization.
tol : float
Termination tolerance; see [1]_ Section 4.5.
Returns
-------
x : 1-D array
Solution vector to original linear programming problem
fun: float
optimal objective value for original problem
slack : 1-D array
The (non-negative) slack in the upper bound constraints, that is,
``b_ub - A_ub @ x``
con : 1-D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
message : str
A string descriptor of the exit status of the optimization.
"""
x, fun, slack, con, lb, ub = _postsolve(
x, postsolve_args, complete, tol
)
status, message = _check_result(
x, fun, status, slack, con,
lb, ub, tol, message
)
if disp:
_display_summary(message, status, fun, iteration)
return x, fun, slack, con, status, message
|
jamestwebber/scipy
|
scipy/optimize/_linprog_util.py
|
Python
|
bsd-3-clause
| 65,452 | 0.000993 |
import unittest
import pickle
import cPickle
import StringIO
import cStringIO
import pickletools
import copy_reg
from test.test_support import TestFailed, have_unicode, TESTFN
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
assert pickle.HIGHEST_PROTOCOL == cPickle.HIGHEST_PROTOCOL == 2
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
# Copy of test.test_support.run_with_locale. This is needed to support Python
# 2.4, which didn't include it. This is all to support test_xpickle, which
# bounces pickled objects through older Python versions to test backwards
# compatibility.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.func_name = func.func_name
inner.__doc__ = func.__doc__
return inner
return decorator
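# A hypothetical usage sketch of the decorator above (illustrative only; not part
# of the test suite): run a callable under a specific LC_NUMERIC locale, trying
# the candidates in order and restoring the original locale afterwards.
@run_with_locale('LC_NUMERIC', 'fr_FR.UTF-8', 'fr_FR', 'C')
def _example_locale_sensitive():
    # float formatting is locale-dependent; the decorator pins it down for the
    # duration of the call
    return repr(1.5)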
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code:
return True
return False
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code:
n += 1
return n
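# A hypothetical usage sketch of the two helpers above (illustrative only; not
# part of the test suite), using opcode constants defined in the pickle module.
def _example_opcode_helpers():
    p = pickle.dumps([1, 2, 3], 2)
    # protocol-2 pickles begin with a single PROTO opcode
    assert opcode_in_pickle(pickle.PROTO, p)
    assert count_opcode(pickle.PROTO, p) == 1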
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
# Remember current registration for code (if any), and remove it (if
# there is one).
def __init__(self, code):
self.code = code
if code in copy_reg._inverted_registry:
self.pair = copy_reg._inverted_registry[code]
copy_reg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
# Restore previous registration for code.
def restore(self):
code = self.code
curpair = copy_reg._inverted_registry.get(code)
if curpair is not None:
copy_reg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copy_reg.add_extension(pair[0], pair[1], code)
class C:
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
import __main__
__main__.C = C
C.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object):
__metaclass__ = metaclass
# DATA0 .. DATA2 are the pickles we expect under the various protocols, for
# the object returned by create_data().
# break into multiple strings to avoid confusing font-lock-mode
DATA0 = """(lp1
I0
aL1L
aF2
ac__builtin__
complex
p2
""" + \
"""(F3
F0
tRp3
aI1
aI-1
aI255
aI-255
aI-256
aI65535
aI-65535
aI-65536
aI2147483647
aI-2147483647
aI-2147483648
a""" + \
"""(S'abc'
p4
g4
""" + \
"""(i__main__
C
p5
""" + \
"""(dp6
S'foo'
p7
I1
sS'bar'
p8
I2
sbg5
tp9
ag9
aI5
a.
"""
# Disassembly of DATA0.
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 1
5: I INT 0
8: a APPEND
9: L LONG 1L
13: a APPEND
14: F FLOAT 2.0
17: a APPEND
18: c GLOBAL '__builtin__ complex'
39: p PUT 2
42: ( MARK
43: F FLOAT 3.0
46: F FLOAT 0.0
49: t TUPLE (MARK at 42)
50: R REDUCE
51: p PUT 3
54: a APPEND
55: I INT 1
58: a APPEND
59: I INT -1
63: a APPEND
64: I INT 255
69: a APPEND
70: I INT -255
76: a APPEND
77: I INT -256
83: a APPEND
84: I INT 65535
91: a APPEND
92: I INT -65535
100: a APPEND
101: I INT -65536
109: a APPEND
110: I INT 2147483647
122: a APPEND
123: I INT -2147483647
136: a APPEND
137: I INT -2147483648
150: a APPEND
151: ( MARK
152: S STRING 'abc'
159: p PUT 4
162: g GET 4
165: ( MARK
166: i INST '__main__ C' (MARK at 165)
178: p PUT 5
181: ( MARK
182: d DICT (MARK at 181)
183: p PUT 6
186: S STRING 'foo'
193: p PUT 7
196: I INT 1
199: s SETITEM
200: S STRING 'bar'
207: p PUT 8
210: I INT 2
213: s SETITEM
214: b BUILD
215: g GET 5
218: t TUPLE (MARK at 151)
219: p PUT 9
222: a APPEND
223: g GET 9
226: a APPEND
227: I INT 5
230: a APPEND
231: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (']q\x01(K\x00L1L\nG@\x00\x00\x00\x00\x00\x00\x00'
'c__builtin__\ncomplex\nq\x02(G@\x08\x00\x00\x00\x00\x00'
'\x00G\x00\x00\x00\x00\x00\x00\x00\x00tRq\x03K\x01J\xff\xff'
'\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xff'
'J\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00'
'\x00\x80J\x00\x00\x00\x80(U\x03abcq\x04h\x04(c__main__\n'
'C\nq\x05oq\x06}q\x07(U\x03fooq\x08K\x01U\x03barq\tK\x02ubh'
'\x06tq\nh\nK\x05e.'
)
# Disassembly of DATA1.
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 1
3: ( MARK
4: K BININT1 0
6: L LONG 1L
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 2
42: ( MARK
43: G BINFLOAT 3.0
52: G BINFLOAT 0.0
61: t TUPLE (MARK at 42)
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: U SHORT_BINSTRING 'abc'
118: q BINPUT 4
120: h BINGET 4
122: ( MARK
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: o OBJ (MARK at 122)
138: q BINPUT 6
140: } EMPTY_DICT
141: q BINPUT 7
143: ( MARK
144: U SHORT_BINSTRING 'foo'
149: q BINPUT 8
151: K BININT1 1
153: U SHORT_BINSTRING 'bar'
158: q BINPUT 9
160: K BININT1 2
162: u SETITEMS (MARK at 143)
163: b BUILD
164: h BINGET 6
166: t TUPLE (MARK at 112)
167: q BINPUT 10
169: h BINGET 10
171: K BININT1 5
173: e APPENDS (MARK at 3)
174: . STOP
highest protocol among opcodes = 1
"""
DATA2 = ('\x80\x02]q\x01(K\x00\x8a\x01\x01G@\x00\x00\x00\x00\x00\x00\x00'
'c__builtin__\ncomplex\nq\x02G@\x08\x00\x00\x00\x00\x00\x00G\x00'
'\x00\x00\x00\x00\x00\x00\x00\x86Rq\x03K\x01J\xff\xff\xff\xffK'
'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xff'
'J\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00'
'\x80(U\x03abcq\x04h\x04(c__main__\nC\nq\x05oq\x06}q\x07(U\x03foo'
'q\x08K\x01U\x03barq\tK\x02ubh\x06tq\nh\nK\x05e.')
# Disassembly of DATA2.
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 1
5: ( MARK
6: K BININT1 0
8: \x8a LONG1 1L
11: G BINFLOAT 2.0
20: c GLOBAL '__builtin__ complex'
41: q BINPUT 2
43: G BINFLOAT 3.0
52: G BINFLOAT 0.0
61: \x86 TUPLE2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: U SHORT_BINSTRING 'abc'
118: q BINPUT 4
120: h BINGET 4
122: ( MARK
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: o OBJ (MARK at 122)
138: q BINPUT 6
140: } EMPTY_DICT
141: q BINPUT 7
143: ( MARK
144: U SHORT_BINSTRING 'foo'
149: q BINPUT 8
151: K BININT1 1
153: U SHORT_BINSTRING 'bar'
158: q BINPUT 9
160: K BININT1 2
162: u SETITEMS (MARK at 143)
163: b BUILD
164: h BINGET 6
166: t TUPLE (MARK at 112)
167: q BINPUT 10
169: h BINGET 10
171: K BININT1 5
173: e APPENDS (MARK at 5)
174: . STOP
highest protocol among opcodes = 2
"""
def create_data():
c = C()
c.foo = 1
c.bar = 2
x = [0, 1L, 2.0, 3.0+0j]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads, self.error.
_testdata = create_data()
def setUp(self):
pass
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assertEqual(expected, got)
def test_load_from_canned_string(self):
expected = self._testdata
for canned in DATA0, DATA1, DATA2:
got = self.loads(canned)
self.assertEqual(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = cStringIO.StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def test_recursive_list(self):
l = []
l.append(l)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertEqual(len(x), 1)
self.assertTrue(x is x[0])
def test_recursive_tuple(self):
t = ([],)
t[0].append(t)
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertEqual(len(x), 1)
self.assertEqual(len(x[0]), 1)
self.assertTrue(x is x[0][0])
def test_recursive_dict(self):
d = {}
d[1] = d
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertEqual(x.keys(), [1])
self.assertTrue(x[1] is x)
def test_recursive_inst(self):
i = C()
i.attr = i
for proto in protocols:
s = self.dumps(i, 2)
x = self.loads(s)
self.assertEqual(dir(x), dir(i))
self.assertTrue(x.attr is x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = C()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(x[0].attr.keys(), [1])
self.assertTrue(x[0].attr[1] is x)
def test_garyp(self):
self.assertRaises(self.error, self.loads, 'garyp')
def test_insecure_strings(self):
insecure = ["abc", "2 + 2", # not quoted
#"'abc' + 'def'", # not a single quoted string
"'abc", # quote is not closed
"'abc\"", # open quote and close quote don't match
"'abc' ?", # junk after close quote
"'\\'", # trailing backslash
# some tests of the quoting rules
#"'abc\"\''",
#"'\\\\a\'\'\'\\\'\\\\\''",
]
for s in insecure:
buf = "S" + s + "\012p0\012."
self.assertRaises(ValueError, self.loads, buf)
if have_unicode:
def test_unicode(self):
endcases = [u'', u'<\\u>', u'<\\\u1234>', u'<\n>',
u'<\\>', u'<\\\U00012345>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assertEqual(u2, u)
def test_unicode_high_plane(self):
t = u'\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assertEqual(t2, t)
def test_ints(self):
import sys
for proto in protocols:
n = sys.maxint
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assertEqual(expected, n2)
n = n >> 1
def test_maxint64(self):
maxint64 = (1L << 63) - 1
data = 'I' + str(maxint64) + '\n.'
got = self.loads(data)
self.assertEqual(got, maxint64)
# Try too with a bogus literal.
data = 'I' + str(maxint64) + 'JUNK\n.'
self.assertRaises(ValueError, self.loads, data)
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1L << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assertEqual(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = long("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assertEqual(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
# make sure that floats are formatted locale independent
self.assertEqual(self.dumps(1.2)[0:3], 'F1.')
def test_reduce(self):
pass
def test_getinitargs(self):
pass
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assertEqual(t, u)
if hasattr(os, "stat"):
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assertEqual(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assertEqual(t, u)
# Tests for protocol 2
def test_proto(self):
build_none = pickle.NONE + pickle.STOP
for proto in protocols:
expected = build_none
if proto >= 2:
expected = pickle.PROTO + chr(proto) + expected
p = self.dumps(None, proto)
self.assertEqual(p, expected)
oob = protocols[-1] + 1 # a future protocol
badpickle = pickle.PROTO + chr(oob) + build_none
try:
self.loads(badpickle)
except ValueError, detail:
self.assertTrue(str(detail).startswith(
"unsupported pickle protocol"))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920L
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920L << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y, (proto, x, s, y))
expected = expected_opcode[proto, len(x)]
self.assertEqual(opcode_in_pickle(expected, s), True)
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[proto, x]
self.assertEqual(opcode_in_pickle(expected, s), True)
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(tuple(x), tuple(y))
self.assertEqual(x.__dict__, y.__dict__)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
# Register a type with copy_reg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copy_reg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertIn(__name__, s1)
self.assertIn("MyList", s1)
self.assertEqual(opcode_in_pickle(opcode, s1), False)
y = self.loads(s1)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertNotIn(__name__, s2)
self.assertNotIn("MyList", s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True)
y = self.loads(s2)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
finally:
e.restore()
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = range(n)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = range(n)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.assertTrue(num_setitems >= 2)
def test_simple_newobj(self):
x = object.__new__(SimpleNewObj) # avoid __init__
x.abc = 666
for proto in protocols:
s = self.dumps(x, proto)
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s), proto >= 2)
y = self.loads(s) # will raise TypeError if __init__ called
self.assertEqual(y.abc, 666)
self.assertEqual(x.__dict__, y.__dict__)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
self.assertEqual(x.foo, y.foo)
self.assertEqual(x.bar, y.bar)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
        # Protocol 0 is less strict and also accepts iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except (AttributeError, pickle.PickleError, cPickle.PickleError):
pass
try:
self.dumps(D(), proto)
except (AttributeError, pickle.PickleError, cPickle.PickleError):
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in xrange(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, obj,
"Failed protocol %d: %r != %r"
% (proto, obj, loaded))
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
# Test classes for reduce_ex
class REX_one(object):
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
# No __reduce_ex__ here, but inheriting it from object
class REX_two(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
# No __reduce__ here, but inheriting it from object
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed, "This __reduce__ shouldn't be called"
class REX_four(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
# Calling base class method should succeed
class REX_five(object):
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
# This one used to fail with infinite recursion
# Test classes for newobj
class MyInt(int):
sample = 1
class MyLong(long):
sample = 1L
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(unicode):
sample = u"hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
myclasses = [MyInt, MyLong, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict]
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(object):
def __init__(self, a, b, c):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
import os
f = open(TESTFN, "w")
try:
f.close()
self.assertRaises(ValueError, self.module.dump, 123, f)
finally:
os.remove(TESTFN)
def test_load_closed_file(self):
import os
f = open(TESTFN, "w")
try:
f.close()
self.assertRaises(ValueError, self.module.dump, 123, f)
finally:
os.remove(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = cStringIO.StringIO()
data = [123, {}, 124]
self.module.dump(data, stream)
stream.seek(0)
unpickled = self.module.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(self.module.HIGHEST_PROTOCOL, 2)
def test_callapi(self):
f = cStringIO.StringIO()
# With and without keyword arguments
self.module.dump(123, f, -1)
self.module.dump(123, file=f, protocol=-1)
self.module.dumps(123, -1)
self.module.dumps(123, protocol=-1)
self.module.Pickler(f, -1)
self.module.Pickler(f, protocol=-1)
def test_incomplete_input(self):
s = StringIO.StringIO("X''.")
self.assertRaises(EOFError, self.module.load, s)
def test_restricted(self):
# issue7128: cPickle failed in restricted mode
builtins = {self.module.__name__: self.module,
'__import__': __import__}
d = {}
teststr = "def f(): {0}.dumps(0)".format(self.module.__name__)
exec teststr in {'__builtins__': builtins}, d
d['f']()
def test_bad_input(self):
# Test issue4298
s = '\x58\0\0\0\x54'
self.assertRaises(EOFError, self.module.loads, s)
# Test issue7455
s = '0'
# XXX Why doesn't pickle raise UnpicklingError?
self.assertRaises((IndexError, cPickle.UnpicklingError),
self.module.loads, s)
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
else:
return None
def persistent_load(self, oid):
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
self.id_count = 0
self.load_count = 0
L = range(10)
self.assertEqual(self.loads(self.dumps(L)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.load_count, 5)
def test_bin_persistence(self):
self.id_count = 0
self.load_count = 0
L = range(10)
self.assertEqual(self.loads(self.dumps(L, 1)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.load_count, 5)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
pickler_class = None
unpickler_class = None
def setUp(self):
assert self.pickler_class
assert self.unpickler_class
def test_clear_pickler_memo(self):
# To test whether clear_memo() has any effect, we pickle an object,
# then pickle it again without clearing the memo; the two serialized
# forms should be different. If we clear_memo() and then pickle the
# object again, the third serialized form should be identical to the
# first one we obtained.
data = ["abcdefg", "abcdefg", 44]
f = cStringIO.StringIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
# Reset StringIO object.
f.seek(0)
f.truncate()
pickler.dump(data)
second_pickled = f.getvalue()
# Reset the Pickler and StringIO objects.
pickler.clear_memo()
f.seek(0)
f.truncate()
pickler.dump(data)
third_pickled = f.getvalue()
self.assertNotEqual(first_pickled, second_pickled)
self.assertEqual(first_pickled, third_pickled)
def test_priming_pickler_memo(self):
# Verify that we can set the Pickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = cStringIO.StringIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = cStringIO.StringIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
self.assertNotEqual(first_pickled, primed_pickled)
def test_priming_unpickler_memo(self):
# Verify that we can set the Unpickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = cStringIO.StringIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = cStringIO.StringIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
unpickler = self.unpickler_class(cStringIO.StringIO(first_pickled))
unpickled_data1 = unpickler.load()
self.assertEqual(unpickled_data1, data)
primed = self.unpickler_class(cStringIO.StringIO(primed_pickled))
primed.memo = unpickler.memo
unpickled_data2 = primed.load()
primed.memo.clear()
self.assertEqual(unpickled_data2, data)
self.assertTrue(unpickled_data2 is unpickled_data1)
def test_reusing_unpickler_objects(self):
data1 = ["abcdefg", "abcdefg", 44]
f = cStringIO.StringIO()
pickler = self.pickler_class(f)
pickler.dump(data1)
pickled1 = f.getvalue()
data2 = ["abcdefg", 44, 44]
f = cStringIO.StringIO()
pickler = self.pickler_class(f)
pickler.dump(data2)
pickled2 = f.getvalue()
f = cStringIO.StringIO()
f.write(pickled1)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data1)
f.seek(0)
f.truncate()
f.write(pickled2)
f.seek(0)
self.assertEqual(unpickler.load(), data2)
|
antb/TPT----My-old-mod
|
src/python/stdlib/test/pickletester.py
|
Python
|
gpl-2.0
| 39,620 | 0.001514 |
#
# Andrei Korostelev <andrei at korostelev dot net>
#
# Before using this product in any way please read the license agreement.
# If you do not agree to the terms in this agreement you are not allowed
# to use this product or parts of it. You can read this license in the
# file named LICENSE.
#
"""
CcPy project configuration file parser
"""
import xml.etree.ElementTree as ET
import logging
from copy import deepcopy
from .common import LoggerName
from .util import EmailFormat, EmailSecurity, formatTb
from . import svntask
from . import gittask
from . import maketask
from . import exectask
Logger = logging.getLogger(LoggerName)
DefCcPyConfigFileName = "/etc/ccpy.conf"
def _get_elem_str_value(element, default_value):
if element is not None:
return element.text
else:
return default_value
def _get_elem_int_value(element, default_value):
if element is not None:
return int(element.text)
else:
return default_value
def _get_elem_bool_value(element, default_value):
if element is not None:
if element.text.lower() in ('on', 'yes', 'true'):
return True
elif element.text.lower() in ('off', 'no', 'false'):
return False
else:
raise Exception("Invalid boolean value: %s in %s" % (element.text, element.tag))
else:
return default_value
def _get_elem_email_format_value(element, default_value):
if element is not None:
return EmailFormat[element.text]
else:
return default_value
def _get_elem_email_security_value(element, default_value):
if element is not None:
return EmailSecurity[element.text]
else:
return default_value
def _get_elem_list_value(element, default_value):
if element is not None:
return element.text.split(', ')
else:
return default_value
def _get_elem_tasks_value(element, default_value):
if element is None:
return default_value
tasks = []
for task in element:
if task.tag == 'sourcecontrol':
if task.attrib['type'] in ('svn', 'git'):
url = task.find('./url').text
workingDirectory = _get_elem_str_value(task.find('./workingDirectory'), '')
preCleanWorkingDirectory = _get_elem_bool_value(
task.find('./preCleanWorkingDirectory'),
False)
if task.attrib['type'] == 'svn':
tasks.append(svntask.SvnTask(url, workingDirectory, preCleanWorkingDirectory))
else: # git
tasks.append(gittask.GitTask(url, workingDirectory, preCleanWorkingDirectory))
else:
Logger.warning('Unsupported sourcecontrol type ' + task.attrib['type'])
elif task.tag == 'make':
workingDirectory = _get_elem_str_value(task.find('./workingDirectory'), '')
args = _get_elem_str_value(task.find('./args'), '')
timeout = _get_elem_int_value(task.find('./timeout'), 600)
tasks.append(maketask.MakeTask(workingDirectory, args, timeout))
elif task.tag == 'exec':
executable = task.find('./executable').text
workingDirectory = _get_elem_str_value(task.find('./workingDirectory'), '')
args = _get_elem_str_value(task.find('./args'), '')
timeout = _get_elem_int_value(task.find('./timeout'), 600)
warningExitCode = _get_elem_int_value(task.find('./warningExitCode'), None)
tasks.append(
exectask.ExecTask(
executable,
workingDirectory,
args,
timeout,
warningExitCode))
else:
Logger.warning('Unsupported task ' + task.tag)
return tasks
class ParseError(Exception):
pass
class Projects:
def __init__(self):
self._projects = []
self.cur = 0
def exists(self, name):
for project in self._projects:
if project['name'] == name:
return True
return False
def append(self, name,
tasks,
emailFrom, emailTo, emailFormat,
emailServerHost, emailServerPort,
emailServerSecurity,
emailServerUsername, emailServerPassword,
emailAttachments,
failOnError):
if self.exists(name):
raise Exception(
"Failed to add project because the project named '%s' already exists" %
name)
if tasks is None:
tasks = []
if emailTo is None:
emailTo = []
self._projects.append({'name': name,
'tasks': tasks,
'emailFrom': emailFrom,
'emailTo': emailTo,
'emailFormat': emailFormat,
'emailServerHost': emailServerHost,
'emailServerPort': emailServerPort,
'emailServerSecurity': emailServerSecurity,
'emailServerUsername': emailServerUsername,
'emailServerPassword': emailServerPassword,
'emailAttachments': emailAttachments,
'failOnError': failOnError})
def addTask(self, name, task):
if not self.exists(name):
raise Exception(
"Failed to add task because the project named '%s' does not exist" %
name)
for project in self._projects:
if project['name'] == name:
project['tasks'].append(task)
def next(self):
if self.cur >= len(self._projects):
self.cur = 0
raise StopIteration
else:
cur = self.cur
self.cur = cur + 1
key = self._projects[cur]['name']
val = deepcopy(self._projects[cur])
val.pop('name')
return key, val
def __next__(self):
# for compatibility between Python 2 that uses next() and Python 3 that uses __next__()
return self.next()
def __iter__(self):
return self
def __getitem__(self, name):
for project in self._projects:
if project['name'] == name:
retVal = deepcopy(project)
retVal.pop('name')
return retVal
raise Exception("Project named '%s' does not exist" % name)
def __len__(self):
return len(self._projects)
def parse(aCcPyConfigFileName=DefCcPyConfigFileName):
"""Parse ccpy project configuration file
    Return an instance of the Projects class.
    Projects and tasks within each project are returned in the order they appear in the config file.
    Supported tasks are: SvnTask, GitTask, MakeTask and ExecTask
    Throws ParseError on invalid configuration
"""
try:
Logger.debug("Reading ccpy configuration from %s..." % aCcPyConfigFileName)
tree = ET.parse(aCcPyConfigFileName)
root = tree.getroot()
if (root.tag != 'ccpy'):
raise Exception('Invalid root tag name: ' + root.tag)
projects = Projects()
for projectElem in root.findall('./project'):
tasks = _get_elem_tasks_value(projectElem.find('./tasks'), None)
emailFrom = _get_elem_str_value(projectElem.find('./emailNotification/from'), "")
emailTo = _get_elem_list_value(projectElem.find('./emailNotification/to'), None)
emailFormat = _get_elem_email_format_value(
projectElem.find('./emailNotification/format'),
EmailFormat.attachment)
emailServerHost = _get_elem_str_value(
projectElem.find('./emailNotification/server'),
'localhost')
emailServerPort = _get_elem_int_value(
projectElem.find('./emailNotification/port'),
25)
emailServerSecurity = _get_elem_email_security_value(
projectElem.find('./emailNotification/security'),
EmailSecurity.none)
emailServerUsername = _get_elem_str_value(
projectElem.find('./emailNotification/username'),
None)
emailServerPassword = _get_elem_str_value(
projectElem.find('./emailNotification/password'),
None)
emailAttachments = []
for emailAttachment in projectElem.findall('./emailNotification/attachment'):
emailAttachments.append(emailAttachment.text)
failOnError = _get_elem_bool_value(projectElem.find('./failOnError'), True)
projects.append(projectElem.attrib['name'],
tasks=tasks,
emailFrom=emailFrom,
emailTo=emailTo,
emailFormat=emailFormat,
emailServerHost=emailServerHost,
emailServerPort=emailServerPort,
emailServerSecurity=emailServerSecurity,
emailServerUsername=emailServerUsername,
emailServerPassword=emailServerPassword,
emailAttachments=emailAttachments,
failOnError=failOnError)
return projects
except Exception as e:
raise ParseError(
"Failed to parse %s. %s: %s. %s" %
(aCcPyConfigFileName, type(e), str(e), formatTb()))
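# --- Hedged usage sketch (not part of the original module) -------------------
# Shows how the parse() entry point above is typically consumed. The config
# path and the project layout are illustrative assumptions; only the call
# signatures and dict keys mirror the code above.
#
#   from ccpy.ccpyconfparser import parse, ParseError
#
#   try:
#       projects = parse("/etc/ccpy.conf")
#       for name, settings in projects:          # Projects yields (name, dict)
#           task_names = [type(t).__name__ for t in settings['tasks']]
#           print("%s -> %s (notify: %s)" % (name, task_names, settings['emailTo']))
#   except ParseError as e:
#       print("Configuration error: %s" % e)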
|
kindkaktus/CcPy
|
ccpy/ccpyconfparser.py
|
Python
|
bsd-3-clause
| 9,656 | 0.001761 |
# Source: LMD/models.py
from google.appengine.ext import db
class Users(db.Model):
username = db.StringProperty()
passwd = db.StringProperty()
email = db.StringProperty()
fullname = db.StringProperty()
address = db.TextProperty()
phone = db.StringProperty()
role = db.IntegerProperty(default=99)
livecenter = db.ListProperty(db.Key,default=[])
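# Hedged usage sketch (not in the original file): how this model would be used
# with the App Engine db API it extends. Field values are illustrative.
#
#   user = Users(username='jdoe', email='jdoe@example.com', role=1)
#   user.put()
#   admins = Users.all().filter('role =', 1).fetch(10)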
|
rimbalinux/LMD3
|
user/models.py
|
Python
|
bsd-3-clause
| 383 | 0.015666 |
# Deckard, a Web based Glade Runner
# Copyright (C) 2013 Nicolas Delvaux <contact@nicolas-delvaux.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Sessions handling and utilities for the deckard project"""
import os
import re
import locale
import shutil
import tempfile
import urllib.request
from uuid import uuid4
from threading import Lock, Timer
from collections import OrderedDict
from subprocess import Popen, PIPE, STDOUT, check_output, CalledProcessError
from languages import locale_language_mapping
class DeckardException(Exception):
"""Standard exception"""
def __init__(self, short, log):
Exception.__init__(self, "%s\n\n%s" % (short, log))
class Session:
"""
This represents a Deckard session for one user.
It manages its gladerunner instance (both launch and keep-alive) and custom
PO files.
Everything is cleaned-up when the session is deleted.
"""
def __init__(
self,
uuid,
gladerunner,
content_root,
max_custom_po,
max_po_download_size,
glade_catalog,
po_urls,
):
self.port = 0
self.uuid = uuid # unique id to avoid session spoofing
self.process = None
self.custom_po = OrderedDict() # {po_name: (module, root_path, lang)}
self.removable = False # can the manager delete this Session?
self.gladerunner = gladerunner
self.content_root = content_root
self.max_custom_po = max_custom_po
self.max_po_download_size = max_po_download_size
self.glade_catalog = glade_catalog
# URL sorted by priority
# If one URL does not work, the next one will be tried
self.po_urls = po_urls
def spawn_runner(self, module, module_file, language, port):
"""Launch a gladerunner instance.
If a running process is attached to this session, it will be replaced.
"""
self.port = port
env = {
"GDK_BACKEND": "broadway",
"UBUNTU_MENUPROXY": "",
"LIBOVERLAY_SCROLLBAR": "0",
}
if self.process is not None and self.process.poll() is None:
self.process.kill()
if language in self.custom_po:
if self.custom_po[language][0] != module:
raise DeckardException(
'"%s" does not exist' % language,
"No such file was registered for the " "%s module." % module,
)
lang_root = os.path.join(self.custom_po[language][1], "LANGS")
# This locale has to be available on your system
language = "%s.UTF-8" % self.custom_po[language][2]
else:
if language != "POSIX":
language = "%s.UTF-8" % language
lang_root = os.path.join(self.content_root, "LANGS")
env["LANG"] = language
# Build the gladerunner command line
args = [
self.gladerunner,
"--suicidal",
"--with-broadwayd",
str(port),
os.path.join(self.content_root, module, module_file),
module,
language,
lang_root,
]
# Should we use a Glade catalog?
if os.path.isfile(self.glade_catalog):
args.extend(("--catalog-path", self.glade_catalog))
# Launch it!
self.process = Popen(args, stdin=PIPE, env=env)
def store_po(self, name, module, fd=None):
"""Store a custom PO file
If fd is None, try to download name from self.po_urls.
Each url of the list will be tried until the file is found.
If a PO file with the same name is already attached to this session,
it will be replaced.
Returns a dictionary, associating all relevant modules with a list of
stored PO files for it on this session, from the oldest to the newest.
"""
# Very basic check, msgfmt will crash anyway if the file is not valid
if not name.lower().endswith(".po"):
raise DeckardException(
"This is not a PO file", "%s is not a PO file." % name
)
lang_root = tempfile.mkdtemp(prefix="deckard_")
po_path = os.path.join(lang_root, "file.po")
po = open(po_path, "bw")
if fd is not None:
# The file was sent by the user
for line in fd:
po.write(line)
po.close()
fd.close()
elif len(self.po_urls) > 0:
# Let's try to download 'name'
response = None
error = None
for url in self.po_urls:
try:
response = urllib.request.urlopen(url % name)
break
except Exception as e:
error = str(e)
if response is None:
# Most likely a '404: not found' error
                raise DeckardException("Unable to retrieve the file", error)
res_len = response.length
if res_len > self.max_po_download_size:
response.close()
raise DeckardException(
"File too big",
                    'The "%s" file is %d bytes long and this app '
"will not retrieve a file bigger than "
"%d bytes." % (name, res_len, self.max_po_download_size),
)
# Let's finally download this file!
po.write(response.read(res_len))
response.close()
po.close()
else:
raise DeckardException(
"Operation not supported",
"The PO download feature is not configured " "on this instance.",
)
# Try to guess the language of this PO file, default is 'en_US'
# This is good to know to later set proper environment variables and so
# load the right GTK translation and reverse the interface if necessary
po_lang = "en_US"
with open(po_path, encoding="utf8") as po:
            # Give up if we find nothing in the first 50 lines
for _ in range(50):
line = po.readline()
match = re.match(r'^"Language: (.+)\\n"$', line)
if match:
po_lang = match.group(1)
# The encoding is often wrong, so strip it
po_lang = locale.normalize(po_lang).rsplit(".")[0]
# Test if the detected locale is available on the system
try:
locale.setlocale(locale.LC_ALL, "%s.UTF-8" % po_lang)
except:
# Fallback to a known locale
po_lang = "en_US"
finally:
locale.resetlocale()
break
# create necessary directories
mo_path = os.path.join(lang_root, "LANGS", po_lang, "LC_MESSAGES")
os.makedirs(mo_path)
try:
check_output(
[
"/usr/bin/msgfmt",
"--check",
"--output-file",
os.path.join(mo_path, module) + ".mo",
po_path,
],
stderr=STDOUT,
)
except CalledProcessError as e:
shutil.rmtree(lang_root)
# We don't need to expose the file name in the error message
log = e.output.decode("unicode_escape").replace("%s:" % po_path, "")
raise DeckardException("Error while building the .mo", log)
if name in self.custom_po:
shutil.rmtree(self.custom_po[name][1])
del self.custom_po[name] # drop to re-add at the end of the queue
elif len(self.custom_po) >= self.max_custom_po:
# delete the oldest
shutil.rmtree(self.custom_po.popitem(last=False)[1][1])
self.custom_po[name] = (module, lang_root, po_lang)
res = {}
for item in self.custom_po:
if self.custom_po[item][0] not in res:
res[self.custom_po[item][0]] = [item]
else:
res[self.custom_po[item][0]].append(item)
return res
def keep_process_alive(self):
"""Beg the runner (if any) to stay alive
Returns True if the message was sent, False if it wasn't (eg. if there
is no process)."""
if self.process is not None and self.process.poll() is None:
self.process.stdin.write(b"Please stay alive!")
self.process.stdin.flush()
return True
return False
def is_removable(self):
"""State if this Session is removable.
Returns True if no running process is attached to this Session and
if no PO file is stored.
It also returns True if this Session was tagged as removable.
Otherwise, this function will return False.
"""
if self.removable:
return True
elif self.process is None or self.process.poll() is not None:
if len(self.custom_po) == 0:
return True
return False
def __del__(self):
"""Kill the process if it is running and delete any custom PO files"""
if self.process is not None and self.process.poll() is None:
self.process.kill()
for name in self.custom_po:
shutil.rmtree(self.custom_po[name][1])
class SessionsManager:
"""Helper to manage all Deckard sessions."""
def __init__(
self,
gladerunner,
content_root,
max_users=10,
first_port=2019,
max_custom_po_per_session=4,
max_po_download_size=1500000,
glade_catalog="",
po_urls=[],
):
self.gladerunner = gladerunner
self.content_root = content_root
self.max_users = max_users
self.first_port = first_port
self.max_custom_po_per_session = max_custom_po_per_session
self.max_po_download_size = max_po_download_size
self.glade_catalog = glade_catalog
self.po_urls = po_urls
self.sessions = {} # Sessions, by UUID
        self._lock = Lock()  # ensures only one session is manipulated at a time
self._cleanup_loop_running = False
def _get_session(self, uuid):
"""Returns the Session object from an UUID.
Returns None if the Session does not exist."""
if uuid in self.sessions:
return self.sessions[uuid]
else:
return None
def _create_session(self):
"""Create a new session an returns its uuid
Raise an exception if we don't have room for one more session.
"""
if len(self.sessions) >= self.max_users:
raise DeckardException(
"Too many users!",
"For performance purposes, this "
"application is currently limited to %d "
"simultaneous sessions.\n"
"You may want to retry in a few minutes." % self.max_users,
)
uuid = str(uuid4())
self.sessions[uuid] = Session(
uuid,
self.gladerunner,
self.content_root,
self.max_custom_po_per_session,
self.max_po_download_size,
self.glade_catalog,
self.po_urls,
)
if not self._cleanup_loop_running:
self._cleanup_loop(init=True) # Restart the cleanup loop
self._cleanup_loop_running = True
return uuid
def _find_free_port(self):
"""Returns a free port ready to be used by a session.
Checked ports are between first_port and (first_port + max_users - 1).
"""
for port in range(self.first_port, self.first_port + self.max_users):
try_next = False
for uuid in self.sessions:
if self.sessions[uuid].port == port:
try_next = True
break
if not try_next:
return port
# No free port!
        # This should never happen if you managed to create a session
raise DeckardException(
"Could not find a free port.",
"This should never happen.\n" "Please report this bug.",
)
def spawn_runner(self, uuid, module, module_file, language):
"""Ask a session to launch a gladerunner instance.
If a running process is attached to this session, it will be replaced.
Returns a tuple with the session uuid and the port of the launched
instance.
"""
with self._lock:
# get or create the session
session = self._get_session(uuid)
if session is None:
uuid = self._create_session()
session = self._get_session(uuid)
else:
                session.removable = False
if session.port == 0:
port = self._find_free_port()
else:
port = session.port # Reuse the same port
session.spawn_runner(module, module_file, language, port)
return uuid, port
def store_po(self, uuid, name, module, fd=None):
"""Ask a session to store a PO file.
If fd is None, try to download name from session.po_urls.
If a PO file with the same name is already attached to this session,
it will be replaced.
Returns a tuple with the session uuid and a dictionary, associating all
relevant modules with a list of stored PO files for it on this session,
from the oldest to the newest.
"""
with self._lock:
# get or create the session
session = self._get_session(uuid)
if session is None:
uuid = self._create_session()
session = self._get_session(uuid)
else:
session.removable = False
session.keep_process_alive() # if any
return uuid, session.store_po(name, module, fd)
def keep_alive(self, uuid):
"""Keep the uuid session alive a bit more.
Returns False in case of problem (the session is already dead?),
True otherwise.
"""
with self._lock:
session = self._get_session(uuid)
if session is not None:
session.removable = False
session.keep_process_alive() # if any
return True
return False
def _cleanup_loop(self, timer=5, init=False):
"""Delete garbage sessions regularly.
If init is True, do not acquire lock in this iteration."""
if not init:
self._lock.acquire()
try:
for uuid in list(self.sessions.keys()):
if not init and self.sessions[uuid].is_removable():
del self.sessions[uuid]
else:
# This session may be deleted next time (if no keep_alive)
self.sessions[uuid].removable = True
if len(self.sessions) > 0:
Timer(timer, self._cleanup_loop, (timer,)).start()
else:
# Break the loop when there is no more sessions
self._cleanup_loop_running = False
finally:
if not init:
self._lock.release()
def get_displayable_content(self):
"""Build the content structure by exploring self.content_root
The returned structure is as below:
{'LANG': {'locale1_code': 'locale1_name_in_the_relative_locale',
'locale2_code': 'locale2_name_in_the_relative_locale'},
'MODULES': {'module1': ['file1.ui', 'file2.glade'],
'module2': ['file1.xml', 'path/in/module/file2.ui']}
}
"""
content = {"LANGS": {}, "MODULES": {}}
for lang in os.listdir(os.path.join(self.content_root, "LANGS")):
if lang in locale_language_mapping:
content["LANGS"][lang] = locale_language_mapping[lang]
for item in os.listdir(self.content_root):
if (
not os.path.isdir(os.path.join(self.content_root, item))
or item == "LANGS"
):
continue
content["MODULES"][item] = []
modules_to_ignore = set()
for module in content["MODULES"]:
mod_root = os.path.join(self.content_root, module)
ui_found = False
for root, _, files in os.walk(mod_root):
for file_ in files:
_, ext = os.path.splitext(file_)
ext = ext.lower()
if ext == ".ui" or ext == ".xml" or ext == ".glade":
ui_found = True
rel_path = os.path.join(root, file_).split(mod_root)[1]
rel_path = rel_path[1:] # strip the leading '/'
content["MODULES"][module].append(rel_path)
if not ui_found:
# Nothing is displayable in this folder, ignore it
modules_to_ignore.add(module)
# Finally, filter empty modules
for module in modules_to_ignore:
del content["MODULES"][module]
return content
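# --- Hedged usage sketch (not part of the original module) -------------------
# Wiring a SessionsManager into server code. The paths, module and file names
# are illustrative assumptions; only the call signatures mirror the class above.
#
#   manager = SessionsManager(gladerunner="/usr/bin/gladerunner",
#                             content_root="/srv/deckard/content")
#   content = manager.get_displayable_content()   # {'LANGS': ..., 'MODULES': ...}
#   uuid, port = manager.spawn_runner(None, "some-module", "some-file.ui", "fr_FR")
#   manager.keep_alive(uuid)   # call periodically while the client is active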
|
Malizor/deckard
|
libdeckard.py
|
Python
|
agpl-3.0
| 17,967 | 0.000223 |
#!/usr/bin/env python
import os
import sys
import shutil
from autoprocess import autoProcessTV, autoProcessMovie, autoProcessTVSR, sonarr, radarr
from readSettings import ReadSettings
from mkvtomp4 import MkvtoMp4
from deluge_client import DelugeRPCClient
import logging
from logging.config import fileConfig
logpath = '/var/log/sickbeard_mp4_automator'
if os.name == 'nt':
logpath = os.path.dirname(sys.argv[0])
elif not os.path.isdir(logpath):
try:
os.mkdir(logpath)
except:
logpath = os.path.dirname(sys.argv[0])
configPath = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'logging.ini')).replace("\\", "\\\\")
logPath = os.path.abspath(os.path.join(logpath, 'index.log')).replace("\\", "\\\\")
fileConfig(configPath, defaults={'logfilename': logPath})
log = logging.getLogger("delugePostProcess")
log.info("Deluge post processing started.")
settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini")
categories = [settings.deluge['sb'], settings.deluge['cp'], settings.deluge['sonarr'], settings.deluge['radarr'], settings.deluge['sr'], settings.deluge['bypass']]
remove = settings.deluge['remove']
if len(sys.argv) < 4:
log.error("Not enough command line parameters present, are you launching this from deluge?")
sys.exit()
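# Note (assumption, not part of the original script): Deluge's Execute plugin
# is expected to call this script on torrent completion with the torrent hash,
# name and save path, matching the sys.argv[1..3] reads below, e.g.:
#   python delugePostProcess.py <torrent_id> <torrent_name> <save_path>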
path = str(sys.argv[3])
torrent_name = str(sys.argv[2])
torrent_id = str(sys.argv[1])
delete_dir = None
log.debug("Path: %s." % path)
log.debug("Torrent: %s." % torrent_name)
log.debug("Hash: %s." % torrent_id)
client = DelugeRPCClient(host=settings.deluge['host'], port=int(settings.deluge['port']), username=settings.deluge['user'], password=settings.deluge['pass'])
client.connect()
if client.connected:
log.info("Successfully connected to Deluge")
else:
log.error("Failed to connect to Deluge")
sys.exit()
torrent_data = client.call('core.get_torrent_status', torrent_id, ['files', 'label'])
try:
torrent_files = torrent_data[b'files']
category = torrent_data[b'label'].lower().decode()
except:
torrent_files = torrent_data['files']
category = torrent_data['label'].lower()
files = []
log.debug("List of files in torrent:")
for contents in torrent_files:
try:
files.append(contents[b'path'].decode())
log.debug(contents[b'path'].decode())
except:
files.append(contents['path'])
log.debug(contents['path'])
if category.lower() not in categories:
log.error("No valid category detected.")
sys.exit()
if len(categories) != len(set(categories)):
log.error("Duplicate category detected. Category names must be unique.")
sys.exit()
if settings.deluge['convert']:
# Check for custom Deluge output_dir
if settings.deluge['output_dir']:
settings.output_dir = settings.deluge['output_dir']
log.debug("Overriding output_dir to %s." % settings.deluge['output_dir'])
# Perform conversion.
settings.delete = False
if not settings.output_dir:
suffix = "convert"
settings.output_dir = os.path.join(path, ("%s-%s" % (torrent_name, suffix)))
if not os.path.exists(settings.output_dir):
os.mkdir(settings.output_dir)
delete_dir = settings.output_dir
converter = MkvtoMp4(settings)
for filename in files:
inputfile = os.path.join(path, filename)
if MkvtoMp4(settings).validSource(inputfile):
log.info("Converting file %s at location %s." % (inputfile, settings.output_dir))
try:
output = converter.process(inputfile)
except:
log.exception("Error converting file %s." % inputfile)
path = converter.output_dir
else:
suffix = "copy"
newpath = os.path.join(path, ("%s-%s" % (torrent_name, suffix)))
if not os.path.exists(newpath):
os.mkdir(newpath)
for filename in files:
inputfile = os.path.join(path, filename)
log.info("Copying file %s to %s." % (inputfile, newpath))
shutil.copy(inputfile, newpath)
path = newpath
delete_dir = newpath
# Send to Sickbeard
if (category == categories[0]):
log.info("Passing %s directory to Sickbeard." % path)
autoProcessTV.processEpisode(path, settings)
# Send to CouchPotato
elif (category == categories[1]):
log.info("Passing %s directory to Couch Potato." % path)
autoProcessMovie.process(path, settings, torrent_name)
# Send to Sonarr
elif (category == categories[2]):
log.info("Passing %s directory to Sonarr." % path)
sonarr.processEpisode(path, settings)
elif (category == categories[3]):
log.info("Passing %s directory to Radarr." % path)
radarr.processMovie(path, settings)
elif (category == categories[4]):
log.info("Passing %s directory to Sickrage." % path)
autoProcessTVSR.processEpisode(path, settings)
elif (category == categories[5]):
log.info("Bypassing any further processing as per category.")
if delete_dir:
if os.path.exists(delete_dir):
try:
os.rmdir(delete_dir)
            log.debug("Successfully removed temporary directory %s." % delete_dir)
except:
log.exception("Unable to delete temporary directory.")
if remove:
try:
client.call('core.remove_torrent', torrent_id, True)
except:
log.exception("Unable to remove torrent from deluge.")
|
Filechaser/sickbeard_mp4_automator
|
delugePostProcess.py
|
Python
|
mit
| 5,303 | 0.003206 |
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
from django.views.generic.base import View
app_name = 'secapp'
urlpatterns = [
# Index view
url(r'^$', views.index, name='index'),
# List of events for a Log Source
url(r'^(?P<id_log_source>[0-9]+)/event/$', views.events, name='events'),
# Packet of an event (for a Log Source)
url(r'^(?P<id_log_source>[0-9]+)/event/(?P<id_event>[0-9]+)$', views.event_information,
name='event_information'),
# Additional information about a packet event
url(r'^(?P<id_log_source>[0-9]+)/event/(?P<id_event>[0-9]+)/additional_info/$',
views.additional_info,
name='additional_info'),
url(r'^api/events/$', views.EventsInformation().events_list, name='events_list'),
url(r'^api/events/by_source/(?P<pk>[0-9]+)/$', views.EventsInformation().events_by_source,
name='events_by_source'),
url(r'^api/events/by_source/(?P<pk>[0-9]+)/(?P<fk>[0-9]+)/$', views.EventsInformation().events_by_source_detail,
name='events_by_source_detail'),
url(r'^api/events/(?P<pk>[0-9]+)/json$', views.EventsInformation().event_detail, name='event_detail'),
url(r'^api/events/(?P<pk>[0-9]+)/$', views.EventsInformation.as_view()),
url(r'^api/events/hour/(?P<pk>[0-9]+)/$', views.EventsInformation().events_source_in_hour,
name='events_source_in_hour'),
url(r'^api/events/day/(?P<pk>[0-9]+)/$', views.EventsInformation().events_source_in_day,
name='events_source_in_day'),
url(r'^api/events/week/(?P<pk>[0-9]+)/$', views.EventsInformation().events_source_in_week,
name='events_source_in_week'),
url(r'^api/events/month/(?P<pk>[0-9]+)/$', views.EventsInformation().events_source_in_month,
name='events_source_in_month'),
url(r'^api/events/year/(?P<pk>[0-9]+)/$', views.EventsInformation().events_source_in_year,
name='events_source_in_year'),
url(r'^api/events/last_day/(?P<pk>[0-9]+)/$', views.EventsInformation().events_source_last_day,
name='events_source_last_day'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
MGautier/security-sensor
|
branches/webenv/secproject/secapp/urls.py
|
Python
|
mit
| 2,174 | 0.00552 |
#!python
"""
VMController Host - a general purpose host-side virtual machine controller via exposed hypervisors apis.
"""
try:
import os
import sys
import logging
import warnings
import multiprocessing
import time
import inject
from twisted.internet import reactor
from pkg_resources import resource_stream
from ConfigParser import SafeConfigParser
from optparse import OptionParser
from vmcontroller.common import StompProtocolFactory, StompProtocol
from vmcontroller.host.config import init_config, init_config_file, debug_config
from vmcontroller.host.controller import HyperVisorController
from vmcontroller.host.services import HostStompEngine, HostWords
from vmcontroller.host.services.HostServices import Host, HostXMLRPCService
except ImportError, e:
print "Import error in %s : %s" % (__name__, e)
import sys
sys.exit()
logger = logging.getLogger(__name__)
def init_logging(logfile=None, loglevel=logging.INFO):
"""
Sets logging configuration.
@param logfile: File to log messages. Default is None.
@param loglevel: Log level. Default is logging.INFO.
"""
format = '%(asctime)s - [%(threadName)s] %(filename)s:%(lineno)s - (%(levelname)s) %(message)s'
if logfile:
logging.basicConfig(filename=logfile, level=loglevel, format=format)
else:
logging.basicConfig(level=loglevel, format=format)
def init():
"""
Initializes VMController Host package.
First parses command line options. Then, creates config object from default cfg file.
Re-initializes config object if a config file is supplied and sets logger configuration.
Finally, uses dependency injection to bind objects to names.
"""
parser = OptionParser()
parser.add_option("-c", "--config", dest="configfile",
help="Read configuration from FILE. (Overrides default config file.)", metavar="FILE")
parser.add_option("-a", "--host", dest="xmlrpc_host",
help="Listen on specified address for XMLRPC interface (default 127.0.0.1)", metavar="ADDR")
parser.add_option("-p", "--port", dest="xmlrpc_port",
help="Listen on specified port for XMLRPC interface (default 50505)", type="int", metavar="PORT")
parser.add_option("-l", "--logfile", dest="logfile",
help="Log to specified file.", metavar="FILE")
parser.add_option("--debug", action="store_true", dest="debug", default=False,
help="Sets logging to debug (unless logging configured in config file).")
(options, args) = parser.parse_args()
config = init_config()
injector = inject.Injector()
inject.register(injector)
injector.bind('config', to=config)
injector.bind('stompEngine', to=HostStompEngine, scope=inject.appscope)
injector.bind('words', to=HostWords.getWords)
injector.bind('stompProtocol', to=StompProtocol, scope=inject.appscope)
injector.bind('subject', to=Host)
injector.bind('hvController', to=HyperVisorController)
init_config_file(options.configfile)
if options.xmlrpc_host is not None:
config.set('xmlrpc', 'host', options.xmlrpc_host)
if options.xmlrpc_port is not None:
config.set('xmlrpc', 'port', str(options.xmlrpc_port))
level = logging.DEBUG if options.debug else logging.INFO
init_logging(logfile=options.logfile, loglevel=level)
#debug_config(config)
def start_coilmq(config, server_event, tries=-1, delay=1, backoff=1.5):
"""
Starts CoilMQ broker.
@param config: Config for CoilMQ.
@param server_event: Event attached to multiprocessing manager.
@param tries: Maximum retries to start the server. Default -1 (infinite).
@param delay: Time to wait before next try to start broker. Default 1.
@param backoff: Factor to set delay. Default 1.5.
"""
m_tries = tries
m_delay = delay
m_server = None
try:
from coilmq.config import config as broker_config
import coilmq.start
except ImportError, e:
print "Import error: %s\nPlease check." % e
exit()
if config.has_section('broker'):
for (attribute, value) in config.items('broker'):
if attribute != 'name':
broker_config.set('coilmq', attribute, value)
logger.debug("[coilmq] %s = %s" % (attribute, value))
broker_server = None
while True:
try:
broker_server = coilmq.start.server_from_config(broker_config)
logger.info("Stomp server listening on %s:%s" % broker_server.server_address)
server_event.set()
broker_server.serve_forever()
except (KeyboardInterrupt, SystemExit):
logger.info("Stomp server stopped by user interrupt.")
raise SystemExit()
except IOError as ex:
logger.error("Exception while starting coilmq broker: '%s'", ex)
if m_tries != 0:
logger.debug("Retrying coilmq startup in %.1f seconds...", m_delay)
time.sleep(m_delay)
m_delay *= backoff
m_tries -= 1
else:
logger.debug("Ran out of trials (tried %d times) for coilmq startup. Giving up.", tries)
break
except Exception, e:
logger.error("Stomp server stopped due to error: %s" % e)
logger.exception(e)
raise SystemExit()
finally:
if broker_server: broker_server.server_close()
@inject.param('config')
def init_coilmq(config, brokerTimeout=60):
"""
    Initializes and starts the CoilMQ stomp broker as a lightweight (multiprocessing) process.
    @param config: Injected config object.
    @param brokerTimeout: Timeout (in seconds) to wait for the broker to come up. Default 60.
"""
manager = multiprocessing.Manager()
server_event = manager.Event()
broker = multiprocessing.Process(target=start_coilmq, args=(config, server_event))
broker.daemon = False
broker.name = 'VMController-Broker'
broker.start()
server_event.wait(brokerTimeout)
if not server_event.is_set():
logger.fatal("Broker not available after %.1f seconds. Giving up", brokerTimeout)
return -1
@inject.param('config')
def init_morbid(config):
"""
Starts up light weight, twisted based MorbidQ stomp broker.
@param config: Injected config object.
"""
try:
import morbid
except ImportError, e:
import sys
print "Import error: %s\nPlease check." % e
sys.exit()
morbid_factory = morbid.StompFactory(verbose=True)
broker_host = config.get('broker', 'host')
broker_port = int(config.get('broker', 'port'))
try:
reactor.listenTCP(broker_port, morbid_factory, interface=broker_host)
except:
logger.fatal("Unable to start Morbid, port may not be free. Exiting.")
import sys
sys.exit()
logger.info("Starting MorbidQ broker %s:%s", broker_host, broker_port)
@inject.param('config')
def start(config):
"""
Starts VMController Host.
@param config: The injected config object.
"""
broker_name = config.get('broker', 'name')
if broker_name == 'morbid':
init_morbid()
elif broker_name == 'coilmq':
init_coilmq()
else:
logger.fatal("No broker found... Exiting")
exit()
stompProtocolFactory = StompProtocolFactory()
xmlrpcService = HostXMLRPCService()
xmlrpcService.makeEngineAccesible()
host = config.get('broker', 'host')
port = int(config.get('broker', 'port'))
reactor.connectTCP(host, port, stompProtocolFactory)
reactor.run()
def main():
"""
Initializes and starts VMController Host.
"""
init()
logger.info("Welcome to VMController Host!")
start()
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt, SystemExit):
pass
except Exception, e:
logger.error("Server terminated due to error: %s" % e)
logger.exception(e)
|
dgquintas/vmcontroller.unstable
|
src/vmcontroller.host/vmcontroller/host/__main__.py
|
Python
|
bsd-3-clause
| 8,090 | 0.004203 |
import sys
out = sys.stdout
class Colors:
def black (self, fmt='', *args): self('\x1b[1;30m'+fmt+'\x1b[0m', *args)
def red (self, fmt='', *args): self('\x1b[1;31m'+fmt+'\x1b[0m', *args)
def green (self, fmt='', *args): self('\x1b[1;32m'+fmt+'\x1b[0m', *args)
def yellow (self, fmt='', *args): self('\x1b[1;33m'+fmt+'\x1b[0m', *args)
def blue (self, fmt='', *args): self('\x1b[1;34m'+fmt+'\x1b[0m', *args)
def purple (self, fmt='', *args): self('\x1b[1;35m'+fmt+'\x1b[0m', *args)
def cyan (self, fmt='', *args): self('\x1b[1;36m'+fmt+'\x1b[0m', *args)
def white (self, fmt='', *args): self('\x1b[1;37m'+fmt+'\x1b[0m', *args)
class PrintF(Colors):
def __call__(self, fmt='', *args):
out.write(fmt % args)
out.flush()
class WriteLine(Colors):
def __call__(self, fmt='', *args):
out.write(fmt % args)
out.write('\n')
def hexdump(blob, width=16, offset=0):
fmt = '%%.%dx: ' % len('%.x' % (len(blob) - 1))
while blob:
line = blob[:width]
blob = blob[width:]
printf.white(fmt, offset)
printf.cyan(' '.join('%.2x' % ord(c) for c in line))
printf(' ' * ((width-len(line))*3+1))
for c in line:
if ord(c) < 32 or ord(c) > 126:
printf.black('.')
else:
printf.white('%c', c)
writeln()
offset += width
__builtins__['printf'] = PrintF()
__builtins__['writeln'] = WriteLine()
__builtins__['hexdump'] = hexdump
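# Hedged usage sketch (not part of the original module): the assignments above
# are meant to expose printf, writeln and hexdump globally once this module has
# been imported, so calling code can do e.g.:
#
#   printf.green('loaded %d bytes\n', 42)
#   writeln.red('something went %s', 'wrong')
#   hexdump('GIF89a\x00\x10\x00\x10')   # offset / hex / ASCII dump, 16 bytes per row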
|
moertle/pyaas
|
pyaas/io.py
|
Python
|
mit
| 1,515 | 0.009241 |
# -*- coding: utf-8 -*-
"""
eve.methods.get
~~~~~~~~~~~~~~~
This module implements the API 'GET' methods, supported by both the
resources and single item endpoints.
:copyright: (c) 2017 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import math
import copy
import json
from flask import current_app as app, abort, request
from werkzeug import MultiDict
from .common import ratelimit, epoch, pre_event, resolve_embedded_fields, \
build_response_document, resource_link, document_link, last_updated
from eve.auth import requires_auth
from eve.utils import parse_request, home_link, querydef, config
from eve.versioning import synthesize_versioned_document, versioned_id_field, \
get_old_document, diff_document
@ratelimit()
@requires_auth('resource')
@pre_event
def get(resource, **lookup):
"""
Default function for handling GET requests, it has decorators for
rate limiting, authentication and for raising pre-request events. After the
    decorators are applied, it forwards the call to :func:`get_internal`.
.. versionadded:: 0.6.2
"""
return get_internal(resource, **lookup)
def get_internal(resource, **lookup):
""" Retrieves the resource documents that match the current request.
:param resource: the name of the resource.
.. versionchanged:: 0.6
Support for HEADER_TOTAL_COUNT returned with response header.
.. versionchanged:: 0.5
Support for customisable query parameters.
.. versionchanged:: 0.4
Add pagination info whatever the HATEOAS status.
'on_fetched' events now return the whole response (HATEOAS metafields
included.)
Replaced ID_FIELD by item_lookup_field on self link.
item_lookup_field will default to ID_FIELD if blank.
Changed ``on_fetch_*`` changed to ``on_fetched_*``.
.. versionchanged:: 0.3
Don't return 304 if resource is empty. Fixes #243.
Support for media fields.
When IF_MATCH is disabled, no etag is included in the payload.
When If-Modified-Since header is present, either no documents (304) or
all documents (200) are sent per the HTTP spec. Original behavior can be
achieved with:
/resource?where={"updated":{"$gt":"if-modified-since-date"}}
.. versionchanged:: 0.2
Use the new ITEMS configuration setting.
Raise 'on_pre_<method>' event.
Let cursor add extra info to response.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
Support for embeddable documents.
.. versionchanged:: 0.0.9
      Event hooks renamed to be more robust and consistent: 'on_getting'
renamed to 'on_fetch'.
.. versionchanged:: 0.0.8
'on_getting' and 'on_getting_<resource>' events are raised when
documents have been read from the database and are about to be sent to
the client.
.. versionchanged:: 0.0.6
Support for HEAD requests.
.. versionchanged:: 0.0.5
Support for user-restricted access to resources.
Support for LAST_UPDATED field missing from documents, because they were
created outside the API context.
.. versionchanged:: 0.0.4
Added the ``requires_auth`` decorator.
.. versionchanged:: 0.0.3
      Superfluous ``response`` container removed. Collection items wrapped
with ``_items``. Links wrapped with ``_links``. Links are now properly
JSON formatted.
"""
datasource = config.DOMAIN[resource]['datasource']
aggregation = datasource.get('aggregation')
if aggregation:
return _perform_aggregation(resource, aggregation['pipeline'],
aggregation['options'])
else:
return _perform_find(resource, lookup)
def _perform_aggregation(resource, pipeline, options):
"""
.. versionadded:: 0.7
"""
# TODO move most of this down to the Mongo layer?
# TODO experiment with cursor.batch_size as alternative pagination
# implementation
def parse_aggregation_stage(d, key, value):
for st_key, st_value in d.items():
if isinstance(st_value, dict):
parse_aggregation_stage(st_value, key, value)
if key == st_value:
d[st_key] = value
response = {}
documents = []
req = parse_request(resource)
req_pipeline = copy.deepcopy(pipeline)
if req.aggregation:
try:
query = json.loads(req.aggregation)
except ValueError:
abort(400, description='Aggregation query could not be parsed.')
for key, value in query.items():
if key[0] != '$':
pass
for stage in req_pipeline:
parse_aggregation_stage(stage, key, value)
if req.max_results > 1:
limit = {"$limit": req.max_results}
skip = {"$skip": (req.page - 1) * req.max_results}
req_pipeline.append(skip)
req_pipeline.append(limit)
cursor = app.data.aggregate(resource, req_pipeline, options)
for document in cursor:
documents.append(document)
response[config.ITEMS] = documents
# PyMongo's CommandCursor does not return a count, so we cannot
    # provide pagination/total count info as we do with a normal (non-aggregate)
# GET request.
return response, None, None, 200, []
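def _example_aggregation_placeholder_substitution():
    """ Illustrative sketch (assumption, not part of Eve's public API): mirrors
    the behaviour of parse_aggregation_stage() above. Any value in the
    configured pipeline that equals the placeholder key sent by the client
    (here the made-up name '$value') is replaced with the client's value.
    """
    def substitute(stage, key, value):
        for st_key, st_value in stage.items():
            if isinstance(st_value, dict):
                substitute(st_value, key, value)
            if key == st_value:
                stage[st_key] = value
    stage = {"$match": {"rating": {"$gte": "$value"}}}
    substitute(stage, "$value", 4)
    assert stage == {"$match": {"rating": {"$gte": 4}}}
    return stage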
def _perform_find(resource, lookup):
"""
.. versionadded:: 0.7
"""
documents = []
response = {}
etag = None
req = parse_request(resource)
embedded_fields = resolve_embedded_fields(resource, req)
# continue processing the full request
last_update = epoch()
# If-Modified-Since disabled on collections (#334)
req.if_modified_since = None
cursor = app.data.find(resource, req, lookup)
# If soft delete is enabled, data.find will not include items marked
# deleted unless req.show_deleted is True
for document in cursor:
build_response_document(document, resource, embedded_fields)
documents.append(document)
# build last update for entire response
if document[config.LAST_UPDATED] > last_update:
last_update = document[config.LAST_UPDATED]
status = 200
headers = []
last_modified = last_update if last_update > epoch() else None
response[config.ITEMS] = documents
if config.OPTIMIZE_PAGINATION_FOR_SPEED:
count = None
else:
count = cursor.count(with_limit_and_skip=False)
headers.append((config.HEADER_TOTAL_COUNT, count))
if config.DOMAIN[resource]['hateoas']:
response[config.LINKS] = _pagination_links(resource, req, count)
# add pagination info
if config.DOMAIN[resource]['pagination']:
response[config.META] = _meta_links(req, count)
# notify registered callback functions. Please note that, should the
# functions modify the documents, the last_modified and etag won't be
# updated to reflect the changes (they always reflect the documents
# state on the database.)
getattr(app, "on_fetched_resource")(resource, response)
getattr(app, "on_fetched_resource_%s" % resource)(response)
# the 'extra' cursor field, if present, will be added to the response.
# Can be used by Eve extensions to add extra, custom data to any
# response.
if hasattr(cursor, 'extra'):
getattr(cursor, 'extra')(response)
return response, last_modified, etag, status, headers
@ratelimit()
@requires_auth('item')
@pre_event
def getitem(resource, **lookup):
"""
Default function for handling GET requests to document endpoints, it has
decorators for rate limiting, authentication and for raising pre-request
events. After the decorators are applied forwards to call to
:func:`getitem_internal`
.. versionadded:: 0.6.2
"""
return getitem_internal(resource, **lookup)
def getitem_internal(resource, **lookup):
"""
:param resource: the name of the resource to which the document belongs.
:param **lookup: the lookup query.
.. versionchanged:: 0.6
Handle soft deleted documents
.. versionchanged:: 0.5
Allow ``?version=all`` requests to fire ``on_fetched_*`` events.
Create pagination links for document versions. (#475)
Pagination links reflect current query. (#464)
.. versionchanged:: 0.4
      HATEOAS link contains the business unit value even when
regexes have been configured for the resource endpoint.
'on_fetched' now returns the whole response (HATEOAS metafields
included.)
Support for document versioning.
Changed ``on_fetch_*`` changed to ``on_fetched_*``.
.. versionchanged:: 0.3
Support for media fields.
When IF_MATCH is disabled, no etag is included in the payload.
.. versionchanged:: 0.1.1
      Support for Embedded Resource Serialization.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
    .. versionchanged:: 0.0.8
'on_getting_item' event is raised when a document has been read from the
database and is about to be sent to the client.
.. versionchanged:: 0.0.7
Support for Rate-Limiting.
.. versionchanged:: 0.0.6
Support for HEAD requests.
.. versionchanged:: 0.0.6
ETag added to payload.
.. versionchanged:: 0.0.5
Support for user-restricted access to resources.
Support for LAST_UPDATED field missing from documents, because they were
created outside the API context.
.. versionchanged:: 0.0.4
Added the ``requires_auth`` decorator.
.. versionchanged:: 0.0.3
      Superfluous ``response`` container removed. Links wrapped with
``_links``. Links are now properly JSON formatted.
"""
req = parse_request(resource)
resource_def = config.DOMAIN[resource]
embedded_fields = resolve_embedded_fields(resource, req)
soft_delete_enabled = config.DOMAIN[resource]['soft_delete']
if soft_delete_enabled:
# GET requests should always fetch soft deleted documents from the db
# They are handled and included in 404 responses below.
req.show_deleted = True
document = app.data.find_one(resource, req, **lookup)
if not document:
abort(404)
response = {}
etag = None
version = request.args.get(config.VERSION_PARAM)
latest_doc = None
cursor = None
# calculate last_modified before get_old_document rolls back the document,
# allowing us to invalidate the cache when _latest_version changes
last_modified = last_updated(document)
# synthesize old document version(s)
if resource_def['versioning'] is True:
latest_doc = document
document = get_old_document(
resource, req, lookup, document, version)
# meld into response document
build_response_document(document, resource, embedded_fields, latest_doc)
if config.IF_MATCH:
etag = document[config.ETAG]
if resource_def['versioning'] is True:
# In order to keep the LATEST_VERSION field up to date in client
# caches, changes to the latest version should invalidate cached
            # copies of previous versions. Incorporate the latest version into
# versioned document ETags on the fly to ensure 'If-None-Match'
# comparisons support this caching behavior.
etag += str(document[config.LATEST_VERSION])
# check embedded fields resolved in build_response_document() for more
# recent last updated timestamps. We don't want to respond 304 if embedded
# fields have changed
for field in embedded_fields:
embedded_document = document.get(field)
if isinstance(embedded_document, dict):
embedded_last_updated = last_updated(embedded_document)
if embedded_last_updated > last_modified:
last_modified = embedded_last_updated
# facilitate client caching by returning a 304 when appropriate
cache_validators = {True: 0, False: 0}
if req.if_modified_since:
cache_valid = (last_modified <= req.if_modified_since)
cache_validators[cache_valid] += 1
if req.if_none_match:
cache_valid = (etag == req.if_none_match)
cache_validators[cache_valid] += 1
# If all cache validators are true, return 304
if (cache_validators[True] > 0) and (cache_validators[False] == 0):
return {}, last_modified, etag, 304
if version == 'all' or version == 'diffs':
# find all versions
lookup[versioned_id_field(resource_def)] \
= lookup[resource_def['id_field']]
del lookup[resource_def['id_field']]
if version == 'diffs' or req.sort is None:
# default sort for 'all', required sort for 'diffs'
req.sort = '[("%s", 1)]' % config.VERSION
req.if_modified_since = None # we always want the full history here
cursor = app.data.find(resource + config.VERSIONS, req, lookup)
# build all versions
documents = []
if cursor.count() == 0:
# this is the scenario when the document existed before
# document versioning got turned on
documents.append(latest_doc)
else:
last_document = {}
# if we aren't starting on page 1, then we need to init last_doc
if version == 'diffs' and req.page > 1:
# grab the last document on the previous page to diff from
last_version = cursor[0][app.config['VERSION']] - 1
last_document = get_old_document(
resource, req, lookup, latest_doc, last_version)
for i, document in enumerate(cursor):
document = synthesize_versioned_document(
latest_doc, document, resource_def)
build_response_document(
document, resource, embedded_fields, latest_doc)
if version == 'diffs':
if i == 0:
documents.append(document)
else:
documents.append(diff_document(
resource_def, last_document, document))
last_document = document
else:
documents.append(document)
# add documents to response
if config.DOMAIN[resource]['hateoas']:
response[config.ITEMS] = documents
else:
response = documents
elif soft_delete_enabled and document.get(config.DELETED) is True:
# This document was soft deleted. Respond with 404 and the deleted
# version of the document.
        document[config.STATUS] = config.STATUS_ERR
document[config.ERROR] = {
'code': 404,
'message': 'The requested URL was not found on this server.'
}
return document, last_modified, etag, 404
else:
response = document
# extra hateoas links
if config.DOMAIN[resource]['hateoas']:
# use the id of the latest document for multi-document requests
if cursor:
count = cursor.count(with_limit_and_skip=False)
response[config.LINKS] = \
_pagination_links(resource, req, count,
latest_doc[resource_def['id_field']])
if config.DOMAIN[resource]['pagination']:
response[config.META] = _meta_links(req, count)
else:
response[config.LINKS] = \
_pagination_links(resource, req, None,
response[resource_def['id_field']])
# callbacks not supported on version diffs because of partial documents
if version != 'diffs':
# TODO: callbacks not currently supported with ?version=all
# notify registered callback functions. Please note that, should
# the functions modify the document, last_modified and etag
# won't be updated to reflect the changes (they always reflect the
# documents state on the database).
if resource_def['versioning'] is True and version == 'all':
versions = response
if config.DOMAIN[resource]['hateoas']:
versions = response[config.ITEMS]
for version_item in versions:
getattr(app, "on_fetched_item")(resource, version_item)
getattr(app, "on_fetched_item_%s" % resource)(version_item)
else:
getattr(app, "on_fetched_item")(resource, response)
getattr(app, "on_fetched_item_%s" % resource)(response)
return response, last_modified, etag, 200
def _pagination_links(resource, req, document_count, document_id=None):
""" Returns the appropriate set of resource links depending on the
current page and the total number of documents returned by the query.
:param resource: the resource name.
    :param req: an instance of :class:`eve.utils.ParsedRequest`.
:param document_count: the number of documents returned by the query.
:param document_id: the document id (used for versions). Defaults to None.
.. versionchanged:: 0.5
Create pagination links given a document ID to allow paginated versions
pages (#475).
Pagination links reflect current query. (#464)
.. versionchanged:: 0.4
      HATEOAS link contains the business unit value even when
regexes have been configured for the resource endpoint.
.. versionchanged:: 0.0.8
Link to last page is provided if pagination is enabled (and the current
page is not the last one).
.. versionchanged:: 0.0.7
Support for Rate-Limiting.
.. versionchanged:: 0.0.5
Support for optional pagination.
.. versionchanged:: 0.0.3
JSON links
"""
version = None
if config.DOMAIN[resource]['versioning'] is True:
version = request.args.get(config.VERSION_PARAM)
other_params = _other_params(req.args)
# construct the default links
q = querydef(req.max_results, req.where, req.sort, version, req.page,
other_params)
resource_title = config.DOMAIN[resource]['resource_title']
_links = {'parent': home_link(),
'self': {'title': resource_title,
'href': resource_link()}}
# change links if document ID is given
if document_id:
_links['self'] = document_link(resource, document_id)
_links['collection'] = {'title': resource_title,
'href': '%s%s' % (resource_link(), q)}
# make more specific links for versioned requests
if version in ('all', 'diffs'):
_links['parent'] = {'title': resource_title,
'href': resource_link()}
_links['collection'] = document_link(resource, document_id)
elif version:
_links['parent'] = document_link(resource, document_id)
_links['collection'] = {'title': resource_title,
'href': '%s?version=all'
% _links['parent']['href']}
# modify the self link to add query params or version number
if document_count:
_links['self']['href'] = '%s%s' % (_links['self']['href'], q)
elif not document_count and version and version not in ('all', 'diffs'):
_links['self'] = document_link(resource, document_id, version)
# create pagination links
if config.DOMAIN[resource]['pagination']:
# strip any queries from the self link if present
_pagination_link = _links['self']['href'].split('?')[0]
if (req.page * req.max_results < (document_count or 0) or
config.OPTIMIZE_PAGINATION_FOR_SPEED):
q = querydef(req.max_results, req.where, req.sort, version,
req.page + 1, other_params)
_links['next'] = {'title': 'next page', 'href': '%s%s' %
(_pagination_link, q)}
if document_count:
last_page = int(math.ceil(document_count / float(
req.max_results)))
q = querydef(req.max_results, req.where, req.sort, version,
last_page, other_params)
_links['last'] = {'title': 'last page', 'href': '%s%s' % (
_pagination_link, q)}
if req.page > 1:
q = querydef(req.max_results, req.where, req.sort, version,
req.page - 1, other_params)
_links['prev'] = {'title': 'previous page', 'href': '%s%s' %
(_pagination_link, q)}
return _links
def _other_params(args):
""" Returns a multidict of params that are not used internally by Eve.
:param args: multidict containing the request parameters
"""
default_params = [config.QUERY_WHERE, config.QUERY_SORT,
config.QUERY_PAGE, config.QUERY_MAX_RESULTS,
config.QUERY_EMBEDDED, config.QUERY_PROJECTION]
return MultiDict((key, value) for key, values in args.lists()
for value in values if key not in default_params)
def _meta_links(req, count):
    """ Returns the meta links for a paginated query.
:param req: parsed request object.
:param count: total number of documents in a query.
.. versionadded:: 0.5
"""
meta = {
config.QUERY_PAGE: req.page,
config.QUERY_MAX_RESULTS: req.max_results,
}
if config.OPTIMIZE_PAGINATION_FOR_SPEED is False:
meta['total'] = count
return meta
|
bcrochet/eve
|
eve/methods/get.py
|
Python
|
bsd-3-clause
| 21,656 | 0 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class WebofknowledgePipeline(object):
def process_item(self, item, spider):
return item
|
alabarga/wos-scrapy
|
webofknowledge/pipelines.py
|
Python
|
gpl-2.0
| 294 | 0 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import quote
import logging
from jinja2 import TemplateSyntaxError
from flexget import plugin
from flexget.event import event
from flexget.utils.search import normalize_unicode
log = logging.getLogger('search_rss')
class SearchRSS(object):
"""A generic search plugin that can use rss based search feeds. Configure it like rss
    plugin, but include {{search_term}} in the url where the search term should go."""
schema = {'$ref': '/schema/plugin/rss'}
def search(self, task, entry, config=None):
from flexget.utils.template import environment
search_strings = [quote(normalize_unicode(s).encode('utf-8'))
for s in entry.get('search_strings', [entry['title']])]
rss_plugin = plugin.get_plugin_by_name('rss')
entries = set()
rss_config = rss_plugin.instance.build_config(config)
try:
template = environment.from_string(rss_config['url'])
except TemplateSyntaxError as e:
raise plugin.PluginError('Invalid jinja template as rss url: %s' % e)
rss_config['all_entries'] = True
for search_string in search_strings:
rss_config['url'] = template.render({'search_term': search_string})
# TODO: capture some other_fields to try to find seed/peer/content_size numbers?
try:
results = rss_plugin.phase_handlers['input'](task, rss_config)
except plugin.PluginError as e:
log.error('Error attempting to get rss for %s: %s', rss_config['url'], e)
else:
entries.update(results)
return entries
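def _example_search_url_rendering():
    """Illustrative sketch (assumptions: the example URL and the plain jinja2
    Environment; not part of FlexGet). Shows what the configured rss url is
    expected to look like: it must contain the {{search_term}} placeholder,
    which search() above fills in with the URL-quoted search string."""
    from jinja2 import Environment
    template = Environment().from_string('http://example.org/rss?q={{search_term}}')
    rendered = template.render({'search_term': quote(u'some show s01e01'.encode('utf-8'))})
    # rendered == 'http://example.org/rss?q=some%20show%20s01e01'
    return rendered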
@event('plugin.register')
def register_plugin():
plugin.register(SearchRSS, 'search_rss', groups=['search'], api_ver=2)
|
drwyrm/Flexget
|
flexget/plugins/sites/rss.py
|
Python
|
mit
| 1,942 | 0.00309 |
from . import visualize_classic_binning
from . import visualize_tree_binning
from . import visualize_llh
from . import visualize_model
__all__ = ('visualize_classic_binning',
'visualize_llh',
'visualize_tree_binning',
'visualize_model')
|
mbrner/funfolding
|
funfolding/visualization/__init__.py
|
Python
|
mit
| 272 | 0 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Daniel Campos (danielcampos@avanzosc.es) Date: 07/10/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import models
from . import wizard
|
InakiZabala/odoomrp-wip
|
product_pricelist_import/__init__.py
|
Python
|
agpl-3.0
| 966 | 0 |
import sys
import struct
import logging
import ibapi
from ibapi.client import EClient
from ibapi.wrapper import EWrapper, iswrapper
import PyQt5.Qt as qt
import PyQt5.QtNetwork as qtnetwork
import tws_async.util as util
util.allowCtrlC()
__all__ = ['TWSClientQt', 'iswrapper']
class TWSClientQt(EWrapper, EClient):
"""
Version of ibapi.client.EClient that integrates with the Qt event loop.
"""
def __init__(self):
EClient.__init__(self, wrapper=self)
self.qApp = qt.QApplication.instance() or qt.QApplication(sys.argv)
self.readyTrigger = Trigger()
self._logger = logging.getLogger(__class__.__name__)
def reset(self):
EClient.reset(self)
self._data = b''
self._reqIdSeq = 0
def run(self):
self.qApp.exec_()
def connect(self, host, port, clientId, asyncConnect=False):
self._logger.info('Connecting to {}:{} with clientId {}...'.
format(host, port, clientId))
self.host = host
self.port = port
self.clientId = clientId
self.conn = TWSConnection(host, port)
self.conn.connect()
self.conn.socket.connected.connect(self._onSocketConnected)
        self.conn.socket.disconnected.connect(self._onSocketDisconnected)
self.conn.socket.readyRead.connect(self._onSocketReadyRead)
self.conn.socket.error.connect(self._onSocketError)
self.setConnState(EClient.CONNECTING)
if not asyncConnect:
self.readyTrigger.wait()
def getReqId(self) -> int:
"""
Get new request ID.
"""
assert self._reqIdSeq
newId = self._reqIdSeq
self._reqIdSeq += 1
return newId
def dataHandlingPre(self):
pass
def dataHandlingPost(self):
pass
def _prefix(self, msg):
# prefix a message with its length
return struct.pack('>I', len(msg)) + msg
def _onSocketConnected(self):
# start handshake
msg = b'API\0'
msg += self._prefix(b'v%d..%d' % (
ibapi.server_versions.MIN_CLIENT_VER,
ibapi.server_versions.MAX_CLIENT_VER))
self.conn.sendMsg(msg)
self.decoder = ibapi.decoder.Decoder(self.wrapper, None)
    def _onSocketDisconnected(self):
EClient.disconnect(self)
def _onSocketError(self, socketError):
if self.conn.socket:
self._logger.error(self.conn.socket.errorString())
def _onSocketReadyRead(self):
self.dataHandlingPre()
self._data += bytes(self.conn.socket.readAll())
while True:
if len(self._data) <= 4:
break
# 4 byte prefix tells the message length
msgEnd = 4 + struct.unpack('>I', self._data[:4])[0]
if len(self._data) < msgEnd:
# insufficient data for now
break
msg = self._data[4:msgEnd]
self._data = self._data[msgEnd:]
fields = msg.split(b'\0')
fields.pop() # pop off last empty element
if not self.serverVersion_ and len(fields) == 2:
# this concludes the handshake
version, self.connTime = fields
self.serverVersion_ = int(version)
self.decoder.serverVersion = self.serverVersion_
self.setConnState(EClient.CONNECTED)
self.startApi()
self.wrapper.connectAck()
self._logger.info('Logged on to server version {}'.
format(self.serverVersion_))
else:
# snoop for next valid id response,
# it signals readiness of the client
if fields[0] == b'9':
_, _, validId = fields
self._reqIdSeq = int(validId)
self.readyTrigger.go()
# decode and handle the message
self.decoder.interpret(fields)
self.dataHandlingPost()
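def _example_length_prefix():
    """Illustrative sketch (not part of the public API; the sample payload is
    made up): demonstrates the 4-byte big-endian length prefix that _prefix()
    and _onSocketReadyRead() above use to frame messages on the wire."""
    msg = b'API\0'
    framed = struct.pack('>I', len(msg)) + msg
    assert framed == b'\x00\x00\x00\x04API\x00'
    length = struct.unpack('>I', framed[:4])[0]
    assert framed[4:4 + length] == msg
    return framed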
class TWSConnection:
"""
Replacement for ibapi.connection.Connection that uses a QTcpSocket.
"""
def __init__(self, host, port):
self.host = host
self.port = port
self.socket = None
def connect(self):
self.socket = qtnetwork.QTcpSocket()
# set TCP_NODELAY (disable Nagle's algorithm)
self.socket.setSocketOption(
qtnetwork.QAbstractSocket.LowDelayOption, True)
self.socket.connectToHost(self.host, self.port)
def disconnect(self):
self.socket.close()
self.socket = None
def isConnected(self):
return self.socket is not None
def sendMsg(self, msg):
self.socket.write(msg)
self.socket.flush()
class Trigger(qt.QObject):
"""
Wait synchronously on a trigger.
"""
trigger = qt.pyqtSignal()
def __init__(self):
qt.QObject.__init__(self)
def go(self):
self.trigger.emit()
def wait(self, timeout=5000):
spy = qt.QSignalSpy(self.trigger)
spy.wait(timeout)
class TWS_TestQt(TWSClientQt):
"""
Test to connect to a running TWS or gateway server.
"""
def __init__(self):
TWSClientQt.__init__(self)
@iswrapper
def updateAccountValue(self, key: str, val: str, currency: str,
accountName: str):
print('Account update: {} = {} {}'.format(key, val, currency))
if __name__ == '__main__':
util.logToConsole()
tws = TWS_TestQt()
tws.connect(host='127.0.0.1', port=7497, clientId=1)
tws.reqAccountUpdates(1, '')
tws.run()
|
erdewit/tws_async
|
tws_async/twsclientqt.py
|
Python
|
unlicense
| 5,611 | 0.000535 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import copy
import sys
import time
from webkitpy.layout_tests.port import DeviceFailure, Driver, DriverOutput, Port
from webkitpy.layout_tests.port.base import VirtualTestSuite
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.models import test_run_results
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.crashlogs import CrashLogs
# This sets basic expectations for a test. Each individual expectation
# can be overridden by a keyword argument in TestList.add().
class TestInstance(object):
def __init__(self, name):
self.name = name
self.base = name[(name.rfind("/") + 1):name.rfind(".")]
self.crash = False
self.web_process_crash = False
self.exception = False
self.keyboard = False
self.error = ''
self.timeout = False
self.is_reftest = False
self.device_failure = False
# The values of each field are treated as raw byte strings. They
# will be converted to unicode strings where appropriate using
# FileSystem.read_text_file().
self.actual_text = self.base + '-txt'
self.actual_checksum = self.base + '-checksum'
# We add the '\x8a' for the image file to prevent the value from
# being treated as UTF-8 (the character is invalid)
self.actual_image = self.base + '\x8a' + '-png' + 'tEXtchecksum\x00' + self.actual_checksum
self.expected_text = self.actual_text
self.expected_image = self.actual_image
self.actual_audio = None
self.expected_audio = None
# This is an in-memory list of tests, what we want them to produce, and
# what we want to claim are the expected results.
class TestList(object):
def __init__(self):
self.tests = {}
def add(self, name, **kwargs):
test = TestInstance(name)
for key, value in kwargs.items():
test.__dict__[key] = value
self.tests[name] = test
def add_reftest(self, name, reference_name, same_image, crash=False):
self.add(name, actual_checksum='xxx', actual_image='XXX', is_reftest=True, crash=crash)
if same_image:
self.add(reference_name, actual_checksum='xxx', actual_image='XXX', is_reftest=True)
else:
self.add(reference_name, actual_checksum='yyy', actual_image='YYY', is_reftest=True)
def keys(self):
return self.tests.keys()
def __contains__(self, item):
return item in self.tests
def __getitem__(self, item):
return self.tests[item]
#
# These numbers may need to be updated whenever we add or delete tests. This includes virtual tests.
#
TOTAL_TESTS = 114
TOTAL_SKIPS = 29
UNEXPECTED_PASSES = 1
UNEXPECTED_FAILURES = 25
def unit_test_list():
tests = TestList()
tests.add('failures/expected/crash.html', crash=True)
tests.add('failures/expected/exception.html', exception=True)
tests.add('failures/expected/device_failure.html', device_failure=True)
tests.add('failures/expected/timeout.html', timeout=True)
tests.add('failures/expected/missing_text.html', expected_text=None)
tests.add('failures/expected/needsrebaseline.html', actual_text='needsrebaseline text')
tests.add('failures/expected/needsmanualrebaseline.html', actual_text='needsmanualrebaseline text')
tests.add('failures/expected/image.html',
actual_image='image_fail-pngtEXtchecksum\x00checksum_fail',
expected_image='image-pngtEXtchecksum\x00checksum-png')
tests.add('failures/expected/image_checksum.html',
actual_checksum='image_checksum_fail-checksum',
actual_image='image_checksum_fail-png')
tests.add('failures/expected/audio.html',
actual_audio=base64.b64encode('audio_fail-wav'), expected_audio='audio-wav',
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/keyboard.html', keyboard=True)
tests.add('failures/expected/missing_check.html',
expected_image='missing_check-png')
tests.add('failures/expected/missing_image.html', expected_image=None)
tests.add('failures/expected/missing_audio.html', expected_audio=None,
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/missing_text.html', expected_text=None)
tests.add('failures/expected/newlines_leading.html',
expected_text="\nfoo\n", actual_text="foo\n")
tests.add('failures/expected/newlines_trailing.html',
expected_text="foo\n\n", actual_text="foo\n")
tests.add('failures/expected/newlines_with_excess_CR.html',
expected_text="foo\r\r\r\n", actual_text="foo\n")
tests.add('failures/expected/testharness.html',
actual_text='This is a testharness.js-based test.\nFAIL: assert fired\n.Harness: the test ran to completion.\n\n', expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/text.html', actual_text='text_fail-png')
tests.add('failures/expected/crash_then_text.html')
tests.add('failures/expected/skip_text.html', actual_text='text diff')
tests.add('failures/flaky/text.html')
tests.add('failures/unexpected/missing_text.html', expected_text=None)
tests.add('failures/unexpected/missing_check.html', expected_image='missing-check-png')
tests.add('failures/unexpected/missing_image.html', expected_image=None)
tests.add('failures/unexpected/missing_render_tree_dump.html', actual_text="""layer at (0,0) size 800x600
RenderView at (0,0) size 800x600
layer at (0,0) size 800x34
RenderBlock {HTML} at (0,0) size 800x34
RenderBody {BODY} at (8,8) size 784x18
RenderText {#text} at (0,0) size 133x18
text run at (0,0) width 133: "This is an image test!"
""", expected_text=None)
tests.add('failures/unexpected/crash.html', crash=True)
tests.add('failures/unexpected/crash-with-stderr.html', crash=True,
error="mock-std-error-output")
tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True,
error="mock-std-error-output")
tests.add('failures/unexpected/pass.html')
tests.add('failures/unexpected/text-checksum.html',
actual_text='text-checksum_fail-txt',
actual_checksum='text-checksum_fail-checksum')
tests.add('failures/unexpected/text-image-checksum.html',
actual_text='text-image-checksum_fail-txt',
actual_image='text-image-checksum_fail-pngtEXtchecksum\x00checksum_fail',
actual_checksum='text-image-checksum_fail-checksum')
tests.add('failures/unexpected/checksum-with-matching-image.html',
actual_checksum='text-image-checksum_fail-checksum')
tests.add('failures/unexpected/skip_pass.html')
tests.add('failures/unexpected/text.html', actual_text='text_fail-txt')
tests.add('failures/unexpected/text_then_crash.html')
tests.add('failures/unexpected/timeout.html', timeout=True)
tests.add('http/tests/passes/text.html')
tests.add('http/tests/passes/image.html')
tests.add('http/tests/ssl/text.html')
tests.add('passes/args.html')
tests.add('passes/error.html', error='stuff going to stderr')
tests.add('passes/image.html')
tests.add('passes/audio.html',
actual_audio=base64.b64encode('audio-wav'), expected_audio='audio-wav',
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('passes/platform_image.html')
tests.add('passes/checksum_in_image.html',
expected_image='tEXtchecksum\x00checksum_in_image-checksum')
tests.add('passes/skipped/skip.html')
tests.add('passes/testharness.html',
actual_text='This is a testharness.js-based test.\nPASS: assert is fine\nHarness: the test ran to completion.\n\n', expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
# Note that here the checksums don't match but the images do, so this test passes "unexpectedly".
# See https://bugs.webkit.org/show_bug.cgi?id=69444 .
tests.add('failures/unexpected/checksum.html', actual_checksum='checksum_fail-checksum')
# Text output files contain "\r\n" on Windows. This may be
# helpfully filtered to "\r\r\n" by our Python/Cygwin tooling.
tests.add('passes/text.html',
expected_text='\nfoo\n\n', actual_text='\nfoo\r\n\r\r\n')
# For reftests.
tests.add_reftest('passes/reftest.html', 'passes/reftest-expected.html', same_image=True)
# This adds a different virtual reference to ensure that that also works.
tests.add('virtual/passes/reftest-expected.html', actual_checksum='xxx', actual_image='XXX', is_reftest=True)
tests.add_reftest('passes/mismatch.html', 'passes/mismatch-expected-mismatch.html', same_image=False)
tests.add_reftest('passes/svgreftest.svg', 'passes/svgreftest-expected.svg', same_image=True)
tests.add_reftest('passes/xhtreftest.xht', 'passes/xhtreftest-expected.html', same_image=True)
tests.add_reftest('passes/phpreftest.php', 'passes/phpreftest-expected-mismatch.svg', same_image=False)
tests.add_reftest('failures/expected/reftest.html', 'failures/expected/reftest-expected.html', same_image=False)
tests.add_reftest('failures/expected/mismatch.html', 'failures/expected/mismatch-expected-mismatch.html', same_image=True)
tests.add_reftest('failures/unexpected/crash-reftest.html', 'failures/unexpected/crash-reftest-expected.html', same_image=True, crash=True)
tests.add_reftest('failures/unexpected/reftest.html', 'failures/unexpected/reftest-expected.html', same_image=False)
tests.add_reftest('failures/unexpected/mismatch.html', 'failures/unexpected/mismatch-expected-mismatch.html', same_image=True)
tests.add('failures/unexpected/reftest-nopixel.html', actual_checksum=None, actual_image=None, is_reftest=True)
tests.add('failures/unexpected/reftest-nopixel-expected.html', actual_checksum=None, actual_image=None, is_reftest=True)
tests.add('reftests/foo/test.html')
tests.add('reftests/foo/test-ref.html')
tests.add('reftests/foo/multiple-match-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-match-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-mismatch-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-mismatch-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-both-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-both-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/matching-ref.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/mismatching-ref.html', actual_checksum='def', actual_image='def')
tests.add('reftests/foo/second-mismatching-ref.html', actual_checksum='ghi', actual_image='ghi')
# The following files shouldn't be treated as reftests
tests.add_reftest('reftests/foo/unlistedtest.html', 'reftests/foo/unlistedtest-expected.html', same_image=True)
tests.add('reftests/foo/reference/bar/common.html')
tests.add('reftests/foo/reftest/bar/shared.html')
tests.add('websocket/tests/passes/text.html')
# For testing that we don't run tests under platform/. Note that these don't contribute to TOTAL_TESTS.
tests.add('platform/test-mac-leopard/http/test.html')
tests.add('platform/test-win-win7/http/test.html')
# For testing if perf tests are running in a locked shard.
tests.add('perf/foo/test.html')
tests.add('perf/foo/test-ref.html')
# For testing --pixel-test-directories.
tests.add('failures/unexpected/pixeldir/image_in_pixeldir.html',
actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
tests.add('failures/unexpected/image_not_in_pixeldir.html',
actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')
# For testing that virtual test suites don't expand names containing themselves
# See webkit.org/b/97925 and base_unittest.PortTest.test_tests().
tests.add('passes/test-virtual-passes.html')
tests.add('passes/passes/test-virtual-passes.html')
return tests
# Here we use a non-standard location for the layout tests, to ensure that
# this works. The path contains a '.' in the name because we've seen bugs
# related to this before.
LAYOUT_TEST_DIR = '/test.checkout/LayoutTests'
PERF_TEST_DIR = '/test.checkout/PerformanceTests'
# Here we synthesize an in-memory filesystem from the test list
# in order to fully control the test output and to demonstrate that
# we don't need a real filesystem to run the tests.
def add_unit_tests_to_mock_filesystem(filesystem):
# Add the test_expectations file.
filesystem.maybe_make_directory('/mock-checkout/LayoutTests')
if not filesystem.exists('/mock-checkout/LayoutTests/TestExpectations'):
filesystem.write_text_file('/mock-checkout/LayoutTests/TestExpectations', """
Bug(test) failures/expected/crash.html [ Crash ]
Bug(test) failures/expected/crash_then_text.html [ Failure ]
Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
Bug(test) failures/expected/audio.html [ Failure ]
Bug(test) failures/expected/image_checksum.html [ ImageOnlyFailure ]
Bug(test) failures/expected/mismatch.html [ ImageOnlyFailure ]
Bug(test) failures/expected/missing_check.html [ Missing Pass ]
Bug(test) failures/expected/missing_image.html [ Missing Pass ]
Bug(test) failures/expected/missing_audio.html [ Missing Pass ]
Bug(test) failures/expected/missing_text.html [ Missing Pass ]
Bug(test) failures/expected/newlines_leading.html [ Failure ]
Bug(test) failures/expected/newlines_trailing.html [ Failure ]
Bug(test) failures/expected/newlines_with_excess_CR.html [ Failure ]
Bug(test) failures/expected/reftest.html [ ImageOnlyFailure ]
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/testharness.html [ Failure ]
Bug(test) failures/expected/timeout.html [ Timeout ]
Bug(test) failures/expected/keyboard.html [ WontFix ]
Bug(test) failures/expected/exception.html [ WontFix ]
Bug(test) failures/expected/device_failure.html [ WontFix ]
Bug(test) failures/unexpected/pass.html [ Failure ]
Bug(test) passes/skipped/skip.html [ Skip ]
Bug(test) passes/text.html [ Pass ]
""")
filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/reftests/foo')
filesystem.write_text_file(LAYOUT_TEST_DIR + '/reftests/foo/reftest.list', """
== test.html test-ref.html
== multiple-match-success.html mismatching-ref.html
== multiple-match-success.html matching-ref.html
== multiple-match-failure.html mismatching-ref.html
== multiple-match-failure.html second-mismatching-ref.html
!= multiple-mismatch-success.html mismatching-ref.html
!= multiple-mismatch-success.html second-mismatching-ref.html
!= multiple-mismatch-failure.html mismatching-ref.html
!= multiple-mismatch-failure.html matching-ref.html
== multiple-both-success.html matching-ref.html
== multiple-both-success.html mismatching-ref.html
!= multiple-both-success.html second-mismatching-ref.html
== multiple-both-failure.html matching-ref.html
!= multiple-both-failure.html second-mismatching-ref.html
!= multiple-both-failure.html matching-ref.html
""")
# FIXME: This test was only being ignored because of missing a leading '/'.
# Fixing the typo causes several tests to assert, so disabling the test entirely.
# Add in a file should be ignored by port.find_test_files().
#files[LAYOUT_TEST_DIR + '/userscripts/resources/iframe.html'] = 'iframe'
def add_file(test, suffix, contents):
dirname = filesystem.join(LAYOUT_TEST_DIR, test.name[0:test.name.rfind('/')])
base = test.base
filesystem.maybe_make_directory(dirname)
filesystem.write_binary_file(filesystem.join(dirname, base + suffix), contents)
# Add each test and the expected output, if any.
test_list = unit_test_list()
for test in test_list.tests.values():
add_file(test, test.name[test.name.rfind('.'):], '')
if test.is_reftest:
continue
if test.actual_audio:
add_file(test, '-expected.wav', test.expected_audio)
continue
add_file(test, '-expected.txt', test.expected_text)
add_file(test, '-expected.png', test.expected_image)
filesystem.write_text_file(filesystem.join(LAYOUT_TEST_DIR, 'virtual', 'passes', 'args-expected.txt'), 'args-txt --virtual-arg')
# Clear the list of written files so that we can watch what happens during testing.
filesystem.clear_written_files()
class TestPort(Port):
port_name = 'test'
default_port_name = 'test-mac-leopard'
"""Test implementation of the Port interface."""
ALL_BASELINE_VARIANTS = (
'test-linux-x86_64',
'test-mac-snowleopard', 'test-mac-leopard',
'test-win-win7', 'test-win-xp',
)
FALLBACK_PATHS = {
'xp': ['test-win-win7', 'test-win-xp'],
'win7': ['test-win-win7'],
'leopard': ['test-mac-leopard', 'test-mac-snowleopard'],
'snowleopard': ['test-mac-snowleopard'],
'lucid': ['test-linux-x86_64', 'test-win-win7'],
}
@classmethod
def determine_full_port_name(cls, host, options, port_name):
if port_name == 'test':
return TestPort.default_port_name
return port_name
def __init__(self, host, port_name=None, **kwargs):
Port.__init__(self, host, port_name or TestPort.default_port_name, **kwargs)
self._tests = unit_test_list()
self._flakes = set()
# FIXME: crbug.com/279494. This needs to be in the "real layout tests
# dir" in a mock filesystem, rather than outside of the checkout, so
# that tests that want to write to a TestExpectations file can share
# this between "test" ports and "real" ports. This is the result of
# rebaseline_unittest.py having tests that refer to "real" port names
# and real builders instead of fake builders that point back to the
# test ports. rebaseline_unittest.py needs to not mix both "real" ports
# and "test" ports
self._generic_expectations_path = '/mock-checkout/LayoutTests/TestExpectations'
self._results_directory = None
self._operating_system = 'mac'
if self._name.startswith('test-win'):
self._operating_system = 'win'
elif self._name.startswith('test-linux'):
self._operating_system = 'linux'
version_map = {
'test-win-xp': 'xp',
'test-win-win7': 'win7',
'test-mac-leopard': 'leopard',
'test-mac-snowleopard': 'snowleopard',
'test-linux-x86_64': 'lucid',
}
self._version = version_map[self._name]
    def repository_paths(self):
        """Returns a list of (repository_name, repository_path) tuples for the code bases it depends on."""
# FIXME: We override this just to keep the perf tests happy.
return [('blink', self.layout_tests_dir())]
def buildbot_archives_baselines(self):
return self._name != 'test-win-xp'
def default_pixel_tests(self):
return True
def _path_to_driver(self):
# This routine shouldn't normally be called, but it is called by
# the mock_drt Driver. We return something, but make sure it's useless.
return 'MOCK _path_to_driver'
def default_child_processes(self):
return 1
def check_build(self, needs_http, printer):
return test_run_results.OK_EXIT_STATUS
def check_sys_deps(self, needs_http):
return test_run_results.OK_EXIT_STATUS
def default_configuration(self):
return 'Release'
def diff_image(self, expected_contents, actual_contents):
diffed = actual_contents != expected_contents
if not actual_contents and not expected_contents:
return (None, None)
if not actual_contents or not expected_contents:
return (True, None)
if diffed:
return ("< %s\n---\n> %s\n" % (expected_contents, actual_contents), None)
return (None, None)
def layout_tests_dir(self):
return LAYOUT_TEST_DIR
def perf_tests_dir(self):
return PERF_TEST_DIR
def webkit_base(self):
return '/test.checkout'
def _skipped_tests_for_unsupported_features(self, test_list):
return set(['failures/expected/skip_text.html',
'failures/unexpected/skip_pass.html',
'virtual/skipped'])
def name(self):
return self._name
def operating_system(self):
return self._operating_system
def _path_to_wdiff(self):
return None
def default_results_directory(self):
return '/tmp/layout-test-results'
def setup_test_run(self):
pass
def _driver_class(self):
return TestDriver
def start_http_server(self, additional_dirs=None, number_of_servers=None):
pass
def start_websocket_server(self):
pass
def acquire_http_lock(self):
pass
def stop_http_server(self):
pass
def stop_websocket_server(self):
pass
def release_http_lock(self):
pass
def _path_to_lighttpd(self):
return "/usr/sbin/lighttpd"
def _path_to_lighttpd_modules(self):
return "/usr/lib/lighttpd"
def _path_to_lighttpd_php(self):
return "/usr/bin/php-cgi"
def _path_to_apache(self):
return "/usr/sbin/httpd"
def _path_to_apache_config_file(self):
return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'httpd.conf')
def path_to_generic_test_expectations_file(self):
return self._generic_expectations_path
def _port_specific_expectations_files(self):
return [self._filesystem.join(self._webkit_baseline_path(d), 'TestExpectations') for d in ['test', 'test-win-xp']]
def all_test_configurations(self):
"""Returns a sequence of the TestConfigurations the port supports."""
# By default, we assume we want to test every graphics type in
# every configuration on every system.
test_configurations = []
for version, architecture in self._all_systems():
for build_type in self._all_build_types():
test_configurations.append(TestConfiguration(
version=version,
architecture=architecture,
build_type=build_type))
return test_configurations
def _all_systems(self):
return (('leopard', 'x86'),
('snowleopard', 'x86'),
('xp', 'x86'),
('win7', 'x86'),
('lucid', 'x86'),
('lucid', 'x86_64'))
def _all_build_types(self):
return ('debug', 'release')
def configuration_specifier_macros(self):
"""To avoid surprises when introducing new macros, these are intentionally fixed in time."""
return {'mac': ['leopard', 'snowleopard'], 'win': ['xp', 'win7'], 'linux': ['lucid']}
def all_baseline_variants(self):
return self.ALL_BASELINE_VARIANTS
def virtual_test_suites(self):
return [
VirtualTestSuite('passes', 'passes', ['--virtual-arg'], use_legacy_naming=True),
VirtualTestSuite('skipped', 'failures/expected', ['--virtual-arg2'], use_legacy_naming=True),
]
class TestDriver(Driver):
"""Test/Dummy implementation of the driver interface."""
next_pid = 1
def __init__(self, *args, **kwargs):
super(TestDriver, self).__init__(*args, **kwargs)
self.started = False
self.pid = 0
def cmd_line(self, pixel_tests, per_test_args):
pixel_tests_flag = '-p' if pixel_tests else ''
return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_drt_flag', []) + per_test_args
def run_test(self, driver_input, stop_when_done):
if not self.started:
self.started = True
self.pid = TestDriver.next_pid
TestDriver.next_pid += 1
start_time = time.time()
test_name = driver_input.test_name
test_args = driver_input.args or []
test = self._port._tests[test_name]
if test.keyboard:
raise KeyboardInterrupt
if test.exception:
raise ValueError('exception from ' + test_name)
if test.device_failure:
raise DeviceFailure('device failure in ' + test_name)
audio = None
actual_text = test.actual_text
crash = test.crash
web_process_crash = test.web_process_crash
if 'flaky/text.html' in test_name and not test_name in self._port._flakes:
self._port._flakes.add(test_name)
actual_text = 'flaky text failure'
if 'crash_then_text.html' in test_name:
if test_name in self._port._flakes:
actual_text = 'text failure'
else:
self._port._flakes.add(test_name)
crashed_process_name = self._port.driver_name()
crashed_pid = 1
crash = True
if 'text_then_crash.html' in test_name:
if test_name in self._port._flakes:
crashed_process_name = self._port.driver_name()
crashed_pid = 1
crash = True
else:
self._port._flakes.add(test_name)
actual_text = 'text failure'
if actual_text and test_args and test_name == 'passes/args.html':
actual_text = actual_text + ' ' + ' '.join(test_args)
if test.actual_audio:
audio = base64.b64decode(test.actual_audio)
crashed_process_name = None
crashed_pid = None
if crash:
crashed_process_name = self._port.driver_name()
crashed_pid = 1
elif web_process_crash:
crashed_process_name = 'WebProcess'
crashed_pid = 2
crash_log = ''
if crashed_process_name:
crash_logs = CrashLogs(self._port.host)
crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''
if stop_when_done:
self.stop()
if test.actual_checksum == driver_input.image_hash:
image = None
else:
image = test.actual_image
return DriverOutput(actual_text, image, test.actual_checksum, audio,
crash=(crash or web_process_crash), crashed_process_name=crashed_process_name,
crashed_pid=crashed_pid, crash_log=crash_log,
test_time=time.time() - start_time, timeout=test.timeout, error=test.error, pid=self.pid)
def stop(self):
self.started = False
|
lordmos/blink
|
Tools/Scripts/webkitpy/layout_tests/port/test.py
|
Python
|
mit
| 29,134 | 0.002643 |
import socket, sys,os,re
from struct import *
mymac=sys.argv[1]
rmac=sys.argv[2]
interface=sys.argv[3]
mode=sys.argv[4]
def address(a):
    b = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % (ord(a[0]), ord(a[1]), ord(a[2]), ord(a[3]), ord(a[4]), ord(a[5]))
    return b
try:
s = socket.socket( socket.AF_PACKET , socket.SOCK_RAW , socket.ntohs(0x0003))
except socket.error, msg:
    print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
    sys.exit()
while True:
packet = s.recvfrom(65565)
packet = packet[0]
pack_length = 14
pack_header = packet[:pack_length]
pack = unpack('!6s6sH' , pack_header)
pack_protocol = socket.ntohs(pack[2])
#print 'Destination MAC : ' + address(packet[0:6]) + ' Source MAC : ' + address(packet[6:12])
#print rmac, interface , mode
router_mac=re.sub(r':',"",rmac)
pc_mac=re.sub(r':',"",mymac)
router_mac= router_mac[:-6]
if mymac == address(packet[0:6]) :
if rmac != address(packet[6:12]) and rmac != "01005e" and rmac != "ffffff" and rmac != "333300":
os.system("bash ./passive.sh '"+rmac+"' '"+interface+"' '"+mode+"' ")
elif mymac == address(packet[6:12]) :
if rmac != address(packet[0:6]) and rmac != "01005e" and rmac != "ffffff" and rmac != "333300":
os.system("bash ./passive.sh '"+rmac+"' '"+interface+"' '"+mode+"' ")
|
europa502/shARP_2.0
|
mac_decoder.py
|
Python
|
gpl-3.0
| 1,454 | 0.055021 |
import uuid
import base64
import re
def generate_key():
"""
generates a uuid, encodes it with base32 and strips it's padding.
this reduces the string size from 32 to 26 chars.
"""
return base64.b32encode(uuid.uuid4().bytes).strip('=').lower()[0:12]
def thousand_separator(x=0, sep='.', dot=','):
"""
creates a string of number separated by selected delimiters
"""
num, _, frac = str(x).partition(dot)
num = re.sub(r'(\d{3})(?=\d)', r'\1'+sep, num[::-1])[::-1]
if frac:
num += dot + frac
return num
def new_parser(passed_object, request_data):
"""
Maps passed request object from client into expected object.
Use this for creation of new object by passing an instantiated
empty object into the passed_object variable
"""
for item, value in request_data.values.iteritems():
if hasattr(passed_object, item) and value is not None:
try:
setattr(passed_object, item, value)
except:
setattr(passed_object, item, convert_to_date(value))
passed_object.id = generate_key()
return passed_object
def edit_parser(passed_object, request_data):
"""
Maps value from passed json object for data edit purposes.
You need to pass in object resulting from query into the
passed_object variable
"""
for item in request_data.values:
if item != "id" and hasattr(passed_object, item) and request_data.values.get(item) != None:
setattr(passed_object, item, request_data.values.get(item))
return passed_object
def convert_to_date(date_string):
from datetime import date
input = date_string.split("-")
return date(int(input[0]),int(input[1]),int(input[2]))
def multikeysort(items, columns):
from operator import itemgetter
comparers = [ ((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
def comparer(left, right):
for fn, mult in comparers:
result = cmp(fn(left), fn(right))
            if result:
                return mult * result
        return 0
return sorted(items, cmp=comparer)
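def _example_usage():
    """
    Illustrative sketch (assumptions: the sample records and expected values;
    not part of the module's API). Exercises the helpers defined above.
    """
    key = generate_key()                              # 12-char lowercase id
    assert len(key) == 12
    assert thousand_separator(1234567) == '1.234.567'
    assert thousand_separator('1234567,89') == '1.234.567,89'
    rows = [{'name': 'ann', 'age': 30}, {'name': 'ann', 'age': 25}, {'name': 'bob', 'age': 40}]
    # ascending by name, ties broken by descending age
    assert multikeysort(rows, ['name', '-age'])[0] == {'name': 'ann', 'age': 30}
    return key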
|
hsw5138/bis
|
backend/helpers.py
|
Python
|
mit
| 2,048 | 0.027344 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import sys
from reset_exclusions import *
from utils import *
from viewer import *
class Cleaner(object):
"""
The Cleaner class allows users to clean up their movie, TV show and music video collection by removing watched
items. The user can apply a number of conditions to cleaning, such as limiting cleaning to files with a given
rating, excluding a particular folder or only cleaning when a particular disk is low on disk space.
The main method to call is the ``clean_all()`` method. This method will invoke the subsequent checks and (re)move
your videos. Upon completion, you will receive a short summary of the cleaning results.
*Example*
``summary = Cleaner().clean_all()``
"""
# Constants to ensure correct (Gotham-compatible) JSON-RPC requests for Kodi
MOVIES = u"movies"
MUSIC_VIDEOS = u"musicvideos"
TVSHOWS = u"episodes"
CLEANING_TYPE_MOVE = u"0"
CLEANING_TYPE_DELETE = u"1"
DEFAULT_ACTION_CLEAN = u"0"
DEFAULT_ACTION_LOG = u"1"
STATUS_SUCCESS = 1
STATUS_FAILURE = 2
STATUS_ABORTED = 3
movie_filter_fields = [u"title", u"plot", u"plotoutline", u"tagline", u"votes", u"rating", u"time", u"writers",
u"playcount", u"lastplayed", u"inprogress", u"genre", u"country", u"year", u"director",
u"actor", u"mpaarating", u"top250", u"studio", u"hastrailer", u"filename", u"path", u"set",
u"tag", u"dateadded", u"videoresolution", u"audiochannels", u"videocodec", u"audiocodec",
u"audiolanguage", u"subtitlelanguage", u"videoaspect", u"playlist"]
episode_filter_fields = [u"title", u"tvshow", u"plot", u"votes", u"rating", u"time", u"writers", u"airdate",
u"playcount", u"lastplayed", u"inprogress", u"genre", u"year", u"director", u"actor",
u"episode", u"season", u"filename", u"path", u"studio", u"mpaarating", u"dateadded",
u"videoresolution", u"audiochannels", u"videocodec", u"audiocodec", u"audiolanguage",
u"subtitlelanguage", u"videoaspect", u"playlist"]
musicvideo_filter_fields = [u"title", u"genre", u"album", u"year", u"artist", u"filename", u"path", u"playcount",
u"lastplayed", u"time", u"director", u"studio", u"plot", u"dateadded",
u"videoresolution", u"audiochannels", u"videocodec", u"audiocodec", u"audiolanguage",
u"subtitlelanguage", u"videoaspect", u"playlist"]
supported_filter_fields = {
TVSHOWS: episode_filter_fields,
MOVIES: movie_filter_fields,
MUSIC_VIDEOS: musicvideo_filter_fields
}
methods = {
TVSHOWS: u"VideoLibrary.GetEpisodes",
MOVIES: u"VideoLibrary.GetMovies",
MUSIC_VIDEOS: u"VideoLibrary.GetMusicVideos"
}
properties = {
TVSHOWS: [u"file", u"showtitle"],
MOVIES: [u"file", u"title"],
MUSIC_VIDEOS: [u"file", u"artist"]
}
stacking_indicators = [u"part", u"pt", u"cd", u"dvd", u"disk", u"disc"]
progress = xbmcgui.DialogProgress()
monitor = xbmc.Monitor()
silent = True
exit_status = STATUS_SUCCESS
def __init__(self):
debug(u"{0} version {1} loaded.".format(ADDON.getAddonInfo(u"name").decode("utf-8"),
ADDON.getAddonInfo(u"version").decode("utf-8")))
def __is_canceled(self):
"""
        Test if the progress dialog has been canceled by the user. If the cleaner was started as a service, this will
        always return False.
:rtype: bool
:return: True if the user cancelled cleaning, False otherwise.
"""
if self.silent:
return False
elif self.progress.iscanceled():
debug(u"User canceled.", xbmc.LOGWARNING)
self.exit_status = self.STATUS_ABORTED
return True
def show_progress(self):
"""
Toggle the progress dialog on. Use before calling the cleaning method.
"""
self.silent = False
def hide_progress(self):
"""
Toggle the progress dialog off. Use before calling the cleaning method.
"""
self.silent = True
def clean(self, video_type):
"""
Clean all watched videos of the provided type.
:type video_type: unicode
:param video_type: The type of videos to clean (one of TVSHOWS, MOVIES, MUSIC_VIDEOS).
:rtype: (list, int, int)
:return: A list of the filenames that were cleaned, as well as the number of files cleaned and the return status.
"""
cleaned_files = []
count = 0
type_translation = {self.MOVIES: translate(32626), self.MUSIC_VIDEOS: translate(32627), self.TVSHOWS: translate(32628)}
if not self.silent:
# Cleaning <video type>
self.progress.update(0, translate(32629).format(type=type_translation[video_type]), *map(translate, (32615, 32615)))
self.monitor.waitForAbort(1)
if video_type == self.TVSHOWS:
clean_this_video_type = get_setting(clean_tv_shows)
elif video_type == self.MOVIES:
clean_this_video_type = get_setting(clean_movies)
elif video_type == self.MUSIC_VIDEOS:
clean_this_video_type = get_setting(clean_music_videos)
else:
debug(u"Incorrect video type specified: {0}".format(video_type), xbmc.LOGERROR)
return [], 0, self.STATUS_FAILURE
progress_percent = 0
if clean_this_video_type:
expired_videos = self.get_expired_videos(video_type)
if not self.silent:
amount = len(expired_videos)
debug(u"Found {0} videos that may need cleaning.".format(amount))
try:
increment = 1.0 / amount
except ZeroDivisionError:
self.progress.update(0, *map(translate, (32621, 32622, 32623))) # No watched videos found
if self.monitor.waitForAbort(2.5):
pass
for filename, title in expired_videos:
if not self.__is_canceled():
unstacked_path = self.unstack(filename)
if xbmcvfs.exists(unstacked_path[0]) and self.has_no_hard_links(filename):
if get_setting(cleaning_type) == self.CLEANING_TYPE_MOVE:
# No destination set, prompt user to set one now
if get_setting(holding_folder) == "":
if xbmcgui.Dialog().yesno(ADDON_NAME, *map(translate, (32521, 32522, 32523))):
xbmc.executebuiltin(u"Addon.OpenSettings({0})".format(ADDON_ID))
self.exit_status = self.STATUS_ABORTED
break
if get_setting(create_subdirs):
new_path = os.path.join(get_setting(holding_folder).encode("utf-8"),
title.encode("utf-8"))
else:
new_path = get_setting(holding_folder)
move_result = self.move_file(filename, new_path)
if move_result == 1:
debug(u"File(s) moved successfully.")
count += 1
if len(unstacked_path) > 1:
cleaned_files.extend(unstacked_path)
else:
cleaned_files.append(filename)
self.clean_related_files(filename, new_path)
self.delete_empty_folders(os.path.dirname(filename))
elif move_result == -1:
debug(u"Moving errors occurred. Skipping related files and directories.", xbmc.LOGWARNING)
xbmcgui.Dialog().ok(*map(translate, (32611, 32612, 32613, 32614)))
elif get_setting(cleaning_type) == self.CLEANING_TYPE_DELETE:
if self.delete_file(filename):
debug(u"File(s) deleted successfully.")
count += 1
if len(unstacked_path) > 1:
cleaned_files.extend(unstacked_path)
else:
cleaned_files.append(filename)
self.clean_related_files(filename)
self.delete_empty_folders(os.path.dirname(filename))
else:
debug(u"Not cleaning {0}.".format(filename), xbmc.LOGNOTICE)
if not self.silent:
progress_percent += increment * 100
debug(u"Progress percent is {percent}, amount is {amount} and increment is {increment}".format(percent=progress_percent, amount=amount, increment=increment))
title = title.encode("utf-8")
self.progress.update(int(progress_percent), translate(32616).format(amount=amount, type=type_translation[video_type]), translate(32617), u"[I]{0}[/I]".format(title))
self.monitor.waitForAbort(2)
else:
debug(u"We had {amt} {type} left to clean.".format(amt=(amount - count), type=type_translation[video_type]))
else:
debug(u"Cleaning of {0} is disabled. Skipping.".format(video_type))
if not self.silent:
self.progress.update(0, translate(32624).format(type=type_translation[video_type]), *map(translate, (32625, 32615)))
self.monitor.waitForAbort(2)
return cleaned_files, count, self.exit_status
def clean_all(self):
"""
Clean up any watched videos in the Kodi library, satisfying any conditions set via the addon settings.
:rtype: (unicode, int)
:return: A single-line (localized) summary of the cleaning results to be used for a notification, plus a status.
"""
debug(u"Starting cleaning routine.")
if get_setting(clean_when_idle) and xbmc.Player().isPlaying():
debug(u"Kodi is currently playing a file. Skipping cleaning.", xbmc.LOGWARNING)
return None, self.exit_status
results = {}
cleaning_results, cleaned_files = [], []
if not get_setting(clean_when_low_disk_space) or (get_setting(clean_when_low_disk_space) and disk_space_low()):
if not self.silent:
self.progress.create(ADDON_NAME, *map(translate, (32619, 32615, 32615)))
self.progress.update(0)
self.monitor.waitForAbort(2)
for video_type in [self.MOVIES, self.MUSIC_VIDEOS, self.TVSHOWS]:
if not self.__is_canceled():
cleaned_files, count, status = self.clean(video_type)
if count > 0:
cleaning_results.extend(cleaned_files)
results[video_type] = count
if not self.silent:
self.progress.close()
# Check if we need to perform any post-cleaning operations
if cleaning_results:
# Write cleaned file names to the log
Log().prepend(cleaning_results)
# Finally clean the library to account for any deleted videos.
if get_setting(clean_kodi_library):
self.monitor.waitForAbort(2) # Sleep 2 seconds to make sure file I/O is done.
if xbmc.getCondVisibility(u"Library.IsScanningVideo"):
debug(u"The video library is being updated. Skipping library cleanup.", xbmc.LOGWARNING)
else:
xbmc.executebuiltin(u"XBMC.CleanLibrary(video, false)")
return self.summarize(results), self.exit_status
def summarize(self, details):
"""
Create a summary from the cleaning results.
:type details: dict
:rtype: unicode
:return: A comma separated summary of the cleaning results.
"""
summary = u""
# Localize video types
for vid_type, amount in details.items():
if vid_type is self.MOVIES:
video_type = translate(32515)
elif vid_type is self.TVSHOWS:
video_type = translate(32516)
elif vid_type is self.MUSIC_VIDEOS:
video_type = translate(32517)
else:
video_type = ""
summary += u"{0:d} {1}, ".format(amount, video_type)
# strip the comma and space from the last iteration and add the localized suffix
return u"{0}{1}".format(summary.rstrip(u", "), translate(32518)) if summary else u""
def get_expired_videos(self, option):
"""
Find videos in the Kodi library that have been watched.
Respects any other conditions user enables in the addon's settings.
:type option: unicode
:param option: The type of videos to find (one of the globals MOVIES, MUSIC_VIDEOS or TVSHOWS).
:rtype: list
:return: A list of expired videos, along with a number of extra attributes specific to the video type.
"""
# A non-exhaustive list of pre-defined filters to use during JSON-RPC requests
# These are possible conditions that must be met before a video can be deleted
by_playcount = {u"field": u"playcount", u"operator": u"greaterthan", u"value": u"0"}
by_date_played = {u"field": u"lastplayed", u"operator": u"notinthelast", u"value": u"{0:f}".format(get_setting(expire_after))}
by_minimum_rating = {u"field": u"rating", u"operator": u"lessthan", u"value": u"{0:f}".format(get_setting(minimum_rating))}
by_no_rating = {u"field": u"rating", u"operator": u"isnot", u"value": u"0"}
by_progress = {u"field": u"inprogress", u"operator": u"false", u"value": u""}
by_exclusion1 = {u"field": u"path", u"operator": u"doesnotcontain", u"value": get_setting(exclusion1)}
by_exclusion2 = {u"field": u"path", u"operator": u"doesnotcontain", u"value": get_setting(exclusion2)}
by_exclusion3 = {u"field": u"path", u"operator": u"doesnotcontain", u"value": get_setting(exclusion3)}
by_exclusion4 = {u"field": u"path", u"operator": u"doesnotcontain", u"value": get_setting(exclusion4)}
by_exclusion5 = {u"field": u"path", u"operator": u"doesnotcontain", u"value": get_setting(exclusion5)}
# link settings and filters together
settings_and_filters = [
(get_setting(enable_expiration), by_date_played),
(get_setting(clean_when_low_rated), by_minimum_rating),
(get_setting(not_in_progress), by_progress),
            (get_setting(exclusion_enabled) and get_setting(exclusion1) != u"", by_exclusion1),
            (get_setting(exclusion_enabled) and get_setting(exclusion2) != u"", by_exclusion2),
            (get_setting(exclusion_enabled) and get_setting(exclusion3) != u"", by_exclusion3),
            (get_setting(exclusion_enabled) and get_setting(exclusion4) != u"", by_exclusion4),
            (get_setting(exclusion_enabled) and get_setting(exclusion5) != u"", by_exclusion5)
]
# Only check not rated videos if checking for video ratings at all
if get_setting(clean_when_low_rated):
settings_and_filters.append((get_setting(ignore_no_rating), by_no_rating))
enabled_filters = [by_playcount]
for s, f in settings_and_filters:
if s and f[u"field"] in self.supported_filter_fields[option]:
enabled_filters.append(f)
debug(u"[{0}] Filters enabled: {1}".format(self.methods[option], enabled_filters))
filters = {u"and": enabled_filters}
request = {
u"jsonrpc": u"2.0",
u"method": self.methods[option],
u"params": {
u"properties": self.properties[option],
u"filter": filters
},
u"id": 1
}
rpc_cmd = json.dumps(request)
response = xbmc.executeJSONRPC(rpc_cmd)
debug(u"[{0}] Response: {1}".format(self.methods[option], response.decode("utf-8")))
result = json.loads(response)
# Check the results for errors
try:
error = result[u"error"]
debug(u"An error occurred. {0}".format(error))
return None
except KeyError as ke:
if u"error" in ke:
pass # no error
else:
raise
debug(u"Building list of expired videos")
expired_videos = []
response = result[u"result"]
try:
debug(u"Found {0:d} watched {1} matching your conditions".format(response[u"limits"][u"total"], option))
debug(u"JSON Response: {0}".format(response))
for video in response[option]:
# Gather all properties and add it to this video's information
temp = []
for p in self.properties[option]:
temp.append(video[p])
expired_videos.append(temp)
except KeyError as ke:
if option in ke:
pass # no expired videos found
else:
debug(u"KeyError: {0} not found".format(ke), xbmc.LOGWARNING)
debug(u"{0}".format(response), xbmc.LOGWARNING)
raise
finally:
debug(u"Expired videos: {0}".format(expired_videos))
return expired_videos
def unstack(self, path):
"""Unstack path if it is a stacked movie. See http://kodi.wiki/view/File_stacking for more info.
:type path: unicode
:param path: The path that should be unstacked.
:rtype: list
        :return: A list of paths that are part of the stack. If it is not a stacked movie, a one-element list is returned.
"""
if path.startswith(u"stack://"):
debug(u"Unstacking {0}.".format(path))
return path.replace(u"stack://", u"").split(u" , ")
else:
debug(u"Unstacking {0} is not needed.".format(path))
return [path]
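    # For example (hypothetical paths):
    # unstack(u"stack:///movies/film_cd1.avi , /movies/film_cd2.avi")
    # -> [u"/movies/film_cd1.avi", u"/movies/film_cd2.avi"]
    # unstack(u"/movies/film.avi") -> [u"/movies/film.avi"]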
def get_stack_bare_title(self, filenames):
"""Find the common title of files part of a stack, minus the volume and file extension.
Example:
["Movie_Title_part1.ext", "Movie_Title_part2.ext"] yields "Movie_Title"
:type filenames: list
:param filenames: a list of file names that are part of a stack. Use unstack() to find these file names.
:rtype: str
:return: common title of file names part of a stack
"""
title = os.path.basename(os.path.commonprefix([f.encode("utf-8") for f in filenames])).decode("utf-8")
for e in self.stacking_indicators:
if title.endswith(e):
title = title[:-len(e)].rstrip(u"._-")
break
return title
def delete_file(self, location):
"""
Delete a file from the file system. Also supports stacked movie files.
Example:
success = delete_file(location)
:type location: unicode
:param location: the path to the file you wish to delete.
:rtype: bool
:return: True if (at least one) file was deleted successfully, False otherwise.
"""
debug(u"Attempting to delete {0}".format(location))
paths = self.unstack(location)
success = []
for p in paths:
if xbmcvfs.exists(p):
success.append(bool(xbmcvfs.delete(p)))
else:
debug(u"File {0} no longer exists.".format(p), xbmc.LOGERROR)
success.append(False)
return any(success)
def delete_empty_folders(self, location):
"""
Delete the folder if it is empty. Presence of custom file extensions can be ignored while scanning.
To achieve this, edit the ignored file types setting in the addon settings.
Example:
success = delete_empty_folders(path)
:type location: unicode
:param location: The path to the folder to be deleted.
:rtype: bool
:return: True if the folder was deleted successfully, False otherwise.
"""
if not get_setting(delete_folders):
debug(u"Deleting of empty folders is disabled.")
return False
folder = self.unstack(location)[0] # Stacked paths should have the same parent, use any
debug(u"Checking if {0} is empty".format(folder))
ignored_file_types = [file_ext.strip() for file_ext in get_setting(ignore_extensions).split(u",")]
debug(u"Ignoring file types {0}".format(ignored_file_types))
subfolders, files = xbmcvfs.listdir(folder)
debug(u"Contents of {dir}:\nSubfolders: {sub}\nFiles: {files}".format(dir=folder, sub=subfolders, files=files))
empty = True
try:
for f in files:
_, ext = os.path.splitext(f)
if ext and ext not in ignored_file_types: # ensure f is not a folder and its extension is not ignored
debug(u"Found non-ignored file type {0}".format(ext))
empty = False
break
except OSError as oe:
debug(u"Error deriving file extension. Errno {0}".format(oe.errno), xbmc.LOGERROR)
empty = False
# Only delete directories if we found them to be empty (containing no files or filetypes we ignored)
if empty:
debug(u"Directory is empty and will be removed")
try:
# Recursively delete any subfolders
for f in subfolders:
debug(u"Deleting file at {0}".format(os.path.join(folder, f)))
self.delete_empty_folders(os.path.join(folder, f))
# Delete any files in the current folder
for f in files:
debug(u"Deleting file at {0}".format(os.path.join(folder, f)))
xbmcvfs.delete(os.path.join(folder, f))
# Finally delete the current folder
return xbmcvfs.rmdir(folder)
except OSError as oe:
debug(u"An exception occurred while deleting folders. Errno {0}".format(oe.errno), xbmc.LOGERROR)
return False
else:
debug(u"Directory is not empty and will not be removed")
return False
def clean_related_files(self, source, dest_folder=None):
"""Clean files related to another file based on the user's preferences.
Related files are files that only differ by extension, or that share a prefix in case of stacked movies.
Examples of related files include NFO files, thumbnails, subtitles, fanart, etc.
:type source: unicode
:param source: Location of the file whose related files should be cleaned.
:type dest_folder: unicode
:param dest_folder: (Optional) The folder where related files should be moved to. Not needed when deleting.
"""
if get_setting(clean_related):
debug(u"Cleaning related files.")
path_list = self.unstack(source)
path, name = os.path.split(path_list[0]) # Because stacked movies are in the same folder, only check one
if source.startswith(u"stack://"):
name = self.get_stack_bare_title(path_list)
else:
name, ext = os.path.splitext(name)
debug(u"Attempting to match related files in {0} with prefix {1}".format(path, name))
for extra_file in xbmcvfs.listdir(path)[1]:
extra_file = unicode(extra_file, encoding="utf-8")
if extra_file.startswith(name):
debug(u"{0} starts with {1}.".format(extra_file, name))
extra_file_path = os.path.join(path, extra_file)
if get_setting(cleaning_type) == self.CLEANING_TYPE_DELETE:
if extra_file_path not in path_list:
debug(u"Deleting {0}.".format(extra_file_path))
xbmcvfs.delete(extra_file_path)
elif get_setting(cleaning_type) == self.CLEANING_TYPE_MOVE:
new_extra_path = os.path.join(dest_folder, os.path.basename(extra_file))
if new_extra_path not in path_list:
debug(u"Moving {0} to {1}.".format(extra_file_path, new_extra_path))
xbmcvfs.rename(extra_file_path, new_extra_path)
debug(u"Finished searching for related files.")
else:
debug(u"Cleaning of related files is disabled.")
def move_file(self, source, dest_folder):
"""Move a file to a new destination. Will create destination if it does not exist.
Example:
result = move_file(a, b)
:type source: unicode
:param source: the source path (absolute)
:type dest_folder: unicode
:param dest_folder: the destination path (absolute)
:rtype: int
:return: 1 if (all stacked) files were moved, 0 if not, -1 if errors occurred
"""
paths = self.unstack(source)
files_moved_successfully = 0
dest_folder = unicode(xbmc.makeLegalFilename(dest_folder), encoding="utf-8")
for p in paths:
debug(u"Attempting to move {0} to {1}.".format(p, dest_folder))
if xbmcvfs.exists(p):
if not xbmcvfs.exists(dest_folder):
if xbmcvfs.mkdirs(dest_folder):
debug(u"Created destination {0}.".format(dest_folder))
else:
debug(u"Destination {0} could not be created.".format(dest_folder), xbmc.LOGERROR)
return -1
new_path = os.path.join(dest_folder, os.path.basename(p))
if xbmcvfs.exists(new_path):
debug(u"A file with the same name already exists in the holding folder. Checking file sizes.")
existing_file = xbmcvfs.File(new_path)
file_to_move = xbmcvfs.File(p)
if file_to_move.size() > existing_file.size():
debug(u"This file is larger than the existing file. Replacing it with this one.")
existing_file.close()
file_to_move.close()
if bool(xbmcvfs.delete(new_path) and bool(xbmcvfs.rename(p, new_path))):
files_moved_successfully += 1
else:
return -1
else:
debug(u"This file isn't larger than the existing file. Deleting it instead of moving.")
existing_file.close()
file_to_move.close()
if bool(xbmcvfs.delete(p)):
files_moved_successfully += 1
else:
return -1
else:
debug(u"Moving {0} to {1}.".format(p, new_path))
move_success = bool(xbmcvfs.rename(p, new_path))
copy_success, delete_success = False, False
if not move_success:
debug(u"Move failed, falling back to copy and delete.", xbmc.LOGWARNING)
copy_success = bool(xbmcvfs.copy(p, new_path))
if copy_success:
debug(u"Copied successfully, attempting delete of source file.")
delete_success = bool(xbmcvfs.delete(p))
if not delete_success:
debug(u"Could not remove source file. Please remove the file manually.", xbmc.LOGWARNING)
else:
debug(u"Copying failed, please make sure you have appropriate permissions.", xbmc.LOGFATAL)
return -1
if move_success or (copy_success and delete_success):
files_moved_successfully += 1
else:
debug(u"File {0} is no longer available.".format(p), xbmc.LOGWARNING)
return 1 if len(paths) == files_moved_successfully else -1
def has_no_hard_links(self, filename):
"""
Tests the provided filename for hard links and only returns True if the number of hard links is exactly 1.
:param filename: The filename to check for hard links
:type filename: str
:return: True if the number of hard links equals 1, False otherwise.
:rtype: bool
"""
        if get_setting(keep_hard_linked):
            debug(u"Making sure the number of hard links is exactly one.")
            has_no_links = all(i == 1 for i in map(xbmcvfs.Stat.st_nlink, map(xbmcvfs.Stat, self.unstack(filename))))
            debug(u"No hard links detected." if has_no_links else u"Hard links detected. Skipping.")
            return has_no_links
        else:
            debug(u"Not checking for hard links.")
            return True
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == u"log":
win = LogViewerDialog("JanitorLogViewer.xml", ADDON.getAddonInfo(u"path"))
win.doModal()
del win
elif len(sys.argv) > 1 and sys.argv[1] == u"reset":
reset_exclusions()
else:
cleaner = Cleaner()
if get_setting(default_action) == cleaner.DEFAULT_ACTION_LOG:
xbmc.executebuiltin(u"RunScript({0}, log)".format(ADDON_ID))
else:
cleaner.show_progress()
results, return_status = cleaner.clean_all()
if results:
# Videos were cleaned. Ask the user to view the log file.
# TODO: Listen to OnCleanFinished notifications and wait before asking to view the log
if xbmcgui.Dialog().yesno(translate(32514), results, translate(32519)):
xbmc.executebuiltin(u"RunScript({0}, log)".format(ADDON_ID))
elif return_status == cleaner.STATUS_ABORTED:
# Do not show cleaning results in case user aborted, e.g. to set holding folder
pass
else:
xbmcgui.Dialog().ok(ADDON_NAME.decode("utf-8"), translate(32520))
|
Anthirian/script.filecleaner
|
default.py
|
Python
|
gpl-3.0
| 30,945 | 0.004007 |
# file openpyxl/writer/strings.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
"""Write the shared string table."""
# Python stdlib imports
try:
# Python 2
from StringIO import StringIO
BytesIO = StringIO
except ImportError:
# Python 3
from io import BytesIO, StringIO
# package imports
from openpyxl.shared.xmltools import start_tag, end_tag, tag, XMLGenerator
def create_string_table(workbook):
"""Compile the string table for a workbook."""
strings = set()
for sheet in workbook.worksheets:
for cell in sheet.get_cell_collection():
if cell.data_type == cell.TYPE_STRING and cell._value is not None:
strings.add(cell.value)
return dict((key, i) for i, key in enumerate(strings))
def write_string_table(string_table):
"""Write the string table xml."""
temp_buffer = StringIO()
doc = XMLGenerator(out=temp_buffer, encoding='utf-8')
start_tag(doc, 'sst', {'xmlns':
'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
'uniqueCount': '%d' % len(string_table)})
strings_to_write = sorted(string_table.items(),
key=lambda pair: pair[1])
for key in [pair[0] for pair in strings_to_write]:
start_tag(doc, 'si')
if key.strip() != key:
attr = {'xml:space': 'preserve'}
else:
attr = {}
tag(doc, 't', attr, key)
end_tag(doc, 'si')
end_tag(doc, 'sst')
string_table_xml = temp_buffer.getvalue()
temp_buffer.close()
return string_table_xml
class StringTableBuilder(object):
def __init__(self):
self.counter = 0
self.dct = {}
def add(self, key):
key = key.strip()
try:
return self.dct[key]
except KeyError:
res = self.dct[key] = self.counter
self.counter += 1
return res
def get_table(self):
return self.dct
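# Illustrative use of StringTableBuilder (indices are assigned in insertion order):
# builder = StringTableBuilder()
# builder.add("hello")   # -> 0
# builder.add("world")   # -> 1
# builder.add("hello")   # -> 0 again; duplicates reuse the same index
# builder.get_table()    # -> {'hello': 0, 'world': 1}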
|
sbhowmik7/PSSEcompare
|
ext_libs/openpyxl/writer/strings.py
|
Python
|
gpl-3.0
| 3,077 | 0.0013 |
import os
import peewee
from rivr_peewee import Database
DATABASE_URL = os.environ.get('DATABASE_URL')
if DATABASE_URL and DATABASE_URL.startswith('postgres://'):
DATABASE_URL = DATABASE_URL.replace('postgres://', 'postgres+pool://')
# disable auto connection
EXTRA_OPTIONS = 'autoconnect=false'
if '?' in DATABASE_URL:
DATABASE_URL += '&' + EXTRA_OPTIONS
else:
DATABASE_URL += '?' + EXTRA_OPTIONS
os.environ['DATABASE_URL'] = DATABASE_URL
database = Database()
class Device(database.Model):
apns_token = peewee.CharField(max_length=64, unique=True)
def __repr__(self) -> str:
return '<Device {}>'.format(self.apns_token)
class Token(database.Model):
PUSH_SCOPE = 'push'
ALL_SCOPE = 'all'
device = peewee.ForeignKeyField(Device)
token = peewee.CharField(max_length=64, unique=True, primary_key=True)
scope = peewee.CharField(max_length=10, choices=(PUSH_SCOPE, ALL_SCOPE))
def __repr__(self) -> str:
return '<Token {} ({})>'.format(self.token, self.scope)
@property
def token_last_eight(self) -> str:
return self.token[-8:]
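# Illustrative usage (assumes a configured DATABASE_URL and created tables):
# device = Device.create(apns_token='a' * 64)
# token = Token.create(device=device, token='b' * 64, scope=Token.PUSH_SCOPE)
# token.token_last_eight  # -> 'bbbbbbbb'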
|
cocodelabs/api.palaverapp.com
|
palaverapi/models.py
|
Python
|
bsd-3-clause
| 1,143 | 0 |
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2015, Johannes Köster"
__email__ = "koester@jimmy.harvard.edu"
__license__ = "MIT"
import os
import mimetypes
import base64
import textwrap
import datetime
import io
from docutils.parsers.rst.directives.images import Image, Figure
from docutils.parsers.rst import directives
from docutils.core import publish_file
from snakemake.utils import format
from snakemake.logging import logger
class EmbeddedMixin(object):
"""
Replaces the URI of a directive with a base64-encoded version.
Useful for embedding images/figures in reports.
"""
def run(self):
"""
        Image.run() handles most of the work; afterwards the image URI is
        replaced with a base64-encoded data URI.
"""
result = Image.run(self)
reference = directives.uri(self.arguments[0])
self.options['uri'] = data_uri(reference)
return result
# Create (and register) new image:: and figure:: directives that use a base64
# data URI instead of pointing to a filename.
class EmbeddedImage(Image, EmbeddedMixin):
pass
directives.register_directive('embeddedimage', EmbeddedImage)
class EmbeddedFigure(Figure, EmbeddedMixin):
pass
directives.register_directive('embeddedfigure', EmbeddedFigure)
def data_uri(file, defaultenc="utf8"):
"""Craft a base64 data URI from file with proper encoding and mimetype."""
mime, encoding = mimetypes.guess_type(file)
if mime is None:
mime = "text/plain"
logger.info("Could not detect mimetype for {}, assuming "
"text/plain.".format(file))
if encoding is None:
encoding = defaultenc
with open(file, "rb") as f:
data = base64.b64encode(f.read())
uri = ("data:{mime};charset={charset};filename={filename};base64,{data}"
"".format(filename=os.path.basename(file),
mime=mime,
charset=encoding,
data=data.decode()))
return uri
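# For example (hypothetical file), data_uri("plot.png") would return something like
# "data:image/png;charset=utf8;filename=plot.png;base64,iVBORw0KGgo..." which can be
# embedded directly into the generated HTML report.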
def report(text, path,
stylesheet=os.path.join(os.path.dirname(__file__), "report.css"),
defaultenc="utf8",
template=None,
metadata=None, **files):
outmime, _ = mimetypes.guess_type(path)
if outmime != "text/html":
raise ValueError("Path to report output has to be an HTML file.")
definitions = textwrap.dedent("""
.. role:: raw-html(raw)
       :format: html
""")
metadata = textwrap.dedent("""
.. container::
       :name: metadata
       {metadata}{date}
""").format(metadata=metadata + " | " if metadata else "",
date=datetime.date.today().isoformat())
text = format(textwrap.dedent(text), stepout=3)
attachments = [textwrap.dedent("""
.. container::
       :name: attachments
""")]
for name, _files in sorted(files.items()):
if not isinstance(_files, list):
_files = [_files]
links = []
for file in _files:
data = data_uri(file)
links.append(':raw-html:`<a href="{data}" download="{filename}" draggable="true">{filename}</a>`'.format(
data=data, filename=os.path.basename(file)))
links = "\n\n ".join(links)
attachments.append('''
.. container::
:name: {name}
{name}:
{links}
'''.format(name=name,
links=links))
text = definitions + text + "\n\n" + "\n\n".join(attachments) + metadata
overrides = dict()
if template is not None:
overrides["template"] = template
if stylesheet is not None:
overrides["stylesheet_path"] = stylesheet
html = open(path, "w")
publish_file(source=io.StringIO(text),
destination=html,
writer_name="html",
settings_overrides=overrides)
|
vangalamaheshh/snakemake
|
snakemake/report.py
|
Python
|
mit
| 3,806 | 0.000263 |
# Copyright (c) 2015 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
import six
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.extensions import l3
DNS_LABEL_MAX_LEN = 63
DNS_LABEL_REGEX = "[a-z0-9-]{1,%d}$" % DNS_LABEL_MAX_LEN
FQDN_MAX_LEN = 255
DNS_DOMAIN_DEFAULT = 'openstacklocal.'
class DNSDomainNotFound(n_exc.NotFound):
message = _("Domain %(dns_domain)s not found in the external DNS service")
class DuplicateRecordSet(n_exc.Conflict):
message = _("Name %(dns_name)s is duplicated in the external DNS service")
class ExternalDNSDriverNotFound(n_exc.NotFound):
message = _("External DNS driver %(driver)s could not be found.")
class InvalidPTRZoneConfiguration(n_exc.Conflict):
message = _("Value of %(parameter)s has to be multiple of %(number)s, "
"with maximum value of %(maximum)s and minimum value of "
"%(minimum)s")
def _validate_dns_name(data, max_len=FQDN_MAX_LEN):
msg = _validate_dns_format(data, max_len)
if msg:
return msg
request_dns_name = _get_request_dns_name(data)
if request_dns_name:
msg = _validate_dns_name_with_dns_domain(request_dns_name)
if msg:
return msg
def _validate_fip_dns_name(data, max_len=FQDN_MAX_LEN):
msg = validators.validate_string(data)
if msg:
return msg
if not data:
return
if data.endswith('.'):
msg = _("'%s' is a FQDN. It should be a relative domain name") % data
return msg
msg = _validate_dns_format(data, max_len)
if msg:
return msg
length = len(data)
if length > max_len - 3:
msg = _("'%(data)s' contains '%(length)s' characters. Adding a "
"domain name will cause it to exceed the maximum length "
"of a FQDN of '%(max_len)s'") % {"data": data,
"length": length,
"max_len": max_len}
return msg
def _validate_dns_domain(data, max_len=FQDN_MAX_LEN):
msg = validators.validate_string(data)
if msg:
return msg
if not data:
return
if not data.endswith('.'):
msg = _("'%s' is not a FQDN") % data
return msg
msg = _validate_dns_format(data, max_len)
if msg:
return msg
length = len(data)
if length > max_len - 2:
msg = _("'%(data)s' contains '%(length)s' characters. Adding a "
"sub-domain will cause it to exceed the maximum length of a "
"FQDN of '%(max_len)s'") % {"data": data,
"length": length,
"max_len": max_len}
return msg
def _validate_dns_format(data, max_len=FQDN_MAX_LEN):
# NOTE: An individual name regex instead of an entire FQDN was used
    # because it's easier to make correct. The logic should validate that the
# dns_name matches RFC 1123 (section 2.1) and RFC 952.
if not data:
return
try:
# Trailing periods are allowed to indicate that a name is fully
# qualified per RFC 1034 (page 7).
trimmed = data if not data.endswith('.') else data[:-1]
if len(trimmed) > 255:
raise TypeError(
_("'%s' exceeds the 255 character FQDN limit") % trimmed)
names = trimmed.split('.')
for name in names:
if not name:
raise TypeError(_("Encountered an empty component."))
if name.endswith('-') or name[0] == '-':
raise TypeError(
_("Name '%s' must not start or end with a hyphen.") % name)
if not re.match(DNS_LABEL_REGEX, name):
raise TypeError(
_("Name '%s' must be 1-63 characters long, each of "
"which can only be alphanumeric or a hyphen.") % name)
# RFC 1123 hints that a TLD can't be all numeric. last is a TLD if
# it's an FQDN.
if len(names) > 1 and re.match("^[0-9]+$", names[-1]):
raise TypeError(_("TLD '%s' must not be all numeric") % names[-1])
except TypeError as e:
msg = _("'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s") % {
'data': data, 'reason': str(e)}
return msg
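# Illustrative outcomes: _validate_dns_format(u"host-1.example.org.") returns None
# (a valid FQDN), while _validate_dns_format(u"-bad.example.org") returns an error
# message because a label must not start or end with a hyphen.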
def _validate_dns_name_with_dns_domain(request_dns_name):
# If a PQDN was passed, make sure the FQDN that will be generated is of
# legal size
dns_domain = _get_dns_domain()
higher_labels = dns_domain
if dns_domain:
higher_labels = '.%s' % dns_domain
higher_labels_len = len(higher_labels)
dns_name_len = len(request_dns_name)
if not request_dns_name.endswith('.'):
if dns_name_len + higher_labels_len > FQDN_MAX_LEN:
msg = _("The dns_name passed is a PQDN and its size is "
"'%(dns_name_len)s'. The dns_domain option in "
"neutron.conf is set to %(dns_domain)s, with a "
"length of '%(higher_labels_len)s'. When the two are "
"concatenated to form a FQDN (with a '.' at the end), "
"the resulting length exceeds the maximum size "
"of '%(fqdn_max_len)s'"
) % {'dns_name_len': dns_name_len,
'dns_domain': cfg.CONF.dns_domain,
'higher_labels_len': higher_labels_len,
'fqdn_max_len': FQDN_MAX_LEN}
return msg
return
# A FQDN was passed
if (dns_name_len <= higher_labels_len or not
request_dns_name.endswith(higher_labels)):
msg = _("The dns_name passed is a FQDN. Its higher level labels "
"must be equal to the dns_domain option in neutron.conf, "
"that has been set to '%(dns_domain)s'. It must also "
"include one or more valid DNS labels to the left "
"of '%(dns_domain)s'") % {'dns_domain':
cfg.CONF.dns_domain}
return msg
def _get_dns_domain():
if not cfg.CONF.dns_domain:
return ''
if cfg.CONF.dns_domain.endswith('.'):
return cfg.CONF.dns_domain
return '%s.' % cfg.CONF.dns_domain
def _get_request_dns_name(data):
dns_domain = _get_dns_domain()
if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)):
return data
return ''
def convert_to_lowercase(data):
if isinstance(data, six.string_types):
return data.lower()
msg = _("'%s' cannot be converted to lowercase string") % data
raise n_exc.InvalidInput(error_message=msg)
validators.add_validator('dns_name', _validate_dns_name)
validators.add_validator('fip_dns_name', _validate_fip_dns_name)
validators.add_validator('dns_domain', _validate_dns_domain)
DNSNAME = 'dns_name'
DNSDOMAIN = 'dns_domain'
DNSASSIGNMENT = 'dns_assignment'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
DNSNAME: {'allow_post': True, 'allow_put': True,
'default': '',
'convert_to': convert_to_lowercase,
'validate': {'type:dns_name': FQDN_MAX_LEN},
'is_visible': True},
DNSASSIGNMENT: {'allow_post': False, 'allow_put': False,
'is_visible': True},
},
l3.FLOATINGIPS: {
DNSNAME: {'allow_post': True, 'allow_put': False,
'default': '',
'convert_to': convert_to_lowercase,
'validate': {'type:fip_dns_name': FQDN_MAX_LEN},
'is_visible': True},
DNSDOMAIN: {'allow_post': True, 'allow_put': False,
'default': '',
'convert_to': convert_to_lowercase,
'validate': {'type:dns_domain': FQDN_MAX_LEN},
'is_visible': True},
},
attr.NETWORKS: {
DNSDOMAIN: {'allow_post': True, 'allow_put': True,
'default': '',
'convert_to': convert_to_lowercase,
'validate': {'type:dns_domain': FQDN_MAX_LEN},
'is_visible': True},
},
}
class Dns(extensions.ExtensionDescriptor):
"""Extension class supporting DNS Integration."""
@classmethod
def get_name(cls):
return "DNS Integration"
@classmethod
def get_alias(cls):
return "dns-integration"
@classmethod
def get_description(cls):
return "Provides integration with DNS."
@classmethod
def get_updated(cls):
return "2015-08-15T18:00:00-00:00"
def get_required_extensions(self):
return ["router"]
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
|
cloudbase/neutron
|
neutron/extensions/dns.py
|
Python
|
apache-2.0
| 9,551 | 0.000419 |
#!/usr/bin/env python
from distutils.core import setup
setup(name = "quasi",
version = "0.87",
description = "A multiple-context Python shell",
author = "Ben Last",
author_email = "ben@benlast.com",
url = "http://quasi-shell.sourceforge.net/",
license = "BSD",
scripts = ["quasi.py"],
data_files = [("share/licenses/quasi", ["LICENSE"])],
extra_path = "quasi",
packages = ["."]
)
|
aur-archive/quasi
|
setup.py
|
Python
|
bsd-3-clause
| 448 | 0.049107 |
from serial import Serial
import time
import platform
import socket
serialPort = Serial('COM3' if platform.system() == 'Windows' else '/dev/ttyUSB0', 9600)
time.sleep(2)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('', 2222))
server.listen(1)
while True:
(client, address) = server.accept()
print('Connected')
while True:
data = client.recv(6)#.decode()
if 'CLOSE' in data: break
#print(data)
serialPort.write(data)
|
Mnenmenth/RobotCode
|
JoystickInput/JoystickServer.py
|
Python
|
apache-2.0
| 458 | 0.026201 |
#!/usr/bin/env python3
# Copyright 2015, 2016 Endless Mobile, Inc.
# This file is part of eos-event-recorder-daemon.
#
# eos-event-recorder-daemon is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at your
# option) any later version.
#
# eos-event-recorder-daemon is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with eos-event-recorder-daemon. If not, see
# <http://www.gnu.org/licenses/>.
import gzip
import http.server
import sys
class PrintingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def do_PUT(self):
print(self.path, flush=True)
content_encoding = self.headers['X-Endless-Content-Encoding']
print(content_encoding, flush=True)
content_length = int(self.headers['Content-Length'])
compressed_request_body = self.rfile.read(content_length)
decompressed_request_body = gzip.decompress(compressed_request_body)
print(len(decompressed_request_body), flush=True)
sys.stdout.buffer.write(decompressed_request_body)
sys.stdout.buffer.flush()
status_code_str = sys.stdin.readline()
status_code = int(status_code_str)
self.send_response(status_code)
self.end_headers()
# A metrics server that simply prints the requests it receives to stdout
class MockServer(http.server.HTTPServer):
def __init__(self):
SERVER_ADDRESS = ('localhost', 0)
super().__init__(SERVER_ADDRESS, PrintingHTTPRequestHandler)
if __name__ == '__main__':
mock_server = MockServer()
print(mock_server.server_port, flush=True)
mock_server.serve_forever()
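# Sketch of the intended interaction (driven by the test harness): the daemon PUTs a
# gzip-compressed request body; this server prints the request path, the
# X-Endless-Content-Encoding header, the decompressed length and the decompressed body
# to stdout, then reads an HTTP status code (e.g. "200") from stdin and replies with it.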
|
endlessm/eos-event-recorder-daemon
|
tests/daemon/mock-server.py
|
Python
|
gpl-2.0
| 1,987 | 0.00151 |
#! /usr/bin/env python
#script that takes optional arguments for the date and target collection and calculates each target's angular separation from the Moon and its elevation.
import ephem, subprocess, operator, argparse
#host & port info
hostName="veritase.sao.arizona.edu"
portNum=""
#hostName="lucifer1.spa.umn.edu"
#portNum=33060
#dict for sorting/writing info
moonlightsources = {}
#setting up ephem observer object for veritas
veritas = ephem.Observer()
veritas.lat = '31:40.51'
veritas.lon = '-110:57.132'
veritas.elevation = 1268
#argument parser
parser = argparse.ArgumentParser(description='Takes optional arguments to specify date and target collection. If no arguments are specified, will calculate angular distances from the Moon at the current time for all moonlight targets')
parser.add_argument('--date',default=veritas.date, help='specify DATE (in UT) in the format "YYYY/MM/DD HH:MM" don\'t forget the quotation marks')
parser.add_argument('--targets',default='moonlight_targets',help='Specifies the collection of targets. Multiple collections may be given, separated by commas. Useful values for TARGETS: moonlight_targets,reduced_HV_targets,moonlight_bright,primary_targets,secondary_targets,blazar_filler_targets')
parser.add_argument('--nocuts',help = 'displays results for all targets in the list, even if they fail the moon distance and elevation cuts', action = "store_true")
args = parser.parse_args()
#setting date/time to user-spefied value (or default to current date/time)
veritas.date = args.date
#letting user know the date and target collection used.
print
print "Date and time used (in UT): %s" %veritas.date
print
print "Calculating angular distances from the Moon for targets in %s collection..." %args.targets
#MySQL command, runs on command line through subprocess
targetList = args.targets.split(",")
#for collection in args.targets.split(","):
for n in range(0, len(targetList) ):
if n == 0:
execCMD = "SELECT tblObserving_Collection.source_id,ra,decl,epoch FROM tblObserving_Sources JOIN tblObserving_Collection ON tblObserving_Sources.source_id = tblObserving_Collection.source_id WHERE tblObserving_Collection.collection_id='%s'" %targetList[n]
else:
execCMD = execCMD + " OR tblObserving_Collection.collection_id='%s'" %targetList[n]
sqlOut = subprocess.Popen(["mysql","-h","%s" %(hostName),"-P","%s" %(portNum),"-u", "readonly", "-D","VERITAS", "--execute=%s" %(execCMD)], stdout=subprocess.PIPE)
#stores query results
QUERY, err = sqlOut.communicate()
if QUERY == "":
print
print "Query result is empty. Make sure date and target collection provided are valid. Going to crash now :("
#loop through all objects in the bright moonlight list
#calculating and printing out angular separation from moon
for count,source in enumerate(QUERY.rstrip().split("\n")):
#skip header in query results
if count == 0:
continue
#parsing through query results
sourceName=source.split("\t")[0]
sourceRA=source.split("\t")[1]
sourceDEC=source.split("\t")[2]
sourceEpoch=source.split("\t")[3]
#makes sure same epoch is used
veritas.epoch = float(sourceEpoch)
#Define ephem moon object and calculate position (ra, dec) and phase
TheMoon = ephem.Moon(veritas)
TheMoon.compute(veritas)
illum = TheMoon.moon_phase*100.
#Get angular separation of moon and target
degFromMoon = 180./ephem.pi * ephem.separation((TheMoon.ra,TheMoon.dec),(float(sourceRA),float(sourceDEC)))
#Define ehpem object for source, to get elevation
sourceobj = ephem.FixedBody()
sourceobj._ra = float(sourceRA)
sourceobj._dec = float(sourceDEC)
sourceobj.compute(veritas)
sourceALT = sourceobj.alt*180./ephem.pi
moonlightsources[sourceName]=[(degFromMoon,sourceALT)]
#end of for loop
sorted_sources = sorted(moonlightsources.iteritems(), key=operator.itemgetter(1), reverse=True)
#print sorted_sources
if not args.nocuts: #printing only targets that pass the cuts
print "Only showing targets with elevation > 20 degrees and moon distance > 10 degrees"
print
print "Source\t\t\tDegrees from Moon\tElevation"
print "--------------------------------------------------------------"
for s in sorted_sources:
if s[1][0][1] > 20 and s[1][0][0] > 10:
if len(s[0]) <=7:
print "%s\t\t\t%0.3f\t\t\t%0.3f" %(s[0],s[1][0][0],s[1][0][1])
elif len(s[0]) <=15:
print "%s\t\t%0.3f\t\t\t%0.3f" %(s[0],s[1][0][0],s[1][0][1])
else:
print "%s\t%0.3f\t\t\t%0.3f" %(s[0],s[1][0][0],s[1][0][1])
else:#printing all targets, when cuts are disabled
print
print "Source\t\t\tDegrees from Moon\tElevation"
print "--------------------------------------------------------------"
for s in sorted_sources:
if len(s[0]) <=7:
print "%s\t\t\t%0.3f\t\t\t%0.3f" %(s[0],s[1][0][0],s[1][0][1])
elif len(s[0]) <=15:
print "%s\t\t %0.3f\t\t\t%0.3f" %(s[0],s[1][0][0],s[1][0][1])
else:
print "%s\t %0.3f\t\t\t%0.3f" %(s[0],s[1][0][0],s[1][0][1])
print "--------------------------------------------------------------"
print "The Moon is %0.2f%% illuminated" % illum
print
|
mbuchove/analysis-tools-m
|
python/moonDist.py
|
Python
|
mit
| 5,073 | 0.027991 |
'''
This script removes directories that contain only xml files.
'''
import os
srcpath = raw_input("Enter the source path : ")
for root, sub, files in os.walk(os.path.abspath(srcpath)):
if files:
files = [f for f in files if not f.endswith('.xml')]
if not files:
fpath = os.path.join(root)
os.system('rm -rf %s' % fpath)
print "removed", fpath
|
arulalant/CMIPs-Handler
|
scripts/mv/rm_dirs_contains_only_if_xml.py
|
Python
|
gpl-3.0
| 427 | 0.004684 |
# -*- coding: utf-8 -*-
import argparse
import time
import socket
import sys
import json
__prog__ = 'xbmc-command'
PROG = __prog__
__version__ = '0.4.1'
VERSION = __version__
class XBMC(object):
def __init__(self, host, port):
self.address = (host, port)
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__buffer = ""
self.__decode = json.JSONDecoder().raw_decode
self.settimeout(0)
def settimeout(self, timeout):
self.__timeout = timeout
self.__socket.settimeout(timeout if timeout > 0 else None)
def connect(self):
self.__socket.connect(self.address)
def close(self):
if self.__socket:
self.__socket.close()
def shutdown(self):
self.__socket.shutdown(socket.SHUT_RDWR)
def __getattr__(self, key):
return Rpc(self, key)
def send(self, req):
self.__socket.send(bytearray(req, 'utf-8'))
def recv(self, json_rpc_id):
start = time.time()
while True:
if self.__timeout > 0 and time.time() - start > self.__timeout:
raise CommandException("read timeout")
try:
data = self.__socket.recv(1024)
except socket.timeout:
raise CommandException("read timeout")
if not data:
return None
self.__buffer += data.decode('utf-8')
while True:
json_result = None
try:
json_result, index = self.__decode(self.__buffer)
self.__buffer = self.__buffer[index:]
except ValueError:
break
if json_result and 'id' in json_result and \
json_result['id'] == json_rpc_id:
return json_result
return None
class Rpc(object):
__REQ__ = '{"jsonrpc":"2.0", "method":"%s", "params":%s, "id":"%s"}'
def __init__(self, xbmc, method):
self.__xbmc = xbmc
self.__method = method
def __getattr__(self, key):
return Rpc(self.__xbmc, "%s.%s" % (self.__method, key))
def __call__(self, *args, **kwargs):
params = '{}'
ident = str(kwargs['id']) if 'id' in kwargs else self.__method
if args:
params = json.dumps(args[0])
elif 'params' in kwargs:
params = json.dumps(kwargs['params'])
self.__xbmc.send(Rpc.__REQ__ % (self.__method, params, ident))
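# Illustrative use of the dynamic RPC proxy (assumes a reachable Kodi JSON-RPC TCP
# server; port 9090 is only an example):
# xbmc = XBMC("localhost", 9090)
# xbmc.connect()
# xbmc.Player.GetActivePlayers()                    # sends the request
# result = xbmc.recv("Player.GetActivePlayers")     # waits for the matching id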
class CommandException(Exception):
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def __str__(self):
return self.msg
class Command(object):
def __init__(self):
self.xbmc = None
def call(self, args):
raise NotImplementedError("Please Implement this method")
def run_command(self, args):
try:
self.xbmc.connect()
except socket.timeout:
raise CommandException("Unable to connect to host %s:%s" % \
(self.xbmc.address[0], self.xbmc.address[1]))
except socket.error as err:
self.xbmc.close()
raise CommandException("Could not open socket: " + err)
self.call(args)
def get_active_player_id(self):
self.xbmc.Player.GetActivePlayers()
result = self.xbmc.recv('Player.GetActivePlayers')
if not result:
raise CommandException('unable to receive active players')
if len(result['result']) <= 0:
return -1
for player in result['result']:
if player['type'] in ('audio', 'video'):
return player['playerid']
return result['result'][0]['playerid']
@property
def parser(self):
parser = argparse.ArgumentParser(add_help=False)
self.create_parser(parser)
parser.add_argument('--help', action='help',
help='show this help message and exit')
return parser
def create_parser(self, parser):
return parser
def parse_args(self, args):
return self.parser.parse_args(args)
@property
def short_description(self):
return ''
# vim: ft=python ts=8 sts=4 sw=4 et:
|
mychris/xbmc-command
|
xbmc_command/core.py
|
Python
|
gpl-3.0
| 4,244 | 0.001649 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RLimma(RPackage):
"""Data analysis, linear models and differential expression
for microarray data."""
homepage = "https://www.bioconductor.org/packages/limma/"
url = "https://git.bioconductor.org/packages/limma"
list_url = homepage
version('3.32.10', git='https://git.bioconductor.org/packages/limma', commit='593edf28e21fe054d64137ae271b8a52ab05bc60')
version('3.32.6', 'df5dc2b85189a24e939efa3a8e6abc41')
depends_on('r@3.4.0:3.4.9', when='@3.32.10')
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-limma/package.py
|
Python
|
lgpl-2.1
| 1,754 | 0.00114 |
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import tempfile
import json
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath
cwd = dirname(realpath(__file__))
log_level = int(os.environ.get('PythonLogLevel', 10))
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "Validation"))
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError, ValidationError
from auto_rest_validation_test import AutoRestValidationTest
from auto_rest_validation_test.models import (
Product,
ConstantProduct,
ChildProduct)
class ValidationTests(unittest.TestCase):
def test_constant_values(self):
client = AutoRestValidationTest(
"abc123",
base_url="http://localhost:3000")
client.api_version = "12-34-5678"
client.get_with_constant_in_path()
body = Product(child=ChildProduct())
product = client.post_with_constant_in_body(body=body)
self.assertIsNotNone(product)
def test_validation(self):
client = AutoRestValidationTest(
"abc123",
base_url="http://localhost:3000")
client.api_version = "12-34-5678"
try:
client.validation_of_method_parameters("1", 100)
except ValidationError as err:
self.assertEqual(err.rule, "min_length")
self.assertEqual(err.target, "resource_group_name")
try:
client.validation_of_method_parameters("1234567890A", 100)
except ValidationError as err:
self.assertEqual(err.rule, "max_length")
self.assertEqual(err.target, "resource_group_name")
try:
client.validation_of_method_parameters("!@#$", 100)
except ValidationError as err:
self.assertEqual(err.rule, "pattern")
self.assertEqual(err.target, "resource_group_name")
try:
client.validation_of_method_parameters("123", 105)
except ValidationError as err:
self.assertEqual(err.rule, "multiple")
self.assertEqual(err.target, "id")
try:
client.validation_of_method_parameters("123", 0)
except ValidationError as err:
self.assertEqual(err.rule, "minimum")
self.assertEqual(err.target, "id")
try:
client.validation_of_method_parameters("123", 2000)
except ValidationError as err:
self.assertEqual(err.rule, "maximum")
self.assertEqual(err.target, "id")
try:
tempproduct=Product(child=ChildProduct(), capacity=0)
client.validation_of_body("123", 150, tempproduct)
except ValidationError as err:
self.assertEqual(err.rule, "minimum_ex")
self.assertIn("capacity", err.target)
try:
tempproduct=Product(child=ChildProduct(), capacity=100)
client.validation_of_body("123", 150, tempproduct)
except ValidationError as err:
self.assertEqual(err.rule, "maximum_ex")
self.assertIn("capacity", err.target)
try:
tempproduct=Product(child=ChildProduct(),
display_names=["item1","item2","item3","item4","item5","item6","item7"])
client.validation_of_body("123", 150, tempproduct)
except ValidationError as err:
self.assertEqual(err.rule, "max_items")
self.assertIn("display_names", err.target)
client2 = AutoRestValidationTest(
"abc123",
base_url="http://localhost:3000")
client2.api_version = "abc"
try:
client2.validation_of_method_parameters("123", 150)
except ValidationError as err:
self.assertEqual(err.rule, "pattern")
self.assertEqual(err.target, "self.api_version")
if __name__ == '__main__':
unittest.main()
|
lmazuel/autorest
|
src/generator/AutoRest.Python.Tests/AcceptanceTests/validation_tests.py
|
Python
|
mit
| 5,290 | 0.002837 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from solidspy.solids_GUI import solids_GUI
solids_GUI()
|
jgomezc1/FEM_PYTHON
|
solidspy/__main__.py
|
Python
|
mit
| 119 | 0.008403 |
class Reverb:
pass
class ScatterLocation:
pass
class Chorus:
pass
class Vocoder:
pass
class ShuffleSound:
pass
|
ttm/musicLegacy
|
musicLegacy/effects.py
|
Python
|
mit
| 131 | 0.030534 |
# -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
import collections
import itertools
import logging
from shot_detector.utils.dsl import DslOperatorMixin
from shot_detector.utils.dsl.dsl_kwargs import dsl_kwargs_decorator
class DslFilterMixin(DslOperatorMixin):
"""
Basic filter mixin to build Filter-DSL
"""
__logger = logging.getLogger(__name__)
@staticmethod
def dsl_kwargs_decorator(*dsl_rules):
"""
:param dsl_rules:
:return:
"""
return dsl_kwargs_decorator(*dsl_rules)
def __or__(self, other):
"""
:param Filter other:
:return:
"""
return self.apply_sequence([other])
def __ror__(self, other):
"""
:param Filter other:
:return:
"""
return self.apply_sequence([other])
def apply_sequence(self, others):
"""
:param others:
:return:
"""
filters = self.cast_to_apply_sequence(others)
filter_sequence = self.apply_filter_sequence(filters)
return filter_sequence
def apply_filter_sequence(self, filters):
"""
Extends current own `sequential_filters` with `filters`
or creates a new `FilterSequence`.
:param filters:
:return:
"""
from .filter_sequence import FilterSequence
if isinstance(self, FilterSequence):
self_filters = self.sequential_filters
joined_filters = itertools.chain(self_filters, filters)
filter_sequence = self
else:
joined_filters = itertools.chain([self], filters)
filter_sequence = FilterSequence
joined_filter_list = list(joined_filters)
filter_sequence = filter_sequence(
sequential_filters=joined_filter_list
)
return filter_sequence
# @staticmethod
def cast_to_apply_sequence(self, others):
"""
:param others:
:return:
"""
from .filter_cast_features import FilterCastFeatures
for other in others:
if isinstance(other, tuple):
other = DslFilterMixin.to_tuple(*other)
if not isinstance(other, DslFilterMixin):
other = FilterCastFeatures(
op_func=other,
parallel_filters=[self]
)
yield other
def apply_operator(self,
op_func=None,
others=None,
op_mode=None,
**kwargs):
"""
:param op_func:
:param others:
:param op_mode:
:param kwargs:
:return:
"""
filters = self.cast_to_apply_operator(others)
filter_operator = self.apply_filter_operator(
op_func=op_func,
filters=filters,
op_mode=op_mode,
)
return filter_operator
def apply_filter_operator(self,
op_func=None,
filters=None,
op_mode=None,
**kwargs):
"""
:param op_func:
:param filters:
:param op_mode:
:param kwargs:
:return:
"""
from .filter_operator import FilterOperator, FilterOperatorMode
fo_op_mode = FilterOperatorMode.LEFT
if op_mode is self.Operator.RIGHT:
fo_op_mode = FilterOperatorMode.RIGHT
# joined_filters = itertools.chain([self], filters)
filter_operator = FilterOperator(
op_func=op_func,
op_mode=fo_op_mode,
# parallel_filters=list(joined_filters),
**kwargs
)
if isinstance(self, FilterOperator) and filter_operator == self:
self_filters = self.parallel_filters
joined_filters = itertools.chain(self_filters, filters)
filter_operator = self
else:
joined_filters = itertools.chain([self], filters)
joined_filter_list = list(joined_filters)
filter_operator = filter_operator(
parallel_filters=joined_filter_list
)
return filter_operator
@classmethod
def to_tuple(cls, *args):
"""
:param args:
:return:
"""
from .filter_tuple import FilterTuple
filter_tuple = FilterTuple(
parallel_filters=list(args)
)
return filter_tuple
def cast_to_apply_operator(self, others):
"""
:param others:
:return:
"""
for other in others:
if not isinstance(other, DslFilterMixin):
other = self.scalar_to_filter(
value=other,
)
yield other
def to_filter(self, value):
"""
:param value:
:return:
"""
if isinstance(value, collections.Iterable):
return self.seq_to_filter(value)
return self.scalar_to_filter(value)
@staticmethod
def seq_to_filter(value):
"""
:param value:
:return:
"""
from .filter_cast_seq_value import FilterCastSeqValue
return FilterCastSeqValue(seq=value)
@staticmethod
def scalar_to_filter(value):
"""
:param value:
:return:
"""
from .filter_cast_scalar_value import FilterCastScalarValue
return FilterCastScalarValue(value=value)
def __contains__(self, item):
"""
:param Filter item:
:return:
"""
return self.intersect(item)
def i(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return:
"""
return self.intersect(*args, **kwargs)
def intersect(self, other, threshold=0):
"""
:param other:
:param threshold:
:return:
"""
from .filter_intersection import FilterIntersection
return FilterIntersection(
parallel_filters=[self, other],
threshold=threshold
)
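# A minimal usage sketch of the DSL defined by this mixin. `SomeFilter` is a
# hypothetical concrete filter class that mixes in DslFilterMixin; the exact
# constructor arguments are assumptions for illustration only.
#
#     pipeline = SomeFilter() | SomeFilter() | abs
#
# The pipe operator builds a FilterSequence; plain callables such as `abs`
# are wrapped in FilterCastFeatures, and `item in some_filter` delegates to
# FilterIntersection via the __contains__/intersect methods above.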
|
w495/python-video-shot-detector
|
shot_detector/filters/dsl/dsl_filter_mixin.py
|
Python
|
bsd-3-clause
| 6,383 | 0.00564 |
# -*- coding: utf-8 -*-
"""
FlowrouteNumbersLib.APIHelper
Copyright Flowroute, Inc. 2016
"""
import jsonpickle
import re
class APIHelper:
"""A Helper Class for various functions associated with API Calls.
This class contains static methods for operations that need to be
performed during API requests. All of the methods inside this class are
    static methods; there is no need to ever initialise an instance of this
class.
"""
@staticmethod
def json_serialize(obj):
"""JSON Serialization of a given object.
Args:
obj (object): The object to serialise.
Returns:
str: The JSON serialized string of the object.
"""
if obj is None:
return None
# Resolve any Names if it's one of our objects that needs to have this called on
if isinstance(obj, list):
value = list()
for item in obj:
try:
value.append(item.resolve_names())
except (AttributeError, TypeError):
value.append(item)
obj = value
else:
try:
obj = obj.resolve_names()
except (AttributeError, TypeError):
obj = obj
return jsonpickle.encode(obj, False)
@staticmethod
def json_deserialize(json):
"""JSON Deerialization of a given string.
Args:
json (str): The JSON serialized string to deserialize.
Returns:
dict: A dictionary representing the data contained in the
JSON serialized string.
"""
if json is None:
return None
return jsonpickle.decode(json)
@staticmethod
def append_url_with_template_parameters(url,
parameters):
"""Replaces template parameters in the given url.
Args:
url (str): The query url string to replace the template parameters.
parameters (dict): The parameters to replace in the url.
Returns:
str: Url with replaced parameters.
"""
# Parameter validation
if url is None:
raise ValueError("url is null")
if parameters is None:
return url
# Iterate and replace parameters
for key in parameters:
element = parameters[key]
replace_value = ""
# Load parameter value
if element is None:
replace_value = ""
elif isinstance(element, list):
replace_value = "/".join(element)
else:
replace_value = str(element)
url = url.replace('{{{0}}}'.format(key),str(replace_value))
return url
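    # Example (behaviour follows from the loop above): calling
    # APIHelper.append_url_with_template_parameters('/numbers/{number}',
    # {'number': 1234}) yields '/numbers/1234'; a list value such as
    # {'number': ['a', 'b']} is joined with '/' to give '/numbers/a/b', and a
    # None value removes the placeholder.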
@staticmethod
def append_url_with_query_parameters(url,
parameters):
"""Appends the given set of parameters to the given query string.
Args:
url (str): The query url string to append the parameters.
parameters (dict): The parameters to append.
Returns:
str: Url with appended query parameters.
"""
# Perform parameter validation
if url is None:
raise ValueError("url is null")
if parameters is None:
return url
# Does the query string already have parameters?
has_params = '?' in url
# Iterate and replace parameters
for key in parameters:
element = parameters[key]
# Ignore null values
if element is None:
continue
# If already has parameters, use the & to append new parameters
separator = '&' if has_params else '?'
if isinstance(element, list):
url = url + '{0}{1}[]={2}'.format(separator, key, '&{0}[]='.format(key).join(element))
else:
url = url + '{0}{1}={2}'.format(separator, key, str(parameters[key]))
# Indicate the url has params
has_params = True
return url
@staticmethod
def clean_url(url):
"""Validates and processes the given query Url to clean empty slashes.
Args:
url (str): The given query Url to process.
Returns:
str: Clean Url as string.
"""
# Ensure that the urls are absolute
regex = "^https?://[^/]+"
match = re.match(regex, url)
if match is None:
raise ValueError('Invalid Url format.')
# Remove redundant forward slashes
protocol = match.group(0)
query_url = url[len(protocol):]
        query_url = re.sub("//+", "/", query_url)
        return protocol + query_url
@staticmethod
def form_encode(obj,
instanceName):
"""Encodes a model in a form-encoded manner such as person[Name]
Args:
obj (object): The given Object to form encode.
instanceName (string): The base name to appear before each entry
for this object.
Returns:
dict: A dictionary of form encoded properties of the model.
"""
# Resolve the names first
value = APIHelper.resolve_name(obj)
retval = dict()
if value is None:
return None
# Loop through every item we need to send
for item in value:
if isinstance(value[item], list):
# Loop through each item in the list and add it by number
i = 0
for entry in value[item]:
retval.update(APIHelper.form_encode(entry, instanceName + "[" + item + "][" + str(i) + "]"))
i += 1
elif isinstance(value[item], dict):
# Loop through each item in the dictionary and add it
retval.update(APIHelper.form_encode(value[item], instanceName + "[" + item + "]"))
else:
# Add the current item
retval[instanceName + "[" + item + "]"] = value[item]
return retval
@staticmethod
def resolve_names(obj,
names,
retval):
"""Resolves parameters from their Model names to their API names.
Args:
obj (object): The given Object to resolve names for.
names (dict): A dictionary containing a mapping from model name
to API name.
retval (dict): The dictionary to return which may or may not be
empty (but must not be None).
Returns:
dict: A dictionary form of the model with properties in their API
formats.
"""
# Loop through all properties in this model
for name in names:
value = getattr(obj, name)
if isinstance(value, list):
# Loop through each item
retval[names[name]] = list()
for item in value:
retval[names[name]].append(APIHelper.resolve_name(item))
elif isinstance(value, dict):
# Loop through each item
retval[names[name]] = dict()
for key in value:
retval[names[name]][key] = APIHelper.resolve_name(value[key])
else:
retval[names[name]] = APIHelper.resolve_name(value)
# Return the result
return retval
@staticmethod
def resolve_name(value):
"""Resolves name for a given object
If the object needs to be recursively resolved, this method will
perform that recursive call.
Args:
value (object): A parameter to check if it needs to be recursively
resolved.
Returns:
object: A resolved parameter which may either be a dict or a
                primitive object.
"""
# Check if the item also has to be resolved
if value is not None and hasattr(value, "resolve_names") and callable(getattr(value, "resolve_names")):
return value.resolve_names()
else:
# Not an object that needs resolving
return value
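# Worked example for clean_url, following the regex and re.sub above:
# APIHelper.clean_url('http://example.com//foo//bar') returns
# 'http://example.com/foo/bar', while a url without an http/https scheme
# raises ValueError('Invalid Url format.').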
|
flowroute/flowroute-numbers-python
|
FlowrouteNumbersLib/APIHelper.py
|
Python
|
mit
| 8,902 | 0.00483 |
from setuptools import setup, find_packages
import sys, os
version = '1.3'
long_description = """The raisin.restyler package is a part of Raisin, the web application
used for publishing the summary statistics of Grape, a pipeline used for processing and
analyzing RNA-Seq data."""
setup(name='raisin.restyler',
version=version,
description="A package used in the Raisin web application",
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Topic :: Software Development :: Libraries :: Python Modules',
'Operating System :: POSIX :: Linux'],
keywords='RNA-Seq pipeline ngs transcriptome bioinformatics ETL',
author='Maik Roder',
author_email='maikroeder@gmail.com',
url='http://big.crg.cat/services/grape',
license='GPL',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages = ['raisin'],
package_data = {'raisin.restyler':['templates/*.pt']},
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
'configobj',
'zope.pagetemplate'
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
rna-seq/raisin.restyler
|
setup.py
|
Python
|
gpl-3.0
| 1,531 | 0.005225 |
sum = 1
curr = 3
for width in xrange(3,1002,2):
inc = width - 1
sum = sum + curr #bottom right
curr = curr + inc
sum = sum + curr #bottom left
curr = curr + inc
sum = sum + curr #top left
curr = curr + inc
sum = sum + curr #top right
curr = curr + inc + 2
print sum
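# The loop above accumulates the four corners of each ring incrementally.
# Equivalently, for an odd ring of side n (n >= 3) the corners are n^2,
# n^2-(n-1), n^2-2(n-1) and n^2-3(n-1), which sum to 4*n^2 - 6*n + 6, so the
# total is 1 + sum over n = 3, 5, ..., 1001 of (4*n^2 - 6*n + 6).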
|
kbrose/project_euler
|
p20-29/p28.py
|
Python
|
unlicense
| 304 | 0.032895 |
"""
Defines a class that represents a regularly sampled lightcurve
"""
import sys
import numpy as np
import splreg
import pycs.gen.spl
import copy as pythoncopy
import scipy.optimize as spopt
class rslc():
"""
A regularly sampled lightcurve, typically obtained by regression.
To make such a rslc from a usual lightcurve object, look at the factory function below.
One idea is that we want to be able to add and subtract those, propagating errors.
There is no "microlensing" or similar stuff -- only time shifts.
"""
def __init__(self, jds, mags, magerrs, pad, pd, timeshift=0.0, name="Name", plotcolour="black"):
self.jds = jds
self.mags = mags
self.magerrs = magerrs
self.plotcolour = plotcolour
self.name = name
self.timeshift = timeshift
self.pad = pad
self.pd = pd
def __str__(self):
retstr = "[RS:%s]" % (self.name)
if self.timeshift != 0.0:
retstr += "(%.3f)" % (self.timeshift)
return retstr
def shifttime(self, timeshift):
self.timeshift += timeshift
def copy(self):
return pythoncopy.deepcopy(self)
def getjds(self):
return self.jds + self.timeshift
def getmags(self):
return self.mags
def getmagerrs(self):
return self.magerrs
def mask(self, maxmagerr = 0.1, target = 20.0):
self.magerrs[self.magerrs > maxmagerr] = target
def wtv(self, method = "weights"):
"""
Return some weighted average variation WAV.
		Usually called on a "difference" lightcurve.
"""
#return np.sum(np.fabs(self.mags[1:] - self.mags[:-1]))
#mask = self.magerrs < maxmagerr
if method == "weights":
dys = self.mags[1:] - self.mags[:-1]
dyws = 1.0 / (0.5*(self.magerrs[1:] + self.magerrs[:-1]))
out = np.sum(np.fabs(dys) * dyws) / np.sum(dyws)
if method == "simple":
out = np.sum(np.fabs(self.mags[1:] - self.mags[:-1]))
return out
def factory(l, pad=300, pd=2, plotcolour=None,knotstep=20.0, n=None, stab=True,stabext=300.0, stabgap=20.0, stabstep=5.0,
stabmagerr=-2.0, stabrampsize=0, stabrampfact=1.0, bokit=1, bokeps=2.0, boktests=5,
bokwindow=None, k=3, verbose=True):
"""
Give me a lightcurve, I return a regularly sampled light curve, by performing some spline regression.
!!! New: I also return the spline used for the regression
:param pad: the padding, in days
:param pd: the point density, in points per days.
The points live on a regular grid in julian days, 0.0, 0.1, 0.2, 0.3 ...
"""
if plotcolour == None:
plotcolour = l.plotcolour
name = l.object
jds = l.jds.copy()
timeshift = l.timeshift
mags = l.getmags(noml=True)
magerrs = l.getmagerrs()
minjd = np.round(jds[0] - pad)
maxjd = np.round(jds[-1] + pad)
npts = int(maxjd - minjd)*pd
rsjds = np.linspace(minjd, maxjd, npts) # rs for regularly sampled
# The regression itself
mean_mag = np.mean(mags)
def meanprior(query):
return (0.0 * query + mean_mag)
regfct,spline = splreg.splreg(jds, mags, magerrs,knotstep=knotstep, n=n, stab=stab, stabext=stabext, stabgap=stabgap, stabstep=stabstep,
stabmagerr=stabmagerr, stabrampsize=stabrampsize, stabrampfact=stabrampfact, bokit=bokit, bokeps=bokeps,
boktests=boktests,bokwindow=bokwindow, k=k, verbose=verbose)
(rsmags, rsmagerrs) = regfct(rsjds)
return rslc(rsjds, rsmags, rsmagerrs, pad, pd, timeshift=timeshift, name=name, plotcolour=plotcolour),spline
def subtract(rs1, rs2):
"""
I subtract rs2 from rs1.
This means I keep the jds and timeshift of rs1, and only change the mags and magerrs,
interpolating rs2.
I return a brand new rslc object, that has no timeshift (as we do not care about a timeshift, for a difference).
:param rs1:
:type rs1: rslc object
:param rs2:
:type rs2: rslc object
"""
newjds = rs1.getjds()
newmags = rs1.mags.copy()
newmagerrs = rs1.magerrs.copy()
newpad = rs1.pad
newpd = rs1.pd
newname = "%s(%+.1f)-%s(%+.1f)" % (rs1.name, rs1.timeshift, rs2.name, rs2.timeshift)
# We interpolate rs2 at the positions of rs1
newrs2mags = np.interp(rs1.getjds(), rs2.getjds(), rs2.mags, left=np.nan, right=np.nan)
newrs2magerrs = np.interp(rs1.getjds(), rs2.getjds(), rs2.magerrs, left=np.nan, right=np.nan)
	# These arrays contain NaN at one of their ends.
newmags -= newrs2mags
newmagerrs = np.sqrt(rs1.magerrs*rs1.magerrs + newrs2magerrs*newrs2magerrs)
# The NaN are now propagated in newmags and newmagerrs
# We cut them :
nanmask = np.isnan(newmags)
#nnan = np.sum(nanmask)
#print nnan/newpd
newjds = newjds[nanmask == False]
newmags = newmags[nanmask == False]
newmagerrs = newmagerrs[nanmask == False]
return rslc(newjds, newmags, newmagerrs, newpad, newpd, timeshift=0.0, name=newname, plotcolour="black")
def wtvdiff(rs1, rs2, method):
"""
Returns the wtv (weighted TV) of the difference between 2 curves.
This is symmetric (no change if you invert rs1 and rs2), up to some small numerical errors.
"""
out = subtract(rs1, rs2).wtv(method)
#print out
return float(out)
def bruteranges(step, radius, center):
"""
Auxiliary function for brute force exploration.
Prepares the "ranges" parameter to be passed to brute force optimizer
In other words, we draw a cube ...
radius is an int saying how many steps to go left and right of center.
	center is an array of the centers; it can be of any length.
	You explore 2*radius + 1 values per parameter, so radius=2 means 5 values and thus 125 calls for 4 curves.
"""
low = - step * radius
up = step * (radius+1)
if center.shape == ():
c = float(center)
return [((c+low),(c+up),step)]
else:
return [((c+low),(c+up),step) for c in center]
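# For example, bruteranges(5, 3, np.array([0.0, 10.0])) gives
# [(-15.0, 20.0, 5), (-5.0, 30.0, 5)]: each parameter is explored from
# center - step*radius up to (but excluding) center + step*(radius+1),
# i.e. 2*radius + 1 = 7 values per parameter in this case.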
def opt_ts(rslcs, method="weights", verbose=True):
"""
I optimize the timeshifts between the rslcs to minimize the wtv between them.
Note that even if the wtvdiff is only about two curves, we cannot split this into optimizing
	AB AC AD in a row, as this would never calculate BC, and BC is not contained in AB + AC.
!!! New : I also return a spline to optimise the magshifts
:param rslcs: a list of rslc objects
"""
rslcsc = [rs.copy() for rs in rslcs] # We'll work on copies.
# No need for reverse combis, as wtvdiff is symmetric.
#couplelist = [couple for couple in [[rs1, rs2] for rs1 in rslcsc for rs2 in rslcsc] if couple[0] != couple[1]]
indexes = np.arange(len(rslcsc))
indlist = [c for c in [[i1, i2] for i1 in indexes for i2 in indexes] if c[1] > c[0]]
couplelist = [[rslcsc[i1], rslcsc[i2]] for (i1, i2) in indlist]
# So the elements in couplelist are the SAME as those from rslcsc
inishifts = np.array([rs.timeshift for rs in rslcsc[1:]]) # We won't move the first curve.
def errorfct(timeshifts):
if timeshifts.shape == ():
timeshifts = np.array([timeshifts])
for (rs, timeshift) in zip(rslcsc[1:], timeshifts):
rs.timeshift = timeshift
tvs = np.array([wtvdiff(rs1, rs2, method=method) for (rs1, rs2) in couplelist])
ret = np.sum(tvs)
#if verbose:
# print timeshifts, ret
return ret
if verbose:
print "Starting time shift optimization ..."
print "Initial pars (shifts, not delays) : ", inishifts
# Some brute force exploration, like for the dispersion techniques ...
res = spopt.brute(errorfct, bruteranges(5,3,inishifts), full_output = 0, finish=None)
# This would finish by default with fmin ... we do not want that.
if verbose:
print "Brute 1 shifts : %s" % res
print "Brute 1 errorfct : %f" % errorfct(res)
res = spopt.brute(errorfct, bruteranges(2.5,3,res), full_output = 0, finish=None)
if verbose:
print "Brute 2 shifts : %s" % res
print "Brute 2 errorfct : %f" % errorfct(res)
res = spopt.brute(errorfct, bruteranges(1.25,3,res), full_output = 0, finish=None)
if verbose:
print "Brute 3 shifts : %s" % res
print "Brute 3 errorfct : %f" % errorfct(res)
res = spopt.brute(errorfct, bruteranges(0.5,3,res), full_output = 0, finish=None)
if verbose:
print "Brute 4 shifts : %s" % res
print "Brute 4 errorfct : %f" % errorfct(res)
minout = spopt.fmin_powell(errorfct, res, xtol=0.001, full_output=1, disp=verbose)
#minout = spopt.fmin_bfgs(errorfct, inishifts, maxiter=None, full_output=1, disp=verbose, retall=0, callback=None)
popt = minout[0]
minwtv = errorfct(popt) # This sets popt, and the optimal ML and source.
if verbose:
print "Final shifts : %s" % popt
print "Final errorfct : %f" % minwtv
# We set the timeshifts of the originals :
for (origrs, rs) in zip(rslcs[1:], rslcsc[1:]):
origrs.timeshift = rs.timeshift
return minwtv
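# A minimal usage sketch (the lightcurve objects lc_a and lc_b are assumed to
# be pycs lightcurves; the keyword values are illustrative only):
#
#     rs_a, spline_a = factory(lc_a, pad=300, pd=2)
#     rs_b, spline_b = factory(lc_b, pad=300, pd=2)
#     minwtv = opt_ts([rs_a, rs_b], method="weights")
#
# After opt_ts returns, rs_b.timeshift holds the optimized shift relative to
# rs_a; the first curve in the list is never moved.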
|
COSMOGRAIL/PyCS
|
pycs/spldiff/rslc.py
|
Python
|
gpl-3.0
| 8,490 | 0.049706 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/contrib/lite/toco/toco_flags.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.contrib.lite.toco import types_pb2 as tensorflow_dot_contrib_dot_lite_dot_toco_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/contrib/lite/toco/toco_flags.proto',
package='toco',
syntax='proto2',
serialized_pb=_b('\n-tensorflow/contrib/lite/toco/toco_flags.proto\x12\x04toco\x1a(tensorflow/contrib/lite/toco/types.proto\"\x92\x03\n\tTocoFlags\x12&\n\x0cinput_format\x18\x01 \x01(\x0e\x32\x10.toco.FileFormat\x12\'\n\routput_format\x18\x02 \x01(\x0e\x32\x10.toco.FileFormat\x12.\n\x14inference_input_type\x18\x0b \x01(\x0e\x32\x10.toco.IODataType\x12(\n\x0einference_type\x18\x04 \x01(\x0e\x32\x10.toco.IODataType\x12\x1a\n\x12\x64\x65\x66\x61ult_ranges_min\x18\x05 \x01(\x02\x12\x1a\n\x12\x64\x65\x66\x61ult_ranges_max\x18\x06 \x01(\x02\x12\x17\n\x0f\x64rop_fake_quant\x18\x07 \x01(\x08\x12!\n\x19reorder_across_fake_quant\x18\x08 \x01(\x08\x12\x18\n\x10\x61llow_custom_ops\x18\n \x01(\x08\x12\x1f\n\x17\x64rop_control_dependency\x18\x0c \x01(\x08\x12+\n#debug_disable_recurrent_cell_fusion\x18\r \x01(\x08*\\\n\nFileFormat\x12\x17\n\x13\x46ILE_FORMAT_UNKNOWN\x10\x00\x12\x17\n\x13TENSORFLOW_GRAPHDEF\x10\x01\x12\n\n\x06TFLITE\x10\x02\x12\x10\n\x0cGRAPHVIZ_DOT\x10\x03')
,
dependencies=[tensorflow_dot_contrib_dot_lite_dot_toco_dot_types__pb2.DESCRIPTOR,])
_FILEFORMAT = _descriptor.EnumDescriptor(
name='FileFormat',
full_name='toco.FileFormat',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FILE_FORMAT_UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TENSORFLOW_GRAPHDEF', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TFLITE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GRAPHVIZ_DOT', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=502,
serialized_end=594,
)
_sym_db.RegisterEnumDescriptor(_FILEFORMAT)
FileFormat = enum_type_wrapper.EnumTypeWrapper(_FILEFORMAT)
FILE_FORMAT_UNKNOWN = 0
TENSORFLOW_GRAPHDEF = 1
TFLITE = 2
GRAPHVIZ_DOT = 3
_TOCOFLAGS = _descriptor.Descriptor(
name='TocoFlags',
full_name='toco.TocoFlags',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input_format', full_name='toco.TocoFlags.input_format', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_format', full_name='toco.TocoFlags.output_format', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inference_input_type', full_name='toco.TocoFlags.inference_input_type', index=2,
number=11, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inference_type', full_name='toco.TocoFlags.inference_type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_ranges_min', full_name='toco.TocoFlags.default_ranges_min', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_ranges_max', full_name='toco.TocoFlags.default_ranges_max', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='drop_fake_quant', full_name='toco.TocoFlags.drop_fake_quant', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reorder_across_fake_quant', full_name='toco.TocoFlags.reorder_across_fake_quant', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allow_custom_ops', full_name='toco.TocoFlags.allow_custom_ops', index=8,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='drop_control_dependency', full_name='toco.TocoFlags.drop_control_dependency', index=9,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='debug_disable_recurrent_cell_fusion', full_name='toco.TocoFlags.debug_disable_recurrent_cell_fusion', index=10,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=500,
)
_TOCOFLAGS.fields_by_name['input_format'].enum_type = _FILEFORMAT
_TOCOFLAGS.fields_by_name['output_format'].enum_type = _FILEFORMAT
_TOCOFLAGS.fields_by_name['inference_input_type'].enum_type = tensorflow_dot_contrib_dot_lite_dot_toco_dot_types__pb2._IODATATYPE
_TOCOFLAGS.fields_by_name['inference_type'].enum_type = tensorflow_dot_contrib_dot_lite_dot_toco_dot_types__pb2._IODATATYPE
DESCRIPTOR.message_types_by_name['TocoFlags'] = _TOCOFLAGS
DESCRIPTOR.enum_types_by_name['FileFormat'] = _FILEFORMAT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TocoFlags = _reflection.GeneratedProtocolMessageType('TocoFlags', (_message.Message,), dict(
DESCRIPTOR = _TOCOFLAGS,
__module__ = 'tensorflow.contrib.lite.toco.toco_flags_pb2'
# @@protoc_insertion_point(class_scope:toco.TocoFlags)
))
_sym_db.RegisterMessage(TocoFlags)
# @@protoc_insertion_point(module_scope)
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/tensorflow/contrib/lite/toco/toco_flags_pb2.py
|
Python
|
mit
| 8,125 | 0.004062 |
""" crypto.aes
AES Encryption Algorithm
The AES algorithm is just Rijndael algorithm restricted to the default
blockSize of 128 bits.
Copyright (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
2002-06-01
"""
from crypto.cipher.rijndael import Rijndael
from crypto.cipher.base import BlockCipher, padWithPadLen, noPadding
from crypto.errors import BadKeySizeError
class AES(Rijndael):
""" The AES algorithm is the Rijndael block cipher restricted to block
sizes of 128 bits and key sizes of 128, 192 or 256 bits
"""
def __init__(self, key = None, padding = padWithPadLen(), keySize=16):
""" Initialize AES, keySize is in bytes """
if not (keySize == 16 or keySize == 24 or keySize == 32) :
raise BadKeySizeError, 'Illegal AES key size, must be 16, 24, or 32 bytes'
Rijndael.__init__( self, key, padding=padding, keySize=keySize, blockSize=16 )
self.name = 'AES'
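# A minimal usage sketch, assuming the encrypt/decrypt interface inherited
# from BlockCipher; the key and plaintext values are illustrative only:
#
#     key = 16 * '\x2a'              # 16 bytes -> AES-128
#     alg = AES(key, keySize=16)
#     ct  = alg.encrypt('some plaintext')
#     pt  = alg.decrypt(ct)          # == 'some plaintext'
#
# Key sizes of 24 or 32 bytes select AES-192/AES-256; any other keySize
# raises BadKeySizeError.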
|
realms-team/basestation-fw
|
libs/smartmeshsdk-REL-1.3.0.1/external_libs/cryptopy/crypto/cipher/aes.py
|
Python
|
bsd-3-clause
| 1,002 | 0.015968 |
from distutils.core import setup
setup(
name = 'morpheusapi',
packages = ['morpheusapi'],
version = '2.11.1',
description = 'A python wrapper for Morpheus APIs',
author = 'Adam Hicks',
author_email = 'thomas.adam.hicks@gmail.com',
url = 'https://github.com/tadamhicks/morpheus-python',
download_url = 'https://github.com/tadamhicks/morpheus-python/archive/2.11.1.tar.gz',
keywords = ['morpheus', 'api', 'morpheus data'],
classifiers = [],
)
|
tadamhicks/morpheus-python
|
setup.py
|
Python
|
mit
| 480 | 0.04375 |
#! python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# This module implements a loop back connection receiving itself what it sent.
#
# The purpose of this module is.. well... You can run the unit tests with it.
# and it was so easy to implement ;-)
#
# (C) 2001-2011 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
#
# URL format: loop://[option[/option...]]
# options:
# - "debug" print diagnostic messages
from serial.serialutil import *
import threading
import time
import logging
# map log level names to constants. used in fromURL()
LOGGER_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
}
class LoopbackSerial(SerialBase):
"""Serial port implementation that simulates a loop back connection in plain software."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def open(self):
"""\
Open port with current settings. This may throw a SerialException
if the port cannot be opened.
"""
if self._isOpen:
raise SerialException("Port is already open.")
self.logger = None
self.buffer_lock = threading.Lock()
self.loop_buffer = bytearray()
self.cts = False
self.dsr = False
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
# not that there is anything to open, but the function applies the
# options found in the URL
self.fromURL(self.port)
        # not that there is anything to configure...
self._reconfigurePort()
# all things set up get, now a clean start
self._isOpen = True
if not self._rtscts:
self.setRTS(True)
self.setDTR(True)
self.flushInput()
self.flushOutput()
def _reconfigurePort(self):
"""\
Set communication parameters on opened port. For the loop://
protocol all settings are ignored!
"""
        # not that it's of any real use, but it helps in the unit tests
if not isinstance(self._baudrate, (int, long)) or not 0 < self._baudrate < 2**32:
raise ValueError("invalid baudrate: %r" % (self._baudrate))
if self.logger:
self.logger.info('_reconfigurePort()')
def close(self):
"""Close port"""
if self._isOpen:
self._isOpen = False
# in case of quick reconnects, give the server some time
time.sleep(0.3)
def makeDeviceName(self, port):
raise SerialException("there is no sensible way to turn numbers into URLs")
def fromURL(self, url):
"""extract host and port from an URL string"""
if url.lower().startswith("loop://"): url = url[7:]
try:
# process options now, directly altering self
for option in url.split('/'):
if '=' in option:
option, value = option.split('=', 1)
else:
value = None
if not option:
pass
elif option == 'logging':
logging.basicConfig() # XXX is that good to call it here?
self.logger = logging.getLogger('pySerial.loop')
self.logger.setLevel(LOGGER_LEVELS[value])
self.logger.debug('enabled logging')
else:
raise ValueError('unknown option: %r' % (option,))
except ValueError, e:
raise SerialException('expected a string in the form "[loop://][option[/option...]]": %s' % e)
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
if not self._isOpen: raise portNotOpenError
if self.logger:
# attention the logged value can differ from return value in
# threaded environments...
self.logger.debug('inWaiting() -> %d' % (len(self.loop_buffer),))
return len(self.loop_buffer)
def read(self, size=1):
"""\
Read size bytes from the serial port. If a timeout is set it may
        return fewer characters than requested. With no timeout it will block
until the requested number of bytes is read.
"""
if not self._isOpen: raise portNotOpenError
if self._timeout is not None:
timeout = time.time() + self._timeout
else:
timeout = None
data = bytearray()
while size > 0:
self.buffer_lock.acquire()
try:
block = to_bytes(self.loop_buffer[:size])
del self.loop_buffer[:size]
finally:
self.buffer_lock.release()
data += block
size -= len(block)
# check for timeout now, after data has been read.
# useful for timeout = 0 (non blocking) read
if timeout and time.time() > timeout:
break
return bytes(data)
def write(self, data):
"""\
Output the given string over the serial port. Can block if the
connection is blocked. May raise SerialException if the connection is
closed.
"""
if not self._isOpen: raise portNotOpenError
# ensure we're working with bytes
data = to_bytes(data)
        # calculate the approximate time that would be used to send the data
time_used_to_send = 10.0*len(data) / self._baudrate
# when a write timeout is configured check if we would be successful
# (not sending anything, not even the part that would have time)
if self._writeTimeout is not None and time_used_to_send > self._writeTimeout:
time.sleep(self._writeTimeout) # must wait so that unit test succeeds
raise writeTimeoutError
self.buffer_lock.acquire()
try:
self.loop_buffer += data
finally:
self.buffer_lock.release()
return len(data)
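    # Note on the timing model used in write() above: every byte is accounted
    # as 10 bit times (start bit + 8 data bits + stop bit), so for example
    # writing 960 bytes at 9600 baud counts as 10 * 960 / 9600 = 1.0 second
    # when checking against the write timeout.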
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('flushInput()')
self.buffer_lock.acquire()
try:
del self.loop_buffer[:]
finally:
self.buffer_lock.release()
def flushOutput(self):
"""\
Clear output buffer, aborting the current output and
discarding all that is in the buffer.
"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('flushOutput()')
def sendBreak(self, duration=0.25):
"""\
Send break condition. Timed, returns to idle state after given
duration.
"""
if not self._isOpen: raise portNotOpenError
def setBreak(self, level=True):
"""\
        Set break: Controls TXD. When active, no transmitting is
possible.
"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('setBreak(%r)' % (level,))
def setRTS(self, level=True):
"""Set terminal status line: Request To Send"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('setRTS(%r) -> state of CTS' % (level,))
self.cts = level
def setDTR(self, level=True):
"""Set terminal status line: Data Terminal Ready"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('setDTR(%r) -> state of DSR' % (level,))
self.dsr = level
def getCTS(self):
"""Read terminal status line: Clear To Send"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('getCTS() -> state of RTS (%r)' % (self.cts,))
return self.cts
def getDSR(self):
"""Read terminal status line: Data Set Ready"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('getDSR() -> state of DTR (%r)' % (self.dsr,))
return self.dsr
def getRI(self):
"""Read terminal status line: Ring Indicator"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('returning dummy for getRI()')
return False
def getCD(self):
"""Read terminal status line: Carrier Detect"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('returning dummy for getCD()')
return True
# - - - platform specific - - -
# None so far
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. for Python 2.6 and newer, that provide the new I/O
# library, derive from io.RawIOBase
try:
import io
except ImportError:
# classic version with our own file-like emulation
class Serial(LoopbackSerial, FileLike):
pass
else:
# io library present
class Serial(LoopbackSerial, io.RawIOBase):
pass
# simple client test
if __name__ == '__main__':
import sys
s = Serial('loop://')
sys.stdout.write('%s\n' % s)
sys.stdout.write("write...\n")
s.write("hello\n")
s.flush()
sys.stdout.write("read: %s\n" % s.read(5))
s.close()
|
masahir0y/barebox-yamada
|
scripts/serial/urlhandler/protocol_loop.py
|
Python
|
gpl-2.0
| 9,646 | 0.002281 |
# Import the helper gateway class
from AfricasTalkingGateway import AfricasTalkingGateway, AfricasTalkingGatewayException
from twitment.search import ClassTwitter
# Specify your login credentials
class SMS(object):
def __init__(self):
pass
def send(self,num):
sendtweet_obj = ClassTwitter()
x = sendtweet_obj.wordFrequency.wordslist
username = "CATHERINERAKAMA"
apikey = "676dbd926bbb04fa69ce90ee81d3f5ffee2692aaf80eb5793bd70fe93e77dc2e"
# Specify the numbers that you want to send to in a comma-separated list
# Please ensure you include the country code (+254 for Kenya)
to = num
# And of course we want our recipients to know what we really do
message = x
# Create a new instance of our awesome gateway class
gateway = AfricasTalkingGateway(username, apikey)
# Any gateway errors will be captured by our custom Exception class below,
# so wrap the call in a try-catch block
try:
# Thats it, hit send and we'll take care of the rest.
results = gateway.sendMessage(to, message)
for recipient in results:
# status is either "Success" or "error message"
                print 'Message sent to number=%s;status=%s' % (
                    recipient['number'], recipient['status'])
except AfricasTalkingGatewayException, e:
print 'Encountered an error while sending: %s' % str(e)
|
crakama/bc_7_twitment
|
twitment/sendSMS.py
|
Python
|
mit
| 1,600 | 0.005625 |
# Author: Bala Venkatesan
# License: Apache 2.0
########################################################################
# Wrote this file to separate out the loading of the data from the
# python file where the actual display happens
########################################################################
import pandas as pd
import csv
########################################################################
# Loading data
########################################################################
statefile = open('./annual_averages_by_state.csv', 'r')
csvreader = csv.reader(statefile)
########################################################################
# initializing a dataframe to parse only required data from file
########################################################################
columns = ["STATE",
"TOTAL_POPULATION",
"WORKFORCE",
"WORK_%_OF_POP",
"EMPLOYED",
"EMP_%_OF_POP",
"UNEMPLOYED",
"UNEMPLOMENT_RATE",
]
data = []
rowIndex = 0
########################################################################
# function that parses the state data for 2012 & 2013 and returns
# a DataFrame with the data read from the file
# the function cleans the data before returning the DataFrame
########################################################################
def state_data():
for row in csvreader:
        #######################################################################################
        # Clean up each row: the first cell is the state name (which may contain
        # more than one word), the remaining cells are numbers that may contain
        # thousands separators.
        #######################################################################################
        statename = row[0]
        # Initializing the list of values for this DataFrame row.
        values = []
        for index, x in enumerate(row):
            print statename
            print x
            if index == 0:
                values.append(statename.upper())
            else:
                values.append(x.replace(",", ""))
        data.append(values)
df = pd.DataFrame(data,columns=columns)
return df
if __name__ == '__main__':
print state_data()
|
OSHADataDoor/OshaBokeh
|
bokehsamples/osha_files.py
|
Python
|
apache-2.0
| 2,378 | 0.004626 |
# coding: utf-8
# Copyright (c) 2012, SciELO <scielo-dev@googlegroups.com>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import re
from django import forms
from django.utils.translation import ugettext_lazy as _
class ISSNField(forms.CharField):
default_error_messages = {
'invalid': _('Enter a valid ISSN.')
}
regex = r'[0-9]{4}-[0-9]{3}[0-9X]{1}$'
def clean(self, value):
if value is not u'' and value is not None:
result = re.match(self.regex, value)
if result is None:
raise forms.ValidationError(self.error_messages['invalid'])
return value
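    # Examples of what the pattern above accepts and rejects:
    #   '0102-3306' -> valid (returned unchanged)
    #   '2178-460X' -> valid (the check digit may be an upper-case 'X')
    #   '01023306'  -> raises ValidationError (missing hyphen)
    #   '0102-330x' -> raises ValidationError (lower-case check digit)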
|
scieloorg/scielo-django-extensions
|
scielo_extensions/formfields.py
|
Python
|
bsd-2-clause
| 1,891 | 0.006875 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSdmtools(RPackage):
"""Species Distribution Modelling Tools: Tools for processing data
associated with species distribution modelling exercises
    This package provides a set of tools for post-processing the outcomes of
species distribution modeling exercises."""
homepage = "https://cloud.r-project.org/package=SDMTools"
url = "https://cloud.r-project.org/src/contrib/SDMTools_1.1-221.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/SDMTools"
version('1.1-221.1', sha256='3825856263bdb648ca018b27dc6ab8ceaef24691215c197f8d5cd17718b54fbb')
version('1.1-221', sha256='a6da297a670f756ee964ffd99c3b212c55c297d385583fd0e767435dd5cd4ccd')
version('1.1-20', sha256='d6a261ce8f487d5d03b1931039f528f2eb50fb9386e7aae40045c966ff6d4182')
version('1.1-13', sha256='02d94977bfa2f41f1db60e619335ac0ea8109dd98108ff9d21a412f7c4a14a2e')
version('1.1-12', sha256='6dc4a8a046e7fced190402f39a9bae6f863e08c320f0881367c022b2f220f14b')
version('1.1-11', sha256='1caf8fa1914ad6921d76e7b22a8c25cfe55892b0d21aef3b2a7b8f5b79b9388b')
depends_on('r-r-utils', type=('build', 'run'))
|
rspavel/spack
|
var/spack/repos/builtin/packages/r-sdmtools/package.py
|
Python
|
lgpl-2.1
| 1,363 | 0.005869 |
#/u/Goldensights
import praw
import time
import datetime
'''USER CONFIG'''
USERNAME = ""
#This is the bot's Username. In order to send mail, he must have some amount of Karma.
PASSWORD = ""
#This is the bot's Password.
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
MAXPOSTS = 1000
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 30
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
PRINTFILE = 'messages.txt'
#This is the file, in the same directory as the .py file, where the messages are stored
SUBJECTLINE = "Newsletterly"
ITEMTYPE = 't4'
#The type of item to gather. t4 is a PM
'''All done!'''
WAITS = str(WAIT)
try:
import bot #This is a file in my python library which contains my Bot's username and password. I can push code to Git without showing credentials
USERNAME = bot.uG
PASSWORD = bot.pG
USERAGENT = bot.aG
except ImportError:
pass
r = praw.Reddit(USERAGENT)
r.login(USERNAME, PASSWORD)
def work():
unread = r.get_unread(limit=MAXPOSTS)
results = []
for message in unread:
if ITEMTYPE in message.fullname:
print(message.id, message.subject, end=" ")
if SUBJECTLINE.lower() in message.subject.lower():
print(message.body)
messagedate = datetime.datetime.utcfromtimestamp(message.created_utc)
messagedate = datetime.datetime.strftime(messagedate, "%B %d %Y %H:%M UTC")
results += [message.fullname + " : " + message.author.name, messagedate, message.body, "\n\n"]
else:
print()
message.mark_as_read()
logfile = open(PRINTFILE, "a")
for result in results:
print(result, file=logfile)
logfile.close()
while True:
try:
work()
except Exception as e:
print('An error has occured:', str(e))
print('Running again in ' + WAITS + ' seconds \n')
time.sleep(WAIT)
|
tehp/reddit
|
MessageArchiveSimple/messagearchivesimple.py
|
Python
|
mit
| 1,943 | 0.021101 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""while_v2 and gradient.
This is a version of while_loop that emits a single While op, as well as the
gradient function for While ops produced by while_loop. This will eventually
replace the current tf.while_loop implementation once it reaches feature and
performance parity.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import function_def_to_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2 as util
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
# pylint: disable=protected-access
# TODO(b/79881896): Handle external control dependencies. tf.while_loop allows
# control dependencies on external nodes with at least 1 output.
# Another idea is to create const nodes outside the loop and add control edges
# to them and then pass those in as data inputs. This should probably be
# handled in the CapturingGraph itself.
def while_loop(cond,
body,
loop_vars,
shape_invariants=None,
maximum_iterations=None,
name=None,
return_same_structure=True):
"""Like tf.while_loop, except emits a single While op."""
maximum_iterations = _validate_and_convert_to_tensor(maximum_iterations)
# Keep the original loop_vars around to know which args were TensorArrays.
orig_loop_vars = loop_vars
# Cache its length since we use it at multiple places below.
len_orig_loop_vars = len(orig_loop_vars)
# Convert TensorArrays to their flow variables. These get converted back to
# TensorArrays before calling `cond` and `body`. See `wrapped_cond` and
# `wrapped_body` below.
loop_vars = list(_tensor_array_to_flow(orig_loop_vars))
loop_vars = nest.map_structure(
ops.internal_convert_to_tensor_or_indexed_slices, loop_vars)
if shape_invariants is not None:
nest.assert_same_structure(orig_loop_vars, shape_invariants)
else:
shape_invariants = nest.map_structure(lambda t: t.shape, loop_vars)
if not name:
name = "while"
with ops.name_scope(name) as scope:
with ops.name_scope(None):
cond_name = util.unique_fn_name(scope, "cond")
body_name = util.unique_fn_name(scope, "body")
loop_counter = constant_op.constant(
0,
dtype=maximum_iterations.dtype
if maximum_iterations is not None else None,
name="loop_counter")
# Add loop counter needed for computing gradients.
loop_vars = [loop_counter] + loop_vars
shape_invariants = type(shape_invariants)([tensor_shape.scalar()
]) + shape_invariants
# Automatic control dependencies are added in defuns, but not in v1
# graphs. Propagate that behavior here.
add_control_dependencies = ops.get_default_graph()._add_control_dependencies
# Build a `cond` wrapper that can handle the extra counter loop_var.
def wrapped_cond(loop_counter, *args):
# Convert the flow variables in `args` to TensorArrays. `args` should
# already have the same structure as `orig_loop_vars` but currently there
# is no nest.zip so we call `_pack_sequence_as` which flattens both
# `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays
# and packs it into the structure of `orig_loop_vars`.
if maximum_iterations is None:
return cond(*_pack_sequence_as(orig_loop_vars, args))
else:
return math_ops.logical_and(
loop_counter < maximum_iterations,
cond(*_pack_sequence_as(orig_loop_vars, args)))
cond_graph = func_graph_module.func_graph_from_py_func(
cond_name,
wrapped_cond,
loop_vars, {},
signature=_build_signature(loop_vars, shape_invariants),
func_graph=util.WhileCondFuncGraph(cond_name),
add_control_dependencies=add_control_dependencies)
# Add external_captures of cond to the list of loop vars.
# Note that external tensors will be treated as loop invariants, i.e.,
# the value of that tensor in each iteration is the same as it was at the
# beginning of the loop execution.
loop_vars = loop_vars + cond_graph.external_captures
shape_invariants = shape_invariants + type(shape_invariants)(
[t.shape for t in cond_graph.external_captures])
def wrapped_body(loop_counter, *args):
"""Loop body augmented with counter update.
Args:
loop_counter: Loop counter which needs to be incremented in the body.
*args: List of args
args[:len_orig_loop_vars] - Args for the original loop body.
args[len_orig_loop_vars:] - External captures of cond. These get
passed through as is.
Returns:
A list of tensors the same length as args.
"""
# Convert the flow variables in `args` to TensorArrays. `args` should
# already have the same structure as `orig_loop_vars` but currently there
# is no nest.zip so we call `_pack_sequence_as` which flattens both
# `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays
# and packs it into the structure of `orig_loop_vars`.
outputs = body(
*_pack_sequence_as(orig_loop_vars, args[:len_orig_loop_vars]))
if not nest.is_sequence(outputs):
outputs = [outputs]
# Compare the structure of input and output of body converting the
# top-level tuples to list to be compatible with legacy while_loop.
nest.assert_same_structure(list(outputs), list(orig_loop_vars))
outputs = _tensor_array_to_flow(outputs)
# Return the external_captures of cond_graph as is, i.e., treat them as
# loop invariants.
# TODO(srbs): Update lowering code to create _Enter nodes with
# is_constant=True for inputs that are directly passed to outputs.
return [loop_counter + 1] + list(outputs) + list(
args[len_orig_loop_vars:])
body_graph = func_graph_module.func_graph_from_py_func(
body_name,
wrapped_body,
loop_vars, {},
signature=_build_signature(loop_vars, shape_invariants),
func_graph=util.WhileBodyFuncGraph(body_name),
add_control_dependencies=add_control_dependencies)
# Add external captures of body to the list of loop vars.
# Note that external tensors will be treated as loop invariants, i.e.,
# the value of that tensor in each iteration is the same as it was at the
# beginning of the loop execution.
loop_vars = loop_vars + body_graph.external_captures
# TODO(srbs): Update lowering code to create _Enter nodes with
# is_constant=True for inputs that are directly passed to outputs.
body_graph.outputs.extend(body_graph.internal_captures)
# Capture `external_captures` of `body_graph` in `cond_graph` so that it
# expects to receive those as arguments.
# TODO(b/118457764): Dedup tensors that are captured in both the cond and
# body. This logic already exists in cond_v2.
with cond_graph.as_default():
for external_capture in body_graph.external_captures:
assert external_capture not in cond_graph.captures, (
"Looks like both cond and body are capturing the same tensor %s. "
"This is not supported yet. For now consider passing,"
" this as a loop variable." % str(external_capture))
cond_graph.capture(external_capture)
# Make sure that the shapes of the loop outputs are compatible with the
# shape invariants, or the shapes of the loop vars if the invariants are not
# specified.
num_flattened_outputs = len(nest.flatten(orig_loop_vars))
_check_shapes_compat(
body_graph.outputs[1:1 + num_flattened_outputs],
nest.flatten(shape_invariants[1:1 + len_orig_loop_vars]),
nest.flatten(loop_vars[1:1 + len_orig_loop_vars]))
flattened_loop_vars = nest.flatten(loop_vars)
_check_num_inputs_outputs(cond_graph, body_graph,
len(flattened_loop_vars))
outputs = gen_functional_ops._while(
flattened_loop_vars,
util.create_new_tf_function(cond_graph),
util.create_new_tf_function(body_graph),
output_shapes=[t.shape for t in body_graph.outputs],
name=scope)
_copy_handle_data(body_graph.outputs, outputs)
util.maybe_set_lowering_attr(outputs[0].op)
_maybe_set_maximum_iterations_attr(outputs[0].op, maximum_iterations)
# Return identities for each output of the While op, rather than the output
# of the While op directly. This makes pruning work if the output of
# while_loop() is fetched: the lowering pass converts the While outputs into
# IdentityN outputs, which if fetched will cause all ops in the body to be
# run (since it takes all exit ops as input). After lowering, each output
# identity op will end up with only the appropriate exit op as input.
outputs = tuple(array_ops.identity(t) for t in outputs)
# First var is loop counter.
outputs = _pack_sequence_as(orig_loop_vars,
outputs[1:1 + num_flattened_outputs])
if return_same_structure:
return outputs
flattened_outputs = nest.flatten(outputs)
if len(flattened_outputs) == 1:
return flattened_outputs[0]
else:
return outputs
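# A minimal usage sketch. while_v2 is internal TensorFlow API, so the import
# path below is an assumption and may differ between versions:
#
#     from tensorflow.python.ops import while_v2
#     i = constant_op.constant(0)
#     result = while_v2.while_loop(lambda i: i < 10, lambda i: i + 1, [i])
#
# This emits a single While op whose cond and body are serialized as
# functions, instead of the many ops produced by the legacy implementation.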
@ops.RegisterGradient("While")
def _WhileGrad(op, *grads): # pylint: disable=invalid-name
"""The gradient of a While op produced by while_loop."""
# Note that op is not always the same as while_op because the gradient tape,
# for eager mode compatibility, forgets information about the proper op. Since
# the loop cannot run in eager mode, however, we can safely introspect into
# the graph here.
while_op = op.outputs[0].op
cond_graph = _get_graph(while_op, "cond")
body_graph = _get_graph(while_op, "body")
orig_num_params = len(body_graph.outputs)
maximum_iterations = op.get_attr(
"_maximum_iterations") if _is_in_xla_context() else None
assert not _is_in_xla_context() or maximum_iterations is not None
# Set the incoming gradient of non-trainable inputs to None. It is possible
# that we receive non-None gradients for non-trainable types in nested while
# loops because we accumulate outputs of the inner while as variant tensors
# which are trainable and hence receive zeros_like tensors in the gradient
# pass. The non-trainable tensors then receive the popped zeros tensor from
# this zeros variant. The gradient for the loop vars corresponding to these
# tensors is None or zeros (this happens only if the loop var is accumulated
# as well) in _grad_fn so we reset these.
# TODO(b/118712257): Remove the IsTrainable filter once we can handle None
# output grads in _grad_fn.
grads = [
None if not _is_trainable(output) else grad
for grad, output in zip(grads, body_graph.outputs)
]
# We compute the gradient for the sub-graph between trainable ys and xs
# with non-None incoming gradients. We later pad the None's to the list of
# outputs.
ys, xs, non_none_grads = zip(*[(y, x, grad) for (y, x, grad) in zip(
body_graph.outputs, body_graph.inputs, grads) if grad is not None])
body_grad_graph, args = _create_grad_func(
ys, xs, non_none_grads, cond_graph, body_graph,
util.unique_grad_fn_name(body_graph.name), op, maximum_iterations)
if body_grad_graph.while_op_needs_rewrite:
# Modify 'op' to output the intermediate accumulators needed by the grad
# function.
# NOTE(skyewm): if there are any active sessions, this modification to `op`
# may make them unrunnable!
cond_graph.name += "_rewritten"
body_graph.name += "_rewritten"
new_inputs = body_grad_graph.empty_tensor_lists
new_outputs = body_graph.outputs[orig_num_params:]
while_op._set_func_attr("cond", util.create_new_tf_function(cond_graph))
while_op._set_func_attr("body", util.create_new_tf_function(body_graph))
while_op._set_type_list_attr("T", body_graph.output_types)
while_op._set_shape_list_attr("output_shapes", body_graph.output_shapes)
while_op._add_while_inputs(new_inputs)
while_op._add_outputs([t.dtype for t in new_outputs],
[t.shape for t in new_outputs])
_copy_handle_data(new_outputs, op.outputs[orig_num_params:])
captured_inputs = _resolve_grad_captures(body_graph, body_grad_graph,
while_op)
loop_vars = args + captured_inputs
def grad_cond(counter, max_iters, *unused_args):
return counter < max_iters
grad_cond_name = util.unique_grad_fn_name(op.get_attr("cond").name)
cond_grad_graph = func_graph_module.func_graph_from_py_func(
grad_cond_name, grad_cond, loop_vars, {},
func_graph=util.WhileCondFuncGraph(grad_cond_name))
_check_num_inputs_outputs(cond_grad_graph, body_grad_graph, len(loop_vars))
outputs = gen_functional_ops._while(
loop_vars,
util.create_new_tf_function(cond_grad_graph),
util.create_new_tf_function(body_grad_graph),
output_shapes=[t.shape for t in body_grad_graph.outputs],
name="%s_grad" % while_op.name)
_copy_handle_data(body_grad_graph.outputs, outputs)
util.maybe_set_lowering_attr(outputs[0].op)
_maybe_set_maximum_iterations_attr(outputs[0].op, maximum_iterations)
# See comment in while_loop.
outputs = [array_ops.identity(t) for t in outputs]
# Set None as the output gradient for tensors with None input gradient.
# outputs[0] is the loop counter.
# outputs[1] is the total number of loop iterations.
index = 2
none_padded_outputs = []
for g in grads:
if g is None:
none_padded_outputs.append(None)
else:
none_padded_outputs.append(outputs[index])
index += 1
return none_padded_outputs
def _is_trainable(tensor):
"""Returns whether the given tensor is trainable."""
if not gradients_impl.IsTrainable(tensor):
return False
# Special case: untrainable accumulator output. The gradients algorithm
# doesn't know about tensor lists of untrainable elements. In theory the
# tensor list gradient functions should return None as appropriate, but
# because we can't return None from the gradient function we filter out
# untrainable accumulator output here to avoid computing the gradient at all.
if tensor.op.type == "TensorListPopBack" and tensor.value_index == 0:
assert tensor.dtype == dtypes.variant
element_type = tensor.op.get_attr("element_dtype")
return gradients_impl.IsTrainable(element_type)
return True
def _validate_and_convert_to_tensor(maximum_iterations):
"""Checks that `maximum_iterations` is valid.
In XLA context, `maximum_iterations` is required and must be statically
inferable, e.g. output tensor of a Const node.
Args:
maximum_iterations: The maximum_iterations passed to while_loop.
Returns:
A scalar valued tensor of type int32 or None.
Raises:
ValueError: If `maximum_iterations` is invalid.
"""
if _is_in_xla_context():
if maximum_iterations is None:
raise ValueError("maximum_iterations is None. It is required and must "
"be statically known (e.g. a constant value or known "
"shape dimension) when building while_loop in XLA "
"context.")
if isinstance(maximum_iterations, ops.Tensor):
# Get the constant value from the `maximum_iterations` tensor to avoid
# capturing a Const tensor from outside this graph.
maximum_iterations = tensor_util.constant_value(maximum_iterations)
if maximum_iterations is None:
raise ValueError("maximum_iterations must be statically known (e.g. a "
"constant value or known shape dimension) when "
"building while_loop in XLA context.")
if maximum_iterations is not None:
# EmptyTensorList expects `max_num_elements` to be of type int32.
maximum_iterations = ops.convert_to_tensor(
maximum_iterations, dtype=dtypes.int32, name="maximum_iterations")
if maximum_iterations.shape.ndims != 0:
raise ValueError("maximum_iterations must be a scalar, saw shape: %s" %
maximum_iterations.shape)
return maximum_iterations
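# Illustrative behaviour sketch (not part of the original module; assumes a
# graph built outside an XLA context):
#
#   _validate_and_convert_to_tensor(None)                        # -> None
#   _validate_and_convert_to_tensor(10)                          # -> int32 scalar tensor "maximum_iterations"
#   _validate_and_convert_to_tensor(constant_op.constant([10]))  # raises ValueError (not a scalar)
#
# Inside an XLA context, passing None or a tensor whose value cannot be
# statically inferred raises ValueError as described in the docstring above.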
# TODO(srbs): Pull this into common utils for cond_v2 and while_v2.
def _get_graph(while_op, func_attr_name):
"""Returns `FuncGraph` for the given function attribute.
Args:
while_op: The While Operation.
func_attr_name: string
Returns:
`FuncGraph`
"""
# TODO(srbs): Handle TensorShapeProto in function_def_to_graph.input_shapes.
input_shapes = [
tensor_shape.TensorShape(s) for s in while_op.get_attr("output_shapes")
]
func_name = while_op.get_attr(func_attr_name).name
fdef = while_op.graph._get_function(func_name).definition
# `while_op.graph` may not be the same as `ops.get_default_graph()` e.g.
# if the `while_op` is in the body of another if/while/defun. We build the
# `func_graph` with `while_op.graph` as its `outer_graph`. This resembles how
# the `FuncGraph` was built in the forward pass. We need this so that we can
# appropriately capture references to outer tensors in the nested grad graphs.
with while_op.graph.as_default():
func_graph = function_def_to_graph.function_def_to_graph(fdef, input_shapes)
func_graph._while = while_op
return func_graph
def _create_grad_func(ys, xs, grads, cond_graph, body_graph, name, while_op,
max_iters):
"""Builds and returns the gradient FuncGraph of `func_graph` and its args.
The returned grad_func_graph must be called with the returned
args + grad_func_graph.captures.
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grads: The incoming grads for `ys`.
cond_graph: FuncGraph for the forward cond function.
body_graph: FuncGraph for the forward body function.
name: Name of the returned gradient function.
while_op: The forward While op.
max_iters: the maximum number of iterations, or None if no limit.
Returns:
2-tuple of (grad_func_graph, args).
"""
assert len(ys) == len(grads)
total_iters = while_op.outputs[0]
counter = constant_op.constant(
0, dtype=total_iters.dtype, name="grad_counter")
args = [counter, total_iters] + list(grads)
# Note: The returned function does not have `args` in the list of
# `external_captures`.
grad_func_graph = func_graph_module.func_graph_from_py_func(
name,
lambda *args: _grad_fn(ys, xs, args, body_graph),
args, {},
func_graph=_WhileBodyGradFuncGraph(name, cond_graph, body_graph,
max_iters))
# Add the popped accumulators to the list of outputs.
for internal_capture in grad_func_graph.internal_captures:
if internal_capture in grad_func_graph.popped_tensor_lists:
grad_func_graph.outputs.append(
grad_func_graph.popped_tensor_lists[internal_capture])
elif internal_capture.dtype == dtypes.resource:
grad_func_graph.outputs.append(internal_capture)
else:
raise ValueError("Tensor %s is in list of internal_captures but is"
" neither a resource nor is in popped_tensor_lists." %
str(internal_capture))
return grad_func_graph, args
def _grad_fn(ys, xs, args, func_graph):
"""Computes the gradient of `func_graph` in the current graph.
This function builds the gradient graph of the corresponding forward-pass
`func_graph` by differentiating `func_graph`'s outputs w.r.t. its inputs.
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
args: The input arguments.
args[0] - Loop counter
args[1] - Total number of iterations.
args[2:] - Incoming gradients for `ys`.
func_graph: function.FuncGraph. The corresponding forward-pass function.
Returns:
The output gradient Tensors.
"""
grad_ys = args[2:]
# Build the gradient graph. Note that this builds the gradient computation of
# func_graph in the current graph, which requires capturing tensors from
# func_graph. The captured func_graph tensors are resolved to external tensors
# after the forward While op has been rewritten in _resolve_grad_captures.
# TODO(srbs): Mark GradientsHelper as public?
grad_outs = gradients_impl._GradientsHelper(
ys, xs, grad_ys=grad_ys, src_graph=func_graph,
unconnected_gradients="zero")
# TODO(b/118712257): Handle the case when grad_outs has None's e.g. when there
# is a tf.StopGradient in the loop body.
assert all(g is not None for g in grad_outs)
counter = args[0]
total_iters = args[1]
return [counter + 1, total_iters] + grad_outs
def _resolve_grad_captures(body_graph, body_grad_graph, while_op):
"""Returns the tensors to pass as captured inputs to `body_grad_graph`.
`body_grad_graph` may have external references to:
1. Its outer graph containing the input gradients. These are left as-is.
2. Accumulators captured from the forward-pass graph. These should have been
added as `while_op` outputs after the gradient graph was built. We replace
these with the corresponding output of `while_op`, i.e. a tensor in
`body_graph.outer_graph`. In the case of nested control flow or functions,
the gradient logic handling `body_grad_graph.outer_graph` will make sure
the tensor from `body_graph.outer_graph` is also correctly captured.
Args:
body_graph: FuncGraph. The forward-pass body function.
body_grad_graph: FuncGraph. The body gradients function.
while_op: The forward-pass While Operation calling `body_graph`.
Returns:
A list of input tensors to be passed as the captured inputs to
`body_grad_graph`.
"""
new_capture_inputs = []
for t in body_grad_graph.external_captures:
# All values captured by gradient computation should be from the forward
# graph or a captured resource variable (note that input gradients are
# regular non-captured inputs).
if t.graph == body_graph:
# Captured accumulator
t = while_op.outputs[t.graph.outputs.index(t)]
# Note: We rely on the capturing logic of the gradient While op graph to
# correctly capture the tensors in `body_graph.outer_graph`. Both cond_v2
# and while_v2 handle this while building their gradient functions.
assert t.graph == body_graph.outer_graph
else:
# Captured resource variable
assert t.dtype == dtypes.resource
new_capture_inputs.append(t)
return new_capture_inputs
def _get_accumulator(tensor):
r"""Returns TensorList if any containing accumulated values of tensor.
We try to find a pattern of the form:
     input_tl   tensor
        \        /
    (TensorListPushBack)
            |
        output_tl
which satisfies the following conditions:
1. input_tl must be in tensor.graph.inputs.
2. output_tl or Identity(output_tl) must be in tensor.graph.outputs.
  3. tensor.graph.input_index(input_tl) == tensor.graph.output_index(output_tl).
output_tl or Identity(output_tl) (whichever is in tensor.graph.outputs) is
returned if such a pattern is found else None is returned.
Args:
tensor: The Tensor to be accumulated.
Returns:
A variant tensor in the same graph as `tensor` or None if no accumulator is
found.
"""
assert isinstance(tensor.graph, func_graph_module.FuncGraph)
def get_func_graph_output(t):
"""Returns t or Identity(t) whichever exists in graph outputs else None."""
if t in tensor.graph.outputs:
return t
# tf.defun adds an Identity for each output, check whether that is the case.
identity_op = t.consumers()[0]
if (identity_op.type == "Identity" and
identity_op.outputs[0] in tensor.graph.outputs):
return identity_op.outputs[0]
return None
for consumer in tensor.consumers():
# Find the consumer that is a TensorListPushBack node whose TensorList input
# is in the list of function inputs.
if (consumer.type != "TensorListPushBack" or
consumer.inputs[0] not in tensor.graph.inputs):
continue
output = get_func_graph_output(consumer.outputs[0])
if output is None:
# The TensorList output of `consumer` is not in the list of function
# outputs.
continue
accum_input_idx = tensor.graph.inputs.index(consumer.inputs[0])
accum_output_idx = tensor.graph.outputs.index(output)
if accum_input_idx == accum_output_idx:
return output
return None
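# Illustrative sketch of the pattern above (not part of the original module;
# names are hypothetical). Inside a forward body FuncGraph `g`, an accumulator
# for a tensor `x` looks roughly like:
#
#   acc_in = g.inputs[i]                                  # variant TensorList input
#   acc_out = list_ops.tensor_list_push_back(acc_in, x)   # push the intermediate value
#   g.outputs[i] = acc_out                                 # output at the same index i
#
# _get_accumulator(x) then returns `acc_out` (or the Identity of it that
# tf.defun may have appended to g.outputs).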
class _WhileBodyGradFuncGraph(util.WhileBodyFuncGraph):
"""FuncGraph for the gradient function of the body of a While op.
Contains the logic for capturing the tensors from the body of the forward
While op which is as follows:
1. If the tensor is of resource type (these are not accumulated):
a. Ensure that the tensor is a loop invariant, i.e., it exists in both loop
inputs and outputs at the same index.
b. Lookup the corresponding resource tensor in the forward outer graph and
try to capture that.
2. If the tensor is not of resource type:
a. Create an accumulator for that tensor and output it from the forward
pass. Note this also requires adding it as an input to the forward pass.
b. Capture the accumulator from the forward pass in this FuncGraph. This
will later be resolved to the correct output of the forward While op.
c. Pop a value from the captured placeholder and use it as the captured
value for the forward pass tensor.
This only allows capturing tensors in the forward graph. A ValueError is
raised if an attempt is made to capture a tensor not in the forward graph.
  To manually capture a tensor that is not in the forward graph, call
`capture` with `whitelisted=True`.
Note: The `captures` dict does not contain the forward tensor since it is not
directly captured. It contains the accumulator corresponding to this forward
tensor.
Attributes:
while_op_needs_rewrite: True if any non-resource intermediates were
captured, meaning the forward While op needs to be rewritten to output the
corresponding accumulators.
empty_tensor_lists: list of EmptyTensorList tensors to be used as initial
input to the new accumulators in the forward graph.
popped_tensor_lists: dict from the captured accumulator placeholder to the
TensorList obtained after popping the intermediate tensor from it. The
values of this dict need to be added to the list of outputs.
"""
def __init__(self, name, forward_cond_graph, forward_body_graph, max_iters):
super(_WhileBodyGradFuncGraph, self).__init__(name)
self.empty_tensor_lists = []
self.popped_tensor_lists = {}
# FuncGraph for the body of the forward While op.
self._forward_graph = forward_body_graph
# FuncGraph for the cond of the forward While op.
self._forward_cond_graph = forward_cond_graph
self._maximum_iterations = max_iters
# Dict from forward intermediate tensor to its indirectly captured tensor
# in this graph. Indirect capturing happens in two ways:
# 1. For non-resource tensors we capture their accumulators from the forward
# outer graph and pop values from that accumulator inside this graph
# using TensorListPopBack.
# 2. For resource tensors we directly capture their corresponding tensor
# in the forward outer graph.
self._indirect_captures = {}
@property
def while_op_needs_rewrite(self):
return self.empty_tensor_lists
def capture(self, tensor, name=None, whitelisted=False):
"""Selectively captures external tensors.
If `whitelisted` is False only allows capturing tensors in the
`_forward_graph`.
Args:
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
whitelisted: If False (default), only allows capturing tensors from the
forward graph.
Returns:
The placeholder in this graph for the tensor.
Raises:
ValueError: If attempting to capture an external tensor not in the forward
graph with `whitelisted` set to False.
"""
if (not whitelisted and tensor.graph is not self and
tensor.graph != self._forward_graph):
raise ValueError("Attempting to capture tensor", str(tensor),
" which is not in the forward graph but in ",
_graph_name(tensor.graph), ".")
return super(_WhileBodyGradFuncGraph, self).capture(tensor, name)
def _capture_helper(self, tensor, name):
if tensor.graph is not self._forward_graph:
return super(_WhileBodyGradFuncGraph, self)._capture_helper(tensor, name)
while tensor.op.type == "Identity":
# We do not accumulate the output of identity nodes so we try to capture
# the input of the Identity node instead.
tensor = tensor.op.inputs[0]
captured_tensor = self._indirect_captures.get(tensor)
if captured_tensor is not None:
return captured_tensor
if tensor.dtype == dtypes.resource:
# Resource-type tensors are not accumulated.
# If a resource tensor exists in the loop body it must either be a loop
# input or an output of a nested While op inside the loop body which
# had captured the external resource.
if tensor in self._forward_graph.inputs:
index = self._forward_graph.inputs.index(tensor)
elif tensor.op.type == "While":
# Captured resources occur at the same index in the lists of inputs and
# outputs of a while op. So we lookup the input of `tensor.op` at the
# same index as the index of `tensor` in the `tensor.op.outputs`.
index = self._forward_graph.inputs.index(
tensor.op.inputs[tensor.value_index])
else:
raise ValueError(
"Taking gradient of a while loop which creates"
" a resource in its body is not supported: %s" % str(tensor))
# This must be a loop invariant.
assert self._forward_graph.inputs[index] == self._forward_graph.outputs[
index], "Resource tensors must be loop invariants %s." % str(
self._forward_graph._while.inputs[index])
tensor_in_outer_graph = self._forward_graph._while.inputs[index]
self._indirect_captures[tensor] = self.capture(
tensor_in_outer_graph, whitelisted=True)
return self._indirect_captures[tensor]
# Create or find an existing accumulator output for `tensor` in the forward
# graph, and fetch from this accumulator in the gradient graph to get the
# raw intermediate value.
accumulator = _get_accumulator(tensor)
if accumulator is None:
# Create the initial empty tensor list.
with self._forward_graph.outer_graph.as_default():
tensor_list = list_ops.empty_tensor_list(
element_dtype=tensor.dtype, element_shape=tensor.shape,
max_num_elements=self._maximum_iterations)
self.empty_tensor_lists.append(tensor_list)
# Push the intermediate tensor to the tensor list. This captures
# `tensor_list`.
with self._forward_graph.as_default():
accumulator = list_ops.tensor_list_push_back(tensor_list, tensor)
# Add the modified tensor list to the list of outputs. This output will be
# all the accumulated values.
self._forward_graph.outputs.append(accumulator)
# Capture in the cond graph as well so the forward cond and body inputs
# match.
with self._forward_cond_graph.as_default():
self._forward_cond_graph.capture(tensor_list)
# Capture the accumulator tensor list in the gradient graph directly from
# the forward graph -- we'll later modify this to capture the final list
# output by the forward While op instead.
captured_accumulator = super(_WhileBodyGradFuncGraph, self)._capture_helper(
accumulator, name)
# Pop the intermediate value from the tensor list in the gradient graph.
new_tensor_list, captured_tensor = list_ops.tensor_list_pop_back(
captured_accumulator, element_dtype=tensor.dtype)
self._indirect_captures[tensor] = captured_tensor
self.popped_tensor_lists[captured_accumulator] = new_tensor_list
return captured_tensor
def _check_shapes_compat(output_tensors, shape_invariants, input_tensors):
for (t, shape, input_t) in zip(output_tensors, shape_invariants,
input_tensors):
if not control_flow_ops._ShapeLessThanOrEqual(t.shape, shape):
raise ValueError(
"Input tensor '%s' enters the loop with shape %s, but has "
"shape %s after one iteration. To allow the shape to vary across "
"iterations, use the `shape_invariants` argument of tf.while_loop to "
"specify a less-specific shape." % (input_t.name, shape, t.shape))
def _check_num_inputs_outputs(cond_graph, body_graph, num_flattened_loop_vars):
"""Checks the number of inputs/outputs of `cond_graph` and `body_graph`."""
assert len(cond_graph.inputs) == num_flattened_loop_vars, (
"cond_graph takes %d inputs; Expected: %d" % (len(cond_graph.inputs),
num_flattened_loop_vars))
assert len(cond_graph.outputs) == 1, (
"cond_graph has %d outputs; Expected: 1" % len(cond_graph.outputs))
assert len(body_graph.inputs) == num_flattened_loop_vars, (
"body_graph takes %d inputs; Expected: %d" % (len(cond_graph.inputs),
num_flattened_loop_vars))
assert len(body_graph.outputs) == num_flattened_loop_vars, (
"body_graph has %d outputs; Expected: %d" % (len(body_graph.outputs),
num_flattened_loop_vars))
def _copy_handle_data(src_tensors, tgt_tensors):
for src_t, tgt_t in zip(src_tensors, tgt_tensors):
custom_gradient.copy_handle_data(src_t, tgt_t)
def _maybe_set_maximum_iterations_attr(op, maximum_iterations):
if control_flow_util.IsInXLAContext(op):
# Store the maximum_iterations to use in the gradient pass.
op._set_attr( # pylint: disable=protected-access
"_maximum_iterations",
attr_value_pb2.AttrValue(
i=tensor_util.constant_value(maximum_iterations)))
# TODO(srbs): This method should be in control_flow_util but that introduces
# a circular dependency ops -> control_flow_util -> ops.
def _is_in_xla_context():
"""Returns whether the current context is inside an XLA context."""
outer_graph = ops.get_default_graph()
# The `_control_flow_context` is not copied when building a FuncGraph so
# we look it up from the base graph.
while isinstance(outer_graph, func_graph_module.FuncGraph):
outer_graph = outer_graph.outer_graph
cur_ctxt = outer_graph._get_control_flow_context() # pylint: disable=protected-access
return control_flow_util.GetContainingXLAContext(cur_ctxt) is not None
def _graph_name(graph):
if isinstance(graph, func_graph_module.FuncGraph):
return graph.name
return "Base"
def _pack_sequence_as(structure_with_tas, loop_vars):
"""Like `nest.pack_sequence_as` but also replaces flows with TensorArrays."""
def flow_to_tensor_array(flow, ta): # pylint: disable=missing-docstring
if isinstance(ta, tensor_array_ops.TensorArray):
# pylint: disable=protected-access
new_ta = tensor_array_ops.TensorArray(
dtype=ta.dtype,
handle=ta.handle,
flow=flow,
infer_shape=ta._infer_shape,
colocate_with_first_write_call=ta._colocate_with_first_write_call)
new_ta._colocate_with = ta._colocate_with
new_ta._element_shape = ta._element_shape
# pylint: enable=protected-access
return new_ta
return flow
flattened_loop_vars = [
flow_to_tensor_array(*z)
for z in zip(nest.flatten(loop_vars), nest.flatten(structure_with_tas))
]
return nest.pack_sequence_as(structure_with_tas, flattened_loop_vars)
def _tensor_array_to_flow(loop_vars):
def f(maybe_ta):
if isinstance(maybe_ta, tensor_array_ops.TensorArray):
return maybe_ta.flow
return maybe_ta
return nest.map_structure(f, loop_vars)
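# Illustrative sketch (not part of the original module): _tensor_array_to_flow
# and _pack_sequence_as act as a round trip over the TensorArray entries of
# loop_vars. For hypothetical loop_vars = (counter, ta) with `ta` a TensorArray:
#
#   flows = _tensor_array_to_flow((counter, ta))        # -> (counter, ta.flow)
#   restored = _pack_sequence_as((counter, ta), flows)  # -> (counter, TensorArray
#                                                       #     rebuilt around ta.flow)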
def _build_signature(loop_vars, shape_invariants):
return nest.pack_sequence_as(loop_vars, [
tensor_spec.TensorSpec(s, t.dtype, name=t.op.name)
for s, t in zip(nest.flatten(shape_invariants), nest.flatten(loop_vars))
])
# pylint: enable=protected-access
|
Bismarrck/tensorflow
|
tensorflow/python/ops/while_v2.py
|
Python
|
apache-2.0
| 38,134 | 0.007159 |
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import get_object_or_404
from django import forms
from django.db.models import Max, Avg, Sum
from opos.models import Customers
from opos.forms import CustomerAddForm, CustomerForm
def is_staff (user):
if user.is_staff or user.is_superuser:
return True
else:
return False
@user_passes_test (is_staff)
def dashboard (request):
c = {}
c['curdebt'] = Customers.objects.all().aggregate(Sum('curdebt'))['curdebt__sum']
c['maxdebt'] = Customers.objects.all().aggregate(Sum('maxdebt'))['maxdebt__sum']
c['highestcurdebt'] = Customers.objects.all().aggregate(Max('curdebt'))['curdebt__max']
from opos.sql import get_total_sale
c['totalsale'] = get_total_sale ()[0]
return render (request, "dashboard.html", c)
@user_passes_test (is_staff)
def customersales(request, customerpk):
from opos.sql import get_customer_ticketlines
customer = get_object_or_404 (Customers, pk=customerpk)
ticketlines = get_customer_ticketlines (customer.pk)
c = {}
c['customer'] = customer
c['ticketlines'] = ticketlines
return render (request, "customer-sales.html", c)
@user_passes_test (is_staff)
def customers (request):
customers = Customers.objects.all ()
c = {}
c['customers'] = customers
return render (request, "customers.html", c)
@user_passes_test (is_staff)
def customeradd (request):
if request.method == 'POST':
form = CustomerAddForm (request.POST)
if form.is_valid ():
form.save ()
return redirect ('customers')
c = {}
c['customeredit'] = CustomerAddForm ()
return render (request, "customer-add.html", c)
@user_passes_test (is_staff)
def customeredit (request, customerpk):
customer = get_object_or_404 (Customers, pk=customerpk)
if request.method == 'POST':
form = CustomerForm (request.POST, instance=customer)
if form.is_valid ():
form.save ()
return redirect ('customers')
else:
form = CustomerForm (instance=customer)
c = {}
c['customer'] = customer
form.fields['id'] = forms.CharField (widget=forms.widgets.HiddenInput())
c['customeredit'] = form
return render (request, "customer-edit.html", c)
def selfdebtcheck (request):
c = {}
if request.method == 'POST':
card = 'c' + request.POST.get("card")
        try:
            customer = Customers.objects.get (card=card)
        except Customers.DoesNotExist:
            # Unknown card: show the check form again instead of failing.
            return render (request, "self-debtcheck.html", c)
c['customer'] = customer
c['leftdebt'] = customer.maxdebt - customer.curdebt
return render (request, "self-debtshow.html", c)
else:
return render (request, "self-debtcheck.html", c)
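# Illustrative wiring sketch (assumption, not part of the original file): these
# views are typically hooked up in a urls.py along the following lines. The URL
# names match the redirect('customers') calls above; the paths themselves are
# hypothetical.
#
# from django.conf.urls import url
# from opos import views
#
# urlpatterns = [
#     url(r'^dashboard/$', views.dashboard, name='dashboard'),
#     url(r'^customers/$', views.customers, name='customers'),
#     url(r'^customers/add/$', views.customeradd, name='customeradd'),
#     url(r'^customers/(?P<customerpk>\d+)/edit/$', views.customeredit, name='customeredit'),
#     url(r'^customers/(?P<customerpk>\d+)/sales/$', views.customersales, name='customersales'),
#     url(r'^debt/$', views.selfdebtcheck, name='selfdebtcheck'),
# ]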
|
kradalby/O2
|
opos/views.py
|
Python
|
mit
| 2,687 | 0.04131 |
from ajenti.api import *
from ajenti.com import *
class DebianNetworkCfg(Plugin):
implements(IConfigurable)
name = 'Network'
id = 'network'
platform = ['Debian', 'Ubuntu']
def list_files(self):
dir = '/etc/network/'
return [dir+'*', dir+'*/*', dir+'*/*/*']
|
DVSBA/ajenti
|
plugins/network/recovery.py
|
Python
|
lgpl-3.0
| 305 | 0.006557 |
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE','tango_with_django_project.settings')
import django
django.setup()
from rango.models import Category, Page
def populate():
# First, we will create lists of dictionaries containing the pages
# we want to add into each category.
# Then we will create a dictionary of dictionaries for our categories.
# This might seem a little bit confusing, but it allows us to iterate
# through each data structure, and add the data to our models.
python_pages = [
{"title": "Official Python Tutorial",
"url": "http://docs.python.org/2/tutorial/",
"views": 32},
{"title": "How to Think like a Computer Scientist",
"url": "http://www.greenteapress.com/thinkpython/",
"views": 16},
{"title": "Learn Python in 10 Minutes",
"url": "http://www.korokithakis.net/tutorials/python/",
"views": 8}]
django_pages = [
{"title": "Official Django Tutorial",
"url": "https://docs.djangoproject.com/en/1.9/intro/tutorial01/",
"views": 32},
{"title": "Django Rocks",
"url": "http://www.djangorocks.com/",
"views": 16},
{"title": "How to Tango with Django",
"url":"http://www.tangowithdjango.com/",
"views": 8}]
other_pages = [
{"title": "Bottle",
"url": "http://bottlepy.org/docs/dev/",
"views": 32},
{"title": "Flask",
"url": "http://flask.pocoo.org",
"views": 16} ]
cats = {"Python": {"pages": python_pages, "views":128, "likes":64},
"Django": {"pages": django_pages, "views":64, "likes":32},
"Other Frameworks": {"pages": other_pages, "views":32, "likes":16},
"Python User Group": {"pages": [], "views": 34, "likes": 16},
"Pascal": {"pages": [], "views": 32, "likes": 16},
"Perl": {"pages": [], "views": 32, "likes": 16},
"Php": {"pages": [], "views": 32, "likes": 16},
"Prolog": {"pages": [], "views": 32, "likes": 16},
"Programming": {"pages": [], "views": 32, "likes": 16}
}
# The code below goes through the cats dictionary, then adds each category,
# and then adds all the associated pages for that category.
# if you are using Python 2.x then use cats.iteritems() see
# http://docs.quantifiedcode.com/python-anti-patterns/readability/
# for more information about how to iterate over a dictionary properly.
for cat, cat_data in cats.items():
c = add_cat(cat, cat_data["views"], cat_data["likes"])
for p in cat_data["pages"]:
add_page(c, p["title"], p["url"],p["views"])
# print out the categories we have added
for c in Category.objects.all():
for p in Page.objects.filter(category=c):
print("- {0} -{1}".format(str(c),str(p)))
def add_cat(name, views, likes):
c = Category.objects.get_or_create(name=name)[0]
c.views = views
c.likes = likes
c.save()
return c
def add_page(cat, title, url, views=0):
p = Page.objects.get_or_create(category=cat, title=title)[0]
p.url = url
p.views = views
p.save()
return p
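# Illustrative check (assumption, not part of the original script): after
# running the population you can verify the data from a Django shell, e.g.
#
#   >>> from rango.models import Category, Page
#   >>> Category.objects.count()                              # 9, one per key in `cats`
#   >>> Page.objects.filter(category__name="Python").count()  # 3 python_pages entries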
# Start execution here!
if __name__ == '__main__':
print("Starting Rango population script...")
populate()
|
cclai999/rango
|
tango_with_django_project/populate_rango.py
|
Python
|
mit
| 3,340 | 0.00479 |
# -*- coding: utf-8 -*-
import codecs
all_tables = {
'ktgg': ['main'],
'zgcpwsw': ['title', 'casecode'],
#
# # 'ktgg',
# # 'cdfy_sfgk',
# # 'newktgg',
# # 'zyktgg',
# # 'zgcpwsw',
# # 'itslaw',
# # 'qyxg_zgcpwsw',
# # 'qyxg_wscpws',
#
'zhixing': ['pname', 'case_code'],
'dishonesty': ['pname', 'case_code', 'exe_code'],
'recruit': ['pubdate_doublet', 'company_name', 'job_functions', 'source'],
'xgxx_shangbiao': ['applicant_name', 'application_no'],
'shgy_zhaobjg': ['title'],
'shgy_zhongbjg': ['title'],
'rmfygg': ['notice_content', 'notice_time', 'notice_type'],
'overseas_investment': ['certificate_no'],
'qyxx_wanfang_zhuanli': ['application_code'],
'tddy': ['landno', 'land_location', 'mortgage_right_name'],
'tdzr': ['land_location', 'landno', 'original_usename'],
'dcos': ['company_name', 'certificate_num'],
'qyxx_enterpriseQualificationForeign': ['company_name', 'certificate_no', 'issue_date'],
'qyxx_gcjljz': ['company_name', 'certificate_no'],
'qyxx_jzsgxkz': ['company_name', 'certificate_no'],
'qyxx_miit_jlzzdwmd': ['company_name', 'certificate_no'],
'qyxx_food_prod_cert': ['company_name', 'certificate_no'],
'qyxx_haiguanzongshu': ['company_name', 'customs_code'],
'qyxx_gmpauth_prod_cert': ['company_name', 'certificate_no'],
'qyxx_hzp_pro_prod_cert': ['company_name', 'certificate_no'],
'qyxx_medi_jy_prod_cert': ['company_name', 'certificate_no'],
'qyxx_medi_pro_prod_cert': ['company_name', 'certificate_no'],
'qyxx_industrial_production_permit': ['company_name', 'certificate_no'],
'qyxx_nyscqyzzcx': ['company_name', 'validdate'],
'qyxx_tk': ['company_name', 'certificate_no'],
'qyxx_ck': ['company_name', 'certificate_no'],
'xzcf': ['name', 'public_date', 'punish_code'],
'rjzzq': ['copyright_nationality', 'regnum', 'regdate'],
'qyxx_finance_xkz': ['company_name', 'issue_date', 'id_serial_num'],
'qylogo': ['company_full_name'],
'ssgs_zjzx': ['_id'],
'simutong': ['financing_side', 'invest_side', 'invest_time'],
'tddkgs': ['title', 'main'],
'shgy_tdcr': ['project_name', 'project_location', 'electron_supervise'],
'qyxx_zhuanli': ['application_code', 'reg_effect_date'],
'zhuanli_zhuanyi': ['application_code', 'reg_effect_date'],
'zpzzq': ['copyright_owner', 'regnum'],
'zuzhijigoudm': ['jgdm', 'jgmc'],
# 'sfpm_taobao':['title','auctioneer','disposal_unit'],
# 'domain_name_website_info':['organizer_name','site_certificate_no','domain_name']
}
tablename='SIMUTONG'
if __name__ == '__main__':
# fok = codecs.open(tablename+'_mysql', 'r', encoding='utf-8')
# fdup = codecs.open(tablename+'_dup', 'r', encoding='utf-8')
#
# foks=fok.read()
# for i in fdup.readlines():
# if i.strip() not in foks:
# print i
# break
#
# fdup.seek(0)
# all_list=[]
#
# for i in fdup.readlines():
# all_list.append(i.strip())
# print len(all_list)
# print len(set(all_list))
    # Quick scratch check: force a ZeroDivisionError and print its message.
    a = 1
    b = 0
    try:
        a / b
    except Exception as e:
        print str(e)
|
mefly2012/platform
|
test/test.py
|
Python
|
apache-2.0
| 3,173 | 0.001891 |
#
# Copyright 2014 Telefonica Investigacion y Desarrollo, S.A.U
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Converters between SCIM JSON representation and Keystone"""
import functools
ROLE_SEP = '#'
_EXT_SCHEMA = 'urn:scim:schemas:extension:keystone:%s'
DEFAULT_VERSION = '1.0'
def get_schema(BASE_SCHEMA, path):
if 'v2' in path:
version = '2.0'
else:
version = '1.0'
return BASE_SCHEMA % version
def _remove_dict_nones(f):
def wrapper(*args, **kwargs):
res = f(*args, **kwargs)
return dict(filter(lambda x: x[1], res.items()))
return wrapper
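# Illustrative example (not part of the original module): the decorator drops
# every falsy value (None, '', empty list, ...) from the returned dict, e.g.
#
#   @_remove_dict_nones
#   def _example():
#       return {'id': 'abc', 'displayName': None, 'active': True, 'emails': []}
#
#   _example()  # -> {'id': 'abc', 'active': True}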
@_remove_dict_nones
def user_key2scim(ref, path, schema=True):
ref = {
'schemas': [get_schema('urn:scim:schemas:core:%s', path),
get_schema(_EXT_SCHEMA, path)] if schema
else None,
'id': ref.get('id', None),
'userName': ref.get('name', None),
'displayName': ref.get('description', None),
'active': ref.get('enabled', None),
'emails': [{'value': ref['email']}] if 'email' in ref else None,
get_schema(_EXT_SCHEMA, path): {
'domain_id': ref.get('domain_id', None)
}
}
return ref
def listusers_key2scim(ref, path, page_info={}):
res = {
'schemas': [get_schema('urn:scim:schemas:core:%s', path),
get_schema(_EXT_SCHEMA, path)],
'Resources': map(functools.partial(user_key2scim, schema=False,
path=path), ref)
}
res.update(page_info)
return res
@_remove_dict_nones
def user_scim2key(scim, path):
return {
'domain_id': scim.get(get_schema(_EXT_SCHEMA, path), {})
.get('domain_id', None),
'email': scim.get('emails', [{}])[0].get('value', None),
'id': scim.get('id', None),
'enabled': scim.get('active', None),
'name': scim.get('userName', None),
'description': scim.get('displayName', None),
'password': scim.get('password', None)
}
@_remove_dict_nones
def role_scim2key(scim):
keystone = {}
keystone['id'] = scim.get('id', None)
if scim.get('domain_id', None):
keystone['name'] = '%s%s%s' % (
scim.get('domain_id'), ROLE_SEP, scim.get('name', None))
else:
keystone['name'] = scim.get('name', None)
return keystone
@_remove_dict_nones
def role_key2scim(ref, path=DEFAULT_VERSION, schema=True):
scim = {
'schemas': [get_schema(_EXT_SCHEMA, path)] if schema else None,
'id': ref.get('id', None)
}
dom_name = ref.get('name', '')
if dom_name.find(ROLE_SEP) > -1:
(domain, name) = dom_name.split(ROLE_SEP, 1)
else:
(domain, name) = (None, dom_name)
scim['name'] = name
scim['domain_id'] = domain
return scim
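# Illustrative example (assumption, values hypothetical): a Keystone role whose
# name embeds a domain, e.g. {'id': 'r1', 'name': 'default#member'}, converts to
# {'schemas': [...], 'id': 'r1', 'name': 'member', 'domain_id': 'default'};
# a plain name such as 'admin' yields 'name': 'admin' with no 'domain_id' key
# (it is filtered out by _remove_dict_nones).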
def listroles_key2scim(ref, path, page_info={}):
res = {
'schemas': [get_schema(_EXT_SCHEMA, path)],
'Resources': map(functools.partial(role_key2scim, schema=False,
path=path), ref)
}
res.update(page_info)
return res
@_remove_dict_nones
def group_scim2key(scim, path):
return {
'domain_id': scim.get(get_schema(_EXT_SCHEMA, path), {})
.get('domain_id', None),
'id': scim.get('id', None),
'name': scim.get('displayName', None)
}
@_remove_dict_nones
def group_key2scim(ref, path, schema=True):
return {
'schemas': [get_schema('urn:scim:schemas:core:%s', path),
get_schema(_EXT_SCHEMA, path)] if schema
else None,
'id': ref.get('id', None),
'displayName': ref.get('name', None),
get_schema(_EXT_SCHEMA, path): {
'domain_id': ref.get('domain_id', None)
}
}
def listgroups_key2scim(ref, path, page_info={}):
res = {
'schemas': [get_schema('urn:scim:schemas:core:%s', path),
get_schema(_EXT_SCHEMA, path)],
'Resources': map(functools.partial(group_key2scim, schema=False,
path=path), ref)
}
res.update(page_info)
return res
@_remove_dict_nones
def organization_key2scim(ref, path, schema=True):
return {
'schemas': [get_schema('urn:scim:schemas:core:%s', path),
get_schema(_EXT_SCHEMA, path)] if schema
else None,
'id': ref.get('id', None),
'name': ref.get('name', None),
'description': ref.get('description', None),
'active': ref.get('enabled', None),
'is_default': ref.get('is_default', None),
get_schema(_EXT_SCHEMA, path): {
'domain_id': ref.get('domain_id', None)
}
}
def listorganizations_key2scim(ref, path, page_info={}):
res = {
'schemas': [get_schema('urn:scim:schemas:core:%s', path),
get_schema(_EXT_SCHEMA, path)],
'Resources': map(functools.partial(organization_key2scim, schema=False,
path=path), ref)
}
res.update(page_info)
return res
@_remove_dict_nones
def organization_scim2key(scim, path):
return {
'domain_id': scim.get(get_schema(_EXT_SCHEMA, path), {})
.get('domain_id', None),
'id': scim.get('id', None),
'enabled': scim.get('active', None),
'name': scim.get('name', None),
'description': scim.get('description', None),
'is_default': scim.get('is_default', None)
}
|
ging/fiware-keystone-scim
|
keystone_scim/contrib/scim/converter.py
|
Python
|
apache-2.0
| 6,230 | 0.000482 |
"""
Evaluates the estimated results of the Segmentation dataset against the
ground truth (human annotated data).
"""
from joblib import Parallel, delayed
import logging
import mir_eval
import numpy as np
import os
import pandas as pd
import six
import sys
# Local stuff
import msaf
import msaf.input_output as io
import msaf.algorithms as algorithms
from msaf import jams2
from msaf import utils
def print_results(results):
"""Print all the results.
Parameters
----------
results: pd.DataFrame
Dataframe with all the results
"""
res = results.mean()
logging.info("Results:\n%s" % res)
def compute_results(ann_inter, est_inter, ann_labels, est_labels, bins,
est_file):
"""Compute the results using all the available evaluations.
Return
------
results : dict
Contains the results of all the evaluations for the given file.
Keys are the following:
track_name : Name of the track
ds_name : Name of the data set
HitRate_3F : F-measure of hit rate at 3 seconds
HitRate_3P : Precision of hit rate at 3 seconds
HitRate_3R : Recall of hit rate at 3 seconds
HitRate_0.5F : F-measure of hit rate at 0.5 seconds
HitRate_0.5P : Precision of hit rate at 0.5 seconds
HitRate_0.5R : Recall of hit rate at 0.5 seconds
HitRate_t3F : F-measure of hit rate at 3 seconds (trimmed)
HitRate_t3P : Precision of hit rate at 3 seconds (trimmed)
HitRate_t3F : Recall of hit rate at 3 seconds (trimmed)
HitRate_t0.5F : F-measure of hit rate at 0.5 seconds (trimmed)
HitRate_t0.5P : Precision of hit rate at 0.5 seconds (trimmed)
HitRate_t0.5R : Recall of hit rate at 0.5 seconds (trimmed)
DevA2E : Median deviation of annotation to estimation
DevE2A : Median deviation of estimation to annotation
D : Information gain
PWF : F-measure of pair-wise frame clustering
PWP : Precision of pair-wise frame clustering
PWR : Recall of pair-wise frame clustering
Sf : F-measure normalized entropy score
So : Oversegmentation normalized entropy score
Su : Undersegmentation normalized entropy score
"""
logging.info("Evaluating %s" % os.path.basename(est_file))
res = {}
### Boundaries ###
# Hit Rate
res["HitRate_3P"], res["HitRate_3R"], res["HitRate_3F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=False)
res["HitRate_0.5P"], res["HitRate_0.5R"], res["HitRate_0.5F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=False)
res["HitRate_t3P"], res["HitRate_t3R"], res["HitRate_t3F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=True)
res["HitRate_t0.5P"], res["HitRate_t0.5R"], res["HitRate_t0.5F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=True)
# Information gain
res["D"] = compute_information_gain(ann_inter, est_inter, est_file,
bins=bins)
# Median Deviations
res["DevR2E"], res["DevE2R"] = mir_eval.segment.deviation(
ann_inter, est_inter, trim=False)
res["DevtR2E"], res["DevtE2R"] = mir_eval.segment.deviation(
ann_inter, est_inter, trim=True)
### Labels ###
if est_labels is not None and len(est_labels) != 0:
try:
# Align labels with intervals
ann_labels = list(ann_labels)
est_labels = list(est_labels)
ann_inter, ann_labels = mir_eval.util.adjust_intervals(ann_inter,
ann_labels)
est_inter, est_labels = mir_eval.util.adjust_intervals(
est_inter, est_labels, t_min=0, t_max=ann_inter.max())
# Pair-wise frame clustering
res["PWP"], res["PWR"], res["PWF"] = mir_eval.segment.pairwise(
ann_inter, ann_labels, est_inter, est_labels)
# Normalized Conditional Entropies
res["So"], res["Su"], res["Sf"] = mir_eval.segment.nce(
ann_inter, ann_labels, est_inter, est_labels)
except:
logging.warning("Labeling evaluation failed in file: %s" %
est_file)
return {}
# Names
base = os.path.basename(est_file)
res["track_id"] = base[:-5]
res["ds_name"] = base.split("_")[0]
return res
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config,
bins=251, annotator_id=0):
"""Computes the results by using the ground truth dataset identified by
the annotator parameter.
Return
------
results : dict
Dictionary of the results (see function compute_results).
"""
# Get the ds_prefix
ds_prefix = os.path.basename(est_file).split("_")[0]
# Get context
if ds_prefix in msaf.prefix_dict.keys():
context = msaf.prefix_dict[ds_prefix]
else:
context = "function"
try:
# TODO: Read hierarchical annotations
if config["hier"]:
ref_times, ref_labels, ref_levels = \
msaf.io.read_hier_references(ref_file, annotation_id=0,
exclude_levels=["function"])
else:
ref_inter, ref_labels = jams2.converters.load_jams_range(
ref_file, "sections", annotator=annotator_id, context=context)
except:
logging.warning("No references for file: %s" % ref_file)
return {}
# Read estimations with correct configuration
est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
labels_id, **config)
if len(est_inter) == 0:
logging.warning("No estimations for file: %s" % est_file)
return {}
# Compute the results and return
if config["hier"]:
# Hierarchical
assert len(est_inter) == len(est_labels), "Same number of levels " \
"are required in the boundaries and labels for the hierarchical " \
"evaluation."
est_times = []
est_labels = []
# Sort based on how many segments per level
est_inter = sorted(est_inter, key=lambda level: len(level))
for inter in est_inter:
est_times.append(msaf.utils.intervals_to_times(inter))
# Add fake labels (hierarchical eval does not use labels --yet--)
est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)
# Align the times
utils.align_end_hierarchies(est_times, ref_times)
# Build trees
ref_tree = mir_eval.segment.tree.SegmentTree(ref_times, ref_labels,
ref_levels)
est_tree = mir_eval.segment.tree.SegmentTree(est_times, est_labels)
# Compute evaluations
res = {}
res["t_recall10"], res["t_precision10"], res["t_measure10"] = \
mir_eval.segment.hmeasure(ref_tree, est_tree, window=100)
res["t_recall15"], res["t_precision15"], res["t_measure15"] = \
mir_eval.segment.hmeasure(ref_tree, est_tree, window=150)
res["t_recall30"], res["t_precision30"], res["t_measure30"] = \
mir_eval.segment.hmeasure(ref_tree, est_tree, window=300)
res["track_id"] = os.path.basename(est_file)[:-5]
return res
else:
# Flat
return compute_results(ref_inter, est_inter, ref_labels, est_labels,
bins, est_file)
def compute_information_gain(ann_inter, est_inter, est_file, bins):
"""Computes the information gain of the est_file from the annotated
intervals and the estimated intervals."""
ann_times = utils.intervals_to_times(ann_inter)
est_times = utils.intervals_to_times(est_inter)
try:
D = mir_eval.beat.information_gain(ann_times, est_times, bins=bins)
except:
logging.warning("Couldn't compute the Information Gain for file "
"%s" % est_file)
D = 0
return D
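# Illustrative note (assumption about msaf.utils): intervals_to_times flattens
# interval rows into boundary times, e.g. [[0, 10], [10, 25], [25, 40]] ->
# [0, 10, 25, 40], and those boundary sequences are what
# mir_eval.beat.information_gain compares here.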
def process_track(file_struct, boundaries_id, labels_id, config, annotator_id=0):
"""Processes a single track.
Parameters
----------
file_struct : object (FileStruct) or str
File struct or full path of the audio file to be evaluated.
boundaries_id : str
Identifier of the boundaries algorithm.
labels_id : str
Identifier of the labels algorithm.
config : dict
Configuration of the algorithms to be evaluated.
annotator_id : int
Number identifiying the annotator.
Returns
-------
one_res : dict
Dictionary of the results (see function compute_results).
"""
# Convert to file_struct if string is passed
if isinstance(file_struct, six.string_types):
file_struct = io.FileStruct(file_struct)
est_file = file_struct.est_file
ref_file = file_struct.ref_file
# Sanity check
assert os.path.basename(est_file)[:-4] == \
os.path.basename(ref_file)[:-4], "File names are different %s --- %s" \
% (os.path.basename(est_file)[:-4], os.path.basename(ref_file)[:-4])
try:
one_res = compute_gt_results(est_file, ref_file, boundaries_id,
labels_id, config,
annotator_id=annotator_id)
except:
logging.warning("Could not compute evaluations for %s. Error: %s" %
(est_file, sys.exc_info()[1]))
one_res = []
return one_res
def get_results_file_name(boundaries_id, labels_id, config, ds_name,
annotator_id):
"""Based on the config and the dataset, get the file name to store the
results."""
if ds_name == "*":
ds_name = "All"
utils.ensure_dir(msaf.results_dir)
file_name = os.path.join(msaf.results_dir, "results_%s" % ds_name)
file_name += "_boundsE%s_labelsE%s" % (boundaries_id, labels_id)
file_name += "_annotatorE%d" % (annotator_id)
sorted_keys = sorted(config.keys(), key=str.lower)
for key in sorted_keys:
file_name += "_%sE%s" % (key, str(config[key]).replace("/", "_"))
# Check for max file length
if len(file_name) > 255 - len(msaf.results_ext):
file_name = file_name[:255 - len(msaf.results_ext)]
return file_name + msaf.results_ext
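# Illustrative example (assumption, values hypothetical): for
# boundaries_id='sf', labels_id='fmc2d', ds_name='SALAMI', annotator_id=0 and
# config={'feature': 'hpcp', 'hier': False}, the generated name looks like
#
#   <msaf.results_dir>/results_SALAMI_boundsEsf_labelsEfmc2d_annotatorE0_featureEhpcp_hierEFalse<msaf.results_ext>
#
# with the sorted config keys appended as "_<key>E<value>" pairs.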
def process(in_path, boundaries_id=msaf.DEFAULT_BOUND_ID,
labels_id=msaf.DEFAULT_LABEL_ID, ds_name="*", annot_beats=False,
framesync=False, feature="hpcp", hier=False, save=False,
n_jobs=4, annotator_id=0, config=None):
"""Main process.
Parameters
----------
in_path : str
Path to the dataset root folder.
boundaries_id : str
Boundaries algorithm identifier (e.g. siplca, cnmf)
labels_id : str
Labels algorithm identifier (e.g. siplca, cnmf)
ds_name : str
Name of the dataset to be evaluated (e.g. SALAMI). * stands for all.
annot_beats : boolean
Whether to use the annotated beats or not.
framesync: str
Whether to use framesync features or not (default: False -> beatsync)
feature: str
String representing the feature to be used (e.g. hpcp, mfcc, tonnetz)
hier : bool
Whether to compute a hierarchical or flat segmentation.
save: boolean
Whether to save the results into the SQLite database.
n_jobs: int
Number of processes to run in parallel. Only available in collection
mode.
annotator_id : int
Number identifiying the annotator.
config: dict
Dictionary containing custom configuration parameters for the
algorithms. If None, the default parameters are used.
Return
------
results : pd.DataFrame
DataFrame containing the evaluations for each file.
"""
# Set up configuration based on algorithms parameters
if config is None:
config = io.get_configuration(feature, annot_beats, framesync,
boundaries_id, labels_id)
# Hierarchical segmentation
config["hier"] = hier
# Remove actual features
config.pop("features", None)
# Sanity check for hierarchical evaluation
if hier:
try:
from mir_eval.segment import tree
except:
logging.error("An experimental mir_eval version is needed to "
"evaluate hierarchical segments. Please, download it"
" from: https://github.com/urinieto/mir_eval")
return []
# Get out file in case we want to save results
out_file = get_results_file_name(boundaries_id, labels_id, config, ds_name,
annotator_id)
# All evaluations
results = pd.DataFrame()
if os.path.isfile(in_path):
# Single File mode
evals = [process_track(in_path, boundaries_id, labels_id, config,
annotator_id=annotator_id)]
else:
# Collection mode
# If out_file already exists, do not compute new results
if os.path.exists(out_file):
logging.info("Results already exists, reading from file %s" %
out_file)
results = pd.read_csv(out_file)
print_results(results)
return results
# Get files
file_structs = io.get_dataset_files(in_path, ds_name)
logging.info("Evaluating %d tracks..." % len(file_structs))
# Evaluate in parallel
evals = Parallel(n_jobs=n_jobs)(delayed(process_track)(
file_struct, boundaries_id, labels_id, config,
annotator_id=annotator_id) for file_struct in file_structs[:])
# Aggregate evaluations in pandas format
for e in evals:
if e != []:
results = results.append(e, ignore_index=True)
logging.info("%d tracks analyzed" % len(results))
# Print results
print_results(results)
# Save all results
if save:
logging.info("Writing results in %s" % out_file)
results.to_csv(out_file)
return results
|
guiquanz/msaf
|
msaf/eval.py
|
Python
|
mit
| 14,384 | 0.000834 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
from telemetry.core import util
from telemetry.results import buildbot_output_formatter
from telemetry.results import chart_json_output_formatter
from telemetry.results import csv_output_formatter
from telemetry.results import csv_pivot_table_output_formatter
from telemetry.results import gtest_progress_reporter
from telemetry.results import html_output_formatter
from telemetry.results import json_output_formatter
from telemetry.results import page_test_results
from telemetry.results import progress_reporter
# Allowed output formats. The default is the first item in the list.
_OUTPUT_FORMAT_CHOICES = ('html', 'buildbot', 'csv', 'gtest', 'json',
'chartjson', 'csv-pivot-table', 'none')
# Filenames to use for given output formats.
_OUTPUT_FILENAME_LOOKUP = {
'html': 'results.html',
'csv': 'results.csv',
'json': 'results.json',
'chartjson': 'results-chart.json',
'csv-pivot-table': 'results-pivot-table.csv'
}
def AddResultsOptions(parser):
group = optparse.OptionGroup(parser, 'Results options')
group.add_option('--chartjson', action='store_true',
help='Output Chart JSON. Ignores --output-format.')
group.add_option('--output-format', action='append', dest='output_formats',
choices=_OUTPUT_FORMAT_CHOICES, default=[],
help='Output format. Defaults to "%%default". '
'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES))
group.add_option('-o', '--output',
dest='output_file',
default=None,
help='Redirects output to a file. Defaults to stdout.')
group.add_option('--output-dir', default=util.GetBaseDir(),
help='Where to save output data after the run.')
group.add_option('--output-trace-tag',
default='',
help='Append a tag to the key of each result trace. Use '
'with html, buildbot, csv-pivot-table output formats.')
group.add_option('--reset-results', action='store_true',
help='Delete all stored results.')
group.add_option('--upload-results', action='store_true',
help='Upload the results to cloud storage.')
group.add_option('--upload-bucket', default='internal',
choices=['public', 'partner', 'internal'],
help='Storage bucket to use for the uploaded results. '
'Defaults to internal. Supported values are: '
'public, partner, internal')
group.add_option('--results-label',
default=None,
help='Optional label to use for the results of a run .')
group.add_option('--suppress_gtest_report',
default=False,
help='Whether to suppress GTest progress report.')
parser.add_option_group(group)
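# Illustrative usage (assumption, not part of the original module): a typical
# benchmark invocation combines these flags, e.g.
#
#   run_benchmark ... --output-format=html --output-format=json \
#       --output-dir=/tmp/telemetry_out --results-label=my-experiment
#
# which makes CreateResults() below attach both an HTML and a JSON output
# formatter writing results.html / results.json under /tmp/telemetry_out.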
def ProcessCommandLineArgs(parser, args):
# TODO(ariblue): Delete this flag entirely at some future data, when the
# existence of such a flag has been long forgotten.
if args.output_file:
parser.error('This flag is deprecated. Please use --output-dir instead.')
try:
os.makedirs(args.output_dir)
except OSError:
# Do nothing if the output directory already exists. Existing files will
# get overwritten.
pass
args.output_dir = os.path.expanduser(args.output_dir)
def _GetOutputStream(output_format, output_dir):
assert output_format in _OUTPUT_FORMAT_CHOICES, 'Must specify a valid format.'
assert output_format not in ('gtest', 'none'), (
'Cannot set stream for \'gtest\' or \'none\' output formats.')
if output_format == 'buildbot':
return sys.stdout
assert output_format in _OUTPUT_FILENAME_LOOKUP, (
'No known filename for the \'%s\' output format' % output_format)
output_file = os.path.join(output_dir, _OUTPUT_FILENAME_LOOKUP[output_format])
open(output_file, 'a').close() # Create file if it doesn't exist.
return open(output_file, 'r+')
def _GetProgressReporter(output_skipped_tests_summary, suppress_gtest_report):
if suppress_gtest_report:
return progress_reporter.ProgressReporter()
return gtest_progress_reporter.GTestProgressReporter(
sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
def CreateResults(benchmark_metadata, options,
value_can_be_added_predicate=lambda v: True):
"""
Args:
options: Contains the options specified in AddResultsOptions.
"""
if not options.output_formats:
options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]
output_formatters = []
for output_format in options.output_formats:
if output_format == 'none' or output_format == "gtest" or options.chartjson:
continue
output_stream = _GetOutputStream(output_format, options.output_dir)
if output_format == 'csv':
output_formatters.append(csv_output_formatter.CsvOutputFormatter(
output_stream))
elif output_format == 'csv-pivot-table':
output_formatters.append(
csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
output_stream, trace_tag=options.output_trace_tag))
elif output_format == 'buildbot':
output_formatters.append(
buildbot_output_formatter.BuildbotOutputFormatter(
output_stream, trace_tag=options.output_trace_tag))
elif output_format == 'html':
# TODO(chrishenry): We show buildbot output so that users can grep
# through the results easily without needing to open the html
# file. Another option for this is to output the results directly
# in gtest-style results (via some sort of progress reporter),
# as we plan to enable gtest-style output for all output formatters.
output_formatters.append(
buildbot_output_formatter.BuildbotOutputFormatter(
sys.stdout, trace_tag=options.output_trace_tag))
output_formatters.append(html_output_formatter.HtmlOutputFormatter(
output_stream, benchmark_metadata, options.reset_results,
options.upload_results, options.browser_type,
options.results_label, trace_tag=options.output_trace_tag))
elif output_format == 'json':
output_formatters.append(json_output_formatter.JsonOutputFormatter(
output_stream, benchmark_metadata))
elif output_format == 'chartjson':
output_formatters.append(
chart_json_output_formatter.ChartJsonOutputFormatter(
output_stream, benchmark_metadata))
else:
# Should never be reached. The parser enforces the choices.
raise Exception('Invalid --output-format "%s". Valid choices are: %s'
% (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))
# TODO(chrishenry): This is here to not change the output of
# gtest. Let's try enabling skipped tests summary for gtest test
# results too (in a separate patch), and see if we break anything.
output_skipped_tests_summary = 'gtest' in options.output_formats
reporter = _GetProgressReporter(output_skipped_tests_summary,
options.suppress_gtest_report)
return page_test_results.PageTestResults(
output_formatters=output_formatters, progress_reporter=reporter,
output_dir=options.output_dir,
value_can_be_added_predicate=value_can_be_added_predicate)
|
hefen1/chromium
|
tools/telemetry/telemetry/results/results_options.py
|
Python
|
bsd-3-clause
| 7,550 | 0.009669 |
import fechbase
class Records(fechbase.RecordsBase):
def __init__(self):
fechbase.RecordsBase.__init__(self)
self.fields = [
{'name': 'FORM TYPE', 'number': '1'},
{'name': 'FILER FEC CMTE ID', 'number': '2'},
{'name': 'COMMITTEE NAME', 'number': '3'},
{'name': 'STREET 1', 'number': '4'},
{'name': 'STREET 2', 'number': '5'},
{'name': 'CITY', 'number': '6'},
{'name': 'STATE', 'number': '7'},
{'name': 'ZIP', 'number': '8'},
{'name': 'ORGANIZATION TYPE', 'number': '9'},
{'name': 'RPTCODE', 'number': '10'},
{'name': 'OF ELECTION', 'number': '11-'},
{'name': 'STATE (OF ELECTION)', 'number': '12'},
{'name': 'COVERAGE FROM', 'number': '13-'},
{'name': 'COVERAGE TO', 'number': '14-'},
{'name': 'TOTAL COSTS', 'number': '15'},
{'name': 'FILER', 'number': '16-'},
{'name': 'SIGNED', 'number': '17-'},
{'name': 'TITLE', 'number': '18'},
]
self.fields_names = self.hash_names(self.fields)
|
h4ck3rm1k3/FEC-Field-Documentation
|
fec/version/v2/F7.py
|
Python
|
unlicense
| 1,133 | 0.001765 |
"""Test energy_profiler module."""
import unittest
from physalia.energy_profiler import AndroidUseCase
# pylint: disable=missing-docstring
class TestEnergyProfiler(unittest.TestCase):
def test_empty_android_use_case(self):
# pylint: disable=no-self-use
use_case = AndroidUseCase(
name="Test",
app_apk="no/path",
app_pkg="no.package",
app_version="0.0.0",
run=None,
prepare=None,
cleanup=None
)
use_case.run()
|
TQRG/physalia
|
physalia/tests/test_energy_profiler.py
|
Python
|
mit
| 534 | 0.001873 |
"""
Setup file for led-controller
author: Luis Garcia Rodriguez 2017
Licence: GPLv3
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='led-controller',
version='2.0.0',
description='A simple interface for controlling LEDs in circadian experiments',
# The project's main homepage.
url='https://github.com/polygonaltree/Led-control',
# Author details
author='Luis Garcia Rodriguez',
author_email='luis.garcia@uni-muenster.de',
license='GPLv3',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"]
install_requires=['pyside2'],
entry_points={
'console_scripts': [
'led-controller=gui:main',
],
},
)
|
PolygonalTree/Led-control
|
led-control/setup.py
|
Python
|
agpl-3.0
| 909 | 0.0011 |
# -*- coding: utf-8 -*-
from decimal import Decimal
from django.http import HttpResponse, HttpRequest
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from ....cart.app import cart_app
from ....cart.models import CART_SESSION_KEY
from ....cart.tests import TestCart
from ....contrib.delivery.simplepost.models import PostShippingType
from ....order import handler as order_handler
from ....order.models import Order
from ....payment import ConfirmationFormNeeded
from ....payment.tests import TestPaymentProvider
from ....product.tests import DeadParrot
from ..common.decorators import require_order
from ..common.views import prepare_order, reactivate_order
from . import views
urlpatterns = patterns('',
url(r'^cart/', include(cart_app.urls)),
url(r'^checkout/', include('satchless.contrib.checkout.multistep.urls')),
url(r'^order/', include('satchless.order.urls')),
)
class TestPaymentProviderWithConfirmation(TestPaymentProvider):
def confirm(self, order):
raise ConfirmationFormNeeded(action='http://test.payment.gateway.example.com')
class CheckoutTest(TestCase):
urls = 'satchless.contrib.checkout.multistep.tests'
def _setup_settings(self, custom_settings):
original_settings = {}
for setting_name, value in custom_settings.items():
if hasattr(settings, setting_name):
original_settings[setting_name] = getattr(settings, setting_name)
setattr(settings, setting_name, value)
return original_settings
def _teardown_settings(self, original_settings, custom_settings=None):
custom_settings = custom_settings or {}
for setting_name, value in custom_settings.items():
if setting_name in original_settings:
setattr(settings, setting_name, value)
else:
delattr(settings, setting_name)
def setUp(self):
self.macaw = DeadParrot.objects.create(slug='macaw',
species="Hyacinth Macaw")
self.cockatoo = DeadParrot.objects.create(slug='cockatoo',
species="White Cockatoo")
self.macaw_blue = self.macaw.variants.create(color='blue', looks_alive=False)
self.macaw_blue_fake = self.macaw.variants.create(color='blue', looks_alive=True)
self.cockatoo_white_a = self.cockatoo.variants.create(color='white', looks_alive=True)
self.cockatoo_white_d = self.cockatoo.variants.create(color='white', looks_alive=False)
self.cockatoo_blue_a = self.cockatoo.variants.create(color='blue', looks_alive=True)
self.cockatoo_blue_d = self.cockatoo.variants.create(color='blue', looks_alive=False)
self.custom_settings = {
'SATCHLESS_DELIVERY_PROVIDERS': ['satchless.contrib.delivery.simplepost.PostDeliveryProvider'],
'SATCHLESS_ORDER_PARTITIONERS': ['satchless.contrib.order.partitioner.simple'],
'SATCHLESS_PAYMENT_PROVIDERS': [TestPaymentProviderWithConfirmation],
'SATCHLESS_DJANGO_PAYMENT_TYPES': ['dummy'],
'PAYMENT_VARIANTS': {'dummy': ('payments.dummy.DummyProvider', {'url': '/', })},
}
self.original_settings = self._setup_settings(self.custom_settings)
order_handler.init_queues()
self.anon_client = Client()
PostShippingType.objects.create(price=12, typ='polecony', name='list polecony')
PostShippingType.objects.create(price=20, typ='list', name='List zwykly')
def tearDown(self):
self._teardown_settings(self.original_settings, self.custom_settings)
order_handler.init_queues()
def _test_status(self, url, method='get', *args, **kwargs):
status_code = kwargs.pop('status_code', 200)
client = kwargs.pop('client_instance', Client())
data = kwargs.pop('data', {})
response = getattr(client, method)(url, data=data, follow=False)
self.assertEqual(response.status_code, status_code,
'Incorrect status code for: %s, (%s, %s)! Expected: %s, received: %s. HTML:\n\n%s' % (
url.decode('utf-8'), args, kwargs, status_code, response.status_code,
response.content.decode('utf-8')))
return response
    def _get_or_create_cart_for_client(self, client, typ='satchless_cart'):
        # operate on the supplied client so this helper also works for clients
        # other than self.anon_client
        self._test_status(reverse('satchless-cart-view'), client_instance=client)
        return TestCart.objects.get(pk=client.session[CART_SESSION_KEY % typ], typ=typ)
def _get_order_from_session(self, session):
order_pk = session.get('satchless_order', None)
if order_pk:
return Order.objects.get(pk=order_pk)
return None
def _get_order_items(self, order):
order_items = set()
for group in order.groups.all():
order_items.update(group.items.values_list('product_variant', 'quantity'))
return order_items
def test_order_from_cart_view_creates_proper_order(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.macaw_blue, 1)
cart.replace_item(self.macaw_blue_fake, Decimal('2.45'))
cart.replace_item(self.cockatoo_white_a, Decimal('2.45'))
self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
order = self._get_order_from_session(self.anon_client.session)
self.assertNotEqual(order, None)
order_items = self._get_order_items(order)
self.assertEqual(set(cart.items.values_list('variant', 'quantity')), order_items)
def test_order_is_updated_after_cart_changes(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.macaw_blue, 1)
cart.replace_item(self.macaw_blue_fake, Decimal('2.45'))
cart.replace_item(self.cockatoo_white_a, Decimal('2.45'))
self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
order = self._get_order_from_session(self.anon_client.session)
order_items = self._get_order_items(order)
# compare cart and order
self.assertEqual(set(cart.items.values_list('variant', 'quantity')), order_items)
# update cart
cart.add_item(self.macaw_blue, 100)
cart.add_item(self.macaw_blue_fake, 100)
self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
old_order = order
order = self._get_order_from_session(self.anon_client.session)
# order should be reused
self.assertEqual(old_order.pk, order.pk)
self.assertNotEqual(order, None)
order_items = self._get_order_items(order)
# compare cart and order
self.assertEqual(set(cart.items.values_list('variant', 'quantity')), order_items)
def test_prepare_order_creates_order_and_redirects_to_checkout_when_cart_is_not_empty(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.macaw_blue, 1)
response = self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
order_pk = self.anon_client.session.get('satchless_order', None)
order = Order.objects.get(pk=order_pk)
self.assertRedirects(response, reverse(views.checkout,
kwargs={'order_token':
order.token}))
def test_prepare_order_redirects_to_cart_when_cart_is_empty(self):
self._get_or_create_cart_for_client(self.anon_client)
response = self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
# 'satchless_cart' is taken from multistep/urls.py:
# url(r'^prepare-order/$', prepare_order, {'typ': 'satchless_cart'}...)
self.assertRedirects(response, reverse('satchless-cart-view'))
def test_prepare_order_redirects_to_checkout_when_order_exists(self):
order = self._create_order(self.anon_client)
response = self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
self.assertRedirects(response, reverse(views.checkout,
kwargs={'order_token':
order.token}))
def _create_cart(self, client):
cart = self._get_or_create_cart_for_client(client)
cart.replace_item(self.macaw_blue, 1)
cart.replace_item(self.macaw_blue_fake, Decimal('2.45'))
cart.replace_item(self.cockatoo_white_a, Decimal('2.45'))
return cart
def _create_order(self, client):
self._create_cart(client)
self._test_status(reverse(prepare_order), method='post',
client_instance=client, status_code=302)
return self._get_order_from_session(client.session)
def test_order_is_deleted_when_all_cart_items_are_deleted(self):
order = self._create_order(self.anon_client)
for cart_item in order.cart.items.all():
self.assertTrue(Order.objects.filter(pk=order.pk).exists())
order.cart.replace_item(cart_item.variant, 0)
self.assertFalse(Order.objects.filter(pk=order.pk).exists())
def test_checkout_view(self):
order = self._create_order(self.anon_client)
response = self._test_status(reverse(views.checkout,
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
status_code=200)
group = order.groups.get()
dtypes = order_handler.get_delivery_types(group)
dtype = dtypes[0][0]
df = response.context['delivery_formset']
data = {'billing_first_name': 'First',
'billing_last_name': 'Last',
'billing_street_address_1': 'Via Rodeo 1',
'billing_city': 'Beverly Hills',
'billing_country': 'US',
'billing_country_area': 'AZ',
'billing_phone': '555-555-5555',
'billing_postal_code': '90210'}
data[df.add_prefix('INITIAL_FORMS')] = '1'
data[df.add_prefix('MAX_NUM_FORMS')] = ''
data[df.add_prefix('TOTAL_FORMS')] = '1'
for form in df.forms:
data[form.add_prefix('delivery_type')] = dtype
data[form.add_prefix('id')] = group.id
self._test_status(reverse(views.checkout, kwargs={'order_token':
order.token}),
data=data, status_code=302,
client_instance=self.anon_client, method='post')
self.assertEqual(order.groups.get().delivery_type, dtype)
def test_delivery_details_view(self):
order = self._create_order(self.anon_client)
group = order.groups.get()
dtypes = order_handler.get_delivery_types(group)
group.delivery_type = dtypes[0][0]
group.save()
self._test_status(reverse(views.delivery_details,
kwargs={'order_token': order.token}),
client_instance=self.anon_client, method='get')
def test_delivery_details_view_redirects_to_checkout_when_delivery_type_is_missing(self):
order = self._create_order(self.anon_client)
response = self._test_status(reverse(views.delivery_details,
kwargs={'order_token':
order.token}),
status_code=302,
client_instance=self.anon_client,
method='get')
self.assertRedirects(response, reverse(views.checkout,
kwargs={'order_token':
order.token}))
def test_payment_view_redirects_to_payment_choice_view_when_payment_type_is_missing(self):
order = self._create_order(self.anon_client)
response = self._test_status(reverse(views.payment_details,
kwargs={'order_token':
order.token}),
status_code=302,
client_instance=self.anon_client,
method='get')
self.assertRedirects(response, reverse(views.payment_choice,
kwargs={'order_token':
order.token}))
def test_checkout_views_redirects_to_confirmation_page_when_order_has_payment_pending_status(self):
order = self._create_order(self.anon_client)
order.set_status('payment-pending')
self._test_status(reverse(views.payment_details,
kwargs={'order_token':
order.token}),
status_code=302,
client_instance=self.anon_client,
method='get')
def test_reactive_order_view_changes_order_status_to_checkout(self):
order = self._create_order(self.anon_client)
order.set_status('payment-failed')
self._test_status(reverse(reactivate_order,
kwargs={'order_token':
order.token}),
status_code=302,
client_instance=self.anon_client,
method='post')
self.assertEqual(Order.objects.get(pk=order.pk).status, 'checkout')
def test_reactive_order_view_redirects_to_checkout_for_correct_order(self):
order = self._create_order(self.anon_client)
order.set_status('payment-failed')
response = self._test_status(reverse(reactivate_order,
kwargs={'order_token':
order.token}),
status_code=302,
client_instance=self.anon_client,
method='post')
self.assertRedirects(response, reverse('satchless-checkout', args=(order.token,)))
def test_require_order_decorator(self):
def assertRedirects(response, path):
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], path)
def view_factory(status):
@require_order(status=status)
def view(request, order_token):
return HttpResponse()
return view
request = HttpRequest()
order = self._create_order(self.anon_client)
# decorator should not redirect if status is correct
for status, name in Order.STATUS_CHOICES:
view = view_factory(status)
order.set_status(status)
self.assertTrue(view(request, order_token=order.token).status_code, 200)
view = view_factory('non-existing-status')
order.set_status('payment-pending')
assertRedirects(view(request, order_token=order.token),
reverse('satchless-checkout-confirmation', args=(order.token,)))
order.set_status('checkout')
assertRedirects(view(request, order_token=order.token),
reverse('satchless-checkout', args=(order.token,)))
for status in ('payment-failed', 'delivery', 'payment-complete', 'cancelled'):
order.set_status(status)
assertRedirects(view(request, order_token=order.token),
reverse('satchless-order-view', args=(order.token,)))
assertRedirects(view(request, order_token='non-existing-order-token'),
reverse('satchless-cart-view'))
|
fusionbox/satchless
|
satchless/contrib/checkout/multistep/tests.py
|
Python
|
bsd-3-clause
| 16,546 | 0.00278 |
from tornado.web import RequestHandler
class BaseHandler(RequestHandler):
def initialize(self):
_settings = self.application.settings
self.db = self.application.db
#self.redis = _settings["redis"]
self.log = _settings["log"]
|
code-shoily/tornado-cljs
|
handlers/base.py
|
Python
|
mit
| 264 | 0.003788 |
# Generated by Django 3.1.4 on 2020-12-06 08:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0023_auto_20201202_0349'),
]
operations = [
migrations.AlterField(
model_name='review',
name='status',
field=models.CharField(choices=[('PENDING', 'Pending'), ('REQUESTED', 'Requested'), ('CANCELLED', 'Cancelled'), ('ACCEPTED', 'Accepted'), ('DECLINED', 'Declined'), ('COMPLETED', 'Completed'), ('EXTRACTING', 'Retrieval in progress'), ('EXTRACTED', 'Retrieved'), ('FAILED', 'Retrieval failed'), ('REGISTERED', 'Registered')], default='PENDING', help_text='The status of the review.', max_length=16),
),
]
|
stencila/hub
|
manager/projects/migrations/0024_auto_20201206_0819.py
|
Python
|
apache-2.0
| 746 | 0.00134 |
import random
import time
from collections import OrderedDict
from plenum.common.util import randomString
try:
import ujson as json
except ImportError:
import json
import pytest
from plenum.recorder.recorder import Recorder
TestRunningTimeLimitSec = 350
def test_add_to_recorder(recorder):
last_check_time = recorder.get_now_key()
time.sleep(1)
msg1, frm1 = 'm1', 'f1'
msg2, frm2 = 'm2', 'f2'
recorder.add_incoming(msg1, frm1)
time.sleep(3)
recorder.add_incoming(msg2, frm2)
time.sleep(2.1)
msg3, to1, to11 = 'm3', 't1', 't11'
msg4, to2 = 'm4', 't2'
recorder.add_outgoing(msg3, to1, to11)
time.sleep(.4)
recorder.add_outgoing(msg4, to2)
time.sleep(.5)
recorder.add_disconnecteds('a', 'b', 'c')
i = 0
for k, v in recorder.store.iterator(include_value=True):
assert int(k.decode()) > int(last_check_time)
if i == 0:
assert v.decode() == json.dumps([[Recorder.INCOMING_FLAG, msg1, frm1]])
if i == 1:
assert v.decode() == json.dumps([[Recorder.INCOMING_FLAG, msg2, frm2]])
assert int(k) - int(last_check_time) >= 3 * Recorder.TIME_FACTOR
if i == 2:
assert v.decode() == json.dumps([[Recorder.OUTGOING_FLAG, msg3, to1, to11]])
assert int(k) - int(last_check_time) >= 2.1 * Recorder.TIME_FACTOR
if i == 3:
assert v.decode() == json.dumps([[Recorder.OUTGOING_FLAG, msg4, to2]])
assert int(k) - int(last_check_time) >= .4 * Recorder.TIME_FACTOR
if i == 4:
assert v.decode() == json.dumps([[Recorder.DISCONN_FLAG, 'a', 'b', 'c']])
assert int(k) - int(last_check_time) >= .5 * Recorder.TIME_FACTOR
last_check_time = k.decode()
i += 1
def test_get_list_from_recorder(recorder):
msg1, frm1 = 'm1', 'f1'
msg2, frm2 = 'm2', 'f2'
msg3, to1, to11 = 'm3', 't1', 't11'
# Decrease resolution
recorder.TIME_FACTOR = 1
time.sleep(1)
recorder.add_outgoing(msg3, to1, to11)
recorder.add_incoming(msg1, frm1)
recorder.add_incoming(msg2, frm2)
recorder.add_disconnecteds('a', 'b', 'c')
for k, v in recorder.store.iterator(include_value=True):
assert v.decode() == json.dumps([
[Recorder.OUTGOING_FLAG, 'm3', 't1', 't11'],
[Recorder.INCOMING_FLAG, 'm1', 'f1'],
[Recorder.INCOMING_FLAG, 'm2', 'f2'],
[Recorder.DISCONN_FLAG, 'a', 'b', 'c']
])
def test_register_play_targets(recorder):
l1 = []
l2 = []
def add1(arg):
l1.append(arg)
def add2(arg):
l2.append(arg)
assert not recorder.replay_targets
recorder.register_replay_target('1', add1)
assert len(recorder.replay_targets) == 1
with pytest.raises(AssertionError):
recorder.register_replay_target('1', add2)
def test_recorded_parsings(recorder):
incoming = [[randomString(10), randomString(6)] for i in
range(3)]
outgoing = [[randomString(10), randomString(6)] for i in
range(5)]
for m, f in incoming:
recorder.add_incoming(m, f)
time.sleep(0.01)
for m, f in outgoing:
recorder.add_outgoing(m, f)
time.sleep(0.01)
with pytest.raises(AssertionError):
recorder.get_parsed(incoming[0], only_incoming=True, only_outgoing=True)
combined = incoming + outgoing
def sublist(lst1, lst2):
ls1 = [element for element in lst1 if element in lst2]
ls2 = [element for element in lst2 if element in lst1]
return ls1 == ls2
for k, v in recorder.store.iterator(include_value=True):
p = Recorder.get_parsed(v)
        assert sublist([i[1:] for i in p], combined)
p = Recorder.get_parsed(v, only_incoming=True)
if p:
assert sublist(p, incoming)
for i in p:
incoming.remove(i)
p = Recorder.get_parsed(v, only_outgoing=True)
if p:
assert sublist(p, outgoing)
for i in p:
outgoing.remove(i)
assert not incoming
assert not outgoing
def test_recorder_get_next_incoming_only(recorder):
incoming_count = 100
incoming = [(randomString(100), randomString(6)) for _ in
range(incoming_count)]
while incoming:
recorder.add_incoming(*incoming.pop())
time.sleep(random.choice([0, 1]) + random.random())
recorded_incomings = OrderedDict()
keys = []
for k, v in recorder.store.iterator(include_value=True):
v = Recorder.get_parsed(v)
keys.append(int(k))
recorded_incomings[int(k)] = v
assert len(recorded_incomings) == incoming_count
assert sorted(keys) == keys
max_time_to_run = incoming_count * 2 + 10
recorder.start_playing()
start = time.perf_counter()
while recorder.is_playing and (time.perf_counter() < start + max_time_to_run):
vals = recorder.get_next()
if vals:
check = recorded_incomings.popitem(last=False)[1]
assert check == vals
else:
time.sleep(0.01)
assert len(recorded_incomings) == 0
assert not recorder.is_playing
def test_recorder_get_next(recorder):
incoming_count = 100
outgoing_count = 50
incoming = [(randomString(100), randomString(6)) for _ in range(incoming_count)]
outgoing = [(randomString(100), randomString(6)) for _ in range(outgoing_count)]
while incoming or outgoing:
if random.choice([0, 1]) and outgoing:
recorder.add_outgoing(*outgoing.pop())
time.sleep(random.choice([0, 1]) + random.random())
elif incoming:
recorder.add_incoming(*incoming.pop())
time.sleep(random.choice([0, 1]) + random.random())
else:
continue
recorded_incomings = OrderedDict()
for k, v in recorder.store.iterator(include_value=True):
v = Recorder.get_parsed(v, only_incoming=True)
if v:
recorded_incomings[int(k)] = v
assert len(recorded_incomings) == incoming_count
max_time_to_run = incoming_count * 2 + 10
recorder.start_playing()
start = time.perf_counter()
while recorder.is_playing and (time.perf_counter() < start + max_time_to_run):
vals = recorder.get_next()
if vals:
inc = Recorder.filter_incoming(vals)
if inc:
assert recorded_incomings.popitem(last=False)[1] == inc
else:
time.sleep(0.01)
assert len(recorded_incomings) == 0
assert not recorder.is_playing
|
evernym/plenum
|
plenum/test/recorder/test_recorder.py
|
Python
|
apache-2.0
| 6,601 | 0.001666 |
# -*- coding: utf-8 -*-
t1 = ()
print type(t1)
t3 = 1,2,3
print type(t3)
r1 = (1)     # parentheses alone do not create a tuple; this is just the int 1
print r1
print type(r1)
r1 = (1,)    # the trailing comma is what makes a one-element tuple
print r1
print type(r1)
t = (1,2,3)
print t*2
print t+('aaa','bbb')
print t
print
print t[0], t[1:3]
print len(t)
print 1 in t
print range(1,3)
t= (12345,54321,'hhh')
u = t,(1,2,3,4,5)
print u
t2 = [1,2,3]
u2 = t2,(1,2,4)
print u2
t3 = {1:'ggg',2:'hhh'}
u3 = t3,(1,2,3)
print u3
x,y,z=1,2,3
print x
print y
print z
t = 1,2,'hello'
x,y,z = t
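
# A couple more tuple idioms (editor addition; they follow directly from the
# values assigned above):
print x, y, z          # -> 1 2 hello
a, b = 1, 2
a, b = b, a            # swap without a temporary, via tuple packing/unpacking
print a, b             # -> 2 1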
|
JaeGyu/PythonEx_1
|
20160124_1.py
|
Python
|
mit
| 473 | 0.07611 |
# -*- coding: utf-8 -*-
from .__version__ import __version__
|
GjjvdBurg/HugoPhotoSwipe
|
hugophotoswipe/__init__.py
|
Python
|
gpl-3.0
| 62 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Animal(object):
def run(self):
print('Animal running...')
class Dog(Animal):
def run(self):
print('Dog running...')
def shout(self):
print('Dog wang wang...')
class Cat(Animal):
def run(self):
print('Cat running...')
def shout(self):
print('Cat miao miao...')
class Pig(Animal):
def run(self):
print('Pig running slowly...')
def run_twice(animal):
animal.run()
animal.run()
dog = Dog()
cat = Cat()
print(dog.run())
print(cat.run())
print(run_twice(Animal()))
print(run_twice(Dog()))
print(run_twice(Cat()))
print(run_twice(Pig()))
|
henryneu/Python
|
sample/exten.py
|
Python
|
apache-2.0
| 614 | 0.034202 |
#!/usr/bin/env python
def pos_neg(a, b, negative):
if negative:
return a < 0 and b < 0
else:
return (a < 0 and b > 0) or (a > 0 and b < 0)
if __name__ == "__main__":
# run some tests if run as script
# (from the codingbat site -- not all, I got bored)
assert pos_neg(1, -1, False) is True
assert pos_neg(-1, 1, False) is True
assert pos_neg(-4, -5, True) is True
assert pos_neg(-4, -5, False) is False
assert pos_neg(-4, -5, True) is True
assert pos_neg(-6, -6, False) is False
assert pos_neg(-2, -1, False) is False
assert pos_neg(1, 2, False) is False
assert pos_neg(-5, 6, True) is False
assert pos_neg(-5, -5, True) is True
print "all tests passed"
|
weidnem/IntroPython2016
|
Solutions/Session01/codingbat/Warmup-1/pos_neg.py
|
Python
|
unlicense
| 736 | 0.002717 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import threading
import time
from collections import defaultdict
from contextlib import contextmanager
from pants.base.exceptions import TaskError
from pants.base.project_tree import Dir, File, Link
from pants.build_graph.address import Address
from pants.engine.addressable import SubclassesOf
from pants.engine.fs import FileContent, FilesContent, Path, PathGlobs, Snapshot
from pants.engine.isolated_process import _Snapshots, create_snapshot_rules
from pants.engine.nodes import Return, State, Throw
from pants.engine.rules import RuleIndex, SingletonRule, TaskRule
from pants.engine.selectors import (Select, SelectDependencies, SelectProjection, SelectTransitive,
SelectVariant, constraint_for)
from pants.engine.struct import HasProducts, Variants
from pants.engine.subsystem.native import Function, TypeConstraint, TypeId
from pants.util.contextutil import temporary_file_path
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class ExecutionRequest(datatype('ExecutionRequest', ['roots'])):
"""Holds the roots for an execution, which might have been requested by a user.
To create an ExecutionRequest, see `LocalScheduler.build_request` (which performs goal
translation) or `LocalScheduler.execution_request`.
:param roots: Roots for this request.
:type roots: list of tuples of subject and product.
"""
class ExecutionResult(datatype('ExecutionResult', ['error', 'root_products'])):
"""Represents the result of a single execution."""
@classmethod
def finished(cls, root_products):
"""Create a success or partial success result from a finished run.
Runs can either finish with no errors, satisfying all promises, or they can partially finish
if run in fail-slow mode producing as many products as possible.
:param root_products: List of ((subject, product), State) tuples.
:rtype: `ExecutionResult`
"""
return cls(error=None, root_products=root_products)
@classmethod
def failure(cls, error):
"""Create a failure result.
A failure result represent a run with a fatal error. It presents the error but no
products.
:param error: The execution error encountered.
:type error: :class:`pants.base.exceptions.TaskError`
:rtype: `ExecutionResult`
"""
return cls(error=error, root_products=None)
class ExecutionError(Exception):
pass
class WrappedNativeScheduler(object):
def __init__(self, native, build_root, work_dir, ignore_patterns, rule_index):
self._native = native
# TODO: The only (?) case where we use inheritance rather than exact type unions.
has_products_constraint = SubclassesOf(HasProducts)
self._root_subject_types = sorted(rule_index.roots)
# Create the ExternContext, and the native Scheduler.
self._tasks = native.new_tasks()
self._register_rules(rule_index)
self._scheduler = native.new_scheduler(
self._tasks,
self._root_subject_types,
build_root,
work_dir,
ignore_patterns,
Snapshot,
_Snapshots,
FileContent,
FilesContent,
Path,
Dir,
File,
Link,
has_products_constraint,
constraint_for(Address),
constraint_for(Variants),
constraint_for(PathGlobs),
constraint_for(Snapshot),
constraint_for(_Snapshots),
constraint_for(FilesContent),
constraint_for(Dir),
constraint_for(File),
constraint_for(Link),
)
def _root_type_ids(self):
return self._to_ids_buf(sorted(self._root_subject_types))
def graph_trace(self):
with temporary_file_path() as path:
self._native.lib.graph_trace(self._scheduler, bytes(path))
with open(path) as fd:
for line in fd.readlines():
yield line.rstrip()
def assert_ruleset_valid(self):
raw_value = self._native.lib.validator_run(self._scheduler)
value = self._from_value(raw_value)
if isinstance(value, Exception):
raise ValueError(str(value))
def _to_value(self, obj):
return self._native.context.to_value(obj)
def _from_value(self, val):
return self._native.context.from_value(val)
def _to_id(self, typ):
return self._native.context.to_id(typ)
def _to_key(self, obj):
return self._native.context.to_key(obj)
def _from_id(self, cdata):
return self._native.context.from_id(cdata)
def _from_key(self, cdata):
return self._native.context.from_key(cdata)
def _to_constraint(self, type_or_constraint):
return TypeConstraint(self._to_id(constraint_for(type_or_constraint)))
def _to_ids_buf(self, types):
return self._native.to_ids_buf(types)
def _to_utf8_buf(self, string):
return self._native.context.utf8_buf(string)
def _register_rules(self, rule_index):
"""Record the given RuleIndex on `self._tasks`."""
registered = set()
for product_type, rules in rule_index.rules.items():
# TODO: The rules map has heterogeneous keys, so we normalize them to type constraints
# and dedupe them before registering to the native engine:
# see: https://github.com/pantsbuild/pants/issues/4005
output_constraint = self._to_constraint(product_type)
for rule in rules:
key = (output_constraint, rule)
if key in registered:
continue
registered.add(key)
if type(rule) is SingletonRule:
self._register_singleton(output_constraint, rule)
elif type(rule) is TaskRule:
self._register_task(output_constraint, rule)
else:
raise ValueError('Unexpected Rule type: {}'.format(rule))
def _register_singleton(self, output_constraint, rule):
"""Register the given SingletonRule.
A SingletonRule installed for a type will be the only provider for that type.
"""
self._native.lib.tasks_singleton_add(self._tasks,
self._to_value(rule.value),
output_constraint)
def _register_task(self, output_constraint, rule):
"""Register the given TaskRule with the native scheduler."""
input_selects = rule.input_selectors
func = rule.func
self._native.lib.tasks_task_begin(self._tasks, Function(self._to_id(func)), output_constraint)
for selector in input_selects:
selector_type = type(selector)
product_constraint = self._to_constraint(selector.product)
if selector_type is Select:
self._native.lib.tasks_add_select(self._tasks, product_constraint)
elif selector_type is SelectVariant:
key_buf = self._to_utf8_buf(selector.variant_key)
self._native.lib.tasks_add_select_variant(self._tasks,
product_constraint,
key_buf)
elif selector_type is SelectDependencies:
self._native.lib.tasks_add_select_dependencies(self._tasks,
product_constraint,
self._to_constraint(selector.dep_product),
self._to_utf8_buf(selector.field),
self._to_ids_buf(selector.field_types))
elif selector_type is SelectTransitive:
self._native.lib.tasks_add_select_transitive(self._tasks,
product_constraint,
self._to_constraint(selector.dep_product),
self._to_utf8_buf(selector.field),
self._to_ids_buf(selector.field_types))
elif selector_type is SelectProjection:
self._native.lib.tasks_add_select_projection(self._tasks,
self._to_constraint(selector.product),
TypeId(self._to_id(selector.projected_subject)),
self._to_utf8_buf(selector.field),
self._to_constraint(selector.input_product))
else:
raise ValueError('Unrecognized Selector type: {}'.format(selector))
self._native.lib.tasks_task_end(self._tasks)
def visualize_graph_to_file(self, filename):
self._native.lib.graph_visualize(self._scheduler, bytes(filename))
def visualize_rule_graph_to_file(self, filename):
self._native.lib.rule_graph_visualize(
self._scheduler,
self._root_type_ids(),
bytes(filename))
def rule_graph_visualization(self):
with temporary_file_path() as path:
self.visualize_rule_graph_to_file(path)
with open(path) as fd:
for line in fd.readlines():
yield line.rstrip()
def rule_subgraph_visualization(self, root_subject_type, product_type):
root_type_id = TypeId(self._to_id(root_subject_type))
product_type_id = TypeConstraint(self._to_id(constraint_for(product_type)))
with temporary_file_path() as path:
self._native.lib.rule_subgraph_visualize(
self._scheduler,
root_type_id,
product_type_id,
bytes(path))
with open(path) as fd:
for line in fd.readlines():
yield line.rstrip()
def invalidate(self, filenames):
filenames_buf = self._native.context.utf8_buf_buf(filenames)
return self._native.lib.graph_invalidate(self._scheduler, filenames_buf)
def graph_len(self):
return self._native.lib.graph_len(self._scheduler)
def exec_reset(self):
self._native.lib.execution_reset(self._scheduler)
def add_root_selection(self, subject, product):
self._native.lib.execution_add_root_select(self._scheduler, self._to_key(subject),
self._to_constraint(product))
def run_and_return_stat(self):
return self._native.lib.execution_execute(self._scheduler)
def visualize_to_dir(self):
return self._native.visualize_to_dir
def to_keys(self, subjects):
return list(self._to_key(subject) for subject in subjects)
def pre_fork(self):
self._native.lib.scheduler_pre_fork(self._scheduler)
def root_entries(self, execution_request):
raw_roots = self._native.lib.execution_roots(self._scheduler)
try:
roots = []
for root, raw_root in zip(execution_request.roots,
self._native.unpack(raw_roots.nodes_ptr,
raw_roots.nodes_len)):
if raw_root.state_tag is 0:
state = None
elif raw_root.state_tag is 1:
state = Return(self._from_value(raw_root.state_value))
elif raw_root.state_tag is 2:
state = Throw(self._from_value(raw_root.state_value))
elif raw_root.state_tag is 3:
state = Throw(self._from_value(raw_root.state_value))
else:
raise ValueError(
'Unrecognized State type `{}` on: {}'.format(raw_root.state_tag, raw_root))
roots.append((root, state))
finally:
self._native.lib.nodes_destroy(raw_roots)
return roots
class LocalScheduler(object):
"""A scheduler that expands a product Graph by executing user defined Rules."""
def __init__(self,
work_dir,
goals,
rules,
project_tree,
native,
include_trace_on_error=True,
graph_lock=None):
"""
:param goals: A dict from a goal name to a product type. A goal is just an alias for a
particular (possibly synthetic) product.
:param rules: A set of Rules which is used to compute values in the product graph.
:param project_tree: An instance of ProjectTree for the current build root.
:param work_dir: The pants work dir.
:param native: An instance of engine.subsystem.native.Native.
:param include_trace_on_error: Include the trace through the graph upon encountering errors.
:type include_trace_on_error: bool
:param graph_lock: A re-entrant lock to use for guarding access to the internal product Graph
instance. Defaults to creating a new threading.RLock().
"""
self._products_by_goal = goals
self._project_tree = project_tree
self._include_trace_on_error = include_trace_on_error
self._product_graph_lock = graph_lock or threading.RLock()
self._run_count = 0
# Create the ExternContext, and the native Scheduler.
self._execution_request = None
# Validate and register all provided and intrinsic tasks.
rules = list(rules) + create_snapshot_rules()
rule_index = RuleIndex.create(rules)
self._scheduler = WrappedNativeScheduler(native,
project_tree.build_root,
work_dir,
project_tree.ignore_patterns,
rule_index)
# If configured, visualize the rule graph before asserting that it is valid.
if self._scheduler.visualize_to_dir() is not None:
rule_graph_name = 'rule_graph.dot'
self.visualize_rule_graph_to_file(os.path.join(self._scheduler.visualize_to_dir(), rule_graph_name))
self._scheduler.assert_ruleset_valid()
def trace(self):
"""Yields a stringified 'stacktrace' starting from the scheduler's roots."""
with self._product_graph_lock:
for line in self._scheduler.graph_trace():
yield line
def visualize_graph_to_file(self, filename):
"""Visualize a graph walk by writing graphviz `dot` output to a file.
:param iterable roots: An iterable of the root nodes to begin the graph walk from.
:param str filename: The filename to output the graphviz output to.
"""
with self._product_graph_lock:
self._scheduler.visualize_graph_to_file(filename)
def visualize_rule_graph_to_file(self, filename):
self._scheduler.visualize_rule_graph_to_file(filename)
def build_request(self, goals, subjects):
"""Translate the given goal names into product types, and return an ExecutionRequest.
:param goals: The list of goal names supplied on the command line.
:type goals: list of string
:param subjects: A list of Spec and/or PathGlobs objects.
:type subject: list of :class:`pants.base.specs.Spec`, `pants.build_graph.Address`, and/or
:class:`pants.engine.fs.PathGlobs` objects.
:returns: An ExecutionRequest for the given goals and subjects.
"""
return self.execution_request([self._products_by_goal[goal_name] for goal_name in goals],
subjects)
def execution_request(self, products, subjects):
"""Create and return an ExecutionRequest for the given products and subjects.
The resulting ExecutionRequest object will contain keys tied to this scheduler's product Graph, and
so it will not be directly usable with other scheduler instances without being re-created.
An ExecutionRequest for an Address represents exactly one product output, as does SingleAddress. But
we differentiate between them here in order to normalize the output for all Spec objects
as "list of product".
:param products: A list of product types to request for the roots.
:type products: list of types
:param subjects: A list of Spec and/or PathGlobs objects.
:type subject: list of :class:`pants.base.specs.Spec`, `pants.build_graph.Address`, and/or
:class:`pants.engine.fs.PathGlobs` objects.
:returns: An ExecutionRequest for the given products and subjects.
"""
return ExecutionRequest(tuple((s, p) for s in subjects for p in products))
@contextmanager
def locked(self):
with self._product_graph_lock:
yield
def root_entries(self, execution_request):
"""Returns the roots for the given ExecutionRequest as a list of tuples of:
((subject, product), State)
"""
with self._product_graph_lock:
if self._execution_request is not execution_request:
raise AssertionError(
"Multiple concurrent executions are not supported! {} vs {}".format(
self._execution_request, execution_request))
return self._scheduler.root_entries(execution_request)
def invalidate_files(self, filenames):
"""Calls `Graph.invalidate_files()` against an internal product Graph instance."""
# NB: Watchman will never trigger an invalidation event for the root directory that
# is being watched. Instead, we treat any invalidation of a path directly in the
# root directory as an invalidation of the root.
if any(os.path.dirname(f) in ('', '.') for f in filenames):
filenames = tuple(filenames) + ('', '.')
with self._product_graph_lock:
invalidated = self._scheduler.invalidate(filenames)
logger.debug('invalidated %d nodes for: %s', invalidated, filenames)
return invalidated
def node_count(self):
with self._product_graph_lock:
return self._scheduler.graph_len()
def _execution_add_roots(self, execution_request):
if self._execution_request is not None:
self._scheduler.exec_reset()
self._execution_request = execution_request
for subject, product in execution_request.roots:
self._scheduler.add_root_selection(subject, product)
def pre_fork(self):
self._scheduler.pre_fork()
def schedule(self, execution_request):
"""Yields batches of Steps until the roots specified by the request have been completed.
This method should be called by exactly one scheduling thread, but the Step objects returned
by this method are intended to be executed in multiple threads, and then satisfied by the
scheduling thread.
"""
with self._product_graph_lock:
start_time = time.time()
# Reset execution, and add any roots from the request.
self._execution_add_roots(execution_request)
# Execute in native engine.
execution_stat = self._scheduler.run_and_return_stat()
# Receive execution statistics.
runnable_count = execution_stat.runnable_count
scheduling_iterations = execution_stat.scheduling_iterations
if self._scheduler.visualize_to_dir() is not None:
name = 'run.{}.dot'.format(self._run_count)
self._run_count += 1
self.visualize_graph_to_file(os.path.join(self._scheduler.visualize_to_dir(), name))
logger.debug(
'ran %s scheduling iterations and %s runnables in %f seconds. '
'there are %s total nodes.',
scheduling_iterations,
runnable_count,
time.time() - start_time,
self._scheduler.graph_len()
)
def execute(self, execution_request):
"""Executes the requested build and returns the resulting root entries.
TODO: Merge with `schedule`.
TODO2: Use of TaskError here is... odd.
:param execution_request: The description of the goals to achieve.
:type execution_request: :class:`ExecutionRequest`
:returns: The result of the run.
:rtype: :class:`Engine.Result`
"""
try:
self.schedule(execution_request)
return ExecutionResult.finished(self._scheduler.root_entries(execution_request))
except TaskError as e:
return ExecutionResult.failure(e)
def products_request(self, products, subjects):
"""Executes a request for multiple products for some subjects, and returns the products.
:param list products: A list of product type for the request.
:param list subjects: A list of subjects for the request.
:returns: A dict from product type to lists of products each with length matching len(subjects).
"""
request = self.execution_request(products, subjects)
result = self.execute(request)
if result.error:
raise result.error
# State validation.
unknown_state_types = tuple(
type(state) for _, state in result.root_products if type(state) not in (Throw, Return)
)
if unknown_state_types:
State.raise_unrecognized(unknown_state_types)
# Throw handling.
# TODO: See https://github.com/pantsbuild/pants/issues/3912
throw_root_states = tuple(state for root, state in result.root_products if type(state) is Throw)
if throw_root_states:
if self._include_trace_on_error:
cumulative_trace = '\n'.join(self.trace())
raise ExecutionError('Received unexpected Throw state(s):\n{}'.format(cumulative_trace))
if len(throw_root_states) == 1:
raise throw_root_states[0].exc
else:
raise ExecutionError('Multiple exceptions encountered:\n {}'
.format('\n '.join('{}: {}'.format(type(t.exc).__name__, str(t.exc))
for t in throw_root_states)))
# Everything is a Return: we rely on the fact that roots are ordered to preserve subject
# order in output lists.
product_results = defaultdict(list)
for (_, product), state in result.root_products:
product_results[product].append(state.value)
return product_results
def product_request(self, product, subjects):
"""Executes a request for a single product for some subjects, and returns the products.
:param class product: A product type for the request.
:param list subjects: A list of subjects for the request.
:returns: A list of the requested products, with length match len(subjects).
"""
return self.products_request([product], subjects)[product]
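

# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Illustrates the calling convention documented on products_request() and
# product_request() above. `scheduler` is assumed to be an already-constructed
# LocalScheduler; `product_type` and `subjects` are placeholders for whatever
# the caller needs.
def collect_products(scheduler, product_type, subjects):
  """Return a {subject: product_value} dict for a single product type."""
  values = scheduler.product_request(product_type, subjects)
  # product_request preserves subject order, so the two sequences line up.
  return dict(zip(subjects, values))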
|
landism/pants
|
src/python/pants/engine/scheduler.py
|
Python
|
apache-2.0
| 21,934 | 0.008252 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ChooseFastestBranchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ChooseFastestBranchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testCore(self):
def build_ds(size):
dataset = dataset_ops.Dataset.range(size)
def branch_0(dataset):
return dataset.map(lambda x: x).batch(10)
def branch_1(dataset):
return dataset.batch(10).map(lambda x: x)
return optimization._ChooseFastestBranchDataset( # pylint: disable=protected-access
dataset, [branch_0, branch_1],
ratio_numerator=10)
for size in [100, 1000]:
self.run_core_tests(lambda: build_ds(size), None, size // 10) # pylint: disable=cell-var-from-loop
def testWithCapture(self):
def build_ds():
dataset = dataset_ops.Dataset.range(10)
const_64 = constant_op.constant(1, dtypes.int64)
const_32 = constant_op.constant(1, dtypes.int32)
def branch_0(dataset):
return dataset.map(lambda x: x + const_64)
def branch_1(dataset):
return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))
return optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1], num_elements_per_branch=3)
self.run_core_tests(build_ds, None, 10)
def testWithPrefetch(self):
def build_ds():
dataset = dataset_ops.Dataset.range(10)
const_64 = constant_op.constant(1, dtypes.int64)
const_32 = constant_op.constant(1, dtypes.int32)
def branch_0(dataset):
return dataset.map(lambda x: x + const_64)
def branch_1(dataset):
return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))
return optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1], num_elements_per_branch=3)
self.run_core_tests(build_ds, None, 10)
def testWithMoreOutputThanInput(self):
def build_ds():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(1000).batch(100)
def branch(dataset):
return dataset.apply(batching.unbatch())
return optimization._ChooseFastestBranchDataset(
dataset, [branch, branch],
ratio_denominator=10,
num_elements_per_branch=100)
self.run_core_tests(build_ds, None, 1000)
if __name__ == "__main__":
test.main()
|
kevin-coder/tensorflow-fork
|
tensorflow/python/data/experimental/kernel_tests/serialization/choose_fastest_branch_dataset_serialization_test.py
|
Python
|
apache-2.0
| 3,611 | 0.008031 |
from interfaces.labels_map import LabelsMap
from helpers.python_ext import to_str
class LTS:
def __init__(self,
init_states,
model_by_signal:dict,
tau_model:LabelsMap,
state_name:str,
input_signals,
output_signals):
self._output_models = model_by_signal
self._tau_model = tau_model
self._init_states = set(init_states)
self._state_name = state_name
self._output_signals = output_signals # TODO: duplication with _output_models?
self._input_signals = input_signals
@property
def state_name(self):
return self._state_name
@property
def input_signals(self):
return self._input_signals
@property
def output_signals(self):
return self._output_signals
@property
def init_states(self):
return self._init_states
@property
def states(self):
# states = set(k[self._state_name] for k in self._tau_model)
# return the range of tau \cup init_states
states = set(map(lambda l_v: l_v[1], self._tau_model.items()))
states.update(self.init_states)
return states
@property
def tau_model(self) -> LabelsMap:
return self._tau_model
@property
def model_by_signal(self):
return self._output_models
@property
def output_models(self) -> dict:
return self._output_models
def __str__(self):
return 'LTS:\n' \
' inputs: {inputs}\n' \
' outputs: {outputs}\n' \
' init_states: {init}\n' \
' states: {states}\n' \
' output_models: {output_models}'.format(init=str(self._init_states),
states=str(self.states),
output_models=str(self.model_by_signal),
inputs=to_str(self._input_signals),
outputs=to_str(self._output_signals))
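
# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Assumes `lts` is an already-constructed LTS instance; only properties defined
# above are touched, so nothing is assumed about LabelsMap internals.
def summarize(lts):
    print('state variable:', lts.state_name)
    print('inputs:', to_str(lts.input_signals))
    print('outputs:', to_str(lts.output_signals))
    print('nof states:', len(lts.states))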
|
5nizza/party-elli
|
interfaces/LTS.py
|
Python
|
mit
| 2,142 | 0.004202 |
# Copyright 2013 IBM Corp
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import testtools
from tempest.lib import base as test
from tempest.lib import decorators
from tempest.tests.lib import base
class TestSkipBecauseDecorator(base.TestCase):
def _test_skip_because_helper(self, expected_to_skip=True,
**decorator_args):
class TestFoo(test.BaseTestCase):
_interface = 'json'
@decorators.skip_because(**decorator_args)
def test_bar(self):
return 0
t = TestFoo('test_bar')
if expected_to_skip:
self.assertRaises(testtools.TestCase.skipException, t.test_bar)
else:
# assert that test_bar returned 0
self.assertEqual(TestFoo('test_bar').test_bar(), 0)
def test_skip_because_bug(self):
self._test_skip_because_helper(bug='12345')
def test_skip_because_bug_and_condition_true(self):
self._test_skip_because_helper(bug='12348', condition=True)
def test_skip_because_bug_and_condition_false(self):
self._test_skip_because_helper(expected_to_skip=False,
bug='12349', condition=False)
def test_skip_because_bug_without_bug_never_skips(self):
"""Never skip without a bug parameter."""
self._test_skip_because_helper(expected_to_skip=False,
condition=True)
self._test_skip_because_helper(expected_to_skip=False)
def test_skip_because_invalid_bug_number(self):
"""Raise ValueError if with an invalid bug number"""
self.assertRaises(ValueError, self._test_skip_because_helper,
bug='critical_bug')
class TestIdempotentIdDecorator(base.TestCase):
def _test_helper(self, _id, **decorator_args):
@decorators.idempotent_id(_id)
def foo():
"""Docstring"""
pass
return foo
def _test_helper_without_doc(self, _id, **decorator_args):
@decorators.idempotent_id(_id)
def foo():
pass
return foo
def test_positive(self):
_id = str(uuid.uuid4())
foo = self._test_helper(_id)
self.assertIn('id-%s' % _id, getattr(foo, '__testtools_attrs'))
self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
def test_positive_without_doc(self):
_id = str(uuid.uuid4())
foo = self._test_helper_without_doc(_id)
self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
def test_idempotent_id_not_str(self):
_id = 42
self.assertRaises(TypeError, self._test_helper, _id)
def test_idempotent_id_not_valid_uuid(self):
_id = '42'
self.assertRaises(ValueError, self._test_helper, _id)
class TestSkipUnlessAttrDecorator(base.TestCase):
def _test_skip_unless_attr(self, attr, expected_to_skip=True):
class TestFoo(test.BaseTestCase):
expected_attr = not expected_to_skip
@decorators.skip_unless_attr(attr)
def test_foo(self):
pass
t = TestFoo('test_foo')
if expected_to_skip:
self.assertRaises(testtools.TestCase.skipException,
                              t.test_foo)
else:
try:
t.test_foo()
except Exception:
raise testtools.TestCase.failureException()
def test_skip_attr_does_not_exist(self):
self._test_skip_unless_attr('unexpected_attr')
def test_skip_attr_false(self):
self._test_skip_unless_attr('expected_attr')
def test_no_skip_for_attr_exist_and_true(self):
self._test_skip_unless_attr('expected_attr', expected_to_skip=False)
|
nuagenetworks/tempest
|
tempest/tests/lib/test_decorators.py
|
Python
|
apache-2.0
| 4,381 | 0 |