text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
class WordDictionary(object):
    """Stores words in a nested-dict trie; search supports '.' as a
    single-character wildcard. The value '#' marks a complete word."""

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.root = {}

    def addWord(self, word):
        """
        Adds a word into the data structure.
        :type word: str
        :rtype: void
        """
        current = self.root
        for ch in word:
            # setdefault creates the child node only when missing.
            current = current.setdefault(ch, {})
        current['#'] = '#'

    def search(self, word):
        """
        Returns if the word is in the data structure. A word could
        contain the dot character '.' to represent any one letter.
        :type word: str
        :rtype: bool
        """
        # Breadth-first walk over candidate trie nodes; a '.' fans out
        # to every child of every node currently on the frontier.
        frontier = [self.root]
        for ch in word:
            next_frontier = []
            for node in frontier:
                if ch == '.':
                    # Skip the '#' terminal marker values; only dict children count.
                    next_frontier.extend(
                        child for child in node.values() if child != '#')
                elif ch in node:
                    next_frontier.append(node[ch])
            frontier = next_frontier
        return any('#' in node for node in frontier)
|
ChuanleiGuo/AlgorithmsPlayground
|
LeetCodeSolutions/python/211_Add_and_Search_Word_Data_structure_design.py
|
Python
|
mit
| 1,004 | 0.000996 |
from fastapi.testclient import TestClient
from docs_src.metadata.tutorial001 import app

# Shared synchronous test client wrapping the tutorial app.
client = TestClient(app)

# Expected OpenAPI document: mirrors the metadata configured in
# docs_src.metadata.tutorial001 (title, description, terms of service,
# contact, license and version), plus the single /items/ GET path.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {
        "title": "ChimichangApp",
        "description": "\nChimichangApp API helps you do awesome stuff. 🚀\n\n## Items\n\nYou can **read items**.\n\n## Users\n\nYou will be able to:\n\n* **Create users** (_not implemented_).\n* **Read users** (_not implemented_).\n",
        "termsOfService": "http://example.com/terms/",
        "contact": {
            "name": "Deadpoolio the Amazing",
            "url": "http://x-force.example.com/contact/",
            "email": "dp@x-force.example.com",
        },
        "license": {
            "name": "Apache 2.0",
            "url": "https://www.apache.org/licenses/LICENSE-2.0.html",
        },
        "version": "0.0.1",
    },
    "paths": {
        "/items/": {
            "get": {
                "summary": "Read Items",
                "operationId": "read_items_items__get",
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    }
                },
            }
        }
    },
}
def test_openapi_schema():
    """The app must serve exactly the customized OpenAPI document."""
    reply = client.get("/openapi.json")
    assert reply.status_code == 200, reply.text
    assert reply.json() == openapi_schema
def test_items():
    """GET /items/ returns the single hard-coded item from the tutorial."""
    reply = client.get("/items/")
    assert reply.status_code == 200, reply.text
    assert reply.json() == [{"name": "Katana"}]
|
tiangolo/fastapi
|
tests/test_tutorial/test_metadata/test_tutorial001.py
|
Python
|
mit
| 1,611 | 0.000622 |
import sys
from numpy import *
from scipy import signal
import scipy.io.wavfile
from matplotlib import pyplot
import sklearn.decomposition
def main():
    """Blind-source-separate a two-channel recording with FastICA, write the
    separated tracks to disk, and plot original vs. separated signals."""
    # First load the audio data, the audio data on this example is obtained from http://www.ism.ac.jp/~shiro/research/blindsep.html
    rate, source = scipy.io.wavfile.read('/Users/nareshshah/blind_source_data/X_rsm2.wav')
    # The 2 sources are stored in left and right channels of the audio
    source_1, source_2 = source[:, 0], source[:, 1]
    data = c_[source_1, source_2]
    # Normalize the audio from int16 range to [-1, 1]
    data = data / 2.0 ** 15
    # Perform Fast ICA on the data to obtain the separated sources
    fast_ica = sklearn.decomposition.FastICA(n_components=2)
    separated = fast_ica.fit_transform(data)
    # Check, data = separated X mixing_matrix + mean
    assert allclose(data, separated.dot(fast_ica.mixing_.T) + fast_ica.mean_)
    # Rescale the separated result into [-1, 1].
    # BUGFIX: the original built a Python map() object and fed it to reshape
    # with a float shape (shape[0] / 2), both of which fail on Python 3.
    # Vectorized NumPy arithmetic gives the same rescale and keeps the
    # (n_samples, 2) shape intact, so no flatten/reshape round-trip is needed.
    min_result, max_result = separated.min(), separated.max()
    separated = 2.0 * (separated - min_result) / (max_result - min_result) - 1.0
    # Store the separated audio, listen to them later
    scipy.io.wavfile.write('/Users/nareshshah/blind_source_data/separated_1.wav', rate, separated[:, 0])
    scipy.io.wavfile.write('/Users/nareshshah/blind_source_data/separated_2.wav', rate, separated[:, 1])
    # Plot the original and separated audio data
    fig = pyplot.figure(figsize=(10, 8))
    fig.canvas.set_window_title('Blind Source Separation')
    ax = fig.add_subplot(221)
    ax.set_title('Source #1')
    ax.set_ylim([-1, 1])
    ax.get_xaxis().set_visible(False)
    pyplot.plot(data[:, 0], color='r')
    ax = fig.add_subplot(223)
    ax.set_ylim([-1, 1])
    ax.set_title('Source #2')
    ax.get_xaxis().set_visible(False)
    pyplot.plot(data[:, 1], color='r')
    ax = fig.add_subplot(222)
    ax.set_ylim([-1, 1])
    ax.set_title('Separated #1')
    ax.get_xaxis().set_visible(False)
    pyplot.plot(separated[:, 0], color='g')
    ax = fig.add_subplot(224)
    ax.set_ylim([-1, 1])
    ax.set_title('Separated #2')
    ax.get_xaxis().set_visible(False)
    pyplot.plot(separated[:, 1], color='g')
    pyplot.show()
|
nareshshah139/CocktailPartyAlgorithm1
|
CocktailPartyAlgorithm.py
|
Python
|
mit
| 2,277 | 0.039087 |
#!/usr/bin/env python
#coding=utf-8
from twisted.python import log
from toughradius.radiusd.settings import *
import logging
import datetime
def process(req=None, user=None, radiusd=None, **kwargs):
    """Handle a RADIUS accounting-stop packet: close the tracked online
    session (if any) and persist an accounting ticket."""
    # Only accounting-stop packets are handled by this plugin.
    if not req.get_acct_status_type() == STATUS_TYPE_STOP:
        return

    runstat = radiusd.runstat
    store = radiusd.store
    runstat.acct_stop += 1

    ticket = req.get_ticket()
    if not ticket.nas_addr:
        # Fall back to the packet's source address when the NAS didn't send one.
        ticket.nas_addr = req.source[0]

    now = datetime.datetime.now()
    online = store.get_online(ticket.nas_addr, ticket.acct_session_id)

    if online:
        # Session was tracked: close it and copy its recorded start data.
        store.del_online(ticket.nas_addr, ticket.acct_session_id)
        ticket.acct_start_time = online['acct_start_time']
        ticket.acct_stop_time = now.strftime("%Y-%m-%d %H:%M:%S")
        ticket.start_source = online['start_source']
        ticket.stop_source = STATUS_TYPE_STOP
    else:
        # No online record: reconstruct the start time from the reported
        # session duration so the ticket still spans the whole session.
        session_time = ticket.acct_session_time
        stop_time = now.strftime("%Y-%m-%d %H:%M:%S")
        start_time = (now - datetime.timedelta(seconds=int(session_time))).strftime("%Y-%m-%d %H:%M:%S")
        ticket.acct_start_time = start_time
        ticket.acct_stop_time = stop_time
        ticket.start_source = STATUS_TYPE_STOP
        ticket.stop_source = STATUS_TYPE_STOP
    store.add_ticket(ticket)

    radiusd.syslog.info('[username:%s] Accounting stop request, remove online'%req.get_user_name(),level=logging.INFO)
|
davislidaqing/Mcoderadius
|
toughradius/radiusd/plugins/acct_stop_process.py
|
Python
|
agpl-3.0
| 1,514 | 0.018494 |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/duplicate-sources.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that specifying a source file more than once works correctly
and dos not cause a rebuild.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
def cat(target, source, env):
t = open(str(target[0]), 'wb')
for s in source:
t.write(open(str(s), 'rb').read())
t.close()
env = Environment(BUILDERS = {'Cat' : Builder(action = cat)})
env.Cat('out.txt', ['f1.in', 'f2.in', 'f1.in'])
""")
test.write('f1.in', "f1.in\n")
test.write('f2.in', "f2.in\n")
test.run(arguments='--debug=explain .')
test.must_match('out.txt', "f1.in\nf2.in\nf1.in\n")
test.up_to_date(options='--debug=explain', arguments='.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
EmanueleCannizzaro/scons
|
test/duplicate-sources.py
|
Python
|
mit
| 2,021 | 0.000495 |
# candidate/controllers.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import CandidateCampaignListManager, CandidateCampaignManager
from ballot.models import CANDIDATE
from config.base import get_environment_variable
from django.contrib import messages
from django.http import HttpResponse
from exception.models import handle_exception
from import_export_vote_smart.controllers import retrieve_and_match_candidate_from_vote_smart, \
retrieve_candidate_photo_from_vote_smart
import json
from office.models import ContestOfficeManager
from politician.models import PoliticianManager
import requests
import wevote_functions.admin
from wevote_functions.functions import positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)

# Deployment-specific secrets/endpoints come from environment variables.
WE_VOTE_API_KEY = get_environment_variable("WE_VOTE_API_KEY")
CANDIDATES_SYNC_URL = get_environment_variable("CANDIDATES_SYNC_URL")
def candidates_import_from_sample_file():
    """
    Get the json data, and either create new entries or update existing
    :return:
    """
    # Load saved json from local file
    logger.info("Loading CandidateCampaigns from local file")
    with open("candidate/import_data/candidate_campaigns_sample.json") as sample_file:
        parsed_candidates = json.load(sample_file)
    # Delegate the actual create/update work to the shared importer.
    return candidates_import_from_structured_json(parsed_candidates)
def candidates_import_from_master_server(request, google_civic_election_id=''):
    """
    Get the json data from the We Vote master server, and either create new
    entries or update existing ones.
    :param request: Django HttpRequest (used only for flash messaging)
    :param google_civic_election_id: limit the sync to one election ('' = all)
    :return: import results dict, including 'duplicates_removed'
    """
    messages.add_message(request, messages.INFO, "Loading Candidates from We Vote Master servers")
    logger.info("Loading Candidates from We Vote Master servers")
    # Request json file from We Vote servers.
    # BUGFIX: keep the HTTP response in its own variable instead of rebinding
    # (shadowing) the incoming Django ``request`` parameter.
    sync_response = requests.get(CANDIDATES_SYNC_URL, params={
        "key": WE_VOTE_API_KEY,  # This comes from an environment variable
        "format": 'json',
        "google_civic_election_id": google_civic_election_id,
    })
    structured_json = json.loads(sync_response.text)

    results = filter_candidates_structured_json_for_local_duplicates(structured_json)
    filtered_structured_json = results['structured_json']
    duplicates_removed = results['duplicates_removed']

    import_results = candidates_import_from_structured_json(filtered_structured_json)
    import_results['duplicates_removed'] = duplicates_removed

    return import_results
def filter_candidates_structured_json_for_local_duplicates(structured_json):
    """
    With this function, we remove candidates that seem to be duplicates, but have different we_vote_id's.
    We do not check to see if we have a matching office in this routine -- that is done elsewhere.
    :param structured_json: list of candidate dicts from the master server
    :return: dict with the filtered 'structured_json' and a 'duplicates_removed' count
    """
    duplicates_removed = 0
    filtered_structured_json = []
    candidate_list_manager = CandidateCampaignListManager()
    for one_candidate in structured_json:
        # IDIOM: dict.get with a default replaces the repetitive
        # "x['key'] if 'key' in x else ''" conditional expressions.
        candidate_name = one_candidate.get('candidate_name', '')
        google_civic_candidate_name = one_candidate.get('google_civic_candidate_name', '')
        we_vote_id = one_candidate.get('we_vote_id', '')
        google_civic_election_id = one_candidate.get('google_civic_election_id', '')
        contest_office_we_vote_id = one_candidate.get('contest_office_we_vote_id', '')
        politician_we_vote_id = one_candidate.get('politician_we_vote_id', '')
        candidate_twitter_handle = one_candidate.get('candidate_twitter_handle', '')
        vote_smart_id = one_candidate.get('vote_smart_id', '')
        maplight_id = one_candidate.get('maplight_id', '')

        # Check to see if there is an entry that matches in all critical ways, minus the we_vote_id
        we_vote_id_from_master = we_vote_id

        results = candidate_list_manager.retrieve_possible_duplicate_candidates(
            candidate_name, google_civic_candidate_name, google_civic_election_id, contest_office_we_vote_id,
            politician_we_vote_id, candidate_twitter_handle, vote_smart_id, maplight_id,
            we_vote_id_from_master)

        if results['candidate_list_found']:
            # There seems to be a duplicate already in this database using a different we_vote_id
            duplicates_removed += 1
        else:
            filtered_structured_json.append(one_candidate)

    candidates_results = {
        'success':              True,
        'status':               "FILTER_CANDIDATES_FOR_DUPLICATES_PROCESS_COMPLETE",
        'duplicates_removed':   duplicates_removed,
        'structured_json':      filtered_structured_json,
    }
    return candidates_results
def candidates_import_from_structured_json(structured_json):
    """
    Create or update CandidateCampaign entries from a list of candidate dicts
    (as exported by another We Vote server).
    :param structured_json: list of candidate dicts
    :return: dict with 'saved', 'updated' and 'not_processed' counters
    """
    candidate_campaign_manager = CandidateCampaignManager()
    candidates_saved = 0
    candidates_updated = 0
    candidates_not_processed = 0
    for one_candidate in structured_json:
        candidate_name = one_candidate['candidate_name'] if 'candidate_name' in one_candidate else ''
        we_vote_id = one_candidate['we_vote_id'] if 'we_vote_id' in one_candidate else ''
        google_civic_election_id = \
            one_candidate['google_civic_election_id'] if 'google_civic_election_id' in one_candidate else ''
        ocd_division_id = one_candidate['ocd_division_id'] if 'ocd_division_id' in one_candidate else ''
        contest_office_we_vote_id = \
            one_candidate['contest_office_we_vote_id'] if 'contest_office_we_vote_id' in one_candidate else ''
        # This routine imports from another We Vote server, so a contest_office_id doesn't come from import
        # Look up contest_office in this local database.
        # If we don't find a contest_office by we_vote_id, then we know the contest_office hasn't been imported
        # from another server yet, so we fail out.
        contest_office_manager = ContestOfficeManager()
        contest_office_id = contest_office_manager.fetch_contest_office_id_from_we_vote_id(
            contest_office_we_vote_id)
        # All four identifying values must be present before we touch the database.
        if positive_value_exists(candidate_name) and positive_value_exists(google_civic_election_id) \
                and positive_value_exists(we_vote_id) and positive_value_exists(contest_office_id):
            proceed_to_update_or_create = True
        else:
            proceed_to_update_or_create = False
        if proceed_to_update_or_create:
            updated_candidate_campaign_values = {
                # Values we search against
                'google_civic_election_id': google_civic_election_id,
                'ocd_division_id': ocd_division_id,
                'contest_office_we_vote_id': contest_office_we_vote_id,
                'candidate_name': candidate_name,
                # The rest of the values
                'we_vote_id': we_vote_id,
                'maplight_id': one_candidate['maplight_id'] if 'maplight_id' in one_candidate else None,
                'vote_smart_id': one_candidate['vote_smart_id'] if 'vote_smart_id' in one_candidate else None,
                'contest_office_id': contest_office_id,  # Retrieved from above
                'politician_we_vote_id':
                    one_candidate['politician_we_vote_id'] if 'politician_we_vote_id' in one_candidate else '',
                'state_code': one_candidate['state_code'] if 'state_code' in one_candidate else '',
                'party': one_candidate['party'] if 'party' in one_candidate else '',
                'order_on_ballot': one_candidate['order_on_ballot'] if 'order_on_ballot' in one_candidate else 0,
                'candidate_url': one_candidate['candidate_url'] if 'candidate_url' in one_candidate else '',
                'photo_url': one_candidate['photo_url'] if 'photo_url' in one_candidate else '',
                'photo_url_from_maplight':
                    one_candidate['photo_url_from_maplight'] if 'photo_url_from_maplight' in one_candidate else '',
                'photo_url_from_vote_smart':
                    one_candidate['photo_url_from_vote_smart'] if 'photo_url_from_vote_smart' in one_candidate else '',
                'facebook_url': one_candidate['facebook_url'] if 'facebook_url' in one_candidate else '',
                'twitter_url': one_candidate['twitter_url'] if 'twitter_url' in one_candidate else '',
                'google_plus_url': one_candidate['google_plus_url'] if 'google_plus_url' in one_candidate else '',
                'youtube_url': one_candidate['youtube_url'] if 'youtube_url' in one_candidate else '',
                'google_civic_candidate_name':
                    one_candidate['google_civic_candidate_name']
                    if 'google_civic_candidate_name' in one_candidate else '',
                'candidate_email': one_candidate['candidate_email'] if 'candidate_email' in one_candidate else '',
                'candidate_phone': one_candidate['candidate_phone'] if 'candidate_phone' in one_candidate else '',
                'twitter_user_id': one_candidate['twitter_user_id'] if 'twitter_user_id' in one_candidate else '',
                'candidate_twitter_handle': one_candidate['candidate_twitter_handle']
                    if 'candidate_twitter_handle' in one_candidate else '',
                'twitter_name': one_candidate['twitter_name'] if 'twitter_name' in one_candidate else '',
                'twitter_location': one_candidate['twitter_location'] if 'twitter_location' in one_candidate else '',
                # NOTE(review): count fields default to '' (not 0) -- confirm downstream tolerates that.
                'twitter_followers_count': one_candidate['twitter_followers_count']
                    if 'twitter_followers_count' in one_candidate else '',
                'twitter_profile_image_url_https': one_candidate['twitter_profile_image_url_https']
                    if 'twitter_profile_image_url_https' in one_candidate else '',
                'twitter_description': one_candidate['twitter_description']
                    if 'twitter_description' in one_candidate else '',
                'wikipedia_page_id': one_candidate['wikipedia_page_id']
                    if 'wikipedia_page_id' in one_candidate else '',
                'wikipedia_page_title': one_candidate['wikipedia_page_title']
                    if 'wikipedia_page_title' in one_candidate else '',
                'wikipedia_photo_url': one_candidate['wikipedia_photo_url']
                    if 'wikipedia_photo_url' in one_candidate else '',
                'ballotpedia_page_title': one_candidate['ballotpedia_page_title']
                    if 'ballotpedia_page_title' in one_candidate else '',
                'ballotpedia_photo_url': one_candidate['ballotpedia_photo_url']
                    if 'ballotpedia_photo_url' in one_candidate else '',
                'ballot_guide_official_statement': one_candidate['ballot_guide_official_statement']
                    if 'ballot_guide_official_statement' in one_candidate else '',
            }
            results = candidate_campaign_manager.update_or_create_candidate_campaign(
                we_vote_id, google_civic_election_id, ocd_division_id,
                contest_office_id, contest_office_we_vote_id,
                candidate_name, updated_candidate_campaign_values)
        else:
            candidates_not_processed += 1
            results = {
                'success': False,
                'status': 'Required value missing, cannot update or create'
            }
        if results['success']:
            # NOTE(review): assumes a successful manager result always carries
            # 'new_candidate_created' -- confirm against CandidateCampaignManager.
            if results['new_candidate_created']:
                candidates_saved += 1
            else:
                candidates_updated += 1
    candidates_results = {
        'success': True,
        'status': "CANDIDATES_IMPORT_PROCESS_COMPLETE",
        'saved': candidates_saved,
        'updated': candidates_updated,
        'not_processed': candidates_not_processed,
    }
    return candidates_results
def candidate_retrieve_for_api(candidate_id, candidate_we_vote_id):  # candidateRetrieve
    """
    Used by the api
    :param candidate_id: internal database id (takes precedence when both ids are given)
    :param candidate_we_vote_id: portable We Vote id
    :return: HttpResponse with the candidate serialized as json
    """
    # NOTE: Candidates retrieve is independent of *who* wants to see the data. Candidates retrieve never triggers
    # a ballot data lookup from Google Civic, like voterBallotItems does
    if not positive_value_exists(candidate_id) and not positive_value_exists(candidate_we_vote_id):
        status = 'VALID_CANDIDATE_ID_AND_CANDIDATE_WE_VOTE_ID_MISSING'
        json_data = {
            'status': status,
            'success': False,
            'kind_of_ballot_item': CANDIDATE,
            'id': candidate_id,
            'we_vote_id': candidate_we_vote_id,
            'google_civic_election_id': 0,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    candidate_manager = CandidateCampaignManager()
    # Prefer the internal id; fall back to the portable we_vote_id.
    if positive_value_exists(candidate_id):
        results = candidate_manager.retrieve_candidate_campaign_from_id(candidate_id)
        success = results['success']
        status = results['status']
    elif positive_value_exists(candidate_we_vote_id):
        results = candidate_manager.retrieve_candidate_campaign_from_we_vote_id(candidate_we_vote_id)
        success = results['success']
        status = results['status']
    else:
        status = 'VALID_CANDIDATE_ID_AND_CANDIDATE_WE_VOTE_ID_MISSING_2'  # It should be impossible to reach this
        json_data = {
            'status': status,
            'success': False,
            'kind_of_ballot_item': CANDIDATE,
            'id': candidate_id,
            'we_vote_id': candidate_we_vote_id,
            'google_civic_election_id': 0,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    if success:
        candidate_campaign = results['candidate_campaign']
        # Back-fill the cached office name the first time it is requested.
        if not positive_value_exists(candidate_campaign.contest_office_name):
            candidate_campaign = candidate_manager.refresh_cached_candidate_info(candidate_campaign)
        json_data = {
            'status': status,
            'success': True,
            'kind_of_ballot_item': CANDIDATE,
            'id': candidate_campaign.id,
            'we_vote_id': candidate_campaign.we_vote_id,
            'ballot_item_display_name': candidate_campaign.display_candidate_name(),
            'candidate_photo_url': candidate_campaign.candidate_photo_url(),
            'order_on_ballot': candidate_campaign.order_on_ballot,
            'google_civic_election_id': candidate_campaign.google_civic_election_id,
            'maplight_id': candidate_campaign.maplight_id,
            'contest_office_id': candidate_campaign.contest_office_id,
            'contest_office_we_vote_id': candidate_campaign.contest_office_we_vote_id,
            'contest_office_name': candidate_campaign.contest_office_name,
            'politician_id': candidate_campaign.politician_id,
            'politician_we_vote_id': candidate_campaign.politician_we_vote_id,
            # 'google_civic_candidate_name': candidate_campaign.google_civic_candidate_name,
            'party': candidate_campaign.political_party_display(),
            'ocd_division_id': candidate_campaign.ocd_division_id,
            'state_code': candidate_campaign.state_code,
            'candidate_url': candidate_campaign.candidate_url,
            'facebook_url': candidate_campaign.facebook_url,
            'twitter_url': candidate_campaign.twitter_url,
            'twitter_handle': candidate_campaign.fetch_twitter_handle(),
            'twitter_description': candidate_campaign.twitter_description,
            'twitter_followers_count': candidate_campaign.twitter_followers_count,
            'google_plus_url': candidate_campaign.google_plus_url,
            'youtube_url': candidate_campaign.youtube_url,
            'candidate_email': candidate_campaign.candidate_email,
            'candidate_phone': candidate_campaign.candidate_phone,
        }
    else:
        json_data = {
            'status': status,
            'success': False,
            'kind_of_ballot_item': CANDIDATE,
            'id': candidate_id,
            'we_vote_id': candidate_we_vote_id,
            'google_civic_election_id': 0,
        }

    return HttpResponse(json.dumps(json_data), content_type='application/json')
def candidates_retrieve_for_api(office_id, office_we_vote_id):
    """
    Used by the api
    :param office_id: internal id of the contest office
    :param office_we_vote_id: portable We Vote id of the contest office
    :return: HttpResponse with a json list of candidates for that office
    """
    # NOTE: Candidates retrieve is independent of *who* wants to see the data. Candidates retrieve never triggers
    # a ballot data lookup from Google Civic, like voterBallotItems does
    if not positive_value_exists(office_id) and not positive_value_exists(office_we_vote_id):
        status = 'VALID_OFFICE_ID_AND_OFFICE_WE_VOTE_ID_MISSING'
        json_data = {
            'status': status,
            'success': False,
            'office_id': office_id,
            'office_we_vote_id': office_we_vote_id,
            'google_civic_election_id': 0,
            'candidate_list': [],
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    candidate_list = []
    candidates_to_display = []
    google_civic_election_id = 0
    try:
        candidate_list_object = CandidateCampaignListManager()
        results = candidate_list_object.retrieve_all_candidates_for_office(office_id, office_we_vote_id)
        success = results['success']
        status = results['status']
        candidate_list = results['candidate_list']
    except Exception as e:
        # Surface the failure in the status string and log it centrally.
        status = 'FAILED candidates_retrieve. ' \
                 '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
        handle_exception(e, logger=logger, exception_message=status)
        success = False

    if success:
        # Reset office_we_vote_id and office_id so we are sure that it matches what we pull from the database
        office_id = 0
        office_we_vote_id = ''
        for candidate in candidate_list:
            one_candidate = {
                'id': candidate.id,
                'we_vote_id': candidate.we_vote_id,
                'ballot_item_display_name': candidate.display_candidate_name(),
                'candidate_photo_url': candidate.candidate_photo_url(),
                'party': candidate.political_party_display(),
                'order_on_ballot': candidate.order_on_ballot,
                'kind_of_ballot_item': CANDIDATE,
            }
            candidates_to_display.append(one_candidate.copy())
            # Capture the office_we_vote_id and google_civic_election_id so we can return
            if not positive_value_exists(office_id) and candidate.contest_office_id:
                office_id = candidate.contest_office_id
            if not positive_value_exists(office_we_vote_id) and candidate.contest_office_we_vote_id:
                office_we_vote_id = candidate.contest_office_we_vote_id
            if not positive_value_exists(google_civic_election_id) and candidate.google_civic_election_id:
                google_civic_election_id = candidate.google_civic_election_id

        if len(candidates_to_display):
            status = 'CANDIDATES_RETRIEVED'
        else:
            status = 'NO_CANDIDATES_RETRIEVED'

        json_data = {
            'status': status,
            'success': True,
            'office_id': office_id,
            'office_we_vote_id': office_we_vote_id,
            'google_civic_election_id': google_civic_election_id,
            'candidate_list': candidates_to_display,
        }
    else:
        json_data = {
            'status': status,
            'success': False,
            'office_id': office_id,
            'office_we_vote_id': office_we_vote_id,
            'google_civic_election_id': google_civic_election_id,
            'candidate_list': [],
        }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def retrieve_candidate_photos(we_vote_candidate, force_retrieve=False):
    """Link this candidate to a Vote Smart record (if possible) and then
    fetch a photo URL from Vote Smart; report what was found/retrieved."""
    # Has this candidate already been linked to a Vote Smart candidate?
    match_results = retrieve_and_match_candidate_from_vote_smart(we_vote_candidate, force_retrieve)

    # Guard clause: no Vote Smart match means no photo can be retrieved.
    if not positive_value_exists(match_results['vote_smart_candidate_id']):
        return {
            'success': False,
            'status': match_results['status'] + ' ' + 'RETRIEVE_CANDIDATE_PHOTOS_NO_CANDIDATE_MATCH',
            'vote_smart_candidate_exists': False,
            'vote_smart_candidate_just_retrieved': False,
            'vote_smart_candidate_photo_just_retrieved': False,
            'vote_smart_candidate_photo_exists': False,
        }

    # Bring out the object that now has vote_smart_id attached
    we_vote_candidate = match_results['we_vote_candidate']

    # Reach out to Vote Smart and retrieve photo URL
    photo_results = retrieve_candidate_photo_from_vote_smart(we_vote_candidate)

    photo_exists = False
    photo_just_retrieved = False
    if photo_results['success']:
        photo_exists = photo_results['vote_smart_candidate_photo_exists']
        photo_just_retrieved = photo_results['vote_smart_candidate_photo_just_retrieved']

    return {
        'success': photo_results['success'],
        'status': photo_results['status'],
        'vote_smart_candidate_exists': True,
        'vote_smart_candidate_just_retrieved': match_results['vote_smart_candidate_just_retrieved'],
        'vote_smart_candidate_photo_just_retrieved': photo_just_retrieved,
        'vote_smart_candidate_photo_exists': photo_exists,
    }
def candidate_politician_match(we_vote_candidate):
    """
    Link a candidate to a politician record: if already linked, synchronize the
    politician from the candidate; otherwise search for matches and, failing
    that, create a new politician.
    :param we_vote_candidate: CandidateCampaign instance (saved when linked)
    :return: dict describing what was found/created
    """
    politician_manager = PoliticianManager()
    politician_created = False
    politician_found = False
    politician_list_found = False
    politician_list = []
    # Does this candidate already have a we_vote_id for a politician?
    if positive_value_exists(we_vote_candidate.politician_we_vote_id):
        # Synchronize data and exit
        update_results = politician_manager.update_or_create_politician_from_candidate(we_vote_candidate)
        if update_results['politician_found']:
            politician = update_results['politician']
            # Save politician_we_vote_id in we_vote_candidate
            we_vote_candidate.politician_we_vote_id = politician.we_vote_id
            we_vote_candidate.politician_id = politician.id
            we_vote_candidate.save()
        results = {
            'success': update_results['success'],
            'status': update_results['status'],
            'politician_list_found': False,
            'politician_list': [],
            'politician_found': update_results['politician_found'],
            'politician_created': update_results['politician_created'],
            'politician': update_results['politician'],
        }
        return results
    else:
        # Search the politician table for a match
        results = politician_manager.retrieve_all_politicians_that_might_match_candidate(
            we_vote_candidate.vote_smart_id, we_vote_candidate.maplight_id, we_vote_candidate.candidate_twitter_handle,
            we_vote_candidate.candidate_name, we_vote_candidate.state_code)
        if results['politician_list_found']:
            # If here, return: several possible matches, the caller must choose.
            politician_list = results['politician_list']
            results = {
                'success': results['success'],
                'status': results['status'],
                'politician_list_found': True,
                'politician_list': politician_list,
                'politician_found': False,
                'politician_created': False,
                'politician': None,
            }
            return results
        elif results['politician_found']:
            # Save this politician_we_vote_id with the candidate
            politician = results['politician']
            # Save politician_we_vote_id in we_vote_candidate
            we_vote_candidate.politician_we_vote_id = politician.we_vote_id
            we_vote_candidate.politician_id = politician.id
            we_vote_candidate.save()
            results = {
                'success': results['success'],
                'status': results['status'],
                'politician_list_found': False,
                'politician_list': [],
                'politician_found': True,
                'politician_created': False,
                'politician': politician,
            }
            return results
        else:
            # Create new politician for this candidate
            create_results = politician_manager.update_or_create_politician_from_candidate(we_vote_candidate)
            if create_results['politician_found']:
                politician = create_results['politician']
                # Save politician_we_vote_id in we_vote_candidate
                we_vote_candidate.politician_we_vote_id = politician.we_vote_id
                we_vote_candidate.politician_id = politician.id
                we_vote_candidate.save()
            results = {
                'success': create_results['success'],
                'status': create_results['status'],
                'politician_list_found': False,
                'politician_list': [],
                'politician_found': create_results['politician_found'],
                'politician_created': create_results['politician_created'],
                'politician': create_results['politician'],
            }
            return results

    # Defensive fallback: every branch above returns, so this should be unreachable.
    success = False
    status = "TO_BE_IMPLEMENTED"
    results = {
        'success': success,
        'status': status,
        'politician_list_found': politician_list_found,
        'politician_list': politician_list,
        'politician_found': politician_found,
        'politician_created': politician_created,
        'politician': None,
    }
    return results
def retrieve_candidate_politician_match_options(vote_smart_id, maplight_id, candidate_twitter_handle,
                                                candidate_name, state_code):
    """Search the politician table for records that might match a candidate
    and report either a list of possibilities or a single match."""
    politician_manager = PoliticianManager()

    # Search the politician table for a match
    search_results = politician_manager.retrieve_all_politicians_that_might_match_candidate(
        vote_smart_id, maplight_id, candidate_twitter_handle,
        candidate_name, state_code)

    if search_results['politician_list_found']:
        # Several possible matches: hand the whole list back to the caller.
        return {
            'success': search_results['success'],
            'status': search_results['status'],
            'politician_list_found': True,
            'politician_list': search_results['politician_list'],
            'politician_found': False,
            'politician_created': False,
            'politician': None,
        }

    if search_results['politician_found']:
        # Exactly one match: return that politician entry.
        return {
            'success': search_results['success'],
            'status': search_results['status'],
            'politician_list_found': False,
            'politician_list': [],
            'politician_found': True,
            'politician_created': False,
            'politician': search_results['politician'],
        }

    # No match at all; creating a politician here is not implemented yet.
    return {
        'success': False,
        'status': "TO_BE_IMPLEMENTED",
        'politician_list_found': False,
        'politician_list': [],
        'politician_found': False,
        'politician_created': False,
        'politician': None,
    }
|
wevote/WebAppPublic
|
candidate/controllers.py
|
Python
|
bsd-3-clause
| 29,893 | 0.003646 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Approach:
# - Listen for image load event and check the XMP tag GPano:UsePanoramaViewer
# https://developers.google.com/streetview/spherical-metadata
# - GExiv2 (in default Ubuntu install, but may not be robust enough to inconsistent/duplicate XMP tags)
# - ExifTool (not in default install)
# - If it is a panorama, replace 2D image display by 360° display
# Create a sphere and project the photo according to XMP GPano tags.
# - OpenGL: python-gtklext (not maintained and not in repos),
# python-opengl (too low-level), shortcrust
# - GTK scene graph kit: not yet completed and included in common distributions
# - JavaScript/WebGL: PhotoSphereViewer.js
# - Interactivity (drag to rotate around z-axis and tilt; scroll to zoom)
import gi, os, urllib.parse
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject, Gio, Eog
# EXIF/XMP metadata
gi.require_version('GExiv2', '0.10')
from gi.repository import GExiv2
# Webview for WebGL panorama viewer
gi.require_version('WebKit2', '4.0')
from gi.repository import WebKit2
# Encoding image in data uris.
import base64
class PanoramaPlugin(GObject.Object, Eog.WindowActivatable):
    """Eye of GNOME plugin that replaces the flat image view with an
    interactive WebGL panorama viewer when the selected image carries
    equirectangular GPano XMP metadata.

    The WebKit-based viewer is created lazily on the first panorama
    encountered, because it takes a significant amount of memory.
    """

    # Override EogWindowActivatable's window property
    # This is the EogWindow this plugin instance has been activated for
    window = GObject.property(type=Eog.Window)

    def __init__(self):
        GObject.Object.__init__(self)
        # True once the WebKit panorama widget has been created (lazy).
        self.panorama_viewer_loaded = False
        # True while the panorama widget replaces the normal image view.
        self.panorama_viewer_active = False
        self.container = None  # GtkOverlay that holds either view (set in load_panorama_viewer)
        self.image_view = None  # EogScrollView, the normal 2D display
        self.panorama_view = None  # PanoramaViewer instance, created lazily
        self.thumb_view = None  # EogThumbView used to track the selected image
        self.selection_change_handler = None  # signal handler id, kept for disconnect

    # Eye-of-Gnome API methods
    def do_activate(self):
        """The plugin has been activated (on app start or through checkbox in preferences), set it up."""
        # For tracking selected image.
        self.thumb_view = self.window.get_thumb_view()
        self.selection_change_handler = self.thumb_view.connect('selection-changed', self.on_selection_changed)
        # Initialization of panorama viewer:
        # Since it takes significant amount of memory, we load it only
        # once we encounter a panorama image (see on_selection_changed).
        #self.load_panorama_viewer()

    def do_deactivate(self):
        """The plugin has been deactivated, clean everything up."""
        # Remove all modifications and added widgets from the UI scene graph.
        # (In this implementation same as when hiding the panorama.)
        self.hide_panorama()
        # Unregister event handlers.
        self.thumb_view.disconnect(self.selection_change_handler)
        self.selection_change_handler = None
        # Release resources.
        self.panorama_view = None
        self.panorama_viewer_active = False
        self.panorama_viewer_loaded = False

    def on_selection_changed(self, thumb_view):
        """An image has been selected.

        Decide whether to show the panorama widget or the plain image
        view for the newly selected image; on any metadata/encoding
        error fall back to the plain view.
        """
        # Use the reference of thumb_view passed as parameter, not self.thumb_view (did cause errors).
        current_image = thumb_view.get_first_selected_image() # may be None
        if current_image:
            # Get file path
            uri = current_image.get_uri_for_display()
            filepath = urllib.parse.urlparse(uri).path
            # If it is a panorama, switch to panorama viewer.
            if self.use_panorama_viewer(filepath):
                # Read panorama metadata
                try:
                    metadata = self.get_pano_xmp(filepath)
                    # I tried passing just the image file path, but cross-site-scripting
                    # restrictions do not allow local file:// access.
                    # Solutions: simple server or data uri.
                    image = self.image_to_base64(filepath)
                    # Lazy loading: Create panorama_viewer only when a panorama is encountered.
                    # TODO: maybe unload it again after a certain amount of non-panorama images.
                    if not self.panorama_viewer_loaded:
                        # 1. Load the panorama viewer.
                        self.load_panorama_viewer(lambda: self.panorama_view.load_image(image, metadata, self.show_panorama) )
                    else:
                        # 2. Load the image into the panorama viewer.
                        # 3. When finished, make it visible.
                        self.panorama_view.load_image(image, metadata, self.show_panorama)
                except Exception as error:
                    # Best-effort: any failure (missing tags, read error)
                    # degrades to the normal 2D view.
                    print(error)
                    # Fallback to display as normal image.
                    self.hide_panorama()
            else:
                # It is a normal image.
                self.hide_panorama()
                # Release resources in the panorama viewer by loading an empty/none image
                # (a 1x1 transparent PNG as data uri).
                if self.panorama_viewer_loaded:
                    empty_image = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAC0lEQVQI12NgAAIAAAUAAeImBZsAAAAASUVORK5CYII='
                    self.panorama_view.load_image(empty_image, {})

    # Helper methods
    def use_panorama_viewer(self, filepath):
        """Return True if the file declares itself an equirectangular
        panorama via GPano XMP and does not explicitly opt out."""
        metadata = GExiv2.Metadata(filepath)
        return metadata.get_tag_string('Xmp.GPano.ProjectionType') == 'equirectangular' \
            and metadata.get_tag_string('Xmp.GPano.UsePanoramaViewer') != 'False'

    def get_pano_xmp(self, filepath):
        """Read XMP panorama metadata of an image file.
        Args:
            filepath: an image file to read
        Returns:
            a dict containing XMP keys with their values (as floats)
        Raises:
            Exception: if any of the required GPano tags is missing.
        """
        metadata = GExiv2.Metadata(filepath)
        # For tags see: http://www.exiv2.org/tags.html
        # and http://exiv2.org/tags-xmp-GPano.html
        tags_required = {
            'Xmp.GPano.FullPanoWidthPixels':           'full_width',
            'Xmp.GPano.FullPanoHeightPixels':          'full_height',
            'Xmp.GPano.CroppedAreaImageWidthPixels':   'cropped_width',
            'Xmp.GPano.CroppedAreaImageHeightPixels':  'cropped_height',
            'Xmp.GPano.CroppedAreaLeftPixels':         'cropped_x',
            'Xmp.GPano.CroppedAreaTopPixels':          'cropped_y'
        }
        tags_optional = {
            'Xmp.GPano.PoseHeadingDegrees':            'pose_heading',
            'Xmp.GPano.InitialHorizontalFOVDegrees':   'initial_h_fov',
            'Xmp.GPano.InitialViewHeadingDegrees':     'initial_heading',
            'Xmp.GPano.InitialViewPitchDegrees':       'initial_pitch',
            'Xmp.GPano.InitialViewRollDegrees':        'initial_roll'
        }
        result = {}
        for (tag, key) in tags_required.items():
            if metadata.has_tag(tag):
                result[key] = float(metadata.get_tag_string(tag))
            else:
                raise Exception("Required tag %s is missing, cannot use panorama viewer."%tag)
        for (tag, key) in tags_optional.items():
            if metadata.has_tag(tag):
                result[key] = float(metadata.get_tag_string(tag))
        return result

    def load_panorama_viewer(self, on_loaded_cb = None):
        """Initialize the panorama viewer widget.
        Args:
            on_loaded_cb: an optional callback function/lambda that is called
                after loading of the panorama widget completes.
        Note:
            Instantiation of the WebView is synchronous, but loading of html is asynchronous.
            For subsequently interacting with the document, pass a callback.
        """
        if not self.panorama_viewer_loaded:
            self.image_view = self.window.get_view()      # EogScrollView
            self.container = self.image_view.get_parent() # its parent, GtkOverlay
            # Create the panorama widget.
            self.panorama_view = PanoramaViewer(on_loaded_cb)
            self.panorama_view.show()
            self.panorama_viewer_loaded = True

    def image_to_base64(self, filepath):
        """Read an image file and return its content as a base64 data uri.
        Args:
            filepath: an image file to read
        Returns:
            a string of the base64 encoded image
        """
        with open(filepath, 'rb') as f:
            return 'data:' + self.get_image_mimetype(filepath) \
                + ';base64,' + base64.b64encode(f.read()).decode('ascii')

    def get_image_mimetype(self, filepath):
        """Guess a mime type from the file extension, for use in a data uri.

        NOTE(review): 'image/jpg' is not the IANA-registered type (that is
        'image/jpeg'); WebKit appears to tolerate it here, but verify
        before relying on it elsewhere.
        """
        ext = os.path.splitext(filepath)[1].lower()
        eog_mimetypes = {
            '.bmp':  'image/x-bmp',
            '.jpg':  'image/jpg',
            '.jpeg': 'image/jpg',
            '.png':  'image/png',
            '.tif':  'image/tiff',
            '.tiff': 'image/tiff'
        }
        # Fallback: derive the subtype from the extension itself.
        return eog_mimetypes[ext] if ext in eog_mimetypes else 'image/'+ext[1:]

    def show_panorama(self):
        """Show the panorama widget and hide the image viewer."""
        if not self.panorama_viewer_active:
            # I tried adding both widgets to the container and just toggling their
            # visibility or adding them into a Gtk.Stack, but in both cases the
            # WebView did not receive mouse events. Replacing the widgets works.
            self.container.remove(self.image_view)
            self.container.add(self.panorama_view)
            self.panorama_viewer_active = True

    def hide_panorama(self):
        """Show the image viewer and hide the panorama widget."""
        if self.panorama_viewer_active:
            self.container.remove(self.panorama_view)
            self.container.add(self.image_view)
            self.panorama_viewer_active = False
class PanoramaViewer(WebKit2.WebView):
    """WebKit widget rendering an equirectangular panorama with
    PhotoSphereViewer.js, loaded from the bundled eog_panorama.htm.

    JavaScript-to-Python communication uses a custom uri scheme
    (see `_uri_scheme_cb`), since a WebView cannot call back directly.
    """

    #uri_panorama_viewer = 'file://' + os.path.join(self.plugin_info.get_data_dir(), 'eog_panorama.htm')
    uri_panorama_viewer = 'file://' + os.path.join(os.path.dirname(os.path.realpath(__file__)), 'eog_panorama.htm')
    custom_scheme = 'eogp' # This should not clash with the plugin path, otherwise it confuses WebKit.

    def __init__(self, on_loaded_cb = None):
        """Initialize the panorama viewer widget.
        Args:
            on_loaded_cb: an optional callback function/lambda that is called
                after loading of the panorama widget completes.
        """
        super(PanoramaViewer, self).__init__()
        # Callback for when loading of the WebView completed.
        self.on_loaded_cb = on_loaded_cb
        # Callback for when loading of an image completed.
        self.pending_on_completed_cb = None
        # Settings
        websettings = WebKit2.Settings()
        websettings.set_property('enable-webgl', True)
        websettings.set_property('enable-plugins', False)
        #websettings.set_property('enable-developer-extras', True) # TODO: Enable this when debugging.
        # Trying to work-around file access problems:
        #websettings.set_property('enable-xss-auditor', False) # Detailed error reporting for external script files is blocked by crossorigin policies, but this property seems not to enable it.
        #websettings.set_property('allow-file-access-from-file-url', True) # Not implemented :(
        self.set_settings(websettings)
        # Fill the parent widget.
        self.set_hexpand(True)
        self.set_vexpand(True)
        # Load the panorama viewer page.
        self.load_uri(self.uri_panorama_viewer)
        # Disable context menu.
        self.connect('context-menu', lambda *args: True)
        # Set up communication from webview document to python:
        context = self.get_context()
        context.register_uri_scheme(self.custom_scheme, self._uri_scheme_cb)
        # Detect navigation away from uri_panorama_viewer
        self.connect("decide-policy", self._on_decide_policy)

    def load_image(self, img_uri, metadata, on_completed_cb=None):
        """Load an image into the panorama viewer.
        Args:
            img_uri: a data uri of an image file
            metadata: a dict containing XMP panorama tags and values.
            on_completed_cb: an optional callback function/lambda that is called
                after loading of the image completes.
        """
        self.pending_on_completed_cb = on_completed_cb
        # Bug fix: the metadata values are floats (get_pano_xmp stores
        # float(...)), but the previous "%d" formatting truncated them to
        # integers, losing e.g. fractional initial pitch/heading angles.
        # "%s" renders the float repr, a valid JavaScript number literal.
        pano_data = ', \n'.join(["%s: %s"%(key, value) for (key, value) in metadata.items()])
        script = "PSV.show_panorama('%s', {%s});"%(img_uri, pano_data)
        self.run_javascript(script)

    def _on_decide_policy(self, webview, decision, decision_type):
        """Restrict navigation to the viewer page and the custom scheme;
        redirect file:// links back into Eye of GNOME itself.
        Returns True when the decision has been handled here.
        """
        if decision_type == WebKit2.PolicyDecisionType.NAVIGATION_ACTION:
            navigation_request = decision.get_navigation_action().get_request()
            uri_string = navigation_request.get_uri()
            uri = urllib.parse.urlparse(uri_string)
            # Allow custom uri scheme.
            if uri.scheme == self.custom_scheme:
                pass # Pass over to uri scheme handler and _uri_scheme_cb
            else:
                # Disallow any other uri except panorama viewer page.
                if uri_string != self.uri_panorama_viewer:
                    decision.ignore()
                    # Redirect file uris to be opened by the application.
                    # Not elegant to execute this in PanoramaViewer class,
                    # better would be to let event bubble up and let eog act on it.
                    if uri.scheme == 'file':
                        app = Eog.Application.get_instance()
                        flags = Eog.StartupFlags.SINGLE_WINDOW # in same eog instance
                        app.open_uri_list([uri_string], 0, flags)
                    return True
        return False

    def _uri_scheme_cb(self, request):
        """Respond to a custom uri scheme request (JS -> Python messages).
        Args:
            request: a WebKit2.URISchemeRequest
        """
        uri = urllib.parse.urlparse(request.get_uri())
        if uri.netloc == 'document_ready':
            # Call the callback.
            if self.on_loaded_cb:
                # Issue: Webkit2.WebView does not return correct JavaScript window.devicePixelRatio on hidpi devices.
                # Set the device pixel ratio from Gtk widget.
                self._set_device_pixel_ratio()
                self.on_loaded_cb()
                self.on_loaded_cb = None
        elif uri.netloc == 'log':
            print('WebView log: '+urllib.parse.unquote(uri.path[1:]))
        elif uri.netloc == 'warn':
            print('WebView warn: '+urllib.parse.unquote(uri.path[1:]))
        elif uri.netloc == 'error':
            print('WebView error: '+urllib.parse.unquote(uri.path[1:]))
        elif uri.netloc == 'show_panorama_completed':
            # Call the callback.
            if self.pending_on_completed_cb:
                self.pending_on_completed_cb()
                self.pending_on_completed_cb = None
        # Finish the request with dummy data (we do not have a new page to load).
        # Otherwise, subsequent requests and also src='data:image...' will cause an error.
        request.finish(Gio.MemoryInputStream.new_from_data([0]), -1, None)

    def _set_device_pixel_ratio(self):
        """Propagate the Gtk scale factor into the page, overriding the
        (incorrect on hidpi) devicePixelRatio reported by WebKit."""
        factor = self.get_scale_factor()
        self.run_javascript("window.devicePixelRatio = %s;"%factor)
        self.run_javascript("PhotoSphereViewer.SYSTEM.pixelRatio = %s;"%factor)
|
Aerilius/eog_panorama
|
eog_panorama/eog_panorama.py
|
Python
|
gpl-3.0
| 15,642 | 0.007672 |
import sublime
import sublime_plugin
from ..core import oa_syntax, decorate_pkg_name
from ..core import ReportGenerationThread
from ...lib.packages import PackageList
###----------------------------------------------------------------------------
class PackageReportThread(ReportGenerationThread):
    """
    Generate a tabular report of all installed packages and their state.
    """
    def _process(self):
        """Build the report text and hand it off to the report view.

        Emits a summary header followed by one table row per package,
        flagging the [S]hipped, [I]nstalled and [U]npacked variants.
        """
        pkg_list = PackageList()
        pkg_counts = pkg_list.package_counts()

        title = "{} Total Packages".format(len(pkg_list))
        t_sep = "=" * len(title)

        # Right-align each count to the width of the largest count value.
        fmt = '{{:>{}}}'.format(len(str(max(pkg_counts))))
        stats = ("{0} [S]hipped with Sublime\n"
                 "{0} [I]nstalled (user) sublime-package files\n"
                 "{0} [U]npacked in Packages\\ directory\n"
                 "{0} Currently in ignored_packages\n"
                 "{0} Installed Dependencies\n").format(fmt).format(*pkg_counts)

        # Fix: removed an unused `row` template string that was assigned
        # here but never read anywhere in the method.
        r_sep = "+------------------------------------------+-----+-----+-----+"

        packages = {}
        result = [title, t_sep, "", self._generation_time(), stats, r_sep]
        for pkg_name, pkg_info in pkg_list:
            packages[pkg_name] = pkg_info.status(detailed=False)
            result.append(
                "| {:<40} | [{:1}] | [{:1}] | [{:1}] |".format(
                    decorate_pkg_name(pkg_info, name_only=True),
                    "S" if pkg_info.shipped_path is not None else " ",
                    "I" if pkg_info.installed_path is not None else " ",
                    "U" if pkg_info.unpacked_path is not None else " "))
        result.extend([r_sep, ""])

        self._set_content("OverrideAudit: Package Report", result, ":packages",
                          oa_syntax("OA-PkgReport"), {
                              "override_audit_report_packages": packages,
                              "context_menu": "OverrideAuditReport.sublime-menu"
                          })
###----------------------------------------------------------------------------
class OverrideAuditPackageReportCommand(sublime_plugin.WindowCommand):
    """
    Generate a tabular report of all installed packages and their state.
    """
    def run(self, force_reuse=False):
        # Kick off the report generation on a background thread so the
        # UI stays responsive while package data is gathered.
        current_view = self.window.active_view()
        worker = PackageReportThread(self.window, "Generating Package Report",
                                     current_view, force_reuse=force_reuse)
        worker.start()
###----------------------------------------------------------------------------
#
|
OdatNurd/OverrideAudit
|
src/commands/package_report.py
|
Python
|
mit
| 2,593 | 0.0027 |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# Copyright 2014 Peerchemist
#
# This file is part of NuBerryPi project.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
__author__ = "Peerchemist"
__license__ = "GPL"
__version__ = "0.23"
import os, sys
import sh
import argparse
import json
import urllib
import platform
from datetime import timedelta
from datetime import datetime as dt
from colored import fore, back, style
## Class that pulls and parses data
class pbinfo:
    """Collects system, hardware and Nu daemon information (Python 2)."""

    def system(self):
        """Return OS-level stats: distro version, kernel, uptime, load, temp."""
        def uptime():
            # /proc/uptime: first field is total uptime in seconds.
            with open('/proc/uptime', 'r') as f:
                uptime_seconds = float(f.readline().split()[0])
                uptime_str = str(timedelta(seconds = uptime_seconds))
                return(uptime_str)

        def distr():
            # Distro version from /etc/os-release, e.g. VERSION_ID="x.y".
            with open('/etc/os-release', 'r') as lsb:
                for line in lsb:
                    if line.startswith('VERSION_ID'):
                        return(line.split('=')[1].replace('"','').strip())

        def temp():
            # Kernel reports millidegrees Celsius; convert to degrees.
            with open('/sys/class/thermal/thermal_zone0/temp', 'r') as temp:
                return(float(temp.readline().strip())/1000)

        mm = {
            'nuberrypi': distr(),
            'kernel release': platform.release(),
            'uptime': uptime(),
            'average load': os.getloadavg(),
            'system_temperature': temp()
            }
        return(mm)

    def hardware(self):
        """Return board info parsed from /proc/cpuinfo and /proc/cmdline."""
        mm = {}
        with open('/proc/cpuinfo') as cpuinfo:
            for line in cpuinfo:
                if line.startswith('Hardware'):
                    hardware = line.split(':')[1].strip()
                    if hardware == "BCM2708":
                        mm['hardware'] = "Raspberry Pi"
                if line.startswith('Serial'):
                    ser = line.split(':')[1].strip()
                    mm['serial'] = ser

        with open('/proc/cmdline', 'r') as cmdline:
            for i in cmdline.readline().split():
                if i.startswith('smsc95xx.macaddr'):
                    # Key name 'maccaddr' (sic) kept for output compatibility.
                    mm['maccaddr'] = str(i.split('=')[1])
                if i.startswith('bcm2708.boardrev'):
                    mm['board_rev'] = str(i.split('=')[1])
        return(mm)

    def nud(self, argv):
        """Return `nud getinfo` as a dict, extended with PoS difficulty.

        Pass argv="private" to strip balance and ip before returning.
        Returns the string "nud inactive" when the daemon output cannot
        be parsed (daemon down or still starting).
        """
        get = sh.nud("getinfo", _ok_code=[0,3,5,87]).stdout
        pos_diff = sh.nud("getdifficulty", _ok_code=[0,3,5,87]).stdout
        try:
            getinfo = json.loads(get)
            pos = json.loads(pos_diff)['proof-of-stake']
            getinfo["difficulty proof-of-stake"] = pos
        except (ValueError, KeyError):
            # Fix: narrowed from a bare `except:`, which would also have
            # swallowed KeyboardInterrupt/SystemExit. json.loads raises
            # ValueError on non-JSON output; KeyError covers a missing
            # 'proof-of-stake' field.
            return("nud inactive")

        ## When posting in public, hide IP and balance.
        if argv == "private":
            del getinfo['balance']
            del getinfo['ip']
            return(getinfo)
        else:
            return(getinfo)
## Class that will do all the pretty printing
class box:
    """Pretty-prints the data gathered by pbinfo/health to the terminal.

    NOTE: all methods print directly to stdout and return None; callers
    must not use their return values.
    """

    def default(self): ## printed when no arguments
        # Local dict deliberately shadows the module-level `box` instance.
        box = {}
        box['nuberrypi version'] = "v" + pbinfo.system()['nuberrypi']
        box['uptime'] = pbinfo.system()['uptime']
        box['nud'] = pbinfo.nud(self)
        box['serial'] = pbinfo.hardware()['serial']
        box['raspi_board_rev'] = pbinfo.hardware()['board_rev']

        print(fore.GREEN + style.UNDERLINED + "NuBerryPi:" + style.RESET)
        print(json.dumps(box, sort_keys=True, indent=4))
        if box['nud'] == "nud inactive":
            print(fore.RED + style.BOLD + "WARNING: nud is not running!" + style.RESET)

    def public(self): ## When privacy is needed
        # Same as default() but with balance/ip stripped by pbinfo.nud('private').
        box = {}
        box['NuBerryPi:'] = "v" + pbinfo.system()['nuberrypi']
        box['serial'] = pbinfo.hardware()['serial']
        box['uptime'] = pbinfo.system()['uptime']
        box['nud'] = pbinfo.nud('private')

        print(fore.GREEN + style.UNDERLINED + "NuBerryPi:" + style.RESET)
        print(json.dumps(box, sort_keys=True, indent=4))

    def system(self):
        # System-only view, with a warning above ~76 degrees Celsius.
        box = pbinfo.system()
        print(fore.GREEN + style.UNDERLINED + "NuBerryPi system info:" + style.RESET)
        print(json.dumps(box, sort_keys=True, indent=4))
        if box['system_temperature'] > 76:
            print(fore.RED + style.BOLD + "WARNING: system temperature too high!" + style.RESET)

    def all(self): ## Switch to show all
        # NOTE(review): prints the dump and returns None — passing the
        # return value to sys.stdout.write() would raise TypeError.
        box = {}
        box['system'] = pbinfo.system()
        box['system'].update(pbinfo.hardware())
        box['nud'] = pbinfo.nud(self)
        print(json.dumps(box, sort_keys=True, indent=4))

    def health(self):
        # Compare local chain state against peerchain.co and colorize results.
        report = health.check()
        print "Checking if we are on the right chain..."
        print "Using" + " " + style.UNDERLINED + "www.peerchain.co" + style.RESET + " as reference."
        print
        for k,v in report.items():
            if v == True:
                print(k + ":" + fore.GREEN + style.BOLD + "True" + style.RESET)
            else:
                print(k + ":" + fore.RED + style.BOLD + "False" + style.RESET)
            print
## Checking health of blockchain
class health:
    """Compares the local Nu blockchain tip against peerchain.co."""

    def pull(self):
        """Fetch the latest block summary from the peerchain.co API."""
        url = "https://peerchain.co/api/v1/blockLatest/"
        response = urllib.urlopen(url)
        return json.loads(response.read())

    def local(self):
        """Collect block info for the current tip of the local chain."""
        info = {}
        info["heightInt"] = int(sh.nud("getblockcount", _ok_code=[0, 3, 5, 87]).stdout)
        info["hash"] = sh.nud("getblockhash", info["heightInt"],
                              _ok_code=[0, 3, 5, 87]).stdout.strip()
        block = json.loads(sh.nud("getblock", info["hash"],
                                  _ok_code=[0, 3, 5, 87]).stdout)
        info["prevHash"] = block["previousblockhash"]
        info["mrkRoot"] = block["merkleroot"]
        return info

    def check(self):
        """Return a dict of booleans, one per compared chain field."""
        local = self.local()
        remote = self.pull()
        # (report key, field compared between remote and local)
        comparisons = [
            ("block_count_matches", "heightInt"),
            ("last_block_hash_matches", "hash"),
            ("previous_block_hash_matches", "prevHash"),
            ("merkle_root_matches", "mrkRoot"),
        ]
        report = {}
        for name, field in comparisons:
            report[name] = remote[field] == local[field]
        return report
# Module-level singletons; the instances shadow their class names.
pbinfo = pbinfo()
box = box()
health = health()

######################### args
parser = argparse.ArgumentParser(description='Show information on NuBerryPi')
parser.add_argument('-a', '--all', help='show everything', action='store_true')
parser.add_argument('-s','--system', help='show system information', action='store_true')
parser.add_argument('-p', '--nu', help='equal to "ppcoid getinfo"', action='store_true')
parser.add_argument('--public', help='hide private data [ip, balance, serial]', action='store_true')
parser.add_argument('-o', '--output', help='dump data to stdout, use to pipe to some other program',
                    action='store_true')
parser.add_argument('--health', help='compare local blockchain data with peerchain.co as reference',
                    action='store_true')

args = parser.parse_args()

## Default, if no arguments
if not any(vars(args).values()):
    box.default()

if args.all:
    box.all()

if args.system:
    box.system()

if args.nu:
    print(json.dumps(pbinfo.nud("self"), indent=4, sort_keys=True))

if args.public:
    box.public()

if args.output:
    # Bug fix: box.all() writes the JSON dump to stdout itself and returns
    # None, so the old `sys.stdout.write(box.all())` raised TypeError.
    box.all()

if args.health:
    box.health()
|
inuitwallet/nuberrypi
|
PKGBLD/nuberrypi-info/nuberrypi-info.py
|
Python
|
gpl-3.0
| 7,347 | 0.028039 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from reportlab.pdfbase import ttfonts
from odoo import api, fields, models
from odoo.report.render.rml2pdf import customfonts
"""This module allows the mapping of some system-available TTF fonts to
the reportlab engine.
This file could be customized per distro (although most Linux/Unix ones)
should have the same filenames, only need the code below).
Due to an awful configuration that ships with reportlab at many Linux
and Ubuntu distros, we have to override the search path, too.
"""
_logger = logging.getLogger(__name__)
# Alternatives for the [broken] builtin PDF fonts. Default order chosen to match
# the pre-v8 mapping from odoo.report.render.rml2pdf.customfonts.CustomTTFonts.
# Format: [ (BuiltinFontFamily, mode, [AlternativeFontName, ...]), ...]
BUILTIN_ALTERNATIVES = [
    # (builtin family, style mode, substitutes in order of preference) —
    # the first alternative found on the system wins (see ResFont._sync).
    ('Helvetica', "normal", ["DejaVuSans", "LiberationSans"]),
    ('Helvetica', "bold", ["DejaVuSans-Bold", "LiberationSans-Bold"]),
    ('Helvetica', 'italic', ["DejaVuSans-Oblique", "LiberationSans-Italic"]),
    ('Helvetica', 'bolditalic', ["DejaVuSans-BoldOblique", "LiberationSans-BoldItalic"]),
    ('Times', 'normal', ["LiberationSerif", "DejaVuSerif"]),
    ('Times', 'bold', ["LiberationSerif-Bold", "DejaVuSerif-Bold"]),
    ('Times', 'italic', ["LiberationSerif-Italic", "DejaVuSerif-Italic"]),
    ('Times', 'bolditalic', ["LiberationSerif-BoldItalic", "DejaVuSerif-BoldItalic"]),
    ('Courier', 'normal', ["FreeMono", "DejaVuSansMono"]),
    ('Courier', 'bold', ["FreeMonoBold", "DejaVuSansMono-Bold"]),
    ('Courier', 'italic', ["FreeMonoOblique", "DejaVuSansMono-Oblique"]),
    ('Courier', 'bolditalic', ["FreeMonoBoldOblique", "DejaVuSansMono-BoldOblique"]),
]
class ResFont(models.Model):
    """Registry of system TTF fonts usable by the RML report engine."""
    _name = "res.font"
    _description = 'Fonts available'
    _order = 'family,name,id'
    _rec_name = 'family'

    # Font family reported by the TTF file (e.g. "DejaVu Sans").
    family = fields.Char(string="Font family", required=True)
    # Full font name (family plus style variant).
    name = fields.Char(string="Font Name", required=True)
    # Filesystem path of the TTF file. NOTE(review): records with path
    # '/dev/null' are consistently excluded from scans below — presumably
    # placeholder entries; verify before relying on this.
    path = fields.Char(required=True)
    # Style mode, e.g. normal / bold / italic / bolditalic.
    mode = fields.Char(required=True)

    _sql_constraints = [
        ('name_font_uniq', 'unique(family, name)', 'You can not register two fonts with the same name'),
    ]

    @api.model
    def font_scan(self, lazy=False):
        """Action of loading fonts
        In lazy mode will scan the filesystem only if there is no founts in the database and sync if no font in CustomTTFonts
        In not lazy mode will force scan filesystem and sync
        """
        if lazy:
            # lazy loading, scan only if no fonts in db
            fonts = self.search([('path', '!=', '/dev/null')])
            if not fonts:
                # no scan yet or no font found on the system, scan the filesystem
                self._scan_disk()
            elif len(customfonts.CustomTTFonts) == 0:
                # CustomTTFonts list is empty
                self._sync()
        else:
            self._scan_disk()
        return True

    def _scan_disk(self):
        """Scan the file system and register the result in database"""
        found_fonts = []
        for font_path in customfonts.list_all_sysfonts():
            try:
                # Parse the TTF header; unreadable/corrupt files are skipped.
                font = ttfonts.TTFontFile(font_path)
                _logger.debug("Found font %s at %s", font.name, font_path)
                found_fonts.append((font.familyName, font.name, font_path, font.styleName))
            except Exception, ex:
                _logger.warning("Could not register Font %s: %s", font_path, ex)

        for family, name, path, mode in found_fonts:
            # Insert only fonts not yet known (unique on family+name).
            if not self.search([('family', '=', family), ('name', '=', name)]):
                self.create({'family': family, 'name': name, 'path': path, 'mode': mode})

        # remove fonts not present on the disk anymore
        existing_font_names = [name for (family, name, path, mode) in found_fonts]
        # Remove inexistent fonts
        self.search([('name', 'not in', existing_font_names), ('path', '!=', '/dev/null')]).unlink()

        self.pool.signal_caches_change()

        return self._sync()

    def _sync(self):
        """Set the customfonts.CustomTTFonts list to the content of the database"""
        customfonts.CustomTTFonts = []
        local_family_modes = set()
        local_font_paths = {}
        for font in self.search([('path', '!=', '/dev/null')]):
            local_family_modes.add((font.family, font.mode))
            local_font_paths[font.name] = font.path
            customfonts.CustomTTFonts.append((font.family, font.name, font.path, font.mode))

        # Attempt to remap the builtin fonts (Helvetica, Times, Courier) to better alternatives
        # if available, because they only support a very small subset of unicode
        # (missing 'č' for example)
        for builtin_font_family, mode, alts in BUILTIN_ALTERNATIVES:
            if (builtin_font_family, mode) not in local_family_modes:
                # No local font exists with that name, try alternatives
                for altern_font in alts:
                    if local_font_paths.get(altern_font):
                        # First available alternative wins.
                        altern_def = (builtin_font_family, altern_font,
                                      local_font_paths[altern_font], mode)
                        customfonts.CustomTTFonts.append(altern_def)
                        _logger.debug("Builtin remapping %r", altern_def)
                        break
                else:
                    _logger.warning("No local alternative found for builtin font `%s` (%s mode)."
                                    "Consider installing the DejaVu fonts if you have problems "
                                    "with unicode characters in RML reports",
                                    builtin_font_family, mode)
        return True

    @classmethod
    def clear_caches(cls):
        """Force worker to resync at next report loading by setting an empty font list"""
        customfonts.CustomTTFonts = []
        return super(ResFont, cls).clear_caches()
|
chienlieu2017/it_management
|
odoo/odoo/addons/base/res/res_font.py
|
Python
|
gpl-3.0
| 6,052 | 0.003305 |
import sys
import os
from distutils.core import setup

# Bug fix: use the function form of print so this file still *parses*
# under Python 3 — the old ``print '...'`` statement made the interpreter
# raise a SyntaxError before the version check could ever run, hiding the
# friendly message. print('...') with a single argument is valid in both
# Python 2 and 3.
if sys.version_info.major >= 3:
    print('Sorry, currently only supports Python 2. Patches welcome!')
    sys.exit(1)

setup(
    name='browser-cookie',
    version='0.6',
    packages=['browser_cookie'],
    package_dir={'browser_cookie' : '.'}, # look for package contents in current directory
    author='Richard Penman',
    author_email='richard@webscraping.com',
    description='Loads cookies from your browser into a cookiejar object so can download with urllib and other libraries the same content you see in the web browser.',
    url='https://bitbucket.org/richardpenman/browser_cookie',
    install_requires=['pycrypto', 'keyring'],
    license='lgpl'
)
|
JFDesigner/FBAlbumDownloader
|
browser_cookie/setup.py
|
Python
|
gpl-2.0
| 722 | 0.006925 |
import logging
from ask import alexa
import car_accidents
import expected_population
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(request_obj, context=None):
    """AWS Lambda entry point: dispatch the incoming Alexa request to the
    handler registered via the `alexa` decorators below."""
    return alexa.route_request(request_obj)
@alexa.default
def default_handler(request):
    """Fallback for requests that match no registered handler."""
    logger.info('default_handler')
    reply = "Sorry, I don't understand."
    return alexa.respond(reply, end_session=True)
@alexa.request("LaunchRequest")
def launch_request_handler(request):
    """Greet the user when the skill is launched without an intent."""
    logger.info('launch_request_handler')
    greeting = 'Ask me about any public data about Sweden.'
    return alexa.respond(greeting, end_session=True)
@alexa.request("SessionEndedRequest")
def session_ended_request_handler(request):
    """Acknowledge the end of a session."""
    logger.info('session_ended_request_handler')
    farewell = 'Goodbye.'
    return alexa.respond(farewell, end_session=True)
@alexa.intent('AMAZON.CancelIntent')
def cancel_intent_handler(request):
    """Handle the built-in cancel intent."""
    logger.info('cancel_intent_handler')
    acknowledgement = 'Okay.'
    return alexa.respond(acknowledgement, end_session=True)
@alexa.intent('AMAZON.HelpIntent')
def help_intent_handler(request):
    """Handle the built-in help intent with a short usage hint."""
    logger.info('help_intent_handler')
    hint = 'You can ask me about car accidents.'
    return alexa.respond(hint, end_session=True)
@alexa.intent('AMAZON.StopIntent')
def stop_intent_handler(request):
    """Handle the built-in stop intent."""
    logger.info('stop_intent_handler')
    acknowledgement = 'Okay.'
    return alexa.respond(acknowledgement, end_session=True)
@alexa.intent('CarAccidents')
def car_accidents_intent_handler(request):
    """Answer how many car accidents happened in a given city and year.

    Re-prompts when the city or year slot is missing; previously a
    missing year slot crashed on int(None) with a TypeError.
    """
    logger.info('car_accidents_intent_handler')
    logger.info(request.get_slot_map())
    city = request.get_slot_value('city')
    year = request.get_slot_value('year')
    if not city:
        return alexa.respond('Sorry, which city?')
    if not year:
        # Robustness fix: int(year) below would raise on a missing slot.
        return alexa.respond('Sorry, which year?')
    num_card_acc = car_accidents.get_num_accidents(year=int(year), city=city)
    logger.info('%s accidents in %s in %s', num_card_acc, city, year)
    return alexa.respond(
        '''
        <speak>
        There were
        <say-as interpret-as="cardinal">%s</say-as>
        car accidents in %s in
        <say-as interpret-as="date" format="y">%s</say-as>,
        </speak>
        ''' % (num_card_acc, city, year),
        end_session=True, is_ssml=True)
@alexa.intent('PopulationSweden')
def population_intent_handler(request):
    """Answer the expected population of Sweden for a given year.

    Re-prompts when the year slot is missing, instead of speaking
    'None' inside the SSML date tag.
    """
    logger.info('population_sweden_intent_handler')
    logger.info(request.get_slot_map())
    year = request.get_slot_value('year')
    if not year:
        # Robustness fix: a missing slot would otherwise render as 'None'.
        return alexa.respond('Sorry, which year?')
    return alexa.respond(
        '''
        <speak>
        in
        <say-as interpret-as="date" format="y">%s</say-as>,
        The expected population of Sweden is going to be
        <say-as interpret-as="cardinal">%s</say-as>
        </speak>
        ''' % (year, expected_population.get_expected_population(year)),
        end_session=True, is_ssml=True)
@alexa.intent('WaterUsage')
def water_usage_stockholm(request):
    """Answer the water consumption in Stockholm for a given year.

    Re-prompts when the year slot is missing, instead of speaking
    'None' inside the SSML date tag.
    """
    year = request.get_slot_value('year')
    logger.info('water_usage_stockholm')
    logger.info(request.get_slot_map())
    if not year:
        # Robustness fix: a missing slot would otherwise render as 'None'.
        return alexa.respond('Sorry, which year?')
    return alexa.respond(
        '''
        <speak>
        the water consumption in Stockholm in <say-as interpret-as="date" format="y">%s</say-as>,
        is <say-as interpret-as="cardinal">%s</say-as>
        </speak>
        ''' % (year, car_accidents.get_water_usage_stockholm(year)),
        end_session=True, is_ssml=True)
@alexa.intent('Apartments')
def housing_numbers(request):
    """Answer how many apartments were built in Stockholm in a year.

    Fixes: the response contains <speak> SSML markup but, unlike every
    other SSML handler in this module, did not pass is_ssml=True, so the
    markup would be treated as plain text. Also re-prompts on a missing
    year slot.
    """
    year = request.get_slot_value('year')
    logger.info('apartments')
    logger.info(request.get_slot_map())
    if not year:
        # Robustness fix: avoid passing None down to the data layer.
        return alexa.respond('Sorry, which year?')
    return alexa.respond(
        '''
        <speak>
        the number of apartments built during that year in Stockholm, is <say-as interpret-as="cardinal">%s</say-as>
        </speak>
        ''' % (car_accidents.get_num_apartments_stockholm(year)),
        is_ssml=True,
    )
|
geoaxis/ask-sweden
|
ask_sweden/lambda_function.py
|
Python
|
mit
| 3,656 | 0.001094 |
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)

# Toy sample: two linearly separable clusters of three points each.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1]    # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]

# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
# NOTE(review): `perm` appears unused, but computing it consumes draws
# from the shared `rng`; removing it would change every rng-dependent
# shuffle below. Leave as-is.
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)

# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
                                     random_state=rng)
def test_classification_toy():
    """Check classification on a toy dataset for both boosting variants."""
    for algorithm in ('SAMME', 'SAMME.R'):
        clf = AdaBoostClassifier(algorithm=algorithm, random_state=0)
        clf.fit(X, y_class)
        # Predictions, class set, and output shapes must all be exact.
        assert_array_equal(clf.predict(T), y_t_class)
        assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
        assert_equal(clf.predict_proba(T).shape, (len(T), 2))
        assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    """Check regression on a toy dataset."""
    # Fix: the docstring previously said "classification" although this
    # exercises AdaBoostRegressor.
    reg = AdaBoostRegressor(random_state=0)
    reg.fit(X, y_regr)
    assert_array_equal(reg.predict(T), y_t_regr)
def test_iris():
    """Both algorithms should fit iris well and expose consistent outputs."""
    classes = np.unique(iris.target)
    clf_samme = None
    prob_samme = None
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg)
        clf.fit(iris.data, iris.target)
        assert_array_equal(classes, clf.classes_)
        proba = clf.predict_proba(iris.data)
        if alg == "SAMME":
            # Keep the discrete-boosting model for the regression test
            # against SAMME.R probabilities below.
            clf_samme = clf
            prob_samme = proba
        n_classes = len(classes)
        assert_equal(proba.shape[1], n_classes)
        assert_equal(clf.decision_function(iris.data).shape[1], n_classes)
        score = clf.score(iris.data, iris.target)
        assert score > 0.9, "Failed with algorithm %s and score = %f" % \
            (alg, score)
    # Somewhat hacky regression test: prior to
    # ae7adc880d624615a34bafdb1d75ef67051b8200,
    # predict_proba returned SAMME.R values for SAMME.
    clf_samme.algorithm = "SAMME.R"
    assert_array_less(0,
                      np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
    """AdaBoost regression should fit the boston data closely."""
    reg = AdaBoostRegressor(random_state=0)
    reg.fit(boston.data, boston.target)
    # Training-set R^2 must be high on this easy, memorizable fit.
    assert reg.score(boston.data, boston.target) > 0.85
def test_staged_predict():
    """Check staged predictions.

    Every staged_* generator must yield one result per boosting stage and
    its final element must agree with the corresponding non-staged method.
    """
    rng = np.random.RandomState(0)
    iris_weights = rng.randint(10, size=iris.target.shape)
    boston_weights = rng.randint(10, size=boston.target.shape)

    # AdaBoost classification
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
        clf.fit(iris.data, iris.target, sample_weight=iris_weights)

        predictions = clf.predict(iris.data)
        # list(...) replaces the former pass-through comprehensions: the
        # staged_* methods return generators, one item per stage.
        staged_predictions = list(clf.staged_predict(iris.data))
        proba = clf.predict_proba(iris.data)
        staged_probas = list(clf.staged_predict_proba(iris.data))
        score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
        staged_scores = list(clf.staged_score(
            iris.data, iris.target, sample_weight=iris_weights))

        assert_equal(len(staged_predictions), 10)
        assert_array_almost_equal(predictions, staged_predictions[-1])
        assert_equal(len(staged_probas), 10)
        assert_array_almost_equal(proba, staged_probas[-1])
        assert_equal(len(staged_scores), 10)
        assert_array_almost_equal(score, staged_scores[-1])

    # AdaBoost regression
    clf = AdaBoostRegressor(n_estimators=10, random_state=0)
    clf.fit(boston.data, boston.target, sample_weight=boston_weights)

    predictions = clf.predict(boston.data)
    staged_predictions = list(clf.staged_predict(boston.data))
    score = clf.score(boston.data, boston.target,
                      sample_weight=boston_weights)
    staged_scores = list(clf.staged_score(
        boston.data, boston.target, sample_weight=boston_weights))

    assert_equal(len(staged_predictions), 10)
    assert_array_almost_equal(predictions, staged_predictions[-1])
    assert_equal(len(staged_scores), 10)
    assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
    """Check that base trees can be grid-searched."""
    # Classification: search ensemble size, tree depth and algorithm.
    param_grid = {'n_estimators': (1, 2),
                  'base_estimator__max_depth': (1, 2),
                  'algorithm': ('SAMME', 'SAMME.R')}
    search = GridSearchCV(
        AdaBoostClassifier(base_estimator=DecisionTreeClassifier()),
        param_grid)
    search.fit(iris.data, iris.target)
    # Regression: same idea without the algorithm choice.
    param_grid = {'n_estimators': (1, 2),
                  'base_estimator__max_depth': (1, 2)}
    search = GridSearchCV(
        AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
                          random_state=0),
        param_grid)
    search.fit(boston.data, boston.target)
def test_pickle():
    """Fitted ensembles must survive a pickle round-trip unchanged."""
    import pickle

    def check_roundtrip(estimator, data, target):
        # Fit, serialize, deserialize, and compare type and score.
        estimator.fit(data, target)
        restored = pickle.loads(pickle.dumps(estimator))
        assert_equal(type(restored), estimator.__class__)
        assert_equal(estimator.score(data, target),
                     restored.score(data, target))

    # Adaboost classifier
    for alg in ['SAMME', 'SAMME.R']:
        check_roundtrip(AdaBoostClassifier(algorithm=alg),
                        iris.data, iris.target)
    # Adaboost regressor
    check_roundtrip(AdaBoostRegressor(random_state=0),
                    boston.data, boston.target)
def test_importances():
    """Check variable importances."""
    X, y = datasets.make_classification(n_samples=2000, n_features=10,
                                        n_informative=3, n_redundant=0,
                                        n_repeated=0, shuffle=False,
                                        random_state=1)
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg)
        clf.fit(X, y)
        importances = clf.feature_importances_
        assert_equal(importances.shape[0], 10)
        # shuffle=False puts the 3 informative features first, so each of
        # them must dominate every uninformative feature.
        assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
                     True)
def test_error():
    """Test that it gives proper exception on deficient input."""
    # A negative learning rate and an unknown algorithm name are both
    # rejected at fit time.
    for bad_clf in (AdaBoostClassifier(learning_rate=-1),
                    AdaBoostClassifier(algorithm="foo")):
        assert_raises(ValueError, bad_clf.fit, X, y_class)
    # A sample_weight vector that does not match the data is rejected too.
    assert_raises(ValueError, AdaBoostClassifier().fit,
                  X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
    """Test different base estimators."""
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.svm import SVC
    # XXX doesn't work with y_class because RF doesn't support classes_
    # Shouldn't AdaBoost run a LabelBinarizer?
    AdaBoostClassifier(RandomForestClassifier()).fit(X, y_regr)
    AdaBoostClassifier(SVC(), algorithm="SAMME").fit(X, y_class)
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.svm import SVR
    AdaBoostRegressor(RandomForestRegressor(), random_state=0).fit(X, y_regr)
    AdaBoostRegressor(SVR(), random_state=0).fit(X, y_regr)
    # Check that an empty discrete ensemble fails in fit, not predict.
    X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
    y_fail = ["foo", "bar", 1, 2]
    assert_raises_regexp(ValueError, "worse than random",
                         AdaBoostClassifier(SVC(), algorithm="SAMME").fit,
                         X_fail, y_fail)
def test_sample_weight_missing():
    """Base estimators without sample_weight support must be rejected."""
    from sklearn.linear_model import LinearRegression
    from sklearn.cluster import KMeans
    # Neither LinearRegression nor KMeans accepts sample_weight in fit,
    # so boosting with them must raise for both the classifier and the
    # regressor wrappers.  A fresh base instance is built for every check.
    for make_base in (LinearRegression, KMeans):
        assert_raises(ValueError,
                      AdaBoostClassifier(make_base(), algorithm="SAMME").fit,
                      X, y_regr)
        assert_raises(ValueError,
                      AdaBoostRegressor(make_base()).fit,
                      X, y_regr)
def test_sparse_classification():
    """Check classification with sparse input.

    A classifier trained on sparse data must behave exactly like one
    trained on the equivalent dense data, for every supported sparse
    format and every prediction-style method.
    """

    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
            super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
                                                   n_features=5,
                                                   return_indicator=True,
                                                   random_state=42)
    # Flatten y to a 1d array
    y = np.ravel(y)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # Trained on sparse format
        sparse_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train_sparse, y_train)

        # Trained on dense format
        dense_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train, y_train)

        # predict
        sparse_results = sparse_classifier.predict(X_test_sparse)
        dense_results = dense_classifier.predict(X_test)
        assert_array_equal(sparse_results, dense_results)

        # decision_function
        sparse_results = sparse_classifier.decision_function(X_test_sparse)
        dense_results = dense_classifier.decision_function(X_test)
        assert_array_equal(sparse_results, dense_results)

        # predict_log_proba
        sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
        dense_results = dense_classifier.predict_log_proba(X_test)
        assert_array_equal(sparse_results, dense_results)

        # predict_proba
        sparse_results = sparse_classifier.predict_proba(X_test_sparse)
        dense_results = dense_classifier.predict_proba(X_test)
        assert_array_equal(sparse_results, dense_results)

        # score
        sparse_results = sparse_classifier.score(X_test_sparse, y_test)
        dense_results = dense_classifier.score(X_test, y_test)
        assert_array_equal(sparse_results, dense_results)

        # staged_decision_function
        # (typo fix: loop variables were previously named `sprase_res`)
        sparse_results = sparse_classifier.staged_decision_function(
            X_test_sparse)
        dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # staged_predict
        sparse_results = sparse_classifier.staged_predict(X_test_sparse)
        dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # staged_predict_proba
        sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
        dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # staged_score
        sparse_results = sparse_classifier.staged_score(X_test_sparse,
                                                        y_test)
        dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # Verify sparsity of data is maintained during training: every
        # fitted base estimator must have seen a csc or csr matrix.
        types = [i.data_type_ for i in sparse_classifier.estimators_]
        assert all(t == csc_matrix or t == csr_matrix for t in types)
def test_sparse_regression():
    """Check regression with sparse input.

    A regressor trained on sparse data must predict exactly like one
    trained on the equivalent dense data, for every sparse format.
    """

    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
            super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
                                    random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # Trained on sparse format
        sparse_regressor = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train_sparse, y_train)

        # Trained on dense format.  (Fix: the original also aliased this
        # object to `dense_results`, which was immediately overwritten.)
        dense_regressor = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train, y_train)

        # predict
        sparse_results = sparse_regressor.predict(X_test_sparse)
        dense_results = dense_regressor.predict(X_test)
        assert_array_equal(sparse_results, dense_results)

        # staged_predict
        # (typo fix: loop variables were previously named `sprase_res`)
        sparse_results = sparse_regressor.staged_predict(X_test_sparse)
        dense_results = dense_regressor.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # Sparsity must be maintained during training: every fitted base
        # estimator must have seen a csc or csr matrix.
        types = [i.data_type_ for i in sparse_regressor.estimators_]
        assert all(t == csc_matrix or t == csr_matrix for t in types)
if __name__ == "__main__":
    # Allow running this test module directly through the nose runner.
    import nose
    nose.runmodule()
|
Barmaley-exe/scikit-learn
|
sklearn/ensemble/tests/test_weight_boosting.py
|
Python
|
bsd-3-clause
| 15,811 | 0 |
# -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append("..")
import os
from os.path import realpath, dirname, isfile, abspath
import json
import time
import uuid
from werkzeug.datastructures import FileStorage
from hackathon.constants import FILE_TYPE, HEALTH_STATUS, HEALTH
from storage import Storage
__all__ = ["LocalStorage"]
class LocalStorage(Storage):
    """Hackathon file storage that saves all templates on local disk
    template files will be save at "<src_dir>/open-hackathon-server/src/hackathon/resources"
    uploaded images will be save at "<src_dir>/open-hackathon-server/src/hackathon/resources"
    """
    def save(self, context):
        """Save a file to storage
        :type context: Context
        :param context: the execution context of file saving
        :rtype context
        :return the updated context which should including the full path of saved file
        """
        # Compute new file name, physical path and URL, then dump the content.
        context = self.__generate_paths(context)
        self.__save_file(context.content, context.physical_path)
        self.log.debug("file saved at:" + context.physical_path)
        return context
    def load(self, context):
        """Load file from storage
        :type context: Context
        :param context: the execution context of file loading
        :rtype dict
        :return the file content
        """
        path = context.physical_path
        file_type = context.file_type
        # Only template files are JSON-parsed; any other type yields None.
        if file_type == FILE_TYPE.TEMPLATE:
            with open(path) as template_file:
                return json.load(template_file)
        else:
            return None
    def delete(self, context):
        """Delete file from storage
        :type context: Context
        :param context: the execution context of file deleting
        :rtype bool
        :return True if successfully deleted else False
        """
        path = context.physical_path
        if isfile(path):
            os.remove(path)
            return True
        else:
            # Directories and missing files are deliberately left alone.
            self.log.warn("try to remove dir or non-existed file")
            return False
    def report_health(self):
        """The status of local storage should be always True"""
        return {
            HEALTH.STATUS: HEALTH_STATUS.OK
        }
    def __init__(self):
        # All generated paths live beneath this package's parent directory.
        self.base_dir = self.__get_storage_base_dir()
    def __ensure_dir(self, file_path):
        """Make sure the directory of target file exists"""
        path = dirname(file_path)
        if path and not (os.path.exists(path)):
            os.makedirs(path)
        return path
    def __save_file(self, content, path):
        """Dump file to disk
        An existing file with the same name will be erased
        :type content: file | dict | FileStorage
        :param content: the content of file to be saved. Can be a file object or a dict
        :type path: str | unicode
        :param path: the file path
        """
        self.__ensure_dir(path)
        with open(path, 'w') as f:
            if isinstance(content, dict):
                json.dump(content, f)
            # NOTE: `file` is the Python 2 built-in file type, so this
            # module is Python 2 only.
            elif isinstance(content, file):
                f.write(content.read())
            elif isinstance(content, FileStorage):
                content.save(path)
    def __get_storage_base_dir(self):
        """Get the base directory of storage"""
        return "%s/.." % dirname(realpath(__file__))
    def __generate_paths(self, context):
        """Generate file new name ,physical path and uri
        :type context: Context
        :param context: execution context
        :return updated context
        """
        hackathon_name = context.hackathon_name if "hackathon_name" in context else None
        # replace file_name with new random name
        context.file_name = self.__generate_file_name(context.file_name, hackathon_name)
        # NOTE(review): hackathon_name is not forwarded here, so image paths
        # never get the per-hackathon sub-directory that
        # __generate_physical_path supports -- confirm this is intended.
        context.physical_path = self.__generate_physical_path(context.file_name, context.file_type)
        context.url = self.__generate_url(context.physical_path, context.file_type)
        return context
    def __generate_url(self, physical_path, file_type):
        """Return the http URI of file
        It's for local storage only and the uploaded images must be in dir /static
        :type physical_path: str|unicode
        :param physical_path: the absolute physical path of the file
        :type file_type: str | unicode
        :param file_type: type of file which decides the directories where file is saved.
        :rtype str
        :return public accessable URI
        """
        # only upladed images need an URI.
        # example: http://localhost:15000/static/pic/upload/win10-201456-1234.jpg
        if file_type == FILE_TYPE.HACK_IMAGE:
            i = physical_path.index("static")
            path = physical_path[i:]
            return self.util.get_config("endpoint") + "/" + path
        return ""
    def __generate_physical_path(self, file_name, file_type, hackathon_name=None):
        """Return the physical path of file including directory and file name
        :type file_name: str|unicode
        :param file_name: the original file name
        :type file_type: str | unicode
        :param file_type: type of file which decides the directories where file is saved.
        :rtype str
        :return physical path of the file to be saved
        """
        # Images: <base>/static/pic/upload[/<hackathon>]/<yyyymmdd>/<name>
        # Everything else: <base>/resources/lib/<name>
        if file_type == FILE_TYPE.HACK_IMAGE:
            path = "%s/static/pic/upload%s/%s/%s" % (
                self.__get_storage_base_dir(),
                "/" + hackathon_name if hackathon_name else "",
                time.strftime("%Y%m%d"),
                file_name)
            return abspath(path)
        return abspath("%s/resources/lib/%s" % (
            self.__get_storage_base_dir(),
            file_name))
    def __generate_file_name(self, origin_name, hackathon_name=None):
        """Generate a random file name
        :type origin_name: str | unicode
        :param origin_name the origin name of file
        :type hackathon_name: str | unicode
        :param hackathon_name: name of hackathon related to this file
        :rtype str
        :return a random file name which includes hackathon_name and time as parts
        """
        if not hackathon_name:
            hackathon_name = ""
        extension = os.path.splitext(origin_name)[1]
        # <hackathon>-<yyyymmdd>-<first 8 chars of uuid1><ext>; strip('-')
        # drops the stray leading dash when hackathon_name is empty.
        new_name = "%s-%s-%s%s" % (
            hackathon_name,
            time.strftime("%Y%m%d"),
            str(uuid.uuid1())[0:8],
            extension
        )
        return new_name.strip('-')
|
xunxunzgq/open-hackathon-bak_01
|
open-hackathon-server/src/hackathon/storage/local_storage.py
|
Python
|
mit
| 7,616 | 0.003808 |
from layer import *
class TanhLayer(Layer):
  """Layer of units with a tanh activation function."""

  def __init__(self, *args, **kwargs):
    super(TanhLayer, self).__init__(*args, **kwargs)

  @classmethod
  def IsLayerType(cls, proto):
    """Return True when the proto asks for TANH activations."""
    return proto.hyperparams.activation == deepnet_pb2.Hyperparams.TANH

  def ApplyActivation(self):
    """Squash the pre-activations in self.state through tanh, in place."""
    cm.tanh(self.state)

  def Sample(self):
    """Draw bernoulli-tanh samples from the activations into self.sample."""
    self.state.sample_bernoulli_tanh(target=self.sample)

  def ComputeDeriv(self):
    """Compute derivative w.r.t input given derivative w.r.t output."""
    self.deriv.apply_tanh_deriv(self.state)
    if self.hyperparams.dropout:
      # Zero the gradient of dropped-out units.
      self.deriv.mult(self.mask)

  def GetLoss(self, get_deriv=False, **kwargs):
    """Compute the squared loss between self.data and self.state.

    Args:
      get_deriv: If True, also writes the derivative of the loss w.r.t the
        layer inputs into self.deriv.
    Returns:
      A deepnet_pb2.Metrics protobuf carrying the error and batch count.
    Raises:
      Exception: if the configured loss function is not SQUARED_LOSS.
    """
    if self.loss_function != deepnet_pb2.Layer.SQUARED_LOSS:
      raise Exception('Unknown loss function for tanh units.')
    perf = deepnet_pb2.Metrics()
    perf.MergeFrom(self.proto.performance_stats)
    perf.count = self.batchsize
    # deriv <- state - data; squared euclidean norm is the loss.
    self.state.subtract(self.data, target=self.deriv)
    perf.error = self.deriv.euclid_norm() ** 2
    if get_deriv:
      self.ComputeDeriv()
    return perf

  def GetSparsityDivisor(self):
    """Return (1 + means) * (1 - means), computed into self.means_temp2."""
    self.means_temp2.assign(1)
    self.means_temp2.subtract(self.means, target=self.means_temp)
    self.means_temp2.add(self.means)
    self.means_temp2.mult(self.means_temp)
    return self.means_temp2
|
7404N/deepnet
|
deepnet/tanh_layer.py
|
Python
|
bsd-3-clause
| 1,606 | 0.010585 |
from mock import patch, call
import mock
from lxml import etree
from kiwi.solver.repository.rpm_md import SolverRepositoryRpmMd
from kiwi.solver.repository.base import SolverRepositoryBase
class TestSolverRepositoryRpmMd:
    """Unit tests for the rpm-md (repomd) solver repository driver."""
    def setup(self):
        # The fixture repomd.xml describes the metadata entries the driver
        # must process -- see the expected call lists below.
        self.xml_data = etree.parse('../data/repomd.xml')
        self.uri = mock.Mock()
        self.solver = SolverRepositoryRpmMd(self.uri)
    @patch.object(SolverRepositoryBase, 'download_from_repository')
    @patch.object(SolverRepositoryBase, '_create_solvables')
    @patch.object(SolverRepositoryBase, '_create_temporary_metadata_dir')
    @patch.object(SolverRepositoryBase, '_get_repomd_xml')
    def test__setup_repository_metadata(
        self, mock_xml, mock_mkdtemp, mock_create_solvables,
        mock_download_from_repository
    ):
        # patch decorators are applied bottom-up, so the first mock argument
        # (mock_xml) corresponds to the decorator closest to the function.
        mock_mkdtemp.return_value = 'metadata_dir.XX'
        mock_xml.return_value = self.xml_data
        self.solver._setup_repository_metadata()
        # Both metadata files listed in the fixture must be fetched into
        # the temporary metadata directory ...
        assert mock_download_from_repository.call_args_list == [
            call(
                'repodata/55f95a93-primary.xml.gz',
                'metadata_dir.XX/55f95a93-primary.xml.gz'
            ),
            call(
                'repodata/0815-other.xml.gz',
                'metadata_dir.XX/0815-other.xml.gz'
            )
        ]
        # ... and converted to solvables with rpmmd2solv and comps2solv.
        assert mock_create_solvables.call_args_list == [
            call('metadata_dir.XX', 'rpmmd2solv'),
            call('metadata_dir.XX', 'comps2solv')
        ]
    @patch.object(SolverRepositoryBase, '_get_repomd_xml')
    def test_timestamp(self, mock_xml):
        # Expected value matches the timestamp recorded in the fixture file.
        mock_xml.return_value = self.xml_data
        assert self.solver.timestamp() == '1478352191'
|
SUSE/kiwi
|
test/unit/solver/repository/rpm_md_test.py
|
Python
|
gpl-3.0
| 1,668 | 0 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Numdiff(AutotoolsPackage):
    """Numdiff is a little program that can be used to compare putatively
    similar files line by line and field by field, ignoring small numeric
    differences or/and different numeric formats."""

    homepage = 'https://www.nongnu.org/numdiff'
    url = 'http://nongnu.askapache.com/numdiff/numdiff-5.8.1.tar.gz'

    maintainers = ['davydden']

    version('5.9.0', '794461a7285d8b9b1f2c4a8149889ea6')
    version('5.8.1', 'a295eb391f6cb1578209fc6b4f9d994e')

    variant('nls', default=False,
            description="Enable Natural Language Support")
    variant('gmp', default=False,
            description="Use GNU Multiple Precision Arithmetic Library")

    depends_on('gettext', when='+nls')
    depends_on('gmp', when='+gmp')

    def configure_args(self):
        """Map the spec's variants onto ./configure flags."""
        spec = self.spec
        # NLS flag first, mirroring the variant order above.
        args = ['--enable-nls' if '+nls' in spec else '--disable-nls']
        if '+gmp' in spec:
            # compile with -O0 as per upstream known issue with optimization
            # and GMP; https://launchpad.net/ubuntu/+source/numdiff/+changelog
            # http://www.nongnu.org/numdiff/#issues
            # keep this variant off by default as one still encounter
            # GNU MP: Cannot allocate memory (size=2305843009206983184)
            args += ['--enable-gmp', 'CFLAGS=-O0']
        else:
            args += ['--disable-gmp']
        return args
|
skosukhin/spack
|
var/spack/repos/builtin/packages/numdiff/package.py
|
Python
|
lgpl-2.1
| 2,791 | 0.000717 |
'''
Test the sslheaders plugin.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test sslheaders plugin.
'''
Test.SkipUnless(
Condition.HasCurlFeature('http2'),
)
Test.Disk.File('sslheaders.log').Content = 'sslheaders.gold'
server = Test.MakeOriginServer("server", options={'--load': Test.TestDirectory + '/observer.py'})
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts = Test.MakeATSProcess("ts", select_ports=False)
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
# ts.addSSLfile("ssl/signer.pem")
ts.Variables.ssl_port = 4443
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 0,
'proxy.config.diags.debug.tags': 'http',
'proxy.config.http.cache.http': 0, # Make sure each request is forwarded to the origin server.
'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name.
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.http.server_ports': (
'ipv4:{0} ipv4:{1}:proto=http2;http:ssl ipv6:{0} ipv6:{1}:proto=http2;http:ssl'
.format(ts.Variables.port, ts.Variables.ssl_port)),
# 'proxy.config.ssl.client.verify.server': 0,
# 'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
# 'proxy.config.url_remap.pristine_host_hdr' : 1,
# 'proxy.config.ssl.client.certification_level': 2,
# 'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir),
# 'proxy.config.ssl.TLSv1_3': 0
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
'map http://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Disk.remap_config.AddLine(
'map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Disk.ssl_server_name_yaml.AddLines([
'- fqdn: "*bar.com"',
' verify_client: STRICT',
])
ts.Disk.plugin_config.AddLine(
'sslheaders.so SSL-Client-ID=client.subject'
)
tr = Test.AddTestRun()
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port))
tr.Processes.Default.Command = (
'curl -H "SSL-Client-ID: My Fake Client ID" --verbose --ipv4 --insecure --header "Host: bar.com"' +
' https://localhost:{}'.format(ts.Variables.ssl_port)
)
tr.Processes.Default.ReturnCode = 0
|
chitianhao/trafficserver
|
tests/gold_tests/pluginTest/sslheaders/sslheaders.test.py
|
Python
|
apache-2.0
| 3,767 | 0.004247 |
## This file is part of Invenio.
## Copyright (C) 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import cgi
import os
import time
import shutil
from invenio.config import (CFG_ACCESS_CONTROL_LEVEL_SITE,
CFG_SITE_LANG,
CFG_TMPSHAREDDIR,
CFG_SITE_URL,
CFG_SITE_SECURE_URL,
CFG_WEBSUBMIT_STORAGEDIR,
CFG_SITE_RECORD,
CFG_INSPIRE_SITE,
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_ICON_DOCTYPES,
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_ICON_SIZE)
from invenio.bibdocfile_config import CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_DOCTYPES, \
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_MISC, \
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_RESTRICTIONS, \
CFG_BIBDOCFILE_ICON_SUBFORMAT_RE
from invenio import webinterface_handler_config as apache
from invenio.access_control_config import VIEWRESTRCOLL
from invenio.access_control_mailcookie import mail_cookie_create_authorize_action
from invenio.access_control_engine import acc_authorize_action
from invenio.access_control_admin import acc_is_role
from invenio.webpage import page, pageheaderonly, \
pagefooteronly, warning_page, write_warning
from invenio.webuser import getUid, page_not_authorized, collect_user_info, isUserSuperAdmin, \
isGuestUser
from invenio import webjournal_utils
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.urlutils import make_canonical_urlargd, redirect_to_url
from invenio.messages import gettext_set_language
from invenio.search_engine import \
guess_primary_collection_of_a_record, get_colID, record_exists, \
create_navtrail_links, check_user_can_view_record, record_empty, \
is_user_owner_of_record
from invenio.bibdocfile import BibRecDocs, normalize_format, file_strip_ext, \
stream_restricted_icon, BibDoc, InvenioBibDocFileError, \
get_subformat_from_format
from invenio.errorlib import register_exception
from invenio.websearchadminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
import invenio.template
bibdocfile_templates = invenio.template.load('bibdocfile')
webstyle_templates = invenio.template.load('webstyle')
websubmit_templates = invenio.template.load('websubmit')
websearch_templates = invenio.template.load('websearch')
from invenio.bibdocfile_managedocfiles import \
create_file_upload_interface, \
get_upload_file_interface_javascript, \
get_upload_file_interface_css, \
move_uploaded_files_to_storage
bibdocfile_templates = invenio.template.load('bibdocfile')
class WebInterfaceFilesPages(WebInterfaceDirectory):
    """Serve /<CFG_SITE_RECORD>/<recid>/files/* URLs: stream a specific
    attached file or render the list of files of a record."""

    def __init__(self, recid):
        # Record whose attached files this directory node exposes.
        self.recid = recid

    def _lookup(self, component, path):
        # after /<CFG_SITE_RECORD>/<recid>/files/ every part is used as the file
        # name
        filename = component

        def getfile(req, form):
            """Stream the requested file or render the record's file list.

            Order of operations: wash arguments, run record-level access
            checks, resolve docname/format/version from the URL and query
            arguments, enforce per-file restriction/hidden checks, then
            either stream the matched file or fall through to the file-list
            page with any accumulated warnings.
            """
            args = wash_urlargd(form, bibdocfile_templates.files_default_urlargd)
            ln = args['ln']
            _ = gettext_set_language(ln)
            uid = getUid(req)
            user_info = collect_user_info(req)
            verbose = args['verbose']
            if verbose >= 1 and not isUserSuperAdmin(user_info):
                # Only SuperUser can see all the details!
                verbose = 0
            if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE > 1:
                return page_not_authorized(req, "/%s/%s" % (CFG_SITE_RECORD, self.recid),
                                           navmenuid='submit')
            if record_exists(self.recid) < 1:
                msg = "<p>%s</p>" % _("Requested record does not seem to exist.")
                return warning_page(msg, req, ln)
            if record_empty(self.recid):
                msg = "<p>%s</p>" % _("Requested record does not seem to have been integrated.")
                return warning_page(msg, req, ln)
            (auth_code, auth_message) = check_user_can_view_record(user_info, self.recid)
            if auth_code and user_info['email'] == 'guest':
                # Guest on a restricted record: allow it only when the record
                # belongs to a released journal issue, otherwise send to login.
                if webjournal_utils.is_recid_in_released_issue(self.recid):
                    # We can serve the file
                    pass
                else:
                    cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
                    target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
                        make_canonical_urlargd({'action': cookie, 'ln' : ln, 'referer' : \
                            CFG_SITE_SECURE_URL + user_info['uri']}, {})
                    return redirect_to_url(req, target, norobot=True)
            elif auth_code:
                # Logged-in but unauthorized user.
                if webjournal_utils.is_recid_in_released_issue(self.recid):
                    # We can serve the file
                    pass
                else:
                    return page_not_authorized(req, "../", \
                        text = auth_message)
            # In read-only site mode downloads are not registered.
            readonly = CFG_ACCESS_CONTROL_LEVEL_SITE == 1
            # From now on: either the user provided a specific file
            # name (and a possible version), or we return a list of
            # all the available files. In no case are the docids
            # visible.
            try:
                bibarchive = BibRecDocs(self.recid)
            except InvenioBibDocFileError:
                register_exception(req=req, alert_admin=True)
                msg = "<p>%s</p><p>%s</p>" % (
                    _("The system has encountered an error in retrieving the list of files for this document."),
                    _("The error has been logged and will be taken in consideration as soon as possible."))
                return warning_page(msg, req, ln)
            if bibarchive.deleted_p():
                req.status = apache.HTTP_GONE
                return warning_page(_("Requested record does not seem to exist."), req, ln)
            docname = ''
            docformat = ''
            version = ''
            warn = ''
            if filename:
                # We know the complete file name, guess which docid it
                # refers to
                ## TODO: Change the extension system according to ext.py from setlink
                ## and have a uniform extension mechanism...
                docname = file_strip_ext(filename)
                docformat = filename[len(docname):]
                if docformat and docformat[0] != '.':
                    docformat = '.' + docformat
                if args['subformat']:
                    docformat += ';%s' % args['subformat']
            else:
                docname = args['docname']
            if not docformat:
                docformat = args['format']
                if args['subformat']:
                    docformat += ';%s' % args['subformat']
            if not version:
                version = args['version']
            ## Download as attachment
            is_download = False
            if args['download']:
                is_download = True
            # version could be either empty, or all or an integer
            try:
                int(version)
            except ValueError:
                if version != 'all':
                    version = ''
            # Hidden files are only listed for superusers.
            display_hidden = isUserSuperAdmin(user_info)
            if version != 'all':
                # search this filename in the complete list of files
                for doc in bibarchive.list_bibdocs():
                    if docname == bibarchive.get_docname(doc.id):
                        try:
                            try:
                                docfile = doc.get_file(docformat, version)
                            except InvenioBibDocFileError, msg:
                                req.status = apache.HTTP_NOT_FOUND
                                if not CFG_INSPIRE_SITE and req.headers_in.get('referer'):
                                    ## There must be a broken link somewhere.
                                    ## Maybe it's good to alert the admin
                                    register_exception(req=req, alert_admin=True)
                                warn += write_warning(_("The format %s does not exist for the given version: %s") % (cgi.escape(docformat), cgi.escape(str(msg))))
                                break
                            (auth_code, auth_message) = docfile.is_restricted(user_info)
                            if auth_code != 0 and not is_user_owner_of_record(user_info, self.recid):
                                # Restricted icons are replaced by a placeholder
                                # instead of an authorization round-trip.
                                if CFG_BIBDOCFILE_ICON_SUBFORMAT_RE.match(get_subformat_from_format(docformat)):
                                    return stream_restricted_icon(req)
                                if user_info['email'] == 'guest':
                                    cookie = mail_cookie_create_authorize_action('viewrestrdoc', {'status' : docfile.get_status()})
                                    target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
                                        make_canonical_urlargd({'action': cookie, 'ln' : ln, 'referer' : \
                                            CFG_SITE_SECURE_URL + user_info['uri']}, {})
                                    redirect_to_url(req, target)
                                else:
                                    req.status = apache.HTTP_UNAUTHORIZED
                                    warn += write_warning(_("This file is restricted: ") + str(auth_message))
                                    break
                            if not docfile.hidden_p():
                                if not readonly:
                                    ip = str(req.remote_ip)
                                    doc.register_download(ip, docfile.get_version(), docformat, uid)
                                try:
                                    return docfile.stream(req, download=is_download)
                                except InvenioBibDocFileError, msg:
                                    register_exception(req=req, alert_admin=True)
                                    req.status = apache.HTTP_INTERNAL_SERVER_ERROR
                                    warn += write_warning(_("An error has happened in trying to stream the request file."))
                            else:
                                req.status = apache.HTTP_UNAUTHORIZED
                                warn += write_warning(_("The requested file is hidden and can not be accessed."))
                        except InvenioBibDocFileError, msg:
                            register_exception(req=req, alert_admin=True)
            if docname and docformat and not warn:
                req.status = apache.HTTP_NOT_FOUND
                warn += write_warning(_("Requested file does not seem to exist."))
            # filelist = bibarchive.display("", version, ln=ln, verbose=verbose, display_hidden=display_hidden)
            filelist = bibdocfile_templates.tmpl_display_bibrecdocs(bibarchive, "", version, ln=ln, verbose=verbose, display_hidden=display_hidden)
            t = warn + bibdocfile_templates.tmpl_filelist(
                ln=ln,
                filelist=filelist)
            # Build the detailed-record tab bar surrounding the file list.
            cc = guess_primary_collection_of_a_record(self.recid)
            unordered_tabs = get_detailed_page_tabs(get_colID(cc), self.recid, ln)
            ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
            ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
            link_ln = ''
            if ln != CFG_SITE_LANG:
                link_ln = '?ln=%s' % ln
            tabs = [(unordered_tabs[tab_id]['label'],
                     '%s/%s/%s/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, tab_id, link_ln),
                     tab_id == 'files',
                     unordered_tabs[tab_id]['enabled'])
                    for (tab_id, dummy_order) in ordered_tabs_id
                    if unordered_tabs[tab_id]['visible'] is True]
            tabs_counts = get_detailed_page_tabs_counts(self.recid)
            top = webstyle_templates.detailed_record_container_top(self.recid,
                                                                   tabs,
                                                                   args['ln'],
                                                                   citationnum=tabs_counts['Citations'],
                                                                   referencenum=tabs_counts['References'],
                                                                   discussionnum=tabs_counts['Discussions'])
            bottom = webstyle_templates.detailed_record_container_bottom(self.recid,
                                                                         tabs,
                                                                         args['ln'])
            title, description, keywords = websearch_templates.tmpl_record_page_header_content(req, self.recid, args['ln'])
            return pageheaderonly(title=title,
                                  navtrail=create_navtrail_links(cc=cc, aas=0, ln=ln) + \
                                  ''' > <a class="navtrail" href="%s/%s/%s">%s</a>
> %s''' % \
                                  (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, title, _("Access to Fulltext")),
                                  description=description,
                                  keywords=keywords,
                                  uid=uid,
                                  language=ln,
                                  req=req,
                                  navmenuid='search',
                                  navtrail_append_title_p=0) + \
                websearch_templates.tmpl_search_pagestart(ln) + \
                top + t + bottom + \
                websearch_templates.tmpl_search_pageend(ln) + \
                pagefooteronly(language=ln, req=req)
        return getfile, []

    def __call__(self, req, form):
        """Called in case of URLs like /CFG_SITE_RECORD/123/files without
        trailing slash.
        """
        args = wash_urlargd(form, bibdocfile_templates.files_default_urlargd)
        ln = args['ln']
        link_ln = ''
        if ln != CFG_SITE_LANG:
            link_ln = '?ln=%s' % ln
        # Redirect to the canonical URL with trailing slash.
        return redirect_to_url(req, '%s/%s/%s/files/%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, link_ln))
def bibdocfile_legacy_getfile(req, form):
    """Handle legacy /getfile.py URLs.

    Resolves the target record id and document name (possibly from a bare
    docid) and issues a permanent redirect to the modern
    /<CFG_SITE_RECORD>/<recid>/files/<name><format> URL.
    """
    args = wash_urlargd(form, {
        'recid': (int, 0),
        'docid': (int, 0),
        'version': (str, ''),
        'name': (str, ''),
        'format': (str, ''),
        'ln' : (str, CFG_SITE_LANG)
        })
    _ = gettext_set_language(args['ln'])

    def _getfile_py(req, recid=0, docid=0, version="", name="", docformat="", ln=CFG_SITE_LANG):
        # Fill in recid/name from the docid when they were not supplied.
        if not recid:
            ## Let's obtain the recid from the docid
            if docid:
                try:
                    bibdoc = BibDoc(docid=docid)
                    recid = bibdoc.bibrec_links[0]["recid"]
                except InvenioBibDocFileError:
                    return warning_page(_("An error has happened in trying to retrieve the requested file."), req, ln)
            else:
                return warning_page(_('Not enough information to retrieve the document'), req, ln)
        else:
            brd = BibRecDocs(recid)
            if not name and docid:
                ## Let's obtain the name from the docid
                try:
                    name = brd.get_docname(docid)
                except InvenioBibDocFileError:
                    return warning_page(_("An error has happened in trying to retrieving the requested file."), req, ln)
        docformat = normalize_format(docformat)
        # BUGFIX: the optional version used to be appended right after the
        # ln value without a '&' separator, producing a malformed query
        # string such as "?ln=enversion=2".
        redirect_to_url(req, '%s/%s/%s/files/%s%s?ln=%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, recid, name, docformat, ln, version and '&version=%s' % version or ''), apache.HTTP_MOVED_PERMANENTLY)
    return _getfile_py(req, **args)
# --------------------------------------------------
class WebInterfaceManageDocFilesPages(WebInterfaceDirectory):
    """Admin web interface to manage (upload/revise) the files of a record."""

    _exports = ['', 'managedocfiles', 'managedocfilesasync']

    def managedocfiles(self, req, form):
        """
        Display admin interface to manage files of a record.

        With do=0 (default) it shows the file-manager form for the given
        recid; with do!=0 it applies (or, with cancel set, discards) the
        changes previously staged in the temporary working directory
        identified by 'access'.
        """
        argd = wash_urlargd(form, {
            'ln': (str, ''),
            'access': (str, ''),
            'recid': (int, None),
            'do': (int, 0),
            'cancel': (str, None),
            })
        _ = gettext_set_language(argd['ln'])
        uid = getUid(req)
        user_info = collect_user_info(req)
        # Check authorization
        (auth_code, auth_msg) = acc_authorize_action(req,
                                                     'runbibdocfile')
        if auth_code and user_info['email'] == 'guest':
            # Ask to login
            target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
                     make_canonical_urlargd({'ln' : argd['ln'],
                                             'referer' : CFG_SITE_SECURE_URL + user_info['uri']}, {})
            return redirect_to_url(req, target)
        elif auth_code:
            return page_not_authorized(req, referer="/%s/managedocfiles" % CFG_SITE_RECORD,
                                       uid=uid, text=auth_msg,
                                       ln=argd['ln'],
                                       navmenuid="admin")
        # Prepare navtrail
        navtrail = '''<a class="navtrail" href="%(CFG_SITE_URL)s/help/admin">Admin Area</a> > %(manage_files)s''' \
                   % {'CFG_SITE_URL': CFG_SITE_URL,
                      'manage_files': _("Manage Document Files")}
        body = ''
        if argd['do'] != 0 and not argd['cancel']:
            # Apply modifications
            working_dir = os.path.join(CFG_TMPSHAREDDIR,
                                       'websubmit_upload_interface_config_' + str(uid),
                                       argd['access'])
            if not os.path.isdir(working_dir):
                # We accessed the url without preliminary steps
                # (we did not upload a file)
                # Our working dir does not exist
                # Display the file manager
                argd['do'] = 0
            else:
                move_uploaded_files_to_storage(working_dir=working_dir,
                                               recid=argd['recid'],
                                               icon_sizes=CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_ICON_SIZE,
                                               create_icon_doctypes=CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_ICON_DOCTYPES,
                                               force_file_revision=False)
                # Clean temporary directory
                shutil.rmtree(working_dir)
                # Confirm modifications
                body += '<p style="color:#0f0">%s</p>' % \
                        (_('Your modifications to record #%i have been submitted') % argd['recid'])
        elif argd['cancel']:
            # Clean temporary directory
            # NOTE(review): rmtree raises if the working dir is already
            # gone (e.g. double-cancel) — confirm this cannot happen here.
            working_dir = os.path.join(CFG_TMPSHAREDDIR,
                                       'websubmit_upload_interface_config_' + str(uid),
                                       argd['access'])
            shutil.rmtree(working_dir)
            body += '<p style="color:#c00">%s</p>' % \
                    (_('Your modifications to record #%i have been cancelled') % argd['recid'])
        if not argd['recid'] or argd['do'] != 0:
            # No record selected (or changes just applied): show the
            # record-picker form.
            body += '''
<form method="post" action="%(CFG_SITE_URL)s/%(CFG_SITE_RECORD)s/managedocfiles">
<label for="recid">%(edit_record)s:</label>
<input type="text" name="recid" id="recid" />
<input type="submit" value="%(edit)s" class="adminbutton" />
</form>
''' % {'edit': _('Edit'),
       'edit_record': _('Edit record'),
       'CFG_SITE_URL': CFG_SITE_URL,
       'CFG_SITE_RECORD': CFG_SITE_RECORD}
        # Timestamp used as the session/"access" key of the upload interface.
        access = time.strftime('%Y%m%d_%H%M%S')
        if argd['recid'] and argd['do'] == 0:
            # Displaying interface to manage files
            # Prepare navtrail
            title, dummy_description, dummy_keywords = websearch_templates.tmpl_record_page_header_content(req, argd['recid'],
                                                                                                           argd['ln'])
            navtrail = '''<a class="navtrail" href="%(CFG_SITE_URL)s/help/admin">Admin Area</a> >
<a class="navtrail" href="%(CFG_SITE_URL)s/%(CFG_SITE_RECORD)s/managedocfiles">%(manage_files)s</a> >
%(record)s: %(title)s
''' \
            % {'CFG_SITE_URL': CFG_SITE_URL,
               'title': title,
               'manage_files': _("Document File Manager"),
               'record': _("Record #%i") % argd['recid'],
               'CFG_SITE_RECORD': CFG_SITE_RECORD}
            body += create_file_upload_interface(\
                recid=argd['recid'],
                ln=argd['ln'],
                uid=uid,
                sbm_access=access,
                display_hidden_files=True,
                restrictions_and_desc=CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_RESTRICTIONS,
                doctypes_and_desc=CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_DOCTYPES,
                **CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_MISC)[1]
            body += '''<br />
<form method="post" action="%(CFG_SITE_URL)s/%(CFG_SITE_RECORD)s/managedocfiles">
<input type="hidden" name="recid" value="%(recid)s" />
<input type="hidden" name="do" value="1" />
<input type="hidden" name="access" value="%(access)s" />
<input type="hidden" name="ln" value="%(ln)s" />
<div style="font-size:small">
<input type="submit" name="cancel" value="%(cancel_changes)s" />
<input type="submit" onclick="user_must_confirm_before_leaving_page=false;return true;" class="adminbutton" name="submit" id="applyChanges" value="%(apply_changes)s" />
</div></form>''' % \
            {'apply_changes': _("Apply changes"),
             'cancel_changes': _("Cancel all changes"),
             'recid': argd['recid'],
             'access': access,
             'ln': argd['ln'],
             'CFG_SITE_URL': CFG_SITE_URL,
             'CFG_SITE_RECORD': CFG_SITE_RECORD}
            body += websubmit_templates.tmpl_page_do_not_leave_submission_js(argd['ln'], enabled=True)
        return page(title = _("Document File Manager") + (argd['recid'] and (': ' + _("Record #%i") % argd['recid']) or ''),
                    navtrail=navtrail,
                    navtrail_append_title_p=0,
                    metaheaderadd = get_upload_file_interface_javascript(form_url_params='?access='+access) + \
                                    get_upload_file_interface_css(),
                    body = body,
                    uid = uid,
                    language=argd['ln'],
                    req=req,
                    navmenuid='admin')

    def managedocfilesasync(self, req, form):
        "Upload file and returns upload interface"
        argd = wash_urlargd(form, {
            'ln': (str, ''),
            'recid': (int, 1),
            'doctype': (str, ''),
            'access': (str, ''),
            'indir': (str, ''),
            })
        user_info = collect_user_info(req)
        include_headers = False
        # User submitted either through WebSubmit, or admin interface.
        if form.has_key('doctype') and form.has_key('indir') \
               and form.has_key('access'):
            # Submitted through WebSubmit. Check rights
            include_headers = True
            working_dir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR,
                                       argd['indir'], argd['doctype'],
                                       argd['access'])
            try:
                # Reject path-traversal attempts in indir/doctype/access.
                assert(working_dir == os.path.abspath(working_dir))
            except AssertionError:
                raise apache.SERVER_RETURN(apache.HTTP_UNAUTHORIZED)
            try:
                # Retrieve recid from working_dir, safer.
                # NOTE(review): bare except deliberately falls back to ""
                # when the SN file is missing/unreadable.
                recid_fd = file(os.path.join(working_dir, 'SN'))
                recid = int(recid_fd.read())
                recid_fd.close()
            except:
                recid = ""
            try:
                act_fd = file(os.path.join(working_dir, 'act'))
                action = act_fd.read()
                act_fd.close()
            except:
                action = ""
            # Is user authorized to perform this action?
            auth_code = acc_authorize_action(user_info,
                                             "submit",
                                             authorized_if_no_roles=not isGuestUser(getUid(req)),
                                             doctype=argd['doctype'],
                                             act=action)[0]
            if auth_code and not acc_is_role("submit", doctype=argd['doctype'], act=action):
                # There is NO authorization plugged. User should have access
                auth_code = 0
        else:
            # User must be allowed to attach files
            auth_code = acc_authorize_action(user_info, 'runbibdocfile')[0]
            recid = argd['recid']
        if auth_code:
            raise apache.SERVER_RETURN(apache.HTTP_UNAUTHORIZED)
        return create_file_upload_interface(recid=recid,
                                            ln=argd['ln'],
                                            print_outside_form_tag=False,
                                            print_envelope=False,
                                            form=form,
                                            include_headers=include_headers,
                                            sbm_indir=argd['indir'],
                                            sbm_access=argd['access'],
                                            sbm_doctype=argd['doctype'],
                                            uid=user_info['uid'])[1]

    # The bare /managedocfiles URL maps to the main interface.
    __call__ = managedocfiles
|
kaplun/ops
|
modules/bibdocfile/lib/bibdocfile_webinterface.py
|
Python
|
gpl-2.0
| 26,790 | 0.005524 |
import urlparse
import os, sys
tools_dir = os.environ.get('TOOLSDIR')
lib_path = os.path.abspath(tools_dir)
sys.path.append(lib_path)
from msc_utils_parsing import *
from blockchain_utils import *
from msc_apps import *
import random
def send_form_response(response_dict):
    """Validate a send form and build the corresponding unsigned transaction.

    response_dict maps field names to single-element value lists (CGI
    style).  Required fields: from_address, to_address, amount, currency,
    fee; optional: pubKey (hex public key) and marker (when 'true', dust is
    sent to the exodus address to mark payments of sells).

    Returns a (response, error) tuple: on success, response is a JSON
    string with keys status/transaction/sourceScript and error is None;
    on failure, response is None and error carries the message.
    """
    expected_fields=['from_address', 'to_address', 'amount', 'currency', 'fee']
    # if marker is True, send dust to marker (for payments of sells)
    for field in expected_fields:
        if not response_dict.has_key(field):
            info('No field '+field+' in response dict '+str(response_dict))
            return (None, 'No field '+field+' in response dict '+str(response_dict))
        if len(response_dict[field]) != 1:
            info('Multiple values for field '+field)
            return (None, 'Multiple values for field '+field)
    # Prefer an explicitly posted pubKey; otherwise try to derive one below.
    if response_dict.has_key( 'pubKey' ) and is_pubkey_valid( response_dict['pubKey'][0]):
        pubkey = response_dict['pubKey'][0]
        response_status='OK'
    else:
        response_status='invalid pubkey'
        pubkey=None
    from_addr=response_dict['from_address'][0]
    if not is_valid_bitcoin_address_or_pubkey(from_addr):
        return (None, 'From address is neither bitcoin address nor pubkey')
    to_addr=response_dict['to_address'][0]
    if not is_valid_bitcoin_address(to_addr):
        return (None, 'To address is not a bitcoin address')
    amount=response_dict['amount'][0]
    # Amounts/fees arrive in satoshi; reject negatives and values above the cap.
    if float(amount)<0 or float( from_satoshi(amount))>max_currency_value:
        return (None, 'Invalid amount: ' + str( from_satoshi( amount )) + ', max: ' + str( max_currency_value ))
    btc_fee=response_dict['fee'][0]
    if float(btc_fee)<0 or float( from_satoshi(btc_fee))>max_currency_value:
        return (None, 'Invalid fee: ' + str( from_satoshi( amount )) + ', max: ' + str( max_currency_value ))
    # Map the currency symbol to its numeric currency id (0 = plain BTC).
    currency=response_dict['currency'][0]
    if currency=='OMNI':
        currency_id=1
    else:
        if currency=='T-OMNI':
            currency_id=2
        else:
            if currency=='BTC':
                currency_id=0
            else:
                if currency[:2] == 'SP':
                    # Smart property: the id follows the "SP" prefix.
                    currency_id=int(currency[2:])
                else:
                    return (None, 'Invalid currency')
    marker_addr=None
    try:
        marker=response_dict['marker'][0]
        if marker.lower()=='true':
            marker_addr=exodus_address
    except KeyError:
        # if no marker, marker_addr stays None
        pass
    if pubkey == None:
        # No valid pubKey posted: from_addr may itself be a pubkey
        # (66/130 hex chars) or an address whose pubkey we can look up.
        tx_to_sign_dict={'transaction':'','sourceScript':''}
        l=len(from_addr)
        if l == 66 or l == 130: # probably pubkey
            if is_pubkey_valid(from_addr):
                pubkey=from_addr
                response_status='OK'
            else:
                response_status='invalid pubkey'
        else:
            if not is_valid_bitcoin_address(from_addr):
                response_status='invalid address'
            else:
                from_pubkey=bc_getpubkey(from_addr)
                if not is_pubkey_valid(from_pubkey):
                    response_status='missing pubkey'
                else:
                    pubkey=from_pubkey
                    response_status='OK'
    try:
        if pubkey != None:
            tx_to_sign_dict=prepare_send_tx_for_signing( pubkey, to_addr, marker_addr, currency_id, amount, btc_fee)
        else:
            # hack to show error on page
            tx_to_sign_dict['sourceScript']=response_status
        response='{"status":"'+response_status+'", "transaction":"'+tx_to_sign_dict['transaction']+'", "sourceScript":"'+tx_to_sign_dict['sourceScript']+'"}'
        print "Sending unsigned tx to user for signing", response
        return (response, None)
    except Exception as e:
        print "error creating unsigned tx", e
        return (None, str(e))
# simple send and bitcoin send (with or without marker)
def prepare_send_tx_for_signing(from_address, to_address, marker_address, currency_id, amount, btc_fee=500000):
    """Build an unsigned send transaction and the script needed to sign it.

    from_address may be a bitcoin address or a hex pubkey (detected by its
    leading '0').  currency_id 0 produces a plain bitcoin send (with an
    optional dust output to marker_address); any other id produces an Omni
    "simple send" encoded as a BIP11 1-of-2 multisig data output.
    amount and btc_fee are satoshi values (passed as strings).

    Returns {'transaction': raw_unsigned_tx, 'sourceScript': prevout_script}.
    Raises Exception when unspent outputs cannot be fetched or do not cover
    the required value plus fee.
    """
    print '*** send tx for signing, amount: ' + amount
    print ' btc_fee: ' + btc_fee
    # consider a more general func that covers also sell offer and sell accept
    # check if address or pubkey was given as from address
    if from_address.startswith('0'): # a pubkey was given
        from_address_pub=from_address
        from_address=get_addr_from_key(from_address)
    else: # address was given
        from_address_pub=addrPub=bc_getpubkey(from_address)
        from_address_pub=from_address_pub.strip()
    # set change address to from address
    change_address_pub=from_address_pub
    changeAddress=from_address
    satoshi_amount=int( amount )
    fee=int( btc_fee )
    # differ bitcoin send and other currencies
    if currency_id == 0: # bitcoin
        # normal bitcoin send
        required_value=satoshi_amount
        # if marker is needed, allocate dust for the marker
        if marker_address != None:
            required_value+=1*dust_limit
    else:
        tx_type=0 # only simple send is supported
        # Omni send needs dust for exodus, recipient and the data output.
        required_value=4*dust_limit
    #------------------------------------------- New utxo calls
    fee_total_satoshi=required_value+fee
    dirty_txes = bc_getutxo( from_address, fee_total_satoshi )
    # Error strings start with "Con..." (connection) or "Low..." (low funds).
    if (dirty_txes['error'][:3]=='Con'):
        raise Exception({ "status": "NOT OK", "error": "Couldn't get list of unspent tx's. Response Code: " + dirty_txes['code'] })
    if (dirty_txes['error'][:3]=='Low'):
        raise Exception({ "status": "NOT OK", "error": "Not enough funds, try again. Needed: " + str(fee_total_satoshi) + " but Have: " + dirty_txes['avail'] })
    inputs_total_value = dirty_txes['avail']
    inputs = dirty_txes['utxos']
    #------------------------------------------- Old utxo calls
    # get utxo required for the tx
    #utxo_all=get_utxo(from_address, required_value+fee)
    #utxo_split=utxo_all.split()
    #inputs_number=len(utxo_split)/12
    #inputs=[]
    #inputs_total_value=0
    #if inputs_number < 1:
    # info('Error not enough BTC to generate tx - no inputs')
    # raise Exception('This address must have enough BTC for protocol transaction fees and miner fees')
    #for i in range(inputs_number):
    # inputs.append(utxo_split[i*12+3])
    # try:
    # inputs_total_value += int(utxo_split[i*12+7])
    # except ValueError:
    # info('Error parsing utxo, '+ str(utxo_split) )
    # raise Exception('Error: parsing inputs was invalid, do you have enough BTC?')
    #inputs_outputs='/dev/stdout'
    #for i in inputs:
    # inputs_outputs+=' -i '+i
    #---------------------------------------------- End Old utxo calls
    # Accumulate the mktx argument string: inputs as txid:vout pairs.
    inputs_outputs='/dev/stdout'
    for i in inputs:
        inputs_outputs+=' -i '+str(i[0])+':'+str(i[1])
    # calculate change
    change_value=inputs_total_value-required_value-fee
    if change_value < 0:
        info('Error not enough BTC to generate tx - negative change')
        raise Exception('This address must have enough BTC for miner fees and protocol transaction fees')
    if currency_id == 0: # bitcoin
        # create a normal bitcoin transaction (not mastercoin)
        # dust to marker if required
        # amount to to_address
        # change to change
        if marker_address != None:
            inputs_outputs+=' -o '+marker_address+':'+str(dust_limit)
        inputs_outputs+=' -o '+to_address+':'+str(satoshi_amount)
    else:
        # create multisig tx
        # simple send - multisig
        # dust to exodus
        # dust to to_address
        # double dust to rawscript "1 [ change_address_pub ] [ dataHex_obfuscated ] 2 checkmultisig"
        # change to change
        dataSequenceNum=1
        # Pack the simple-send payload: version, seq, tx type, currency id,
        # amount, padding — all big-endian hex fields.
        dataHex = '{:02x}'.format(0) + '{:02x}'.format(dataSequenceNum) + \
            '{:08x}'.format(tx_type) + '{:08x}'.format(currency_id) + \
            '{:016x}'.format(satoshi_amount) + '{:06x}'.format(0)
        dataBytes = dataHex.decode('hex_codec')
        dataAddress = hash_160_to_bc_address(dataBytes[1:21])
        # create the BIP11 magic
        change_address_compressed_pub=get_compressed_pubkey_format( change_address_pub )
        # Obfuscate the payload by XOR with sha256(sender address) and wrap
        # it so it parses as a (nearly) valid compressed pubkey.
        obfus_str=get_sha256(from_address)[:62]
        padded_dataHex=dataHex[2:]+''.zfill(len(change_address_compressed_pub)-len(dataHex))[2:]
        dataHex_obfuscated=get_string_xor(padded_dataHex,obfus_str).zfill(62)
        random_byte=hex(random.randrange(0,255)).strip('0x').zfill(2)
        hacked_dataHex_obfuscated='02'+dataHex_obfuscated+random_byte
        info('plain dataHex: --'+padded_dataHex+'--')
        info('obfus dataHex: '+hacked_dataHex_obfuscated)
        valid_dataHex_obfuscated=get_nearby_valid_pubkey(hacked_dataHex_obfuscated)
        info('valid dataHex: '+valid_dataHex_obfuscated)
        script_str='1 [ '+change_address_pub+' ] [ '+valid_dataHex_obfuscated+' ] 2 checkmultisig'
        info('change address is '+changeAddress)
        info('too_address is '+to_address)
        info('total inputs value is '+str(inputs_total_value))
        info('fee is '+str(fee))
        info('dust limit is '+str(dust_limit))
        info('BIP11 script is '+script_str)
        dataScript=rawscript(script_str)
        inputs_outputs+=' -o '+exodus_address+':'+str(dust_limit) + \
            ' -o '+to_address+':'+str(dust_limit) + \
            ' -o '+dataScript+':'+str(2*dust_limit)
    if change_value >= dust_limit:
        inputs_outputs+=' -o '+changeAddress+':'+str(change_value)
    else:
        # under dust limit leave all remaining as fees
        pass
    tx=mktx(inputs_outputs)
    info('inputs_outputs are '+inputs_outputs)
    info('parsed tx is '+str(get_json_tx(tx)))
    # Script of the outputs being spent (standard P2PKH) — the wallet signs
    # against this on the client side.
    hash160=bc_address_to_hash_160(from_address).encode('hex_codec')
    prevout_script='OP_DUP OP_HASH160 ' + hash160 + ' OP_EQUALVERIFY OP_CHECKSIG'
    # tx, inputs
    return_dict={'transaction':tx, 'sourceScript':prevout_script}
    return return_dict
def send_handler(environ, start_response):
    """WSGI entry point for /send: delegate form handling to send_form_response."""
    return general_handler(environ, start_response, send_form_response)
|
Nevtep/omniwallet
|
api/send.py
|
Python
|
agpl-3.0
| 10,105 | 0.017912 |
from PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal
from PyQt5.Qt import QSystemTrayIcon, QIcon
class TrayIcon(QSystemTrayIcon):
    """System-tray icon that re-exposes Qt activation events as
    (reason-code, reason-name) signals and falls back to the parent
    window's icon when none is given."""

    # Index i names the QSystemTrayIcon activation reason with value i.
    ActivationReason = ['Unknown', 'Context', 'DoubleClick', 'Trigger', 'MiddleClick']
    onactivate = pyqtSignal(int, str)
    onmessageclick = pyqtSignal()

    def __init__(self, parent, toolTip = '', icon = ''):
        super(TrayIcon, self).__init__(parent)
        self.setObjectName('trayIcon')
        self.setIcon(icon)
        self.setToolTip(toolTip)
        # Re-emit Qt's built-in signals through our own.
        self.activated.connect(self.activateHandler)
        self.messageClicked.connect(self.onmessageclick)

    # Slots
    # Set the tray icon's tooltip text.
    @pyqtSlot(str)
    def setToolTip(self, toolTip):
        super(TrayIcon, self).setToolTip(toolTip)

    # Set the tray icon; an empty value falls back to the parent window's icon.
    @pyqtSlot(str)
    def setIcon(self, icon):
        if icon:
            icon = QIcon(icon)
        else:
            icon = self.parent().windowIcon()
        super(TrayIcon, self).setIcon(QIcon(icon))

    # Set the context (right-click) menu.
    @pyqtSlot(QObject)
    def setContextMenu(self, menu):
        super(TrayIcon, self).setContextMenu(menu)

    # Return whether the tray icon is currently visible.
    @pyqtSlot(result = bool)
    def isVisible(self):
        return super(TrayIcon, self).isVisible()

    # Return whether balloon messages are supported.
    @pyqtSlot(result = bool)
    def supportsMessages(self):
        return super(TrayIcon, self).supportsMessages()

    # Return whether a system tray is available on this platform.
    @pyqtSlot(result = bool)
    def isSystemTrayAvailable(self):
        return super(TrayIcon, self).isSystemTrayAvailable()

    # Show a tray balloon message
    # showMessage
    # Set visibility
    # setVisible
    # Show
    # show
    # Hide
    # hide

    # Signals
    def activateHandler(self, reason):
        # Forward Qt's activation reason as (code, human-readable name).
        self.onactivate.emit(reason, TrayIcon.ActivationReason[reason])
xiruibing/hae
|
src/trayicon.py
|
Python
|
mit
| 1,695 | 0.040583 |
"""
Extended docstrings for functions.py
"""
# Docstring text for the mpmath constant ``pi``.
pi = r"""
`\pi`, roughly equal to 3.141592654, represents the area of the unit
circle, the half-period of trigonometric functions, and many other
things in mathematics.
Mpmath can evaluate `\pi` to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +pi
3.1415926535897932384626433832795028841971693993751
This shows digits 99991-100000 of `\pi` (the last digit is actually
a 4 when the decimal expansion is truncated, but here the nearest
rounding is used)::
>>> mp.dps = 100000
>>> str(pi)[-10:]
'5549362465'
**Possible issues**
:data:`pi` always rounds to the nearest floating-point
number when used. This means that exact mathematical identities
involving `\pi` will generally not be preserved in floating-point
arithmetic. In particular, multiples of :data:`pi` (except for
the trivial case ``0*pi``) are *not* the exact roots of
:func:`~mpmath.sin`, but differ roughly by the current epsilon::
>>> mp.dps = 15
>>> sin(pi)
1.22464679914735e-16
One solution is to use the :func:`~mpmath.sinpi` function instead::
>>> sinpi(1)
0.0
See the documentation of trigonometric functions for additional
details.
"""

# Docstring text for the mpmath constant ``degree`` (pi/180).
degree = r"""
Represents one degree of angle, `1^{\circ} = \pi/180`, or
about 0.01745329. This constant may be evaluated to arbitrary
precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +degree
0.017453292519943295769236907684886127134428718885417
The :data:`degree` object is convenient for conversion
to radians::
>>> sin(30 * degree)
0.5
>>> asin(0.5) / degree
30.0
"""

# Docstring text for the mpmath constant ``e``.
# BUGFIX: corrected the grammar "Mpmath can be evaluate" -> "Mpmath can
# evaluate" in the rendered documentation text.
e = r"""
The transcendental number `e` = 2.718281828... is the base of the
natural logarithm (:func:`~mpmath.ln`) and of the exponential function
(:func:`~mpmath.exp`).
Mpmath can evaluate `e` to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +e
2.7182818284590452353602874713526624977572470937
This shows digits 99991-100000 of `e` (the last digit is actually
a 5 when the decimal expansion is truncated, but here the nearest
rounding is used)::
>>> mp.dps = 100000
>>> str(e)[-10:]
'2100427166'
**Possible issues**
:data:`e` always rounds to the nearest floating-point number
when used, and mathematical identities involving `e` may not
hold in floating-point arithmetic. For example, ``ln(e)``
might not evaluate exactly to 1.
In particular, don't use ``e**x`` to compute the exponential
function. Use ``exp(x)`` instead; this is both faster and more
accurate.
"""

# Docstring text for the mpmath constant ``phi`` (the golden ratio).
phi = r"""
Represents the golden ratio `\phi = (1+\sqrt 5)/2`,
approximately equal to 1.6180339887. To high precision,
its value is::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +phi
1.6180339887498948482045868343656381177203091798058
Formulas for the golden ratio include the following::
>>> (1+sqrt(5))/2
1.6180339887498948482045868343656381177203091798058
>>> findroot(lambda x: x**2-x-1, 1)
1.6180339887498948482045868343656381177203091798058
>>> limit(lambda n: fib(n+1)/fib(n), inf)
1.6180339887498948482045868343656381177203091798058
"""

# Docstring text for the mpmath constant ``euler`` (Euler-Mascheroni gamma).
euler = r"""
Euler's constant or the Euler-Mascheroni constant `\gamma`
= 0.57721566... is a number of central importance to
number theory and special functions. It is defined as the limit
.. math ::
\gamma = \lim_{n\to\infty} H_n - \log n
where `H_n = 1 + \frac{1}{2} + \ldots + \frac{1}{n}` is a harmonic
number (see :func:`~mpmath.harmonic`).
Evaluation of `\gamma` is supported at arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +euler
0.57721566490153286060651209008240243104215933593992
We can also compute `\gamma` directly from the definition,
although this is less efficient::
>>> limit(lambda n: harmonic(n)-log(n), inf)
0.57721566490153286060651209008240243104215933593992
This shows digits 9991-10000 of `\gamma` (the last digit is actually
a 5 when the decimal expansion is truncated, but here the nearest
rounding is used)::
>>> mp.dps = 10000
>>> str(euler)[-10:]
'4679858166'
Integrals, series, and representations for `\gamma` in terms of
special functions include the following (there are many others)::
>>> mp.dps = 25
>>> -quad(lambda x: exp(-x)*log(x), [0,inf])
0.5772156649015328606065121
>>> quad(lambda x,y: (x-1)/(1-x*y)/log(x*y), [0,1], [0,1])
0.5772156649015328606065121
>>> nsum(lambda k: 1/k-log(1+1/k), [1,inf])
0.5772156649015328606065121
>>> nsum(lambda k: (-1)**k*zeta(k)/k, [2,inf])
0.5772156649015328606065121
>>> -diff(gamma, 1)
0.5772156649015328606065121
>>> limit(lambda x: 1/x-gamma(x), 0)
0.5772156649015328606065121
>>> limit(lambda x: zeta(x)-1/(x-1), 1)
0.5772156649015328606065121
>>> (log(2*pi*nprod(lambda n:
... exp(-2+2/n)*(1+2/n)**n, [1,inf]))-3)/2
0.5772156649015328606065121
For generalizations of the identities `\gamma = -\Gamma'(1)`
and `\gamma = \lim_{x\to1} \zeta(x)-1/(x-1)`, see
:func:`~mpmath.psi` and :func:`~mpmath.stieltjes` respectively.
"""

# Docstring text for the mpmath constant ``catalan``.
catalan = r"""
Catalan's constant `K` = 0.91596559... is given by the infinite
series
.. math ::
K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}.
Mpmath can evaluate it to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +catalan
0.91596559417721901505460351493238411077414937428167
One can also compute `K` directly from the definition, although
this is significantly less efficient::
>>> nsum(lambda k: (-1)**k/(2*k+1)**2, [0, inf])
0.91596559417721901505460351493238411077414937428167
This shows digits 9991-10000 of `K` (the last digit is actually
a 3 when the decimal expansion is truncated, but here the nearest
rounding is used)::
>>> mp.dps = 10000
>>> str(catalan)[-10:]
'9537871504'
Catalan's constant has numerous integral representations::
>>> mp.dps = 50
>>> quad(lambda x: -log(x)/(1+x**2), [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x: atan(x)/x, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x: ellipk(x**2)/2, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x,y: 1/(1+(x*y)**2), [0, 1], [0, 1])
0.91596559417721901505460351493238411077414937428167
As well as series representations::
>>> pi*log(sqrt(3)+2)/8 + 3*nsum(lambda n:
... (fac(n)/(2*n+1))**2/fac(2*n), [0, inf])/8
0.91596559417721901505460351493238411077414937428167
>>> 1-nsum(lambda n: n*zeta(2*n+1)/16**n, [1,inf])
0.91596559417721901505460351493238411077414937428167
"""

# Docstring text for the mpmath constant ``khinchin``.
khinchin = r"""
Khinchin's constant `K` = 2.68542... is a number that
appears in the theory of continued fractions. Mpmath can evaluate
it to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +khinchin
2.6854520010653064453097148354817956938203822939945
An integral representation is::
>>> I = quad(lambda x: log((1-x**2)/sincpi(x))/x/(1+x), [0, 1])
>>> 2*exp(1/log(2)*I)
2.6854520010653064453097148354817956938203822939945
The computation of ``khinchin`` is based on an efficient
implementation of the following series::
>>> f = lambda n: (zeta(2*n)-1)/n*sum((-1)**(k+1)/mpf(k)
... for k in range(1,2*int(n)))
>>> exp(nsum(f, [1,inf])/log(2))
2.6854520010653064453097148354817956938203822939945
"""
glaisher = r"""
Glaisher's constant `A`, also known as the Glaisher-Kinkelin
constant, is a number approximately equal to 1.282427129 that
sometimes appears in formulas related to gamma and zeta functions.
It is also related to the Barnes G-function (see :func:`~mpmath.barnesg`).
The constant is defined as `A = \exp(1/12-\zeta'(-1))` where
`\zeta'(s)` denotes the derivative of the Riemann zeta function
(see :func:`~mpmath.zeta`).
Mpmath can evaluate Glaisher's constant to arbitrary precision:
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +glaisher
1.282427129100622636875342568869791727767688927325
We can verify that the value computed by :data:`glaisher` is
correct using mpmath's facilities for numerical
differentiation and arbitrary evaluation of the zeta function:
>>> exp(mpf(1)/12 - diff(zeta, -1))
1.282427129100622636875342568869791727767688927325
Here is an example of an integral that can be evaluated in
terms of Glaisher's constant:
>>> mp.dps = 15
>>> quad(lambda x: log(gamma(x)), [1, 1.5])
-0.0428537406502909
>>> -0.5 - 7*log(2)/24 + log(pi)/4 + 3*log(glaisher)/2
-0.042853740650291
Mpmath computes Glaisher's constant by applying Euler-Maclaurin
summation to a slowly convergent series. The implementation is
reasonably efficient up to about 10,000 digits. See the source
code for additional details.
References:
http://mathworld.wolfram.com/Glaisher-KinkelinConstant.html
"""
apery = r"""
Represents Apery's constant, which is the irrational number
approximately equal to 1.2020569 given by
.. math ::
\zeta(3) = \sum_{k=1}^\infty\frac{1}{k^3}.
The calculation is based on an efficient hypergeometric
series. To 50 decimal places, the value is given by::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +apery
1.2020569031595942853997381615114499907649862923405
Other ways to evaluate Apery's constant using mpmath
include::
>>> zeta(3)
1.2020569031595942853997381615114499907649862923405
>>> -psi(2,1)/2
1.2020569031595942853997381615114499907649862923405
>>> 8*nsum(lambda k: 1/(2*k+1)**3, [0,inf])/7
1.2020569031595942853997381615114499907649862923405
>>> f = lambda k: 2/k**3/(exp(2*pi*k)-1)
>>> 7*pi**3/180 - nsum(f, [1,inf])
1.2020569031595942853997381615114499907649862923405
This shows digits 9991-10000 of Apery's constant::
>>> mp.dps = 10000
>>> str(apery)[-10:]
'3189504235'
"""
mertens = r"""
Represents the Mertens or Meissel-Mertens constant, which is the
prime number analog of Euler's constant:
.. math ::
B_1 = \lim_{N\to\infty}
\left(\sum_{p_k \le N} \frac{1}{p_k} - \log \log N \right)
Here `p_k` denotes the `k`-th prime number. Other names for this
constant include the Hadamard-de la Vallee-Poussin constant or
the prime reciprocal constant.
The following gives the Mertens constant to 50 digits::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +mertens
0.2614972128476427837554268386086958590515666482612
References:
http://mathworld.wolfram.com/MertensConstant.html
"""
twinprime = r"""
Represents the twin prime constant, which is the factor `C_2`
featuring in the Hardy-Littlewood conjecture for the growth of the
twin prime counting function,
.. math ::
\pi_2(n) \sim 2 C_2 \frac{n}{\log^2 n}.
It is given by the product over primes
.. math ::
C_2 = \prod_{p\ge3} \frac{p(p-2)}{(p-1)^2} \approx 0.66016
Computing `C_2` to 50 digits::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +twinprime
0.66016181584686957392781211001455577843262336028473
References:
http://mathworld.wolfram.com/TwinPrimesConstant.html
"""
# Docstring for `ln` (module-level string constant, later installed as the
# function's __doc__); the full documentation lives on :func:`log`.
ln = r"""
Computes the natural logarithm of `x`, `\ln x`.
See :func:`~mpmath.log` for additional documentation."""
sqrt = r"""
``sqrt(x)`` gives the principal square root of `x`, `\sqrt x`.
For positive real numbers, the principal root is simply the
positive square root. For arbitrary complex numbers, the principal
square root is defined to satisfy `\sqrt x = \exp(\log(x)/2)`.
The function thus has a branch cut along the negative half real axis.
For all mpmath numbers ``x``, calling ``sqrt(x)`` is equivalent to
performing ``x**0.5``.
**Examples**
Basic examples and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sqrt(10)
3.16227766016838
>>> sqrt(100)
10.0
>>> sqrt(-4)
(0.0 + 2.0j)
>>> sqrt(1+1j)
(1.09868411346781 + 0.455089860562227j)
>>> sqrt(inf)
+inf
Square root evaluation is fast at huge precision::
>>> mp.dps = 50000
>>> a = sqrt(3)
>>> str(a)[-10:]
'9329332815'
:func:`mpmath.iv.sqrt` supports interval arguments::
>>> iv.dps = 15; iv.pretty = True
>>> iv.sqrt([16,100])
[4.0, 10.0]
>>> iv.sqrt(2)
[1.4142135623730949234, 1.4142135623730951455]
>>> iv.sqrt(2) ** 2
[1.9999999999999995559, 2.0000000000000004441]
"""
cbrt = r"""
``cbrt(x)`` computes the cube root of `x`, `x^{1/3}`. This
function is faster and more accurate than raising to a floating-point
fraction::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> 125**(mpf(1)/3)
mpf('4.9999999999999991')
>>> cbrt(125)
mpf('5.0')
Every nonzero complex number has three cube roots. This function
returns the cube root defined by `\exp(\log(x)/3)` where the
principal branch of the natural logarithm is used. Note that this
does not give a real cube root for negative real numbers::
>>> mp.pretty = True
>>> cbrt(-1)
(0.5 + 0.866025403784439j)
"""
exp = r"""
Computes the exponential function,
.. math ::
\exp(x) = e^x = \sum_{k=0}^{\infty} \frac{x^k}{k!}.
For complex numbers, the exponential function also satisfies
.. math ::
\exp(x+yi) = e^x (\cos y + i \sin y).
**Basic examples**
Some values of the exponential function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> exp(0)
1.0
>>> exp(1)
2.718281828459045235360287
>>> exp(-1)
0.3678794411714423215955238
>>> exp(inf)
+inf
>>> exp(-inf)
0.0
Arguments can be arbitrarily large::
>>> exp(10000)
8.806818225662921587261496e+4342
>>> exp(-10000)
1.135483865314736098540939e-4343
Evaluation is supported for interval arguments via
:func:`mpmath.iv.exp`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.exp([-inf,0])
[0.0, 1.0]
>>> iv.exp([0,1])
[1.0, 2.71828182845904523536028749558]
The exponential function can be evaluated efficiently to arbitrary
precision::
>>> mp.dps = 10000
>>> exp(pi) #doctest: +ELLIPSIS
23.140692632779269005729...8984304016040616
**Functional properties**
Numerical verification of Euler's identity for the complex
exponential function::
>>> mp.dps = 15
>>> exp(j*pi)+1
(0.0 + 1.22464679914735e-16j)
>>> chop(exp(j*pi)+1)
0.0
This recovers the coefficients (reciprocal factorials) in the
Maclaurin series expansion of exp::
>>> nprint(taylor(exp, 0, 5))
[1.0, 1.0, 0.5, 0.166667, 0.0416667, 0.00833333]
The exponential function is its own derivative and antiderivative::
>>> exp(pi)
23.1406926327793
>>> diff(exp, pi)
23.1406926327793
>>> quad(exp, [-inf, pi])
23.1406926327793
The exponential function can be evaluated using various methods,
including direct summation of the series, limits, and solving
the defining differential equation::
>>> nsum(lambda k: pi**k/fac(k), [0,inf])
23.1406926327793
>>> limit(lambda k: (1+pi/k)**k, inf)
23.1406926327793
>>> odefun(lambda t, x: x, 0, 1)(pi)
23.1406926327793
"""
cosh = r"""
Computes the hyperbolic cosine of `x`,
`\cosh(x) = (e^x + e^{-x})/2`. Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> cosh(0)
1.0
>>> cosh(1)
1.543080634815243778477906
>>> cosh(-inf), cosh(+inf)
(+inf, +inf)
The hyperbolic cosine is an even, convex function with
a global minimum at `x = 0`, having a Maclaurin series
that starts::
>>> nprint(chop(taylor(cosh, 0, 5)))
[1.0, 0.0, 0.5, 0.0, 0.0416667, 0.0]
Generalized to complex numbers, the hyperbolic cosine is
equivalent to a cosine with the argument rotated
in the imaginary direction, or `\cosh x = \cos ix`::
>>> cosh(2+3j)
(-3.724545504915322565473971 + 0.5118225699873846088344638j)
>>> cos(3-2j)
(-3.724545504915322565473971 + 0.5118225699873846088344638j)
"""
sinh = r"""
Computes the hyperbolic sine of `x`,
`\sinh(x) = (e^x - e^{-x})/2`. Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sinh(0)
0.0
>>> sinh(1)
1.175201193643801456882382
>>> sinh(-inf), sinh(+inf)
(-inf, +inf)
The hyperbolic sine is an odd function, with a Maclaurin
series that starts::
>>> nprint(chop(taylor(sinh, 0, 5)))
[0.0, 1.0, 0.0, 0.166667, 0.0, 0.00833333]
Generalized to complex numbers, the hyperbolic sine is
essentially a sine with a rotation `i` applied to
the argument; more precisely, `\sinh x = -i \sin ix`::
>>> sinh(2+3j)
(-3.590564589985779952012565 + 0.5309210862485198052670401j)
>>> j*sin(3-2j)
(-3.590564589985779952012565 + 0.5309210862485198052670401j)
"""
tanh = r"""
Computes the hyperbolic tangent of `x`,
`\tanh(x) = \sinh(x)/\cosh(x)`. Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> tanh(0)
0.0
>>> tanh(1)
0.7615941559557648881194583
>>> tanh(-inf), tanh(inf)
(-1.0, 1.0)
The hyperbolic tangent is an odd, sigmoidal function, similar
to the inverse tangent and error function. Its Maclaurin
series is::
>>> nprint(chop(taylor(tanh, 0, 5)))
[0.0, 1.0, 0.0, -0.333333, 0.0, 0.133333]
Generalized to complex numbers, the hyperbolic tangent is
essentially a tangent with a rotation `i` applied to
the argument; more precisely, `\tanh x = -i \tan ix`::
>>> tanh(2+3j)
(0.9653858790221331242784803 - 0.009884375038322493720314034j)
>>> j*tan(3-2j)
(0.9653858790221331242784803 - 0.009884375038322493720314034j)
"""
cos = r"""
Computes the cosine of `x`, `\cos(x)`.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> cos(pi/3)
0.5
>>> cos(100000001)
-0.9802850113244713353133243
>>> cos(2+3j)
(-4.189625690968807230132555 - 9.109227893755336597979197j)
>>> cos(inf)
nan
>>> nprint(chop(taylor(cos, 0, 6)))
[1.0, 0.0, -0.5, 0.0, 0.0416667, 0.0, -0.00138889]
Intervals are supported via :func:`mpmath.iv.cos`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.cos([0,1])
[0.540302305868139717400936602301, 1.0]
>>> iv.cos([0,2])
[-0.41614683654714238699756823214, 1.0]
"""
sin = r"""
Computes the sine of `x`, `\sin(x)`.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sin(pi/3)
0.8660254037844386467637232
>>> sin(100000001)
0.1975887055794968911438743
>>> sin(2+3j)
(9.1544991469114295734673 - 4.168906959966564350754813j)
>>> sin(inf)
nan
>>> nprint(chop(taylor(sin, 0, 6)))
[0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333, 0.0]
Intervals are supported via :func:`mpmath.iv.sin`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.sin([0,1])
[0.0, 0.841470984807896506652502331201]
>>> iv.sin([0,2])
[0.0, 1.0]
"""
tan = r"""
Computes the tangent of `x`, `\tan(x) = \frac{\sin(x)}{\cos(x)}`.
The tangent function is singular at `x = (n+1/2)\pi`, but
``tan(x)`` always returns a finite result since `(n+1/2)\pi`
cannot be represented exactly using floating-point arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> tan(pi/3)
1.732050807568877293527446
>>> tan(100000001)
-0.2015625081449864533091058
>>> tan(2+3j)
(-0.003764025641504248292751221 + 1.003238627353609801446359j)
>>> tan(inf)
nan
>>> nprint(chop(taylor(tan, 0, 6)))
[0.0, 1.0, 0.0, 0.333333, 0.0, 0.133333, 0.0]
Intervals are supported via :func:`mpmath.iv.tan`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.tan([0,1])
[0.0, 1.55740772465490223050697482944]
>>> iv.tan([0,2]) # Interval includes a singularity
[-inf, +inf]
"""
sec = r"""
Computes the secant of `x`, `\mathrm{sec}(x) = \frac{1}{\cos(x)}`.
The secant function is singular at `x = (n+1/2)\pi`, but
``sec(x)`` always returns a finite result since `(n+1/2)\pi`
cannot be represented exactly using floating-point arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sec(pi/3)
2.0
>>> sec(10000001)
-1.184723164360392819100265
>>> sec(2+3j)
(-0.04167496441114427004834991 + 0.0906111371962375965296612j)
>>> sec(inf)
nan
>>> nprint(chop(taylor(sec, 0, 6)))
[1.0, 0.0, 0.5, 0.0, 0.208333, 0.0, 0.0847222]
Intervals are supported via :func:`mpmath.iv.sec`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.sec([0,1])
[1.0, 1.85081571768092561791175326276]
>>> iv.sec([0,2]) # Interval includes a singularity
[-inf, +inf]
"""
csc = r"""
Computes the cosecant of `x`, `\mathrm{csc}(x) = \frac{1}{\sin(x)}`.
The cosecant function is singular at `x = n \pi`, but with the
exception of the point `x = 0`, ``csc(x)`` returns a finite result
since `n \pi` cannot be represented exactly using floating-point
arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> csc(pi/3)
1.154700538379251529018298
>>> csc(10000001)
-1.864910497503629858938891
>>> csc(2+3j)
(0.09047320975320743980579048 + 0.04120098628857412646300981j)
>>> csc(inf)
nan
Intervals are supported via :func:`mpmath.iv.csc`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.csc([0,1]) # Interval includes a singularity
[1.18839510577812121626159943988, +inf]
>>> iv.csc([0,2])
[1.0, +inf]
"""
cot = r"""
Computes the cotangent of `x`,
`\mathrm{cot}(x) = \frac{1}{\tan(x)} = \frac{\cos(x)}{\sin(x)}`.
The cotangent function is singular at `x = n \pi`, but with the
exception of the point `x = 0`, ``cot(x)`` returns a finite result
since `n \pi` cannot be represented exactly using floating-point
arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> cot(pi/3)
0.5773502691896257645091488
>>> cot(10000001)
1.574131876209625656003562
>>> cot(2+3j)
(-0.003739710376336956660117409 - 0.9967577965693583104609688j)
>>> cot(inf)
nan
Intervals are supported via :func:`mpmath.iv.cot`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.cot([0,1]) # Interval includes a singularity
[0.642092615934330703006419974862, +inf]
>>> iv.cot([1,2])
[-inf, +inf]
"""
acos = r"""
Computes the inverse cosine or arccosine of `x`, `\cos^{-1}(x)`.
Since `-1 \le \cos(x) \le 1` for real `x`, the inverse
cosine is real-valued only for `-1 \le x \le 1`. On this interval,
:func:`~mpmath.acos` is defined to be a monotonically decreasing
function assuming values between `+\pi` and `0`.
Basic values are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> acos(-1)
3.141592653589793238462643
>>> acos(0)
1.570796326794896619231322
>>> acos(1)
0.0
>>> nprint(chop(taylor(acos, 0, 6)))
[1.5708, -1.0, 0.0, -0.166667, 0.0, -0.075, 0.0]
:func:`~mpmath.acos` is defined so as to be a proper inverse function of
`\cos(\theta)` for `0 \le \theta < \pi`.
We have `\cos(\cos^{-1}(x)) = x` for all `x`, but
`\cos^{-1}(\cos(x)) = x` only for `0 \le \Re[x] < \pi`::
>>> for x in [1, 10, -1, 2+3j, 10+3j]:
... print("%s %s" % (cos(acos(x)), acos(cos(x))))
...
1.0 1.0
(10.0 + 0.0j) 2.566370614359172953850574
-1.0 1.0
(2.0 + 3.0j) (2.0 + 3.0j)
(10.0 + 3.0j) (2.566370614359172953850574 - 3.0j)
The inverse cosine has two branch points: `x = \pm 1`. :func:`~mpmath.acos`
places the branch cuts along the line segments `(-\infty, -1)` and
`(+1, +\infty)`. In general,
.. math ::
\cos^{-1}(x) = \frac{\pi}{2} + i \log\left(ix + \sqrt{1-x^2} \right)
where the principal-branch log and square root are implied.
"""
asin = r"""
Computes the inverse sine or arcsine of `x`, `\sin^{-1}(x)`.
Since `-1 \le \sin(x) \le 1` for real `x`, the inverse
sine is real-valued only for `-1 \le x \le 1`.
On this interval, it is defined to be a monotonically increasing
function assuming values between `-\pi/2` and `\pi/2`.
Basic values are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> asin(-1)
-1.570796326794896619231322
>>> asin(0)
0.0
>>> asin(1)
1.570796326794896619231322
>>> nprint(chop(taylor(asin, 0, 6)))
[0.0, 1.0, 0.0, 0.166667, 0.0, 0.075, 0.0]
:func:`~mpmath.asin` is defined so as to be a proper inverse function of
`\sin(\theta)` for `-\pi/2 < \theta < \pi/2`.
We have `\sin(\sin^{-1}(x)) = x` for all `x`, but
`\sin^{-1}(\sin(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`::
>>> for x in [1, 10, -1, 1+3j, -2+3j]:
... print("%s %s" % (chop(sin(asin(x))), asin(sin(x))))
...
1.0 1.0
10.0 -0.5752220392306202846120698
-1.0 -1.0
(1.0 + 3.0j) (1.0 + 3.0j)
(-2.0 + 3.0j) (-1.141592653589793238462643 - 3.0j)
The inverse sine has two branch points: `x = \pm 1`. :func:`~mpmath.asin`
places the branch cuts along the line segments `(-\infty, -1)` and
`(+1, +\infty)`. In general,
.. math ::
\sin^{-1}(x) = -i \log\left(ix + \sqrt{1-x^2} \right)
where the principal-branch log and square root are implied.
"""
atan = r"""
Computes the inverse tangent or arctangent of `x`, `\tan^{-1}(x)`.
This is a real-valued function for all real `x`, with range
`(-\pi/2, \pi/2)`.
Basic values are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> atan(-inf)
-1.570796326794896619231322
>>> atan(-1)
-0.7853981633974483096156609
>>> atan(0)
0.0
>>> atan(1)
0.7853981633974483096156609
>>> atan(inf)
1.570796326794896619231322
>>> nprint(chop(taylor(atan, 0, 6)))
[0.0, 1.0, 0.0, -0.333333, 0.0, 0.2, 0.0]
The inverse tangent is often used to compute angles. However,
the atan2 function is often better for this as it preserves sign
(see :func:`~mpmath.atan2`).
:func:`~mpmath.atan` is defined so as to be a proper inverse function of
`\tan(\theta)` for `-\pi/2 < \theta < \pi/2`.
We have `\tan(\tan^{-1}(x)) = x` for all `x`, but
`\tan^{-1}(\tan(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`::
>>> mp.dps = 25
>>> for x in [1, 10, -1, 1+3j, -2+3j]:
... print("%s %s" % (tan(atan(x)), atan(tan(x))))
...
1.0 1.0
10.0 0.5752220392306202846120698
-1.0 -1.0
(1.0 + 3.0j) (1.000000000000000000000001 + 3.0j)
(-2.0 + 3.0j) (1.141592653589793238462644 + 3.0j)
The inverse tangent has two branch points: `x = \pm i`. :func:`~mpmath.atan`
places the branch cuts along the line segments `(-i \infty, -i)` and
`(+i, +i \infty)`. In general,
.. math ::
\tan^{-1}(x) = \frac{i}{2}\left(\log(1-ix)-\log(1+ix)\right)
where the principal-branch log is implied.
"""
# --- Short docstrings for the reciprocal and inverse trig/hyperbolic helpers.
# Each assignment below is a module-level string constant that is later
# installed as the corresponding function's __doc__.
acot = r"""Computes the inverse cotangent of `x`,
`\mathrm{cot}^{-1}(x) = \tan^{-1}(1/x)`."""
asec = r"""Computes the inverse secant of `x`,
`\mathrm{sec}^{-1}(x) = \cos^{-1}(1/x)`."""
acsc = r"""Computes the inverse cosecant of `x`,
`\mathrm{csc}^{-1}(x) = \sin^{-1}(1/x)`."""
coth = r"""Computes the hyperbolic cotangent of `x`,
`\mathrm{coth}(x) = \frac{\cosh(x)}{\sinh(x)}`.
"""
sech = r"""Computes the hyperbolic secant of `x`,
`\mathrm{sech}(x) = \frac{1}{\cosh(x)}`.
"""
csch = r"""Computes the hyperbolic cosecant of `x`,
`\mathrm{csch}(x) = \frac{1}{\sinh(x)}`.
"""
acosh = r"""Computes the inverse hyperbolic cosine of `x`,
`\mathrm{cosh}^{-1}(x) = \log(x+\sqrt{x+1}\sqrt{x-1})`.
"""
asinh = r"""Computes the inverse hyperbolic sine of `x`,
`\mathrm{sinh}^{-1}(x) = \log(x+\sqrt{1+x^2})`.
"""
atanh = r"""Computes the inverse hyperbolic tangent of `x`,
`\mathrm{tanh}^{-1}(x) = \frac{1}{2}\left(\log(1+x)-\log(1-x)\right)`.
"""
acoth = r"""Computes the inverse hyperbolic cotangent of `x`,
`\mathrm{coth}^{-1}(x) = \tanh^{-1}(1/x)`."""
asech = r"""Computes the inverse hyperbolic secant of `x`,
`\mathrm{sech}^{-1}(x) = \cosh^{-1}(1/x)`."""
acsch = r"""Computes the inverse hyperbolic cosecant of `x`,
`\mathrm{csch}^{-1}(x) = \sinh^{-1}(1/x)`."""
sinpi = r"""
Computes `\sin(\pi x)`, more accurately than the expression
``sin(pi*x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sinpi(10**10), sin(pi*(10**10))
(0.0, -2.23936276195592e-6)
>>> sinpi(10**10+0.5), sin(pi*(10**10+0.5))
(1.0, 0.999999999998721)
"""
cospi = r"""
Computes `\cos(\pi x)`, more accurately than the expression
``cos(pi*x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> cospi(10**10), cos(pi*(10**10))
(1.0, 0.999999999997493)
>>> cospi(10**10+0.5), cos(pi*(10**10+0.5))
(0.0, 1.59960492420134e-6)
"""
sinc = r"""
``sinc(x)`` computes the unnormalized sinc function, defined as
.. math ::
\mathrm{sinc}(x) = \begin{cases}
\sin(x)/x, & \mbox{if } x \ne 0 \\
1, & \mbox{if } x = 0.
\end{cases}
See :func:`~mpmath.sincpi` for the normalized sinc function.
Simple values and limits include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sinc(0)
1.0
>>> sinc(1)
0.841470984807897
>>> sinc(inf)
0.0
The integral of the sinc function is the sine integral Si::
>>> quad(sinc, [0, 1])
0.946083070367183
>>> si(1)
0.946083070367183
"""
# Docstring for sincpi (module-level string constant, later installed as the
# function's __doc__). Fix: the sentence "Like, :func:`~mpmath.sinpi`" had a
# misplaced comma after "Like".
sincpi = r"""
``sincpi(x)`` computes the normalized sinc function, defined as
.. math ::
\mathrm{sinc}_{\pi}(x) = \begin{cases}
\sin(\pi x)/(\pi x), & \mbox{if } x \ne 0 \\
1, & \mbox{if } x = 0.
\end{cases}
Equivalently, we have
`\mathrm{sinc}_{\pi}(x) = \mathrm{sinc}(\pi x)`.
The normalization entails that the function integrates
to unity over the entire real line::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quadosc(sincpi, [-inf, inf], period=2.0)
1.0
Like :func:`~mpmath.sinpi`, :func:`~mpmath.sincpi` is evaluated accurately
at its roots::
>>> sincpi(10)
0.0
"""
expj = r"""
Convenience function for computing `e^{ix}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expj(0)
(1.0 + 0.0j)
>>> expj(-1)
(0.5403023058681397174009366 - 0.8414709848078965066525023j)
>>> expj(j)
(0.3678794411714423215955238 + 0.0j)
>>> expj(1+j)
(0.1987661103464129406288032 + 0.3095598756531121984439128j)
"""
expjpi = r"""
Convenience function for computing `e^{i \pi x}`.
Evaluation is accurate near zeros (see also :func:`~mpmath.cospi`,
:func:`~mpmath.sinpi`)::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expjpi(0)
(1.0 + 0.0j)
>>> expjpi(1)
(-1.0 + 0.0j)
>>> expjpi(0.5)
(0.0 + 1.0j)
>>> expjpi(-1)
(-1.0 + 0.0j)
>>> expjpi(j)
(0.04321391826377224977441774 + 0.0j)
>>> expjpi(1+j)
(-0.04321391826377224977441774 + 0.0j)
"""
floor = r"""
Computes the floor of `x`, `\lfloor x \rfloor`, defined as
the largest integer less than or equal to `x`::
>>> from mpmath import *
>>> mp.pretty = False
>>> floor(3.5)
mpf('3.0')
.. note ::
:func:`~mpmath.floor`, :func:`~mpmath.ceil` and :func:`~mpmath.nint` return a
floating-point number, not a Python ``int``. If `\lfloor x \rfloor` is
too large to be represented exactly at the present working precision,
the result will be rounded, not necessarily in the direction
implied by the mathematical definition of the function.
To avoid rounding, use *prec=0*::
>>> mp.dps = 15
>>> print(int(floor(10**30+1)))
1000000000000000019884624838656
>>> print(int(floor(10**30+1, prec=0)))
1000000000000000000000000000001
The floor function is defined for complex numbers and
acts on the real and imaginary parts separately::
>>> floor(3.25+4.75j)
mpc(real='3.0', imag='4.0')
"""
ceil = r"""
Computes the ceiling of `x`, `\lceil x \rceil`, defined as
the smallest integer greater than or equal to `x`::
>>> from mpmath import *
>>> mp.pretty = False
>>> ceil(3.5)
mpf('4.0')
The ceiling function is defined for complex numbers and
acts on the real and imaginary parts separately::
>>> ceil(3.25+4.75j)
mpc(real='4.0', imag='5.0')
See notes about rounding for :func:`~mpmath.floor`.
"""
nint = r"""
Evaluates the nearest integer function, `\mathrm{nint}(x)`.
This gives the nearest integer to `x`; on a tie, it
gives the nearest even integer::
>>> from mpmath import *
>>> mp.pretty = False
>>> nint(3.2)
mpf('3.0')
>>> nint(3.8)
mpf('4.0')
>>> nint(3.5)
mpf('4.0')
>>> nint(4.5)
mpf('4.0')
The nearest integer function is defined for complex numbers and
acts on the real and imaginary parts separately::
>>> nint(3.25+4.75j)
mpc(real='3.0', imag='5.0')
See notes about rounding for :func:`~mpmath.floor`.
"""
frac = r"""
Gives the fractional part of `x`, defined as
`\mathrm{frac}(x) = x - \lfloor x \rfloor` (see :func:`~mpmath.floor`).
In effect, this computes `x` modulo 1, or `x+n` where
`n \in \mathbb{Z}` is such that `x+n \in [0,1)`::
>>> from mpmath import *
>>> mp.pretty = False
>>> frac(1.25)
mpf('0.25')
>>> frac(3)
mpf('0.0')
>>> frac(-1.25)
mpf('0.75')
For a complex number, the fractional part function applies to
the real and imaginary parts separately::
>>> frac(2.25+3.75j)
mpc(real='0.25', imag='0.75')
Plotted, the fractional part function gives a sawtooth
wave. The Fourier series coefficients have a simple
form::
>>> mp.dps = 15
>>> nprint(fourier(lambda x: frac(x)-0.5, [0,1], 4))
([0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -0.31831, -0.159155, -0.106103, -0.0795775])
>>> nprint([-1/(pi*k) for k in range(1,5)])
[-0.31831, -0.159155, -0.106103, -0.0795775]
.. note::
The fractional part is sometimes defined as a symmetric
function, i.e. returning `-\mathrm{frac}(-x)` if `x < 0`.
This convention is used, for instance, by Mathematica's
``FractionalPart``.
"""
sign = r"""
Returns the sign of `x`, defined as `\mathrm{sign}(x) = x / |x|`
(with the special case `\mathrm{sign}(0) = 0`)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> sign(10)
mpf('1.0')
>>> sign(-10)
mpf('-1.0')
>>> sign(0)
mpf('0.0')
Note that the sign function is also defined for complex numbers,
for which it gives the projection onto the unit circle::
>>> mp.dps = 15; mp.pretty = True
>>> sign(1+j)
(0.707106781186547 + 0.707106781186547j)
"""
arg = r"""
Computes the complex argument (phase) of `x`, defined as the
signed angle between the positive real axis and `x` in the
complex plane::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> arg(3)
0.0
>>> arg(3+3j)
0.785398163397448
>>> arg(3j)
1.5707963267949
>>> arg(-3)
3.14159265358979
>>> arg(-3j)
-1.5707963267949
The angle is defined to satisfy `-\pi < \arg(x) \le \pi` and
with the sign convention that a nonnegative imaginary part
results in a nonnegative argument.
The value returned by :func:`~mpmath.arg` is an ``mpf`` instance.
"""
fabs = r"""
Returns the absolute value of `x`, `|x|`. Unlike :func:`abs`,
:func:`~mpmath.fabs` converts non-mpmath numbers (such as ``int``)
into mpmath numbers::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fabs(3)
mpf('3.0')
>>> fabs(-3)
mpf('3.0')
>>> fabs(3+4j)
mpf('5.0')
"""
re = r"""
Returns the real part of `x`, `\Re(x)`. :func:`~mpmath.re`
converts a non-mpmath number to an mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> re(3)
mpf('3.0')
>>> re(-1+4j)
mpf('-1.0')
"""
im = r"""
Returns the imaginary part of `x`, `\Im(x)`. :func:`~mpmath.im`
converts a non-mpmath number to an mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> im(3)
mpf('0.0')
>>> im(-1+4j)
mpf('4.0')
"""
# Docstring for conj (module-level string constant, later installed as the
# function's __doc__). Fix: the text incorrectly cross-referenced
# :func:`~mpmath.im` (copy-paste from the `im` docstring above) where it
# describes :func:`~mpmath.conj` itself; also "a mpmath" -> "an mpmath".
conj = r"""
Returns the complex conjugate of `x`, `\overline{x}`. Unlike
``x.conjugate()``, :func:`~mpmath.conj` converts `x` to an mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> conj(3)
mpf('3.0')
>>> conj(-1+4j)
mpc(real='-1.0', imag='-4.0')
"""
polar = r"""
Returns the polar representation of the complex number `z`
as a pair `(r, \phi)` such that `z = r e^{i \phi}`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> polar(-2)
(2.0, 3.14159265358979)
>>> polar(3-4j)
(5.0, -0.927295218001612)
"""
rect = r"""
Returns the complex number represented by polar
coordinates `(r, \phi)`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> chop(rect(2, pi))
-2.0
>>> rect(sqrt(2), -pi/4)
(1.0 - 1.0j)
"""
expm1 = r"""
Computes `e^x - 1`, accurately for small `x`.
Unlike the expression ``exp(x) - 1``, ``expm1(x)`` does not suffer from
potentially catastrophic cancellation::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> exp(1e-10)-1; print(expm1(1e-10))
1.00000008274037e-10
1.00000000005e-10
>>> exp(1e-20)-1; print(expm1(1e-20))
0.0
1.0e-20
>>> 1/(exp(1e-20)-1)
Traceback (most recent call last):
...
ZeroDivisionError
>>> 1/expm1(1e-20)
1.0e+20
Evaluation works for extremely tiny values::
>>> expm1(0)
0.0
>>> expm1('1e-10000000')
1.0e-10000000
"""
log1p = r"""
Computes `\log(1+x)`, accurately for small `x`.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> log(1+1e-10); print(mp.log1p(1e-10))
1.00000008269037e-10
9.9999999995e-11
>>> mp.log1p(1e-100j)
(5.0e-201 + 1.0e-100j)
>>> mp.log1p(0)
0.0
"""
powm1 = r"""
Computes `x^y - 1`, accurately when `x^y` is very close to 1.
This avoids potentially catastrophic cancellation::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> power(0.99999995, 1e-10) - 1
0.0
>>> powm1(0.99999995, 1e-10)
-5.00000012791934e-18
Powers exactly equal to 1, and only those powers, yield 0 exactly::
>>> powm1(-j, 4)
(0.0 + 0.0j)
>>> powm1(3, 0)
0.0
>>> powm1(fadd(-1, 1e-100, exact=True), 4)
-4.0e-100
Evaluation works for extremely tiny `y`::
>>> powm1(2, '1e-100000')
6.93147180559945e-100001
>>> powm1(j, '1e-1000')
(-1.23370055013617e-2000 + 1.5707963267949e-1000j)
"""
root = r"""
``root(z, n, k=0)`` computes an `n`-th root of `z`, i.e. returns a number
`r` that (up to possible approximation error) satisfies `r^n = z`.
(``nthroot`` is available as an alias for ``root``.)
Every complex number `z \ne 0` has `n` distinct `n`-th roots, which are
equidistant points on a circle with radius `|z|^{1/n}`, centered around the
origin. A specific root may be selected using the optional index
`k`. The roots are indexed counterclockwise, starting with `k = 0` for the root
closest to the positive real half-axis.
The `k = 0` root is the so-called principal `n`-th root, often denoted by
`\sqrt[n]{z}` or `z^{1/n}`, and also given by `\exp(\log(z) / n)`. If `z` is
a positive real number, the principal root is just the unique positive
`n`-th root of `z`. Under some circumstances, non-principal real roots exist:
for positive real `z`, `n` even, there is a negative root given by `k = n/2`;
for negative real `z`, `n` odd, there is a negative root given by `k = (n-1)/2`.
To obtain all roots with a simple expression, use
``[root(z,n,k) for k in range(n)]``.
An important special case, ``root(1, n, k)`` returns the `k`-th `n`-th root of
unity, `\zeta_k = e^{2 \pi i k / n}`. Alternatively, :func:`~mpmath.unitroots`
provides a slightly more convenient way to obtain the roots of unity,
including the option to compute only the primitive roots of unity.
Both `k` and `n` should be integers; `k` outside of ``range(n)`` will be
reduced modulo `n`. If `n` is negative, `x^{-1/n} = 1/x^{1/n}` (or
the equivalent reciprocal for a non-principal root with `k \ne 0`) is computed.
:func:`~mpmath.root` is implemented to use Newton's method for small
`n`. At high precision, this makes `x^{1/n}` not much more
expensive than the regular exponentiation, `x^n`. For very large
`n`, :func:`~mpmath.nthroot` falls back to using the exponential function.
**Examples**
:func:`~mpmath.nthroot`/:func:`~mpmath.root` is faster and more accurate than raising to a
floating-point fraction::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> 16807 ** (mpf(1)/5)
mpf('7.0000000000000009')
>>> root(16807, 5)
mpf('7.0')
>>> nthroot(16807, 5) # Alias
mpf('7.0')
A high-precision root::
>>> mp.dps = 50; mp.pretty = True
>>> nthroot(10, 5)
1.584893192461113485202101373391507013269442133825
>>> nthroot(10, 5) ** 5
10.0
Computing principal and non-principal square and cube roots::
>>> mp.dps = 15
>>> root(10, 2)
3.16227766016838
>>> root(10, 2, 1)
-3.16227766016838
>>> root(-10, 3)
(1.07721734501594 + 1.86579517236206j)
>>> root(-10, 3, 1)
-2.15443469003188
>>> root(-10, 3, 2)
(1.07721734501594 - 1.86579517236206j)
All the 7th roots of a complex number::
>>> for r in [root(3+4j, 7, k) for k in range(7)]:
... print("%s %s" % (r, r**7))
...
(1.24747270589553 + 0.166227124177353j) (3.0 + 4.0j)
(0.647824911301003 + 1.07895435170559j) (3.0 + 4.0j)
(-0.439648254723098 + 1.17920694574172j) (3.0 + 4.0j)
(-1.19605731775069 + 0.391492658196305j) (3.0 + 4.0j)
(-1.05181082538903 - 0.691023585965793j) (3.0 + 4.0j)
(-0.115529328478668 - 1.25318497558335j) (3.0 + 4.0j)
(0.907748109144957 - 0.871672518271819j) (3.0 + 4.0j)
Cube roots of unity::
>>> for k in range(3): print(root(1, 3, k))
...
1.0
(-0.5 + 0.866025403784439j)
(-0.5 - 0.866025403784439j)
Some exact high order roots::
>>> root(75**210, 105)
5625.0
>>> root(1, 128, 96)
(0.0 - 1.0j)
>>> root(4**128, 128, 96)
(0.0 - 4.0j)
"""
unitroots = r"""
``unitroots(n)`` returns `\zeta_0, \zeta_1, \ldots, \zeta_{n-1}`,
all the distinct `n`-th roots of unity, as a list. If the option
*primitive=True* is passed, only the primitive roots are returned.
Every `n`-th root of unity satisfies `(\zeta_k)^n = 1`. There are `n` distinct
roots for each `n` (`\zeta_k` and `\zeta_j` are the same when
`k = j \pmod n`), which form a regular polygon with vertices on the unit
circle. They are ordered counterclockwise with increasing `k`, starting
with `\zeta_0 = 1`.
**Examples**
The roots of unity up to `n = 4`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint(unitroots(1))
[1.0]
>>> nprint(unitroots(2))
[1.0, -1.0]
>>> nprint(unitroots(3))
[1.0, (-0.5 + 0.866025j), (-0.5 - 0.866025j)]
>>> nprint(unitroots(4))
[1.0, (0.0 + 1.0j), -1.0, (0.0 - 1.0j)]
Roots of unity form a geometric series that sums to 0::
>>> mp.dps = 50
>>> chop(fsum(unitroots(25)))
0.0
Primitive roots up to `n = 4`::
>>> mp.dps = 15
>>> nprint(unitroots(1, primitive=True))
[1.0]
>>> nprint(unitroots(2, primitive=True))
[-1.0]
>>> nprint(unitroots(3, primitive=True))
[(-0.5 + 0.866025j), (-0.5 - 0.866025j)]
>>> nprint(unitroots(4, primitive=True))
[(0.0 + 1.0j), (0.0 - 1.0j)]
There are only four primitive 12th roots::
>>> nprint(unitroots(12, primitive=True))
[(0.866025 + 0.5j), (-0.866025 + 0.5j), (-0.866025 - 0.5j), (0.866025 - 0.5j)]
The `n`-th roots of unity form a group, the cyclic group of order `n`.
Any primitive root `r` is a generator for this group, meaning that
`r^0, r^1, \ldots, r^{n-1}` gives the whole set of unit roots (in
some permuted order)::
>>> for r in unitroots(6): print(r)
...
1.0
(0.5 + 0.866025403784439j)
(-0.5 + 0.866025403784439j)
-1.0
(-0.5 - 0.866025403784439j)
(0.5 - 0.866025403784439j)
>>> r = unitroots(6, primitive=True)[1]
>>> for k in range(6): print(chop(r**k))
...
1.0
(0.5 - 0.866025403784439j)
(-0.5 - 0.866025403784439j)
-1.0
(-0.5 + 0.866025403784438j)
(0.5 + 0.866025403784438j)
The number of primitive roots equals the Euler totient function `\phi(n)`::
>>> [len(unitroots(n, primitive=True)) for n in range(1,20)]
[1, 1, 2, 2, 4, 2, 6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16, 6, 18]
"""
log = r"""
Computes the base-`b` logarithm of `x`, `\log_b(x)`. If `b` is
unspecified, :func:`~mpmath.log` computes the natural (base `e`) logarithm
and is equivalent to :func:`~mpmath.ln`. In general, the base `b` logarithm
is defined in terms of the natural logarithm as
`\log_b(x) = \ln(x)/\ln(b)`.
By convention, we take `\log(0) = -\infty`.
The natural logarithm is real if `x > 0` and complex if `x < 0` or if
`x` is complex. The principal branch of the complex logarithm is
used, meaning that `\Im(\ln(x)) = \arg(x)` with `-\pi < \arg(x) \le \pi`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> log(1)
0.0
>>> log(2)
0.693147180559945
>>> log(1000,10)
3.0
>>> log(4, 16)
0.5
>>> log(j)
(0.0 + 1.5707963267949j)
>>> log(-1)
(0.0 + 3.14159265358979j)
>>> log(0)
-inf
>>> log(inf)
+inf
The natural logarithm is the antiderivative of `1/x`::
>>> quad(lambda x: 1/x, [1, 5])
1.6094379124341
>>> log(5)
1.6094379124341
>>> diff(log, 10)
0.1
The Taylor series expansion of the natural logarithm around
`x = 1` has coefficients `(-1)^{n+1}/n`::
>>> nprint(taylor(log, 1, 7))
[0.0, 1.0, -0.5, 0.333333, -0.25, 0.2, -0.166667, 0.142857]
:func:`~mpmath.log` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> log(pi)
1.1447298858494001741434273513530587116472948129153
>>> log(pi, pi**3)
0.33333333333333333333333333333333333333333333333333
>>> mp.dps = 25
>>> log(3+4j)
(1.609437912434100374600759 + 0.9272952180016122324285125j)
"""
# Docstring text for mpmath's log10 (base-10 logarithm); attached to the
# function object elsewhere in the package.
log10 = r"""
Computes the base-10 logarithm of `x`, `\log_{10}(x)`. ``log10(x)``
is equivalent to ``log(x, 10)``.
"""
# Docstring text for mpmath's fmod (floating-point modulo); attached to the
# function object elsewhere in the package.
fmod = r"""
Converts `x` and `y` to mpmath numbers and returns `x \mod y`.
For mpmath numbers, this is equivalent to ``x % y``.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> fmod(100, pi)
2.61062773871641
You can use :func:`~mpmath.fmod` to compute fractional parts of numbers::
>>> fmod(10.25, 1)
0.25
"""
# Docstring text for mpmath's radians (degree-to-radian conversion); attached
# to the function object elsewhere in the package.
radians = r"""
Converts the degree angle `x` to radians::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> radians(60)
1.0471975511966
"""
# Docstring text for mpmath's degrees (radian-to-degree conversion); attached
# to the function object elsewhere in the package.
degrees = r"""
Converts the radian angle `x` to a degree angle::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> degrees(pi/3)
60.0
"""
atan2 = r"""
Computes the two-argument arctangent, `\mathrm{atan2}(y, x)`,
giving the signed angle between the positive `x`-axis and the
point `(x, y)` in the 2D plane. This function is defined for
real `x` and `y` only.
The two-argument arctangent essentially computes
`\mathrm{atan}(y/x)`, but accounts for the signs of both
`x` and `y` to give the angle for the correct quadrant. The
following examples illustrate the difference::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> atan2(1,1), atan(1/1.)
(0.785398163397448, 0.785398163397448)
>>> atan2(1,-1), atan(1/-1.)
(2.35619449019234, -0.785398163397448)
>>> atan2(-1,1), atan(-1/1.)
(-0.785398163397448, -0.785398163397448)
>>> atan2(-1,-1), atan(-1/-1.)
(-2.35619449019234, 0.785398163397448)
The angle convention is the same as that used for the complex
argument; see :func:`~mpmath.arg`.
"""
fibonacci = r"""
``fibonacci(n)`` computes the `n`-th Fibonacci number, `F(n)`. The
Fibonacci numbers are defined by the recurrence `F(n) = F(n-1) + F(n-2)`
with the initial values `F(0) = 0`, `F(1) = 1`. :func:`~mpmath.fibonacci`
extends this definition to arbitrary real and complex arguments
using the formula
.. math ::
F(z) = \frac{\phi^z - \cos(\pi z) \phi^{-z}}{\sqrt 5}
where `\phi` is the golden ratio. :func:`~mpmath.fibonacci` also uses this
continuous formula to compute `F(n)` for extremely large `n`, where
calculating the exact integer would be wasteful.
For convenience, :func:`~mpmath.fib` is available as an alias for
:func:`~mpmath.fibonacci`.
**Basic examples**
Some small Fibonacci numbers are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for i in range(10):
... print(fibonacci(i))
...
0.0
1.0
1.0
2.0
3.0
5.0
8.0
13.0
21.0
34.0
>>> fibonacci(50)
12586269025.0
The recurrence for `F(n)` extends backwards to negative `n`::
>>> for i in range(10):
... print(fibonacci(-i))
...
0.0
1.0
-1.0
2.0
-3.0
5.0
-8.0
13.0
-21.0
34.0
Large Fibonacci numbers will be computed approximately unless
the precision is set high enough::
>>> fib(200)
2.8057117299251e+41
>>> mp.dps = 45
>>> fib(200)
280571172992510140037611932413038677189525.0
:func:`~mpmath.fibonacci` can compute approximate Fibonacci numbers
of stupendous size::
>>> mp.dps = 15
>>> fibonacci(10**25)
3.49052338550226e+2089876402499787337692720
**Real and complex arguments**
The extended Fibonacci function is an analytic function. The
property `F(z) = F(z-1) + F(z-2)` holds for arbitrary `z`::
>>> mp.dps = 15
>>> fib(pi)
2.1170270579161
>>> fib(pi-1) + fib(pi-2)
2.1170270579161
>>> fib(3+4j)
(-5248.51130728372 - 14195.962288353j)
>>> fib(2+4j) + fib(1+4j)
(-5248.51130728372 - 14195.962288353j)
The Fibonacci function has infinitely many roots on the
negative half-real axis. The first root is at 0, the second is
close to -0.18, and then there are infinitely many roots that
asymptotically approach `-n+1/2`::
>>> findroot(fib, -0.2)
-0.183802359692956
>>> findroot(fib, -2)
-1.57077646820395
>>> findroot(fib, -17)
-16.4999999596115
>>> findroot(fib, -24)
-23.5000000000479
**Mathematical relationships**
For large `n`, `F(n+1)/F(n)` approaches the golden ratio::
>>> mp.dps = 50
>>> fibonacci(101)/fibonacci(100)
1.6180339887498948482045868343656381177203127439638
>>> +phi
1.6180339887498948482045868343656381177203091798058
The sum of reciprocal Fibonacci numbers converges to an irrational
number for which no closed form expression is known::
>>> mp.dps = 15
>>> nsum(lambda n: 1/fib(n), [1, inf])
3.35988566624318
Amazingly, however, the sum of odd-index reciprocal Fibonacci
numbers can be expressed in terms of a Jacobi theta function::
>>> nsum(lambda n: 1/fib(2*n+1), [0, inf])
1.82451515740692
>>> sqrt(5)*jtheta(2,0,(3-sqrt(5))/2)**2/4
1.82451515740692
Some related sums can be done in closed form::
>>> nsum(lambda k: 1/(1+fib(2*k+1)), [0, inf])
1.11803398874989
>>> phi - 0.5
1.11803398874989
>>> f = lambda k:(-1)**(k+1) / sum(fib(n)**2 for n in range(1,int(k+1)))
>>> nsum(f, [1, inf])
0.618033988749895
>>> phi-1
0.618033988749895
**References**
1. http://mathworld.wolfram.com/FibonacciNumber.html
"""
altzeta = r"""
Gives the Dirichlet eta function, `\eta(s)`, also known as the
alternating zeta function. This function is defined in analogy
with the Riemann zeta function as providing the sum of the
alternating series
.. math ::
\eta(s) = \sum_{k=0}^{\infty} \frac{(-1)^k}{(k+1)^s}
= 1-\frac{1}{2^s}+\frac{1}{3^s}-\frac{1}{4^s}+\ldots
The eta function, unlike the Riemann zeta function, is an entire
function, having a finite value for all complex `s`. The special case
`\eta(1) = \log(2)` gives the value of the alternating harmonic series.
The alternating zeta function may be expressed using the Riemann zeta function
as `\eta(s) = (1 - 2^{1-s}) \zeta(s)`. It can also be expressed
in terms of the Hurwitz zeta function, for example using
:func:`~mpmath.dirichlet` (see documentation for that function).
**Examples**
Some special values are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> altzeta(1)
0.693147180559945
>>> altzeta(0)
0.5
>>> altzeta(-1)
0.25
>>> altzeta(-2)
0.0
An example of a sum that can be computed more accurately and
efficiently via :func:`~mpmath.altzeta` than via numerical summation::
>>> sum(-(-1)**n / mpf(n)**2.5 for n in range(1, 100))
0.867204951503984
>>> altzeta(2.5)
0.867199889012184
At positive even integers, the Dirichlet eta function
evaluates to a rational multiple of a power of `\pi`::
>>> altzeta(2)
0.822467033424113
>>> pi**2/12
0.822467033424113
Like the Riemann zeta function, `\eta(s)`, approaches 1
as `s` approaches positive infinity, although it does
so from below rather than from above::
>>> altzeta(30)
0.999999999068682
>>> altzeta(inf)
1.0
>>> mp.pretty = False
>>> altzeta(1000, rounding='d')
mpf('0.99999999999999989')
>>> altzeta(1000, rounding='u')
mpf('1.0')
**References**
1. http://mathworld.wolfram.com/DirichletEtaFunction.html
2. http://en.wikipedia.org/wiki/Dirichlet_eta_function
"""
factorial = r"""
Computes the factorial, `x!`. For integers `n \ge 0`, we have
`n! = 1 \cdot 2 \cdots (n-1) \cdot n` and more generally the factorial
is defined for real or complex `x` by `x! = \Gamma(x+1)`.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for k in range(6):
... print("%s %s" % (k, fac(k)))
...
0 1.0
1 1.0
2 2.0
3 6.0
4 24.0
5 120.0
>>> fac(inf)
+inf
>>> fac(0.5), sqrt(pi)/2
(0.886226925452758, 0.886226925452758)
For large positive `x`, `x!` can be approximated by
Stirling's formula::
>>> x = 10**10
>>> fac(x)
2.32579620567308e+95657055186
>>> sqrt(2*pi*x)*(x/e)**x
2.32579597597705e+95657055186
:func:`~mpmath.fac` supports evaluation for astronomically large values::
>>> fac(10**30)
6.22311232304258e+29565705518096748172348871081098
Reciprocal factorials appear in the Taylor series of the
exponential function (among many other contexts)::
>>> nsum(lambda k: 1/fac(k), [0, inf]), exp(1)
(2.71828182845905, 2.71828182845905)
>>> nsum(lambda k: pi**k/fac(k), [0, inf]), exp(pi)
(23.1406926327793, 23.1406926327793)
"""
gamma = r"""
Computes the gamma function, `\Gamma(x)`. The gamma function is a
shifted version of the ordinary factorial, satisfying
`\Gamma(n) = (n-1)!` for integers `n > 0`. More generally, it
is defined by
.. math ::
\Gamma(x) = \int_0^{\infty} t^{x-1} e^{-t}\, dt
for any real or complex `x` with `\Re(x) > 0` and for `\Re(x) < 0`
by analytic continuation.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for k in range(1, 6):
... print("%s %s" % (k, gamma(k)))
...
1 1.0
2 1.0
3 2.0
4 6.0
5 24.0
>>> gamma(inf)
+inf
>>> gamma(0)
Traceback (most recent call last):
...
ValueError: gamma function pole
The gamma function of a half-integer is a rational multiple of
`\sqrt{\pi}`::
>>> gamma(0.5), sqrt(pi)
(1.77245385090552, 1.77245385090552)
>>> gamma(1.5), sqrt(pi)/2
(0.886226925452758, 0.886226925452758)
We can check the integral definition::
>>> gamma(3.5)
3.32335097044784
>>> quad(lambda t: t**2.5*exp(-t), [0,inf])
3.32335097044784
:func:`~mpmath.gamma` supports arbitrary-precision evaluation and
complex arguments::
>>> mp.dps = 50
>>> gamma(sqrt(3))
0.91510229697308632046045539308226554038315280564184
>>> mp.dps = 25
>>> gamma(2j)
(0.009902440080927490985955066 - 0.07595200133501806872408048j)
Arguments can also be large. Note that the gamma function grows
very quickly::
>>> mp.dps = 15
>>> gamma(10**20)
1.9328495143101e+1956570551809674817225
"""
psi = r"""
Gives the polygamma function of order `m` of `z`, `\psi^{(m)}(z)`.
Special cases are known as the *digamma function* (`\psi^{(0)}(z)`),
the *trigamma function* (`\psi^{(1)}(z)`), etc. The polygamma
functions are defined as the logarithmic derivatives of the gamma
function:
.. math ::
\psi^{(m)}(z) = \left(\frac{d}{dz}\right)^{m+1} \log \Gamma(z)
In particular, `\psi^{(0)}(z) = \Gamma'(z)/\Gamma(z)`. In the
present implementation of :func:`~mpmath.psi`, the order `m` must be a
nonnegative integer, while the argument `z` may be an arbitrary
complex number (with the exception of the polygamma function's poles
at `z = 0, -1, -2, \ldots`).
**Examples**
For various rational arguments, the polygamma function reduces to
a combination of standard mathematical constants::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> psi(0, 1), -euler
(-0.5772156649015328606065121, -0.5772156649015328606065121)
>>> psi(1, '1/4'), pi**2+8*catalan
(17.19732915450711073927132, 17.19732915450711073927132)
>>> psi(2, '1/2'), -14*apery
(-16.82879664423431999559633, -16.82879664423431999559633)
The polygamma functions are derivatives of each other::
>>> diff(lambda x: psi(3, x), pi), psi(4, pi)
(-0.1105749312578862734526952, -0.1105749312578862734526952)
>>> quad(lambda x: psi(4, x), [2, 3]), psi(3,3)-psi(3,2)
(-0.375, -0.375)
The digamma function diverges logarithmically as `z \to \infty`,
while higher orders tend to zero::
>>> psi(0,inf), psi(1,inf), psi(2,inf)
(+inf, 0.0, 0.0)
Evaluation for a complex argument::
>>> psi(2, -1-2j)
(0.03902435405364952654838445 + 0.1574325240413029954685366j)
Evaluation is supported for large orders `m` and/or large
arguments `z`::
>>> psi(3, 10**100)
2.0e-300
>>> psi(250, 10**30+10**20*j)
(-1.293142504363642687204865e-7010 + 3.232856260909107391513108e-7018j)
**Application to infinite series**
Any infinite series where the summand is a rational function of
the index `k` can be evaluated in closed form in terms of polygamma
functions of the roots and poles of the summand::
>>> a = sqrt(2)
>>> b = sqrt(3)
>>> nsum(lambda k: 1/((k+a)**2*(k+b)), [0, inf])
0.4049668927517857061917531
>>> (psi(0,a)-psi(0,b)-a*psi(1,a)+b*psi(1,a))/(a-b)**2
0.4049668927517857061917531
This follows from the series representation (`m > 0`)
.. math ::
\psi^{(m)}(z) = (-1)^{m+1} m! \sum_{k=0}^{\infty}
\frac{1}{(z+k)^{m+1}}.
Since the roots of a polynomial may be complex, it is sometimes
necessary to use the complex polygamma function to evaluate
an entirely real-valued sum::
>>> nsum(lambda k: 1/(k**2-2*k+3), [0, inf])
1.694361433907061256154665
>>> nprint(polyroots([1,-2,3]))
[(1.0 - 1.41421j), (1.0 + 1.41421j)]
>>> r1 = 1-sqrt(2)*j
>>> r2 = r1.conjugate()
>>> (psi(0,-r2)-psi(0,-r1))/(r1-r2)
(1.694361433907061256154665 + 0.0j)
"""
# Docstring text for mpmath's digamma, the order-0 polygamma function;
# attached to the function object elsewhere in the package.
digamma = r"""
Shortcut for ``psi(0,z)``.
"""
harmonic = r"""
If `n` is an integer, ``harmonic(n)`` gives a floating-point
approximation of the `n`-th harmonic number `H(n)`, defined as
.. math ::
H(n) = 1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n}
The first few harmonic numbers are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(8):
... print("%s %s" % (n, harmonic(n)))
...
0 0.0
1 1.0
2 1.5
3 1.83333333333333
4 2.08333333333333
5 2.28333333333333
6 2.45
7 2.59285714285714
The infinite harmonic series `1 + 1/2 + 1/3 + \ldots` diverges::
>>> harmonic(inf)
+inf
:func:`~mpmath.harmonic` is evaluated using the digamma function rather
than by summing the harmonic series term by term. It can therefore
be computed quickly for arbitrarily large `n`, and even for
nonintegral arguments::
>>> harmonic(10**100)
230.835724964306
>>> harmonic(0.5)
0.613705638880109
>>> harmonic(3+4j)
(2.24757548223494 + 0.850502209186044j)
:func:`~mpmath.harmonic` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> harmonic(11)
3.0198773448773448773448773448773448773448773448773
>>> harmonic(pi)
1.8727388590273302654363491032336134987519132374152
The harmonic series diverges, but at a glacial pace. It is possible
to calculate the exact number of terms required before the sum
exceeds a given amount, say 100::
>>> mp.dps = 50
>>> v = 10**findroot(lambda x: harmonic(10**x) - 100, 10)
>>> v
15092688622113788323693563264538101449859496.864101
>>> v = int(ceil(v))
>>> print(v)
15092688622113788323693563264538101449859497
>>> harmonic(v-1)
99.999999999999999999999999999999999999999999942747
>>> harmonic(v)
100.000000000000000000000000000000000000000000009
"""
bernoulli = r"""
Computes the nth Bernoulli number, `B_n`, for any integer `n \ge 0`.
The Bernoulli numbers are rational numbers, but this function
returns a floating-point approximation. To obtain an exact
fraction, use :func:`~mpmath.bernfrac` instead.
**Examples**
Numerical values of the first few Bernoulli numbers::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(15):
... print("%s %s" % (n, bernoulli(n)))
...
0 1.0
1 -0.5
2 0.166666666666667
3 0.0
4 -0.0333333333333333
5 0.0
6 0.0238095238095238
7 0.0
8 -0.0333333333333333
9 0.0
10 0.0757575757575758
11 0.0
12 -0.253113553113553
13 0.0
14 1.16666666666667
Bernoulli numbers can be approximated with arbitrary precision::
>>> mp.dps = 50
>>> bernoulli(100)
-2.8382249570693706959264156336481764738284680928013e+78
Arbitrarily large `n` are supported::
>>> mp.dps = 15
>>> bernoulli(10**20 + 2)
3.09136296657021e+1876752564973863312327
The Bernoulli numbers are related to the Riemann zeta function
at integer arguments::
>>> -bernoulli(8) * (2*pi)**8 / (2*fac(8))
1.00407735619794
>>> zeta(8)
1.00407735619794
**Algorithm**
For small `n` (`n < 3000`) :func:`~mpmath.bernoulli` uses a recurrence
formula due to Ramanujan. All results in this range are cached,
so sequential computation of small Bernoulli numbers is
guaranteed to be fast.
For larger `n`, `B_n` is evaluated in terms of the Riemann zeta
function.
"""
stieltjes = r"""
For a nonnegative integer `n`, ``stieltjes(n)`` computes the
`n`-th Stieltjes constant `\gamma_n`, defined as the
`n`-th coefficient in the Laurent series expansion of the
Riemann zeta function around the pole at `s = 1`. That is,
we have:
.. math ::
\zeta(s) = \frac{1}{s-1} \sum_{n=0}^{\infty}
\frac{(-1)^n}{n!} \gamma_n (s-1)^n
More generally, ``stieltjes(n, a)`` gives the corresponding
coefficient `\gamma_n(a)` for the Hurwitz zeta function
`\zeta(s,a)` (with `\gamma_n = \gamma_n(1)`).
**Examples**
The zeroth Stieltjes constant is just Euler's constant `\gamma`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> stieltjes(0)
0.577215664901533
Some more values are::
>>> stieltjes(1)
-0.0728158454836767
>>> stieltjes(10)
0.000205332814909065
>>> stieltjes(30)
0.00355772885557316
>>> stieltjes(1000)
-1.57095384420474e+486
>>> stieltjes(2000)
2.680424678918e+1109
>>> stieltjes(1, 2.5)
-0.23747539175716
An alternative way to compute `\gamma_1`::
>>> diff(extradps(15)(lambda x: 1/(x-1) - zeta(x)), 1)
-0.0728158454836767
:func:`~mpmath.stieltjes` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> stieltjes(2)
-0.0096903631928723184845303860352125293590658061013408
**Algorithm**
:func:`~mpmath.stieltjes` numerically evaluates the integral in
the following representation due to Ainsworth, Howell and
Coffey [1], [2]:
.. math ::
\gamma_n(a) = \frac{\log^n a}{2a} - \frac{\log^{n+1}(a)}{n+1} +
\frac{2}{a} \Re \int_0^{\infty}
\frac{(x/a-i)\log^n(a-ix)}{(1+x^2/a^2)(e^{2\pi x}-1)} dx.
For some reference values with `a = 1`, see e.g. [4].
**References**
1. O. R. Ainsworth & L. W. Howell, "An integral representation of
the generalized Euler-Mascheroni constants", NASA Technical
Paper 2456 (1985),
http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19850014994_1985014994.pdf
2. M. W. Coffey, "The Stieltjes constants, their relation to the
`\eta_j` coefficients, and representation of the Hurwitz
zeta function", arXiv:0706.0343v1 http://arxiv.org/abs/0706.0343
3. http://mathworld.wolfram.com/StieltjesConstants.html
4. http://pi.lacim.uqam.ca/piDATA/stieltjesgamma.txt
"""
gammaprod = r"""
Given iterables `a` and `b`, ``gammaprod(a, b)`` computes the
product / quotient of gamma functions:
.. math ::
\frac{\Gamma(a_0) \Gamma(a_1) \cdots \Gamma(a_p)}
{\Gamma(b_0) \Gamma(b_1) \cdots \Gamma(b_q)}
Unlike direct calls to :func:`~mpmath.gamma`, :func:`~mpmath.gammaprod` considers
the entire product as a limit and evaluates this limit properly if
any of the numerator or denominator arguments are nonpositive
integers such that poles of the gamma function are encountered.
That is, :func:`~mpmath.gammaprod` evaluates
.. math ::
\lim_{\epsilon \to 0}
\frac{\Gamma(a_0+\epsilon) \Gamma(a_1+\epsilon) \cdots
\Gamma(a_p+\epsilon)}
{\Gamma(b_0+\epsilon) \Gamma(b_1+\epsilon) \cdots
\Gamma(b_q+\epsilon)}
In particular:
* If there are equally many poles in the numerator and the
denominator, the limit is a rational number times the remaining,
regular part of the product.
* If there are more poles in the numerator, :func:`~mpmath.gammaprod`
returns ``+inf``.
* If there are more poles in the denominator, :func:`~mpmath.gammaprod`
returns 0.
**Examples**
The reciprocal gamma function `1/\Gamma(x)` evaluated at `x = 0`::
>>> from mpmath import *
>>> mp.dps = 15
>>> gammaprod([], [0])
0.0
A limit::
>>> gammaprod([-4], [-3])
-0.25
>>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=1)
-0.25
>>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=-1)
-0.25
"""
beta = r"""
Computes the beta function,
`B(x,y) = \Gamma(x) \Gamma(y) / \Gamma(x+y)`.
The beta function is also commonly defined by the integral
representation
.. math ::
B(x,y) = \int_0^1 t^{x-1} (1-t)^{y-1} \, dt
**Examples**
For integer and half-integer arguments where all three gamma
functions are finite, the beta function becomes either a rational
number or a rational multiple of `\pi`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> beta(5, 2)
0.0333333333333333
>>> beta(1.5, 2)
0.266666666666667
>>> 16*beta(2.5, 1.5)
3.14159265358979
Where appropriate, :func:`~mpmath.beta` evaluates limits. A pole
of the beta function is taken to result in ``+inf``::
>>> beta(-0.5, 0.5)
0.0
>>> beta(-3, 3)
-0.333333333333333
>>> beta(-2, 3)
+inf
>>> beta(inf, 1)
0.0
>>> beta(inf, 0)
nan
:func:`~mpmath.beta` supports complex numbers and arbitrary precision
evaluation::
>>> beta(1, 2+j)
(0.4 - 0.2j)
>>> mp.dps = 25
>>> beta(j,0.5)
(1.079424249270925780135675 - 1.410032405664160838288752j)
>>> mp.dps = 50
>>> beta(pi, e)
0.037890298781212201348153837138927165984170287886464
Various integrals can be computed by means of the
beta function::
>>> mp.dps = 15
>>> quad(lambda t: t**2.5*(1-t)**2, [0, 1])
0.0230880230880231
>>> beta(3.5, 3)
0.0230880230880231
>>> quad(lambda t: sin(t)**4 * sqrt(cos(t)), [0, pi/2])
0.319504062596158
>>> beta(2.5, 0.75)/2
0.319504062596158
"""
betainc = r"""
``betainc(a, b, x1=0, x2=1, regularized=False)`` gives the generalized
incomplete beta function,
.. math ::
I_{x_1}^{x_2}(a,b) = \int_{x_1}^{x_2} t^{a-1} (1-t)^{b-1} dt.
When `x_1 = 0, x_2 = 1`, this reduces to the ordinary (complete)
beta function `B(a,b)`; see :func:`~mpmath.beta`.
With the keyword argument ``regularized=True``, :func:`~mpmath.betainc`
computes the regularized incomplete beta function
`I_{x_1}^{x_2}(a,b) / B(a,b)`. This is the cumulative distribution of the
beta distribution with parameters `a`, `b`.
.. note ::
Implementations of the incomplete beta function in some other
software use a different argument order. For example, Mathematica uses the
reversed argument order ``Beta[x1,x2,a,b]``. For the equivalent of SciPy's
three-argument incomplete beta integral (implicitly with `x1 = 0`), use
``betainc(a,b,0,x2,regularized=True)``.
**Examples**
Verifying that :func:`~mpmath.betainc` computes the integral in the
definition::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> x,y,a,b = 3, 4, 0, 6
>>> betainc(x, y, a, b)
-4010.4
>>> quad(lambda t: t**(x-1) * (1-t)**(y-1), [a, b])
-4010.4
The arguments may be arbitrary complex numbers::
>>> betainc(0.75, 1-4j, 0, 2+3j)
(0.2241657956955709603655887 + 0.3619619242700451992411724j)
With regularization::
>>> betainc(1, 2, 0, 0.25, regularized=True)
0.4375
>>> betainc(pi, e, 0, 1, regularized=True) # Complete
1.0
The beta integral satisfies some simple argument transformation
symmetries::
>>> mp.dps = 15
>>> betainc(2,3,4,5), -betainc(2,3,5,4), betainc(3,2,1-5,1-4)
(56.0833333333333, 56.0833333333333, 56.0833333333333)
The beta integral can often be evaluated analytically. For integer and
rational arguments, the incomplete beta function typically reduces to a
simple algebraic-logarithmic expression::
>>> mp.dps = 25
>>> identify(chop(betainc(0, 0, 3, 4)))
'-(log((9/8)))'
>>> identify(betainc(2, 3, 4, 5))
'(673/12)'
>>> identify(betainc(1.5, 1, 1, 2))
'((-12+sqrt(1152))/18)'
"""
binomial = r"""
Computes the binomial coefficient
.. math ::
{n \choose k} = \frac{n!}{k!(n-k)!}.
The binomial coefficient gives the number of ways that `k` items
can be chosen from a set of `n` items. More generally, the binomial
coefficient is a well-defined function of arbitrary real or
complex `n` and `k`, via the gamma function.
**Examples**
Generate Pascal's triangle::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint([binomial(n,k) for k in range(n+1)])
...
[1.0]
[1.0, 1.0]
[1.0, 2.0, 1.0]
[1.0, 3.0, 3.0, 1.0]
[1.0, 4.0, 6.0, 4.0, 1.0]
There is 1 way to select 0 items from the empty set, and 0 ways to
select 1 item from the empty set::
>>> binomial(0, 0)
1.0
>>> binomial(0, 1)
0.0
:func:`~mpmath.binomial` supports large arguments::
>>> binomial(10**20, 10**20-5)
8.33333333333333e+97
>>> binomial(10**20, 10**10)
2.60784095465201e+104342944813
Nonintegral binomial coefficients find use in series
expansions::
>>> nprint(taylor(lambda x: (1+x)**0.25, 0, 4))
[1.0, 0.25, -0.09375, 0.0546875, -0.0375977]
>>> nprint([binomial(0.25, k) for k in range(5)])
[1.0, 0.25, -0.09375, 0.0546875, -0.0375977]
An integral representation::
>>> n, k = 5, 3
>>> f = lambda t: exp(-j*k*t)*(1+exp(j*t))**n
>>> chop(quad(f, [-pi,pi])/(2*pi))
10.0
>>> binomial(n,k)
10.0
"""
rf = r"""
Computes the rising factorial or Pochhammer symbol,
.. math ::
x^{(n)} = x (x+1) \cdots (x+n-1) = \frac{\Gamma(x+n)}{\Gamma(x)}
where the rightmost expression is valid for nonintegral `n`.
**Examples**
For integral `n`, the rising factorial is a polynomial::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(taylor(lambda x: rf(x,n), 0, n))
...
[1.0]
[0.0, 1.0]
[0.0, 1.0, 1.0]
[0.0, 2.0, 3.0, 1.0]
[0.0, 6.0, 11.0, 6.0, 1.0]
Evaluation is supported for arbitrary arguments::
>>> rf(2+3j, 5.5)
(-7202.03920483347 - 3777.58810701527j)
"""
ff = r"""
Computes the falling factorial,
.. math ::
(x)_n = x (x-1) \cdots (x-n+1) = \frac{\Gamma(x+1)}{\Gamma(x-n+1)}
where the rightmost expression is valid for nonintegral `n`.
**Examples**
For integral `n`, the falling factorial is a polynomial::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(taylor(lambda x: ff(x,n), 0, n))
...
[1.0]
[0.0, 1.0]
[0.0, -1.0, 1.0]
[0.0, 2.0, -3.0, 1.0]
[0.0, -6.0, 11.0, -6.0, 1.0]
Evaluation is supported for arbitrary arguments::
>>> ff(2+3j, 5.5)
(-720.41085888203 + 316.101124983878j)
"""
fac2 = r"""
Computes the double factorial `x!!`, defined for integers
`x > 0` by
.. math ::
x!! = \begin{cases}
1 \cdot 3 \cdots (x-2) \cdot x & x \;\mathrm{odd} \\
2 \cdot 4 \cdots (x-2) \cdot x & x \;\mathrm{even}
\end{cases}
and more generally by [1]
.. math ::
x!! = 2^{x/2} \left(\frac{\pi}{2}\right)^{(\cos(\pi x)-1)/4}
\Gamma\left(\frac{x}{2}+1\right).
**Examples**
The integer sequence of double factorials begins::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint([fac2(n) for n in range(10)])
[1.0, 1.0, 2.0, 3.0, 8.0, 15.0, 48.0, 105.0, 384.0, 945.0]
For large `x`, double factorials follow a Stirling-like asymptotic
approximation::
>>> x = mpf(10000)
>>> fac2(x)
5.97272691416282e+17830
>>> sqrt(pi)*x**((x+1)/2)*exp(-x/2)
5.97262736954392e+17830
The recurrence formula `x!! = x (x-2)!!` can be reversed to
define the double factorial of negative odd integers (but
not negative even integers)::
>>> fac2(-1), fac2(-3), fac2(-5), fac2(-7)
(1.0, -1.0, 0.333333333333333, -0.0666666666666667)
>>> fac2(-2)
Traceback (most recent call last):
...
ValueError: gamma function pole
With the exception of the poles at negative even integers,
:func:`~mpmath.fac2` supports evaluation for arbitrary complex arguments.
The recurrence formula is valid generally::
>>> fac2(pi+2j)
(-1.3697207890154e-12 + 3.93665300979176e-12j)
>>> (pi+2j)*fac2(pi-2+2j)
(-1.3697207890154e-12 + 3.93665300979176e-12j)
Double factorials should not be confused with nested factorials,
which are immensely larger::
>>> fac(fac(20))
5.13805976125208e+43675043585825292774
>>> fac2(20)
3715891200.0
Double factorials appear, among other things, in series expansions
of Gaussian functions and the error function. Infinite series
include::
>>> nsum(lambda k: 1/fac2(k), [0, inf])
3.05940740534258
>>> sqrt(e)*(1+sqrt(pi/2)*erf(sqrt(2)/2))
3.05940740534258
>>> nsum(lambda k: 2**k/fac2(2*k-1), [1, inf])
4.06015693855741
>>> e * erf(1) * sqrt(pi)
4.06015693855741
A beautiful Ramanujan sum::
>>> nsum(lambda k: (-1)**k*(fac2(2*k-1)/fac2(2*k))**3, [0,inf])
0.90917279454693
>>> (gamma('9/8')/gamma('5/4')/gamma('7/8'))**2
0.90917279454693
**References**
1. http://functions.wolfram.com/GammaBetaErf/Factorial2/27/01/0002/
2. http://mathworld.wolfram.com/DoubleFactorial.html
"""
hyper = r"""
Evaluates the generalized hypergeometric function
.. math ::
\,_pF_q(a_1,\ldots,a_p; b_1,\ldots,b_q; z) =
\sum_{n=0}^\infty \frac{(a_1)_n (a_2)_n \ldots (a_p)_n}
{(b_1)_n(b_2)_n\ldots(b_q)_n} \frac{z^n}{n!}
where `(x)_n` denotes the rising factorial (see :func:`~mpmath.rf`).
The parameter lists ``a_s`` and ``b_s`` may contain integers,
real numbers, complex numbers, as well as exact fractions given in
the form of tuples `(p, q)`. :func:`~mpmath.hyper` is optimized to handle
integers and fractions more efficiently than arbitrary
floating-point parameters (since rational parameters are by
far the most common).
**Examples**
Verifying that :func:`~mpmath.hyper` gives the sum in the definition, by
comparison with :func:`~mpmath.nsum`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a,b,c,d = 2,3,4,5
>>> x = 0.25
>>> hyper([a,b],[c,d],x)
1.078903941164934876086237
>>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)*x**n/fac(n)
>>> nsum(fn, [0, inf])
1.078903941164934876086237
The parameters can be any combination of integers, fractions,
floats and complex numbers::
>>> a, b, c, d, e = 1, (-1,2), pi, 3+4j, (2,3)
>>> x = 0.2j
>>> hyper([a,b],[c,d,e],x)
(0.9923571616434024810831887 - 0.005753848733883879742993122j)
>>> b, e = -0.5, mpf(2)/3
>>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)/rf(e,n)*x**n/fac(n)
>>> nsum(fn, [0, inf])
(0.9923571616434024810831887 - 0.005753848733883879742993122j)
The `\,_0F_0` and `\,_1F_0` series are just elementary functions::
>>> a, z = sqrt(2), +pi
>>> hyper([],[],z)
23.14069263277926900572909
>>> exp(z)
23.14069263277926900572909
>>> hyper([a],[],z)
(-0.09069132879922920160334114 + 0.3283224323946162083579656j)
>>> (1-z)**(-a)
(-0.09069132879922920160334114 + 0.3283224323946162083579656j)
If any `a_k` coefficient is a nonpositive integer, the series terminates
into a finite polynomial::
>>> hyper([1,1,1,-3],[2,5],1)
0.7904761904761904761904762
>>> identify(_)
'(83/105)'
If any `b_k` is a nonpositive integer, the function is undefined (unless the
series terminates before the division by zero occurs)::
>>> hyper([1,1,1,-3],[-2,5],1)
Traceback (most recent call last):
...
ZeroDivisionError: pole in hypergeometric series
>>> hyper([1,1,1,-1],[-2,5],1)
1.1
Except for polynomial cases, the radius of convergence `R` of the hypergeometric
series is either `R = \infty` (if `p \le q`), `R = 1` (if `p = q+1`), or
`R = 0` (if `p > q+1`).
The analytic continuations of the functions with `p = q+1`, i.e. `\,_2F_1`,
`\,_3F_2`, `\,_4F_3`, etc, are all implemented and therefore these functions
can be evaluated for `|z| \ge 1`. The shortcuts :func:`~mpmath.hyp2f1`, :func:`~mpmath.hyp3f2`
are available to handle the most common cases (see their documentation),
but functions of higher degree are also supported via :func:`~mpmath.hyper`::
>>> hyper([1,2,3,4], [5,6,7], 1) # 4F3 at finite-valued branch point
1.141783505526870731311423
>>> hyper([4,5,6,7], [1,2,3], 1) # 4F3 at pole
+inf
>>> hyper([1,2,3,4,5], [6,7,8,9], 10) # 5F4
(1.543998916527972259717257 - 0.5876309929580408028816365j)
>>> hyper([1,2,3,4,5,6], [7,8,9,10,11], 1j) # 6F5
(0.9996565821853579063502466 + 0.0129721075905630604445669j)
Near `z = 1` with noninteger parameters::
>>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','41/8'], 1)
2.219433352235586121250027
>>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], 1)
+inf
>>> eps1 = extradps(6)(lambda: 1 - mpf('1e-6'))()
>>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], eps1)
2923978034.412973409330956
Please note that, as currently implemented, evaluation of `\,_pF_{p-1}`
with `p \ge 3` may be slow or inaccurate when `|z-1|` is small,
for some parameter values.
Evaluation may be aborted if convergence appears to be too slow.
The optional ``maxterms`` (limiting the number of series terms) and ``maxprec``
(limiting the internal precision) keyword arguments can be used
to control evaluation::
>>> hyper([1,2,3], [4,5,6], 10000)
Traceback (most recent call last):
...
NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
>>> hyper([1,2,3], [4,5,6], 10000, maxterms=10**6)
7.622806053177969474396918e+4310
Additional options include ``force_series`` (which forces direct use of
a hypergeometric series even if another evaluation method might work better)
and ``asymp_tol`` which controls the target tolerance for using
asymptotic series.
When `p > q+1`, ``hyper`` computes the (iterated) Borel sum of the divergent
series. For `\,_2F_0` the Borel sum has an analytic solution and can be
computed efficiently (see :func:`~mpmath.hyp2f0`). For higher degrees, the function
is evaluated first by attempting to sum it directly as an asymptotic
series (this only works for tiny `|z|`), and then by evaluating the Borel
regularized sum using numerical integration. Except for
special parameter combinations, this can be extremely slow.
>>> hyper([1,1], [], 0.5) # regularization of 2F0
(1.340965419580146562086448 + 0.8503366631752726568782447j)
>>> hyper([1,1,1,1], [1], 0.5) # regularization of 4F1
(1.108287213689475145830699 + 0.5327107430640678181200491j)
With the following magnitude of argument, the asymptotic series for `\,_3F_1`
gives only a few digits. Using Borel summation, ``hyper`` can produce
a value with full accuracy::
>>> mp.dps = 15
>>> hyper([2,0.5,4], [5.25], '0.08', force_series=True)
Traceback (most recent call last):
...
NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
>>> hyper([2,0.5,4], [5.25], '0.08', asymp_tol=1e-4)
1.0725535790737
>>> hyper([2,0.5,4], [5.25], '0.08')
(1.07269542893559 + 5.54668863216891e-5j)
>>> hyper([2,0.5,4], [5.25], '-0.08', asymp_tol=1e-4)
0.946344925484879
>>> hyper([2,0.5,4], [5.25], '-0.08')
0.946312503737771
>>> mp.dps = 25
>>> hyper([2,0.5,4], [5.25], '-0.08')
0.9463125037377662296700858
Note that with the positive `z` value, there is a complex part in the
correct result, which falls below the tolerance of the asymptotic series.
By default, a parameter that appears in both ``a_s`` and ``b_s`` will be removed
unless it is a nonpositive integer. This generally speeds up evaluation
by producing a hypergeometric function of lower order.
This optimization can be disabled by passing ``eliminate=False``.
>>> hyper([1,2,3], [4,5,3], 10000)
1.268943190440206905892212e+4321
>>> hyper([1,2,3], [4,5,3], 10000, eliminate=False)
Traceback (most recent call last):
...
NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
>>> hyper([1,2,3], [4,5,3], 10000, eliminate=False, maxterms=10**6)
1.268943190440206905892212e+4321
If a nonpositive integer `-n` appears in both ``a_s`` and ``b_s``, this parameter
cannot be unambiguously removed since it creates a term 0 / 0.
In this case the hypergeometric series is understood to terminate before
the division by zero occurs. This convention is consistent with Mathematica.
An alternative convention of eliminating the parameters can be toggled
with ``eliminate_all=True``:
>>> hyper([2,-1], [-1], 3)
7.0
>>> hyper([2,-1], [-1], 3, eliminate_all=True)
0.25
>>> hyper([2], [], 3)
0.25
"""
hypercomb = r"""
Computes a weighted combination of hypergeometric functions
.. math ::
\sum_{r=1}^N \left[ \prod_{k=1}^{l_r} {w_{r,k}}^{c_{r,k}}
\frac{\prod_{k=1}^{m_r} \Gamma(\alpha_{r,k})}{\prod_{k=1}^{n_r}
\Gamma(\beta_{r,k})}
\,_{p_r}F_{q_r}(a_{r,1},\ldots,a_{r,p_r}; b_{r,1},
\ldots, b_{r,q_r}; z_r)\right].
Typically the parameters are linear combinations of a small set of base
parameters; :func:`~mpmath.hypercomb` permits computing a correct value in
the case that some of the `\alpha`, `\beta`, `b` turn out to be
nonpositive integers, or if division by zero occurs for some `w^c`,
assuming that there are opposing singularities that cancel out.
The limit is computed by evaluating the function with the base
parameters perturbed, at a higher working precision.
The first argument should be a function that takes the perturbable
base parameters ``params`` as input and returns `N` tuples
``(w, c, alpha, beta, a, b, z)``, where the coefficients ``w``, ``c``,
gamma factors ``alpha``, ``beta``, and hypergeometric coefficients
``a``, ``b`` each should be lists of numbers, and ``z`` should be a single
number.
**Examples**
The following evaluates
.. math ::
(a-1) \frac{\Gamma(a-3)}{\Gamma(a-4)} \,_1F_1(a,a-1,z) = e^z(a-4)(a+z-1)
with `a=1, z=3`. There is a zero factor, two gamma function poles, and
the 1F1 function is singular; all singularities cancel out to give a finite
value::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> hypercomb(lambda a: [([a-1],[1],[a-3],[a-4],[a],[a-1],3)], [1])
-180.769832308689
>>> -9*exp(3)
-180.769832308689
"""
hyp0f1 = r"""
Gives the hypergeometric function `\,_0F_1`, sometimes known as the
confluent limit function, defined as
.. math ::
\,_0F_1(a,z) = \sum_{k=0}^{\infty} \frac{1}{(a)_k} \frac{z^k}{k!}.
This function satisfies the differential equation `z f''(z) + a f'(z) = f(z)`,
and is related to the Bessel function of the first kind (see :func:`~mpmath.besselj`).
``hyp0f1(a,z)`` is equivalent to ``hyper([],[a],z)``; see documentation for
:func:`~mpmath.hyper` for more information.
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp0f1(2, 0.25)
1.130318207984970054415392
>>> hyp0f1((1,2), 1234567)
6.27287187546220705604627e+964
>>> hyp0f1(3+4j, 1000000j)
(3.905169561300910030267132e+606 + 3.807708544441684513934213e+606j)
Evaluation is supported for arbitrarily large values of `z`,
using asymptotic expansions::
>>> hyp0f1(1, 10**50)
2.131705322874965310390701e+8685889638065036553022565
>>> hyp0f1(1, -10**50)
1.115945364792025420300208e-13
Verifying the differential equation::
>>> a = 2.5
>>> f = lambda z: hyp0f1(a,z)
>>> for z in [0, 10, 3+4j]:
... chop(z*diff(f,z,2) + a*diff(f,z) - f(z))
...
0.0
0.0
0.0
"""
hyp1f1 = r"""
Gives the confluent hypergeometric function of the first kind,
.. math ::
\,_1F_1(a,b,z) = \sum_{k=0}^{\infty} \frac{(a)_k}{(b)_k} \frac{z^k}{k!},
also known as Kummer's function and sometimes denoted by `M(a,b,z)`. This
function gives one solution to the confluent (Kummer's) differential equation
.. math ::
z f''(z) + (b-z) f'(z) - af(z) = 0.
A second solution is given by the `U` function; see :func:`~mpmath.hyperu`.
Solutions are also given in an alternate form by the Whittaker
functions (:func:`~mpmath.whitm`, :func:`~mpmath.whitw`).
``hyp1f1(a,b,z)`` is equivalent
to ``hyper([a],[b],z)``; see documentation for :func:`~mpmath.hyper` for more
information.
**Examples**
Evaluation for real and complex values of the argument `z`, with
fixed parameters `a = 2, b = -1/3`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp1f1(2, (-1,3), 3.25)
-2815.956856924817275640248
>>> hyp1f1(2, (-1,3), -3.25)
-1.145036502407444445553107
>>> hyp1f1(2, (-1,3), 1000)
-8.021799872770764149793693e+441
>>> hyp1f1(2, (-1,3), -1000)
0.000003131987633006813594535331
>>> hyp1f1(2, (-1,3), 100+100j)
(-3.189190365227034385898282e+48 - 1.106169926814270418999315e+49j)
Parameters may be complex::
>>> hyp1f1(2+3j, -1+j, 10j)
(261.8977905181045142673351 + 160.8930312845682213562172j)
Arbitrarily large values of `z` are supported::
>>> hyp1f1(3, 4, 10**20)
3.890569218254486878220752e+43429448190325182745
>>> hyp1f1(3, 4, -10**20)
6.0e-60
>>> hyp1f1(3, 4, 10**20*j)
(-1.935753855797342532571597e-20 - 2.291911213325184901239155e-20j)
Verifying the differential equation::
>>> a, b = 1.5, 2
>>> f = lambda z: hyp1f1(a,b,z)
>>> for z in [0, -10, 3, 3+4j]:
... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z))
...
0.0
0.0
0.0
0.0
An integral representation::
>>> a, b = 1.5, 3
>>> z = 1.5
>>> hyp1f1(a,b,z)
2.269381460919952778587441
>>> g = lambda t: exp(z*t)*t**(a-1)*(1-t)**(b-a-1)
>>> gammaprod([b],[a,b-a])*quad(g, [0,1])
2.269381460919952778587441
"""
# Docstring for mpmath.hyp1f2. Note: 1F2 has ONE numerator parameter and TWO
# denominator parameters, so the displayed signature must agree with the stated
# equivalence hyper([a1],[b1,b2],z); the previous text wrongly showed "a_1,a_2".
hyp1f2 = r"""
Gives the hypergeometric function `\,_1F_2(a_1;b_1,b_2;z)`.
The call ``hyp1f2(a1,b1,b2,z)`` is equivalent to
``hyper([a1],[b1,b2],z)``.
Evaluation works for complex and arbitrarily large arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a, b, c = 1.5, (-1,3), 2.25
>>> hyp1f2(a, b, c, 10**20)
-1.159388148811981535941434e+8685889639
>>> hyp1f2(a, b, c, -10**20)
-12.60262607892655945795907
>>> hyp1f2(a, b, c, 10**20*j)
(4.237220401382240876065501e+6141851464 - 2.950930337531768015892987e+6141851464j)
>>> hyp1f2(2+3j, -2j, 0.5j, 10-20j)
(135881.9905586966432662004 - 86681.95885418079535738828j)
"""
hyp2f2 = r"""
Gives the hypergeometric function `\,_2F_2(a_1,a_2;b_1,b_2; z)`.
The call ``hyp2f2(a1,a2,b1,b2,z)`` is equivalent to
``hyper([a1,a2],[b1,b2],z)``.
Evaluation works for complex and arbitrarily large arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a, b, c, d = 1.5, (-1,3), 2.25, 4
>>> hyp2f2(a, b, c, d, 10**20)
-5.275758229007902299823821e+43429448190325182663
>>> hyp2f2(a, b, c, d, -10**20)
2561445.079983207701073448
>>> hyp2f2(a, b, c, d, 10**20*j)
(2218276.509664121194836667 - 1280722.539991603850462856j)
>>> hyp2f2(2+3j, -2j, 0.5j, 4j, 10-20j)
(80500.68321405666957342788 - 20346.82752982813540993502j)
"""
hyp2f3 = r"""
Gives the hypergeometric function `\,_2F_3(a_1,a_2;b_1,b_2,b_3; z)`.
The call ``hyp2f3(a1,a2,b1,b2,b3,z)`` is equivalent to
``hyper([a1,a2],[b1,b2,b3],z)``.
Evaluation works for arbitrarily large arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a1,a2,b1,b2,b3 = 1.5, (-1,3), 2.25, 4, (1,5)
>>> hyp2f3(a1,a2,b1,b2,b3,10**20)
-4.169178177065714963568963e+8685889590
>>> hyp2f3(a1,a2,b1,b2,b3,-10**20)
7064472.587757755088178629
>>> hyp2f3(a1,a2,b1,b2,b3,10**20*j)
(-5.163368465314934589818543e+6141851415 + 1.783578125755972803440364e+6141851416j)
>>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10-20j)
(-2280.938956687033150740228 + 13620.97336609573659199632j)
>>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10000000-20000000j)
(4.849835186175096516193e+3504 - 3.365981529122220091353633e+3504j)
"""
hyp2f1 = r"""
Gives the Gauss hypergeometric function `\,_2F_1` (often simply referred to as
*the* hypergeometric function), defined for `|z| < 1` as
.. math ::
\,_2F_1(a,b,c,z) = \sum_{k=0}^{\infty}
\frac{(a)_k (b)_k}{(c)_k} \frac{z^k}{k!}.
and for `|z| \ge 1` by analytic continuation, with a branch cut on `(1, \infty)`
when necessary.
Special cases of this function include many of the orthogonal polynomials as
well as the incomplete beta function and other functions. Properties of the
Gauss hypergeometric function are documented comprehensively in many references,
for example Abramowitz & Stegun, section 15.
The implementation supports the analytic continuation as well as evaluation
close to the unit circle where `|z| \approx 1`. The syntax ``hyp2f1(a,b,c,z)``
is equivalent to ``hyper([a,b],[c],z)``.
**Examples**
Evaluation with `z` inside, outside and on the unit circle, for
fixed parameters::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp2f1(2, (1,2), 4, 0.75)
1.303703703703703703703704
>>> hyp2f1(2, (1,2), 4, -1.75)
0.7431290566046919177853916
>>> hyp2f1(2, (1,2), 4, 1.75)
(1.418075801749271137026239 - 1.114976146679907015775102j)
>>> hyp2f1(2, (1,2), 4, 1)
1.6
>>> hyp2f1(2, (1,2), 4, -1)
0.8235498012182875315037882
>>> hyp2f1(2, (1,2), 4, j)
(0.9144026291433065674259078 + 0.2050415770437884900574923j)
>>> hyp2f1(2, (1,2), 4, 2+j)
(0.9274013540258103029011549 + 0.7455257875808100868984496j)
>>> hyp2f1(2, (1,2), 4, 0.25j)
(0.9931169055799728251931672 + 0.06154836525312066938147793j)
Evaluation with complex parameter values::
>>> hyp2f1(1+j, 0.75, 10j, 1+5j)
(0.8834833319713479923389638 + 0.7053886880648105068343509j)
Evaluation with `z = 1`::
>>> hyp2f1(-2.5, 3.5, 1.5, 1)
0.0
>>> hyp2f1(-2.5, 3, 4, 1)
0.06926406926406926406926407
>>> hyp2f1(2, 3, 4, 1)
+inf
Evaluation for huge arguments::
>>> hyp2f1((-1,3), 1.75, 4, '1e100')
(7.883714220959876246415651e+32 + 1.365499358305579597618785e+33j)
>>> hyp2f1((-1,3), 1.75, 4, '1e1000000')
(7.883714220959876246415651e+333332 + 1.365499358305579597618785e+333333j)
>>> hyp2f1((-1,3), 1.75, 4, '1e1000000j')
(1.365499358305579597618785e+333333 - 7.883714220959876246415651e+333332j)
An integral representation::
>>> a,b,c,z = -0.5, 1, 2.5, 0.25
>>> g = lambda t: t**(b-1) * (1-t)**(c-b-1) * (1-t*z)**(-a)
>>> gammaprod([c],[b,c-b]) * quad(g, [0,1])
0.9480458814362824478852618
>>> hyp2f1(a,b,c,z)
0.9480458814362824478852618
Verifying the hypergeometric differential equation::
>>> f = lambda z: hyp2f1(a,b,c,z)
>>> chop(z*(1-z)*diff(f,z,2) + (c-(a+b+1)*z)*diff(f,z) - a*b*f(z))
0.0
"""
hyp3f2 = r"""
Gives the generalized hypergeometric function `\,_3F_2`, defined for `|z| < 1`
as
.. math ::
\,_3F_2(a_1,a_2,a_3,b_1,b_2,z) = \sum_{k=0}^{\infty}
\frac{(a_1)_k (a_2)_k (a_3)_k}{(b_1)_k (b_2)_k} \frac{z^k}{k!}.
and for `|z| \ge 1` by analytic continuation. The analytic structure of this
function is similar to that of `\,_2F_1`, generally with a singularity at
`z = 1` and a branch cut on `(1, \infty)`.
Evaluation is supported inside, on, and outside
the circle of convergence `|z| = 1`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp3f2(1,2,3,4,5,0.25)
1.083533123380934241548707
>>> hyp3f2(1,2+2j,3,4,5,-10+10j)
(0.1574651066006004632914361 - 0.03194209021885226400892963j)
>>> hyp3f2(1,2,3,4,5,-10)
0.3071141169208772603266489
>>> hyp3f2(1,2,3,4,5,10)
(-0.4857045320523947050581423 - 0.5988311440454888436888028j)
>>> hyp3f2(0.25,1,1,2,1.5,1)
1.157370995096772047567631
>>> (8-pi-2*ln2)/3
1.157370995096772047567631
>>> hyp3f2(1+j,0.5j,2,1,-2j,-1)
(1.74518490615029486475959 + 0.1454701525056682297614029j)
>>> hyp3f2(1+j,0.5j,2,1,-2j,sqrt(j))
(0.9829816481834277511138055 - 0.4059040020276937085081127j)
>>> hyp3f2(-3,2,1,-5,4,1)
1.41
>>> hyp3f2(-3,2,1,-5,4,2)
2.12
Evaluation very close to the unit circle::
>>> hyp3f2(1,2,3,4,5,'1.0001')
(1.564877796743282766872279 - 3.76821518787438186031973e-11j)
>>> hyp3f2(1,2,3,4,5,'1+0.0001j')
(1.564747153061671573212831 + 0.0001305757570366084557648482j)
>>> hyp3f2(1,2,3,4,5,'0.9999')
1.564616644881686134983664
>>> hyp3f2(1,2,3,4,5,'-0.9999')
0.7823896253461678060196207
.. note ::
Evaluation for `|z-1|` small can currently be inaccurate or slow
for some parameter combinations.
For various parameter combinations, `\,_3F_2` admits representation in terms
of hypergeometric functions of lower degree, or in terms of
simpler functions::
>>> for a, b, z in [(1,2,-1), (2,0.5,1)]:
... hyp2f1(a,b,a+b+0.5,z)**2
... hyp3f2(2*a,a+b,2*b,a+b+0.5,2*a+2*b,z)
...
0.4246104461966439006086308
0.4246104461966439006086308
7.111111111111111111111111
7.111111111111111111111111
>>> z = 2+3j
>>> hyp3f2(0.5,1,1.5,2,2,z)
(0.7621440939243342419729144 + 0.4249117735058037649915723j)
>>> 4*(pi-2*ellipe(z))/(pi*z)
(0.7621440939243342419729144 + 0.4249117735058037649915723j)
"""
hyperu = r"""
Gives the Tricomi confluent hypergeometric function `U`, also known as
the Kummer or confluent hypergeometric function of the second kind. This
function gives a second linearly independent solution to the confluent
hypergeometric differential equation (the first is provided by `\,_1F_1` --
see :func:`~mpmath.hyp1f1`).
**Examples**
Evaluation for arbitrary complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyperu(2,3,4)
0.0625
>>> hyperu(0.25, 5, 1000)
0.1779949416140579573763523
>>> hyperu(0.25, 5, -1000)
(0.1256256609322773150118907 - 0.1256256609322773150118907j)
The `U` function may be singular at `z = 0`::
>>> hyperu(1.5, 2, 0)
+inf
>>> hyperu(1.5, -2, 0)
0.1719434921288400112603671
Verifying the differential equation::
>>> a, b = 1.5, 2
>>> f = lambda z: hyperu(a,b,z)
>>> for z in [-10, 3, 3+4j]:
... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z))
...
0.0
0.0
0.0
An integral representation::
>>> a,b,z = 2, 3.5, 4.25
>>> hyperu(a,b,z)
0.06674960718150520648014567
>>> quad(lambda t: exp(-z*t)*t**(a-1)*(1+t)**(b-a-1),[0,inf]) / gamma(a)
0.06674960718150520648014567
**References**
[1] http://people.math.sfu.ca/~cbm/aands/page_504.htm
"""
hyp2f0 = r"""
Gives the hypergeometric function `\,_2F_0`, defined formally by the
series
.. math ::
\,_2F_0(a,b;;z) = \sum_{n=0}^{\infty} (a)_n (b)_n \frac{z^n}{n!}.
This series usually does not converge. For small enough `z`, it can be viewed
as an asymptotic series that may be summed directly with an appropriate
truncation. When this is not the case, :func:`~mpmath.hyp2f0` gives a regularized sum,
or equivalently, it uses a representation in terms of the
hypergeometric U function [1]. The series also converges when either `a` or `b`
is a nonpositive integer, as it then terminates into a polynomial
after `-a` or `-b` terms.
**Examples**
Evaluation is supported for arbitrary complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp2f0((2,3), 1.25, -100)
0.07095851870980052763312791
>>> hyp2f0((2,3), 1.25, 100)
(-0.03254379032170590665041131 + 0.07269254613282301012735797j)
>>> hyp2f0(-0.75, 1-j, 4j)
(-0.3579987031082732264862155 - 3.052951783922142735255881j)
Even with real arguments, the regularized value of 2F0 is often complex-valued,
but the imaginary part decreases exponentially as `z \to 0`. In the following
example, the first call uses complex evaluation while the second has a small
enough `z` to evaluate using the direct series and thus the returned value
is strictly real (this should be taken to indicate that the imaginary
part is less than ``eps``)::
>>> mp.dps = 15
>>> hyp2f0(1.5, 0.5, 0.05)
(1.04166637647907 + 8.34584913683906e-8j)
>>> hyp2f0(1.5, 0.5, 0.0005)
1.00037535207621
The imaginary part can be retrieved by increasing the working precision::
>>> mp.dps = 80
>>> nprint(hyp2f0(1.5, 0.5, 0.009).imag)
1.23828e-46
In the polynomial case (the series terminating), 2F0 can evaluate exactly::
>>> mp.dps = 15
>>> hyp2f0(-6,-6,2)
291793.0
>>> identify(hyp2f0(-2,1,0.25))
'(5/8)'
The coefficients of the polynomials can be recovered using Taylor expansion::
>>> nprint(taylor(lambda x: hyp2f0(-3,0.5,x), 0, 10))
[1.0, -1.5, 2.25, -1.875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
>>> nprint(taylor(lambda x: hyp2f0(-4,0.5,x), 0, 10))
[1.0, -2.0, 4.5, -7.5, 6.5625, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
**References**
[1] http://people.math.sfu.ca/~cbm/aands/page_504.htm
"""
gammainc = r"""
``gammainc(z, a=0, b=inf)`` computes the (generalized) incomplete
gamma function with integration limits `[a, b]`:
.. math ::
\Gamma(z,a,b) = \int_a^b t^{z-1} e^{-t} \, dt
The generalized incomplete gamma function reduces to the
following special cases when one or both endpoints are fixed:
* `\Gamma(z,0,\infty)` is the standard ("complete")
gamma function, `\Gamma(z)` (available directly
as the mpmath function :func:`~mpmath.gamma`)
* `\Gamma(z,a,\infty)` is the "upper" incomplete gamma
function, `\Gamma(z,a)`
* `\Gamma(z,0,b)` is the "lower" incomplete gamma
function, `\gamma(z,b)`.
Of course, we have
`\Gamma(z,0,x) + \Gamma(z,x,\infty) = \Gamma(z)`
for all `z` and `x`.
Note however that some authors reverse the order of the
arguments when defining the lower and upper incomplete
gamma function, so one should be careful to get the correct
definition.
If also given the keyword argument ``regularized=True``,
:func:`~mpmath.gammainc` computes the "regularized" incomplete gamma
function
.. math ::
P(z,a,b) = \frac{\Gamma(z,a,b)}{\Gamma(z)}.
**Examples**
We can compare with numerical quadrature to verify that
:func:`~mpmath.gammainc` computes the integral in the definition::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> gammainc(2+3j, 4, 10)
(0.00977212668627705160602312 - 0.0770637306312989892451977j)
>>> quad(lambda t: t**(2+3j-1) * exp(-t), [4, 10])
(0.00977212668627705160602312 - 0.0770637306312989892451977j)
Argument symmetries follow directly from the integral definition::
>>> gammainc(3, 4, 5) + gammainc(3, 5, 4)
0.0
>>> gammainc(3,0,2) + gammainc(3,2,4); gammainc(3,0,4)
1.523793388892911312363331
1.523793388892911312363331
>>> findroot(lambda z: gammainc(2,z,3), 1)
3.0
Evaluation for arbitrarily large arguments::
>>> gammainc(10, 100)
4.083660630910611272288592e-26
>>> gammainc(10, 10000000000000000)
5.290402449901174752972486e-4342944819032375
>>> gammainc(3+4j, 1000000+1000000j)
(-1.257913707524362408877881e-434284 + 2.556691003883483531962095e-434284j)
Evaluation of a generalized incomplete gamma function automatically chooses
the representation that gives a more accurate result, depending on which
parameter is larger::
>>> gammainc(10000000, 3) - gammainc(10000000, 2) # Bad
0.0
>>> gammainc(10000000, 2, 3) # Good
1.755146243738946045873491e+4771204
>>> gammainc(2, 0, 100000001) - gammainc(2, 0, 100000000) # Bad
0.0
>>> gammainc(2, 100000000, 100000001) # Good
4.078258353474186729184421e-43429441
The incomplete gamma functions satisfy simple recurrence
relations::
>>> mp.dps = 25
>>> z, a = mpf(3.5), mpf(2)
>>> gammainc(z+1, a); z*gammainc(z,a) + a**z*exp(-a)
10.60130296933533459267329
10.60130296933533459267329
>>> gammainc(z+1,0,a); z*gammainc(z,0,a) - a**z*exp(-a)
1.030425427232114336470932
1.030425427232114336470932
Evaluation at integers and poles::
>>> gammainc(-3, -4, -5)
(-0.2214577048967798566234192 + 0.0j)
>>> gammainc(-3, 0, 5)
+inf
If `z` is an integer, the recurrence reduces the incomplete gamma
function to `P(a) \exp(-a) + Q(b) \exp(-b)` where `P` and
`Q` are polynomials::
>>> gammainc(1, 2); exp(-2)
0.1353352832366126918939995
0.1353352832366126918939995
>>> mp.dps = 50
>>> identify(gammainc(6, 1, 2), ['exp(-1)', 'exp(-2)'])
'(326*exp(-1) + (-872)*exp(-2))'
The incomplete gamma functions reduce to functions such as
the exponential integral Ei and the error function for special
arguments::
>>> mp.dps = 25
>>> gammainc(0, 4); -ei(-4)
0.00377935240984890647887486
0.00377935240984890647887486
>>> gammainc(0.5, 0, 2); sqrt(pi)*erf(sqrt(2))
1.691806732945198336509541
1.691806732945198336509541
"""
erf = r"""
Computes the error function, `\mathrm{erf}(x)`. The error
function is the normalized antiderivative of the Gaussian function
`\exp(-t^2)`. More precisely,
.. math::
\mathrm{erf}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(-t^2) \,dt
**Basic examples**
Simple values and limits include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erf(0)
0.0
>>> erf(1)
0.842700792949715
>>> erf(-1)
-0.842700792949715
>>> erf(inf)
1.0
>>> erf(-inf)
-1.0
For large real `x`, `\mathrm{erf}(x)` approaches 1 very
rapidly::
>>> erf(3)
0.999977909503001
>>> erf(5)
0.999999999998463
The error function is an odd function::
>>> nprint(chop(taylor(erf, 0, 5)))
[0.0, 1.12838, 0.0, -0.376126, 0.0, 0.112838]
:func:`~mpmath.erf` implements arbitrary-precision evaluation and
supports complex numbers::
>>> mp.dps = 50
>>> erf(0.5)
0.52049987781304653768274665389196452873645157575796
>>> mp.dps = 25
>>> erf(1+j)
(1.316151281697947644880271 + 0.1904534692378346862841089j)
Evaluation is supported for large arguments::
>>> mp.dps = 25
>>> erf('1e1000')
1.0
>>> erf('-1e1000')
-1.0
>>> erf('1e-1000')
1.128379167095512573896159e-1000
>>> erf('1e7j')
(0.0 + 8.593897639029319267398803e+43429448190317j)
>>> erf('1e7+1e7j')
(0.9999999858172446172631323 + 3.728805278735270407053139e-8j)
**Related functions**
See also :func:`~mpmath.erfc`, which is more accurate for large `x`,
and :func:`~mpmath.erfi` which gives the antiderivative of
`\exp(t^2)`.
The Fresnel integrals :func:`~mpmath.fresnels` and :func:`~mpmath.fresnelc`
are also related to the error function.
"""
erfc = r"""
Computes the complementary error function,
`\mathrm{erfc}(x) = 1-\mathrm{erf}(x)`.
This function avoids cancellation that occurs when naively
computing the complementary error function as ``1-erf(x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> 1 - erf(10)
0.0
>>> erfc(10)
2.08848758376254e-45
:func:`~mpmath.erfc` works accurately even for ludicrously large
arguments::
>>> erfc(10**10)
4.3504398860243e-43429448190325182776
Complex arguments are supported::
>>> erfc(500+50j)
(1.19739830969552e-107492 + 1.46072418957528e-107491j)
"""
erfi = r"""
Computes the imaginary error function, `\mathrm{erfi}(x)`.
The imaginary error function is defined in analogy with the
error function, but with a positive sign in the integrand:
.. math ::
\mathrm{erfi}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(t^2) \,dt
Whereas the error function rapidly converges to 1 as `x` grows,
the imaginary error function rapidly diverges to infinity.
The functions are related as
`\mathrm{erfi}(x) = -i\,\mathrm{erf}(ix)` for all complex
numbers `x`.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erfi(0)
0.0
>>> erfi(1)
1.65042575879754
>>> erfi(-1)
-1.65042575879754
>>> erfi(inf)
+inf
>>> erfi(-inf)
-inf
Note the symmetry between erf and erfi::
>>> erfi(3j)
(0.0 + 0.999977909503001j)
>>> erf(3)
0.999977909503001
>>> erf(1+2j)
(-0.536643565778565 - 5.04914370344703j)
>>> erfi(2+1j)
(-5.04914370344703 - 0.536643565778565j)
Large arguments are supported::
>>> erfi(1000)
1.71130938718796e+434291
>>> erfi(10**10)
7.3167287567024e+43429448190325182754
>>> erfi(-10**10)
-7.3167287567024e+43429448190325182754
>>> erfi(1000-500j)
(2.49895233563961e+325717 + 2.6846779342253e+325717j)
>>> erfi(100000j)
(0.0 + 1.0j)
>>> erfi(-100000j)
(0.0 - 1.0j)
"""
erfinv = r"""
Computes the inverse error function, satisfying
.. math ::
\mathrm{erf}(\mathrm{erfinv}(x)) =
\mathrm{erfinv}(\mathrm{erf}(x)) = x.
This function is defined only for `-1 \le x \le 1`.
**Examples**
Special values include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erfinv(0)
0.0
>>> erfinv(1)
+inf
>>> erfinv(-1)
-inf
The domain is limited to the standard interval::
>>> erfinv(2)
Traceback (most recent call last):
...
ValueError: erfinv(x) is defined only for -1 <= x <= 1
It is simple to check that :func:`~mpmath.erfinv` computes inverse values of
:func:`~mpmath.erf` as promised::
>>> erf(erfinv(0.75))
0.75
>>> erf(erfinv(-0.995))
-0.995
:func:`~mpmath.erfinv` supports arbitrary-precision evaluation::
>>> mp.dps = 50
>>> x = erf(2)
>>> x
0.99532226501895273416206925636725292861089179704006
>>> erfinv(x)
2.0
A definite integral involving the inverse error function::
>>> mp.dps = 15
>>> quad(erfinv, [0, 1])
0.564189583547756
>>> 1/sqrt(pi)
0.564189583547756
The inverse error function can be used to generate random numbers
with a Gaussian distribution (although this is a relatively
inefficient algorithm)::
>>> nprint([erfinv(2*rand()-1) for n in range(6)]) # doctest: +SKIP
[-0.586747, 1.10233, -0.376796, 0.926037, -0.708142, -0.732012]
"""
npdf = r"""
``npdf(x, mu=0, sigma=1)`` evaluates the probability density
function of a normal distribution with mean value `\mu`
and variance `\sigma^2`.
Elementary properties of the probability distribution can
be verified using numerical integration::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quad(npdf, [-inf, inf])
1.0
>>> quad(lambda x: npdf(x, 3), [3, inf])
0.5
>>> quad(lambda x: npdf(x, 3, 2), [3, inf])
0.5
See also :func:`~mpmath.ncdf`, which gives the cumulative
distribution.
"""
ncdf = r"""
``ncdf(x, mu=0, sigma=1)`` evaluates the cumulative distribution
function of a normal distribution with mean value `\mu`
and variance `\sigma^2`.
See also :func:`~mpmath.npdf`, which gives the probability density.
Elementary properties include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> ncdf(pi, mu=pi)
0.5
>>> ncdf(-inf)
0.0
>>> ncdf(+inf)
1.0
The cumulative distribution is the integral of the density
function having identical mu and sigma::
>>> mp.dps = 15
>>> diff(ncdf, 2)
0.053990966513188
>>> npdf(2)
0.053990966513188
>>> diff(lambda x: ncdf(x, 1, 0.5), 0)
0.107981933026376
>>> npdf(0, 1, 0.5)
0.107981933026376
"""
expint = r"""
:func:`~mpmath.expint(n,z)` gives the generalized exponential integral
or En-function,
.. math ::
\mathrm{E}_n(z) = \int_1^{\infty} \frac{e^{-zt}}{t^n} dt,
where `n` and `z` may both be complex numbers. The case with `n = 1` is
also given by :func:`~mpmath.e1`.
**Examples**
Evaluation at real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expint(1, 6.25)
0.0002704758872637179088496194
>>> expint(-3, 2+3j)
(0.00299658467335472929656159 + 0.06100816202125885450319632j)
>>> expint(2+3j, 4-5j)
(0.001803529474663565056945248 - 0.002235061547756185403349091j)
At negative integer values of `n`, `E_n(z)` reduces to a
rational-exponential function::
>>> f = lambda n, z: fac(n)*sum(z**k/fac(k-1) for k in range(1,n+2))/\
... exp(z)/z**(n+2)
>>> n = 3
>>> z = 1/pi
>>> expint(-n,z)
584.2604820613019908668219
>>> f(n,z)
584.2604820613019908668219
>>> n = 5
>>> expint(-n,z)
115366.5762594725451811138
>>> f(n,z)
115366.5762594725451811138
"""
e1 = r"""
Computes the exponential integral `\mathrm{E}_1(z)`, given by
.. math ::
\mathrm{E}_1(z) = \int_z^{\infty} \frac{e^{-t}}{t} dt.
This is equivalent to :func:`~mpmath.expint` with `n = 1`.
**Examples**
Two ways to evaluate this function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> e1(6.25)
0.0002704758872637179088496194
>>> expint(1,6.25)
0.0002704758872637179088496194
The E1-function is essentially the same as the Ei-function (:func:`~mpmath.ei`)
with negated argument, except for an imaginary branch cut term::
>>> e1(2.5)
0.02491491787026973549562801
>>> -ei(-2.5)
0.02491491787026973549562801
>>> e1(-2.5)
(-7.073765894578600711923552 - 3.141592653589793238462643j)
>>> -ei(2.5)
-7.073765894578600711923552
"""
ei = r"""
Computes the exponential integral or Ei-function, `\mathrm{Ei}(x)`.
The exponential integral is defined as
.. math ::
\mathrm{Ei}(x) = \int_{-\infty\,}^x \frac{e^t}{t} \, dt.
When the integration range includes `t = 0`, the exponential
integral is interpreted as providing the Cauchy principal value.
For real `x`, the Ei-function behaves roughly like
`\mathrm{Ei}(x) \approx \exp(x) + \log(|x|)`.
The Ei-function is related to the more general family of exponential
integral functions denoted by `E_n`, which are available as :func:`~mpmath.expint`.
**Basic examples**
Some basic values and limits are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> ei(0)
-inf
>>> ei(1)
1.89511781635594
>>> ei(inf)
+inf
>>> ei(-inf)
0.0
For `x < 0`, the defining integral can be evaluated
numerically as a reference::
>>> ei(-4)
-0.00377935240984891
>>> quad(lambda t: exp(t)/t, [-inf, -4])
-0.00377935240984891
:func:`~mpmath.ei` supports complex arguments and arbitrary
precision evaluation::
>>> mp.dps = 50
>>> ei(pi)
10.928374389331410348638445906907535171566338835056
>>> mp.dps = 25
>>> ei(3+4j)
(-4.154091651642689822535359 + 4.294418620024357476985535j)
**Related functions**
The exponential integral is closely related to the logarithmic
integral. See :func:`~mpmath.li` for additional information.
The exponential integral is related to the hyperbolic
and trigonometric integrals (see :func:`~mpmath.chi`, :func:`~mpmath.shi`,
:func:`~mpmath.ci`, :func:`~mpmath.si`) similarly to how the ordinary
exponential function is related to the hyperbolic and
trigonometric functions::
>>> mp.dps = 15
>>> ei(3)
9.93383257062542
>>> chi(3) + shi(3)
9.93383257062542
>>> chop(ci(3j) - j*si(3j) - pi*j/2)
9.93383257062542
Beware that logarithmic corrections, as in the last example
above, are required to obtain the correct branch in general.
For details, see [1].
The exponential integral is also a special case of the
hypergeometric function `\,_2F_2`::
>>> z = 0.6
>>> z*hyper([1,1],[2,2],z) + (ln(z)-ln(1/z))/2 + euler
0.769881289937359
>>> ei(z)
0.769881289937359
**References**
1. Relations between Ei and other functions:
http://functions.wolfram.com/GammaBetaErf/ExpIntegralEi/27/01/
2. Abramowitz & Stegun, section 5:
http://people.math.sfu.ca/~cbm/aands/page_228.htm
3. Asymptotic expansion for Ei:
http://mathworld.wolfram.com/En-Function.html
"""
li = r"""
Computes the logarithmic integral or li-function
`\mathrm{li}(x)`, defined by
.. math ::
\mathrm{li}(x) = \int_0^x \frac{1}{\log t} \, dt
The logarithmic integral has a singularity at `x = 1`.
Alternatively, ``li(x, offset=True)`` computes the offset
logarithmic integral (used in number theory)
.. math ::
\mathrm{Li}(x) = \int_2^x \frac{1}{\log t} \, dt.
These two functions are related via the simple identity
`\mathrm{Li}(x) = \mathrm{li}(x) - \mathrm{li}(2)`.
The logarithmic integral should also not be confused with
the polylogarithm (also denoted by Li), which is implemented
as :func:`~mpmath.polylog`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> li(0)
0.0
>>> li(1)
-inf
>>> li(2)
1.04516378011749278484458888919
>>> findroot(li, 2)
1.45136923488338105028396848589
>>> li(inf)
+inf
>>> li(2, offset=True)
0.0
>>> li(1, offset=True)
-inf
>>> li(0, offset=True)
-1.04516378011749278484458888919
>>> li(10, offset=True)
5.12043572466980515267839286347
The logarithmic integral can be evaluated for arbitrary
complex arguments::
>>> mp.dps = 20
>>> li(3+4j)
(3.1343755504645775265 + 2.6769247817778742392j)
The logarithmic integral is related to the exponential integral::
>>> ei(log(3))
2.1635885946671919729
>>> li(3)
2.1635885946671919729
The logarithmic integral grows like `O(x/\log(x))`::
>>> mp.dps = 15
>>> x = 10**100
>>> x/log(x)
4.34294481903252e+97
>>> li(x)
4.3619719871407e+97
The prime number theorem states that the number of primes less
than `x` is asymptotic to `\mathrm{Li}(x)` (equivalently
`\mathrm{li}(x)`). For example, it is known that there are
exactly 1,925,320,391,606,803,968,923 prime numbers less than
`10^{23}` [1]. The logarithmic integral provides a very
accurate estimate::
>>> li(10**23, offset=True)
1.92532039161405e+21
A definite integral is::
>>> quad(li, [0, 1])
-0.693147180559945
>>> -ln(2)
-0.693147180559945
**References**
1. http://mathworld.wolfram.com/PrimeCountingFunction.html
2. http://mathworld.wolfram.com/LogarithmicIntegral.html
"""
ci = r"""
Computes the cosine integral,
.. math ::
\mathrm{Ci}(x) = -\int_x^{\infty} \frac{\cos t}{t}\,dt
= \gamma + \log x + \int_0^x \frac{\cos t - 1}{t}\,dt
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ci(0)
-inf
>>> ci(1)
0.3374039229009681346626462
>>> ci(pi)
0.07366791204642548599010096
>>> ci(inf)
0.0
>>> ci(-inf)
(0.0 + 3.141592653589793238462643j)
>>> ci(2+3j)
(1.408292501520849518759125 - 2.983617742029605093121118j)
The cosine integral behaves roughly like the sinc function
(see :func:`~mpmath.sinc`) for large real `x`::
>>> ci(10**10)
-4.875060251748226537857298e-11
>>> sinc(10**10)
-4.875060250875106915277943e-11
>>> chop(limit(ci, inf))
0.0
It has infinitely many roots on the positive real axis::
>>> findroot(ci, 1)
0.6165054856207162337971104
>>> findroot(ci, 2)
3.384180422551186426397851
Evaluation is supported for `z` anywhere in the complex plane::
>>> ci(10**6*(1+j))
(4.449410587611035724984376e+434287 + 9.75744874290013526417059e+434287j)
We can evaluate the defining integral as a reference::
>>> mp.dps = 15
>>> -quadosc(lambda t: cos(t)/t, [5, inf], omega=1)
-0.190029749656644
>>> ci(5)
-0.190029749656644
Some infinite series can be evaluated using the
cosine integral::
>>> nsum(lambda k: (-1)**k/(fac(2*k)*(2*k)), [1,inf])
-0.239811742000565
>>> ci(1) - euler
-0.239811742000565
"""
si = r"""
Computes the sine integral,
.. math ::
\mathrm{Si}(x) = \int_0^x \frac{\sin t}{t}\,dt.
The sine integral is thus the antiderivative of the sinc
function (see :func:`~mpmath.sinc`).
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> si(0)
0.0
>>> si(1)
0.9460830703671830149413533
>>> si(-1)
-0.9460830703671830149413533
>>> si(pi)
1.851937051982466170361053
>>> si(inf)
1.570796326794896619231322
>>> si(-inf)
-1.570796326794896619231322
>>> si(2+3j)
(4.547513889562289219853204 + 1.399196580646054789459839j)
The sine integral approaches `\pi/2` for large real `x`::
>>> si(10**10)
1.570796326707584656968511
>>> pi/2
1.570796326794896619231322
Evaluation is supported for `z` anywhere in the complex plane::
>>> si(10**6*(1+j))
(-9.75744874290013526417059e+434287 + 4.449410587611035724984376e+434287j)
We can evaluate the defining integral as a reference::
>>> mp.dps = 15
>>> quad(sinc, [0, 5])
1.54993124494467
>>> si(5)
1.54993124494467
Some infinite series can be evaluated using the
sine integral::
>>> nsum(lambda k: (-1)**k/(fac(2*k+1)*(2*k+1)), [0,inf])
0.946083070367183
>>> si(1)
0.946083070367183
"""
chi = r"""
Computes the hyperbolic cosine integral, defined
in analogy with the cosine integral (see :func:`~mpmath.ci`) as
.. math ::
\mathrm{Chi}(x) = -\int_x^{\infty} \frac{\cosh t}{t}\,dt
= \gamma + \log x + \int_0^x \frac{\cosh t - 1}{t}\,dt
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> chi(0)
-inf
>>> chi(1)
0.8378669409802082408946786
>>> chi(inf)
+inf
>>> findroot(chi, 0.5)
0.5238225713898644064509583
>>> chi(2+3j)
(-0.1683628683277204662429321 + 2.625115880451325002151688j)
Evaluation is supported for `z` anywhere in the complex plane::
>>> chi(10**6*(1+j))
(4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j)
"""
shi = r"""
Computes the hyperbolic sine integral, defined
in analogy with the sine integral (see :func:`~mpmath.si`) as
.. math ::
\mathrm{Shi}(x) = \int_0^x \frac{\sinh t}{t}\,dt.
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> shi(0)
0.0
>>> shi(1)
1.057250875375728514571842
>>> shi(-1)
-1.057250875375728514571842
>>> shi(inf)
+inf
>>> shi(2+3j)
(-0.1931890762719198291678095 + 2.645432555362369624818525j)
Evaluation is supported for `z` anywhere in the complex plane::
>>> shi(10**6*(1+j))
(4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j)
"""
fresnels = r"""
Computes the Fresnel sine integral
.. math ::
S(x) = \int_0^x \sin\left(\frac{\pi t^2}{2}\right) \,dt
Note that some sources define this function
without the normalization factor `\pi/2`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> fresnels(0)
0.0
>>> fresnels(inf)
0.5
>>> fresnels(-inf)
-0.5
>>> fresnels(1)
0.4382591473903547660767567
>>> fresnels(1+2j)
(36.72546488399143842838788 + 15.58775110440458732748279j)
Comparing with the definition::
>>> fresnels(3)
0.4963129989673750360976123
>>> quad(lambda t: sin(pi*t**2/2), [0,3])
0.4963129989673750360976123
"""
fresnelc = r"""
Computes the Fresnel cosine integral
.. math ::
C(x) = \int_0^x \cos\left(\frac{\pi t^2}{2}\right) \,dt
Note that some sources define this function
without the normalization factor `\pi/2`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> fresnelc(0)
0.0
>>> fresnelc(inf)
0.5
>>> fresnelc(-inf)
-0.5
>>> fresnelc(1)
0.7798934003768228294742064
>>> fresnelc(1+2j)
(16.08787137412548041729489 - 36.22568799288165021578758j)
Comparing with the definition::
>>> fresnelc(3)
0.6057207892976856295561611
>>> quad(lambda t: cos(pi*t**2/2), [0,3])
0.6057207892976856295561611
"""
airyai = r"""
Computes the Airy function `\operatorname{Ai}(z)`, which is
the solution of the Airy differential equation `f''(z) - z f(z) = 0`
with initial conditions
.. math ::
\operatorname{Ai}(0) =
\frac{1}{3^{2/3}\Gamma\left(\frac{2}{3}\right)}
\operatorname{Ai}'(0) =
-\frac{1}{3^{1/3}\Gamma\left(\frac{1}{3}\right)}.
Other common ways of defining the Ai-function include
integrals such as
.. math ::
\operatorname{Ai}(x) = \frac{1}{\pi}
\int_0^{\infty} \cos\left(\frac{1}{3}t^3+xt\right) dt
\qquad x \in \mathbb{R}
\operatorname{Ai}(z) = \frac{\sqrt{3}}{2\pi}
\int_0^{\infty}
\exp\left(-\frac{t^3}{3}-\frac{z^3}{3t^3}\right) dt.
The Ai-function is an entire function with a turning point,
behaving roughly like a slowly decaying sine wave for `z < 0` and
like a rapidly decreasing exponential for `z > 0`.
A second solution of the Airy differential equation
is given by `\operatorname{Bi}(z)` (see :func:`~mpmath.airybi`).
Optionally, with *derivative=alpha*, :func:`airyai` can compute the
`\alpha`-th order fractional derivative with respect to `z`.
For `\alpha = n = 1,2,3,\ldots` this gives the derivative
`\operatorname{Ai}^{(n)}(z)`, and for `\alpha = -n = -1,-2,-3,\ldots`
this gives the `n`-fold iterated integral
.. math ::
f_0(z) = \operatorname{Ai}(z)
f_n(z) = \int_0^z f_{n-1}(t) dt.
The Ai-function has infinitely many zeros, all located along the
negative half of the real axis. They can be computed with
:func:`~mpmath.airyaizero`.
**Plots**
.. literalinclude :: /plots/ai.py
.. image :: /plots/ai.png
.. literalinclude :: /plots/ai_c.py
.. image :: /plots/ai_c.png
**Basic examples**
Limits and values include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airyai(0); 1/(power(3,'2/3')*gamma('2/3'))
0.3550280538878172392600632
0.3550280538878172392600632
>>> airyai(1)
0.1352924163128814155241474
>>> airyai(-1)
0.5355608832923521187995166
>>> airyai(inf); airyai(-inf)
0.0
0.0
Evaluation is supported for large magnitudes of the argument::
>>> airyai(-100)
0.1767533932395528780908311
>>> airyai(100)
2.634482152088184489550553e-291
>>> airyai(50+50j)
(-5.31790195707456404099817e-68 - 1.163588003770709748720107e-67j)
>>> airyai(-50+50j)
(1.041242537363167632587245e+158 + 3.347525544923600321838281e+157j)
Huge arguments are also fine::
>>> airyai(10**10)
1.162235978298741779953693e-289529654602171
>>> airyai(-10**10)
0.0001736206448152818510510181
>>> w = airyai(10**10*(1+j))
>>> w.real
5.711508683721355528322567e-186339621747698
>>> w.imag
1.867245506962312577848166e-186339621747697
The first root of the Ai-function is::
>>> findroot(airyai, -2)
-2.338107410459767038489197
>>> airyaizero(1)
-2.338107410459767038489197
**Properties and relations**
Verifying the Airy differential equation::
>>> for z in [-3.4, 0, 2.5, 1+2j]:
... chop(airyai(z,2) - z*airyai(z))
...
0.0
0.0
0.0
0.0
The first few terms of the Taylor series expansion around `z = 0`
(every third term is zero)::
>>> nprint(taylor(airyai, 0, 5))
[0.355028, -0.258819, 0.0, 0.0591713, -0.0215683, 0.0]
The Airy functions satisfy the Wronskian relation
`\operatorname{Ai}(z) \operatorname{Bi}'(z) -
\operatorname{Ai}'(z) \operatorname{Bi}(z) = 1/\pi`::
>>> z = -0.5
>>> airyai(z)*airybi(z,1) - airyai(z,1)*airybi(z)
0.3183098861837906715377675
>>> 1/pi
0.3183098861837906715377675
The Airy functions can be expressed in terms of Bessel
functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have::
>>> z = -3
>>> airyai(z)
-0.3788142936776580743472439
>>> y = 2*power(-z,'3/2')/3
>>> (sqrt(-z) * (besselj('1/3',y) + besselj('-1/3',y)))/3
-0.3788142936776580743472439
**Derivatives and integrals**
Derivatives of the Ai-function (directly and using :func:`~mpmath.diff`)::
>>> airyai(-3,1); diff(airyai,-3)
0.3145837692165988136507873
0.3145837692165988136507873
>>> airyai(-3,2); diff(airyai,-3,2)
1.136442881032974223041732
1.136442881032974223041732
>>> airyai(1000,1); diff(airyai,1000)
-2.943133917910336090459748e-9156
-2.943133917910336090459748e-9156
Several derivatives at `z = 0`::
>>> airyai(0,0); airyai(0,1); airyai(0,2)
0.3550280538878172392600632
-0.2588194037928067984051836
0.0
>>> airyai(0,3); airyai(0,4); airyai(0,5)
0.3550280538878172392600632
-0.5176388075856135968103671
0.0
>>> airyai(0,15); airyai(0,16); airyai(0,17)
1292.30211615165475090663
-3188.655054727379756351861
0.0
The integral of the Ai-function::
>>> airyai(3,-1); quad(airyai, [0,3])
0.3299203760070217725002701
0.3299203760070217725002701
>>> airyai(-10,-1); quad(airyai, [0,-10])
-0.765698403134212917425148
-0.765698403134212917425148
Integrals of high or fractional order::
>>> airyai(-2,0.5); differint(airyai,-2,0.5,0)
(0.0 + 0.2453596101351438273844725j)
(0.0 + 0.2453596101351438273844725j)
>>> airyai(-2,-4); differint(airyai,-2,-4,0)
0.2939176441636809580339365
0.2939176441636809580339365
>>> airyai(0,-1); airyai(0,-2); airyai(0,-3)
0.0
0.0
0.0
Integrals of the Ai-function can be evaluated at limit points::
>>> airyai(-1000000,-1); airyai(-inf,-1)
-0.6666843728311539978751512
-0.6666666666666666666666667
>>> airyai(10,-1); airyai(+inf,-1)
0.3333333332991690159427932
0.3333333333333333333333333
>>> airyai(+inf,-2); airyai(+inf,-3)
+inf
+inf
>>> airyai(-1000000,-2); airyai(-inf,-2)
666666.4078472650651209742
+inf
>>> airyai(-1000000,-3); airyai(-inf,-3)
-333333074513.7520264995733
-inf
**References**
1. [DLMF]_ Chapter 9: Airy and Related Functions
2. [WolframFunctions]_ section: Bessel-Type Functions
"""
airybi = r"""
Computes the Airy function `\operatorname{Bi}(z)`, which is
the solution of the Airy differential equation `f''(z) - z f(z) = 0`
with initial conditions
.. math ::
\operatorname{Bi}(0) =
\frac{1}{3^{1/6}\Gamma\left(\frac{2}{3}\right)}
\operatorname{Bi}'(0) =
\frac{3^{1/6}}{\Gamma\left(\frac{1}{3}\right)}.
Like the Ai-function (see :func:`~mpmath.airyai`), the Bi-function
is oscillatory for `z < 0`, but it grows rather than decreases
for `z > 0`.
Optionally, as for :func:`~mpmath.airyai`, derivatives, integrals
and fractional derivatives can be computed with the *derivative*
parameter.
The Bi-function has infinitely many zeros along the negative
half-axis, as well as complex zeros, which can all be computed
with :func:`~mpmath.airybizero`.
**Plots**
.. literalinclude :: /plots/bi.py
.. image :: /plots/bi.png
.. literalinclude :: /plots/bi_c.py
.. image :: /plots/bi_c.png
**Basic examples**
Limits and values include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airybi(0); 1/(power(3,'1/6')*gamma('2/3'))
0.6149266274460007351509224
0.6149266274460007351509224
>>> airybi(1)
1.207423594952871259436379
>>> airybi(-1)
0.10399738949694461188869
>>> airybi(inf); airybi(-inf)
+inf
0.0
Evaluation is supported for large magnitudes of the argument::
>>> airybi(-100)
0.02427388768016013160566747
>>> airybi(100)
6.041223996670201399005265e+288
>>> airybi(50+50j)
(-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j)
>>> airybi(-50+50j)
(-3.347525544923600321838281e+157 + 1.041242537363167632587245e+158j)
Huge arguments::
>>> airybi(10**10)
1.369385787943539818688433e+289529654602165
>>> airybi(-10**10)
0.001775656141692932747610973
>>> w = airybi(10**10*(1+j))
>>> w.real
-6.559955931096196875845858e+186339621747689
>>> w.imag
-6.822462726981357180929024e+186339621747690
The first real root of the Bi-function is::
>>> findroot(airybi, -1); airybizero(1)
-1.17371322270912792491998
-1.17371322270912792491998
**Properties and relations**
Verifying the Airy differential equation::
>>> for z in [-3.4, 0, 2.5, 1+2j]:
... chop(airybi(z,2) - z*airybi(z))
...
0.0
0.0
0.0
0.0
The first few terms of the Taylor series expansion around `z = 0`
(every third term is zero)::
>>> nprint(taylor(airybi, 0, 5))
[0.614927, 0.448288, 0.0, 0.102488, 0.0373574, 0.0]
The Airy functions can be expressed in terms of Bessel
functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have::
>>> z = -3
>>> airybi(z)
-0.1982896263749265432206449
>>> p = 2*power(-z,'3/2')/3
>>> sqrt(-mpf(z)/3)*(besselj('-1/3',p) - besselj('1/3',p))
-0.1982896263749265432206449
**Derivatives and integrals**
Derivatives of the Bi-function (directly and using :func:`~mpmath.diff`)::
>>> airybi(-3,1); diff(airybi,-3)
-0.675611222685258537668032
-0.675611222685258537668032
>>> airybi(-3,2); diff(airybi,-3,2)
0.5948688791247796296619346
0.5948688791247796296619346
>>> airybi(1000,1); diff(airybi,1000)
1.710055114624614989262335e+9156
1.710055114624614989262335e+9156
Several derivatives at `z = 0`::
>>> airybi(0,0); airybi(0,1); airybi(0,2)
0.6149266274460007351509224
0.4482883573538263579148237
0.0
>>> airybi(0,3); airybi(0,4); airybi(0,5)
0.6149266274460007351509224
0.8965767147076527158296474
0.0
>>> airybi(0,15); airybi(0,16); airybi(0,17)
2238.332923903442675949357
5522.912562599140729510628
0.0
The integral of the Bi-function::
>>> airybi(3,-1); quad(airybi, [0,3])
10.06200303130620056316655
10.06200303130620056316655
>>> airybi(-10,-1); quad(airybi, [0,-10])
-0.01504042480614002045135483
-0.01504042480614002045135483
Integrals of high or fractional order::
>>> airybi(-2,0.5); differint(airybi, -2, 0.5, 0)
(0.0 + 0.5019859055341699223453257j)
(0.0 + 0.5019859055341699223453257j)
>>> airybi(-2,-4); differint(airybi,-2,-4,0)
0.2809314599922447252139092
0.2809314599922447252139092
>>> airybi(0,-1); airybi(0,-2); airybi(0,-3)
0.0
0.0
0.0
Integrals of the Bi-function can be evaluated at limit points::
>>> airybi(-1000000,-1); airybi(-inf,-1)
0.000002191261128063434047966873
0.0
>>> airybi(10,-1); airybi(+inf,-1)
147809803.1074067161675853
+inf
>>> airybi(+inf,-2); airybi(+inf,-3)
+inf
+inf
>>> airybi(-1000000,-2); airybi(-inf,-2)
0.4482883750599908479851085
0.4482883573538263579148237
>>> gamma('2/3')*power(3,'2/3')/(2*pi)
0.4482883573538263579148237
>>> airybi(-100000,-3); airybi(-inf,-3)
-44828.52827206932872493133
-inf
>>> airybi(-100000,-4); airybi(-inf,-4)
2241411040.437759489540248
+inf
"""
airyaizero = r"""
Gives the `k`-th zero of the Airy Ai-function,
i.e. the `k`-th number `a_k` ordered by magnitude for which
`\operatorname{Ai}(a_k) = 0`.
Optionally, with *derivative=1*, the corresponding
zero `a'_k` of the derivative function, i.e.
`\operatorname{Ai}'(a'_k) = 0`, is computed.
**Examples**
Some values of `a_k`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airyaizero(1)
-2.338107410459767038489197
>>> airyaizero(2)
-4.087949444130970616636989
>>> airyaizero(3)
-5.520559828095551059129856
>>> airyaizero(1000)
-281.0315196125215528353364
Some values of `a'_k`::
>>> airyaizero(1,1)
-1.018792971647471089017325
>>> airyaizero(2,1)
-3.248197582179836537875424
>>> airyaizero(3,1)
-4.820099211178735639400616
>>> airyaizero(1000,1)
-280.9378080358935070607097
Verification::
>>> chop(airyai(airyaizero(1)))
0.0
>>> chop(airyai(airyaizero(1,1),1))
0.0
"""
airybizero = r"""
With *complex=False*, gives the `k`-th real zero of the Airy Bi-function,
i.e. the `k`-th number `b_k` ordered by magnitude for which
`\operatorname{Bi}(b_k) = 0`.
With *complex=True*, gives the `k`-th complex zero in the upper
half plane `\beta_k`. Also the conjugate `\overline{\beta_k}`
is a zero.
Optionally, with *derivative=1*, the corresponding
zero `b'_k` or `\beta'_k` of the derivative function, i.e.
`\operatorname{Bi}'(b'_k) = 0` or `\operatorname{Bi}'(\beta'_k) = 0`,
is computed.
**Examples**
Some values of `b_k`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airybizero(1)
-1.17371322270912792491998
>>> airybizero(2)
-3.271093302836352715680228
>>> airybizero(3)
-4.830737841662015932667709
>>> airybizero(1000)
-280.9378112034152401578834
Some values of `b'_k`::
>>> airybizero(1,1)
-2.294439682614123246622459
>>> airybizero(2,1)
-4.073155089071828215552369
>>> airybizero(3,1)
-5.512395729663599496259593
>>> airybizero(1000,1)
-281.0315164471118527161362
Some values of `\beta_k`::
>>> airybizero(1,complex=True)
(0.9775448867316206859469927 + 2.141290706038744575749139j)
>>> airybizero(2,complex=True)
(1.896775013895336346627217 + 3.627291764358919410440499j)
>>> airybizero(3,complex=True)
(2.633157739354946595708019 + 4.855468179979844983174628j)
>>> airybizero(1000,complex=True)
(140.4978560578493018899793 + 243.3907724215792121244867j)
Some values of `\beta'_k`::
>>> airybizero(1,1,complex=True)
(0.2149470745374305676088329 + 1.100600143302797880647194j)
>>> airybizero(2,1,complex=True)
(1.458168309223507392028211 + 2.912249367458445419235083j)
>>> airybizero(3,1,complex=True)
(2.273760763013482299792362 + 4.254528549217097862167015j)
>>> airybizero(1000,1,complex=True)
(140.4509972835270559730423 + 243.3096175398562811896208j)
Verification::
>>> chop(airybi(airybizero(1)))
0.0
>>> chop(airybi(airybizero(1,1),1))
0.0
>>> u = airybizero(1,complex=True)
>>> chop(airybi(u))
0.0
>>> chop(airybi(conj(u)))
0.0
The complex zeros (in the upper and lower half-planes respectively)
asymptotically approach the rays `z = R \exp(\pm i \pi /3)`::
>>> arg(airybizero(1,complex=True))
1.142532510286334022305364
>>> arg(airybizero(1000,complex=True))
1.047271114786212061583917
>>> arg(airybizero(1000000,complex=True))
1.047197624741816183341355
>>> pi/3
1.047197551196597746154214
"""
ellipk = r"""
Evaluates the complete elliptic integral of the first kind,
`K(m)`, defined by
.. math ::
K(m) = \int_0^{\pi/2} \frac{dt}{\sqrt{1-m \sin^2 t}} \, = \,
\frac{\pi}{2} \,_2F_1\left(\frac{1}{2}, \frac{1}{2}, 1, m\right).
Note that the argument is the parameter `m = k^2`,
not the modulus `k` which is sometimes used.
**Plots**
.. literalinclude :: /plots/ellipk.py
.. image :: /plots/ellipk.png
**Examples**
Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellipk(0)
1.570796326794896619231322
>>> ellipk(inf)
(0.0 + 0.0j)
>>> ellipk(-inf)
0.0
>>> ellipk(1)
+inf
>>> ellipk(-1)
1.31102877714605990523242
>>> ellipk(2)
(1.31102877714605990523242 - 1.31102877714605990523242j)
Verifying the defining integral and hypergeometric
representation::
>>> ellipk(0.5)
1.85407467730137191843385
>>> quad(lambda t: (1-0.5*sin(t)**2)**-0.5, [0, pi/2])
1.85407467730137191843385
>>> pi/2*hyp2f1(0.5,0.5,1,0.5)
1.85407467730137191843385
Evaluation is supported for arbitrary complex `m`::
>>> ellipk(3+4j)
(0.9111955638049650086562171 + 0.6313342832413452438845091j)
A definite integral::
>>> quad(ellipk, [0, 1])
2.0
"""
agm = r"""
``agm(a, b)`` computes the arithmetic-geometric mean of `a` and
`b`, defined as the limit of the following iteration:
.. math ::
a_0 = a
b_0 = b
a_{n+1} = \frac{a_n+b_n}{2}
b_{n+1} = \sqrt{a_n b_n}
This function can be called with a single argument, computing
`\mathrm{agm}(a,1) = \mathrm{agm}(1,a)`.
**Examples**
It is a well-known theorem that the geometric mean of
two distinct positive numbers is less than the arithmetic
mean. It follows that the arithmetic-geometric mean lies
between the two means::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> a = mpf(3)
>>> b = mpf(4)
>>> sqrt(a*b)
3.46410161513775
>>> agm(a,b)
3.48202767635957
>>> (a+b)/2
3.5
The arithmetic-geometric mean is scale-invariant::
>>> agm(10*e, 10*pi)
29.261085515723
>>> 10*agm(e, pi)
29.261085515723
As an order-of-magnitude estimate, `\mathrm{agm}(1,x) \approx x`
for large `x`::
>>> agm(10**10)
643448704.760133
>>> agm(10**50)
1.34814309345871e+48
For tiny `x`, `\mathrm{agm}(1,x) \approx -\pi/(2 \log(x/4))`::
>>> agm('0.01')
0.262166887202249
>>> -pi/2/log('0.0025')
0.262172347753122
The arithmetic-geometric mean can also be computed for complex
numbers::
>>> agm(3, 2+j)
(2.51055133276184 + 0.547394054060638j)
The AGM iteration converges very quickly (each step doubles
the number of correct digits), so :func:`~mpmath.agm` supports efficient
high-precision evaluation::
>>> mp.dps = 10000
>>> a = agm(1,2)
>>> str(a)[-10:]
'1679581912'
**Mathematical relations**
The arithmetic-geometric mean may be used to evaluate the
following two parametric definite integrals:
.. math ::
I_1 = \int_0^{\infty}
\frac{1}{\sqrt{(x^2+a^2)(x^2+b^2)}} \,dx
I_2 = \int_0^{\pi/2}
\frac{1}{\sqrt{a^2 \cos^2(x) + b^2 \sin^2(x)}} \,dx
We have::
>>> mp.dps = 15
>>> a = 3
>>> b = 4
>>> f1 = lambda x: ((x**2+a**2)*(x**2+b**2))**-0.5
>>> f2 = lambda x: ((a*cos(x))**2 + (b*sin(x))**2)**-0.5
>>> quad(f1, [0, inf])
0.451115405388492
>>> quad(f2, [0, pi/2])
0.451115405388492
>>> pi/(2*agm(a,b))
0.451115405388492
A formula for `\Gamma(1/4)`::
>>> gamma(0.25)
3.62560990822191
>>> sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2)))
3.62560990822191
**Possible issues**
The branch cut chosen for complex `a` and `b` is somewhat
arbitrary.
"""
gegenbauer = r"""
Evaluates the Gegenbauer polynomial, or ultraspherical polynomial,
.. math ::
C_n^{(a)}(z) = {n+2a-1 \choose n} \,_2F_1\left(-n, n+2a;
a+\frac{1}{2}; \frac{1}{2}(1-z)\right).
When `n` is a nonnegative integer, this formula gives a polynomial
in `z` of degree `n`, but all parameters are permitted to be
complex numbers. With `a = 1/2`, the Gegenbauer polynomial
reduces to a Legendre polynomial.
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> gegenbauer(3, 0.5, -10)
-2485.0
>>> gegenbauer(1000, 10, 100)
3.012757178975667428359374e+2322
>>> gegenbauer(2+3j, -0.75, -1000j)
(-5038991.358609026523401901 + 9414549.285447104177860806j)
Evaluation at negative integer orders::
>>> gegenbauer(-4, 2, 1.75)
-1.0
>>> gegenbauer(-4, 3, 1.75)
0.0
>>> gegenbauer(-4, 2j, 1.75)
0.0
>>> gegenbauer(-7, 0.5, 3)
8989.0
The Gegenbauer polynomials solve the differential equation::
>>> n, a = 4.5, 1+2j
>>> f = lambda z: gegenbauer(n, a, z)
>>> for z in [0, 0.75, -0.5j]:
... chop((1-z**2)*diff(f,z,2) - (2*a+1)*z*diff(f,z) + n*(n+2*a)*f(z))
...
0.0
0.0
0.0
The Gegenbauer polynomials have generating function
`(1-2zt+t^2)^{-a}`::
>>> a, z = 2.5, 1
>>> taylor(lambda t: (1-2*z*t+t**2)**(-a), 0, 3)
[1.0, 5.0, 15.0, 35.0]
>>> [gegenbauer(n,a,z) for n in range(4)]
[1.0, 5.0, 15.0, 35.0]
The Gegenbauer polynomials are orthogonal on `[-1, 1]` with respect
to the weight `(1-z^2)^{a-\frac{1}{2}}`::
>>> a, n, m = 2.5, 4, 5
>>> Cn = lambda z: gegenbauer(n, a, z, zeroprec=1000)
>>> Cm = lambda z: gegenbauer(m, a, z, zeroprec=1000)
>>> chop(quad(lambda z: Cn(z)*Cm(z)*(1-z**2)**(a-0.5), [-1, 1]))
0.0
"""
laguerre = r"""
Gives the generalized (associated) Laguerre polynomial, defined by
.. math ::
L_n^a(z) = \frac{\Gamma(n+a+1)}{\Gamma(a+1) \Gamma(n+1)}
\,_1F_1(-n, a+1, z).
With `a = 0` and `n` a nonnegative integer, this reduces to an ordinary
Laguerre polynomial, the sequence of which begins
`L_0(z) = 1, L_1(z) = 1-z, L_2(z) = \frac{1}{2}(z^2-4z+2), \ldots`.
The Laguerre polynomials are orthogonal with respect to the weight
`z^a e^{-z}` on `[0, \infty)`.
**Plots**
.. literalinclude :: /plots/laguerre.py
.. image :: /plots/laguerre.png
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> laguerre(5, 0, 0.25)
0.03726399739583333333333333
>>> laguerre(1+j, 0.5, 2+3j)
(4.474921610704496808379097 - 11.02058050372068958069241j)
>>> laguerre(2, 0, 10000)
49980001.0
>>> laguerre(2.5, 0, 10000)
-9.327764910194842158583189e+4328
The first few Laguerre polynomials, normalized to have integer
coefficients::
>>> for n in range(7):
... chop(taylor(lambda z: fac(n)*laguerre(n, 0, z), 0, n))
...
[1.0]
[1.0, -1.0]
[2.0, -4.0, 1.0]
[6.0, -18.0, 9.0, -1.0]
[24.0, -96.0, 72.0, -16.0, 1.0]
[120.0, -600.0, 600.0, -200.0, 25.0, -1.0]
[720.0, -4320.0, 5400.0, -2400.0, 450.0, -36.0, 1.0]
Verifying orthogonality::
>>> Lm = lambda t: laguerre(m,a,t)
>>> Ln = lambda t: laguerre(n,a,t)
>>> a, n, m = 2.5, 2, 3
>>> chop(quad(lambda t: exp(-t)*t**a*Lm(t)*Ln(t), [0,inf]))
0.0
"""
hermite = r"""
Evaluates the Hermite polynomial `H_n(z)`, which may be defined using
the recurrence
.. math ::
H_0(z) = 1
H_1(z) = 2z
H_{n+1}(z) = 2z H_n(z) - 2n H_{n-1}(z).
The Hermite polynomials are orthogonal on `(-\infty, \infty)` with
respect to the weight `e^{-z^2}`. More generally, allowing arbitrary complex
values of `n`, the Hermite function `H_n(z)` is defined as
.. math ::
H_n(z) = (2z)^n \,_2F_0\left(-\frac{n}{2}, \frac{1-n}{2},
-\frac{1}{z^2}\right)
for `\Re{z} > 0`, or generally
.. math ::
H_n(z) = 2^n \sqrt{\pi} \left(
\frac{1}{\Gamma\left(\frac{1-n}{2}\right)}
\,_1F_1\left(-\frac{n}{2}, \frac{1}{2}, z^2\right) -
\frac{2z}{\Gamma\left(-\frac{n}{2}\right)}
\,_1F_1\left(\frac{1-n}{2}, \frac{3}{2}, z^2\right)
\right).
**Plots**
.. literalinclude :: /plots/hermite.py
.. image :: /plots/hermite.png
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hermite(0, 10)
1.0
>>> hermite(1, 10); hermite(2, 10)
20.0
398.0
>>> hermite(10000, 2)
4.950440066552087387515653e+19334
>>> hermite(3, -10**8)
-7999999999999998800000000.0
>>> hermite(-3, -10**8)
1.675159751729877682920301e+4342944819032534
>>> hermite(2+3j, -1+2j)
(-0.07652130602993513389421901 - 0.1084662449961914580276007j)
Coefficients of the first few Hermite polynomials are::
>>> for n in range(7):
... chop(taylor(lambda z: hermite(n, z), 0, n))
...
[1.0]
[0.0, 2.0]
[-2.0, 0.0, 4.0]
[0.0, -12.0, 0.0, 8.0]
[12.0, 0.0, -48.0, 0.0, 16.0]
[0.0, 120.0, 0.0, -160.0, 0.0, 32.0]
[-120.0, 0.0, 720.0, 0.0, -480.0, 0.0, 64.0]
Values at `z = 0`::
>>> for n in range(-5, 9):
... hermite(n, 0)
...
0.02769459142039868792653387
0.08333333333333333333333333
0.2215567313631895034122709
0.5
0.8862269254527580136490837
1.0
0.0
-2.0
0.0
12.0
0.0
-120.0
0.0
1680.0
Hermite functions satisfy the differential equation::
>>> n = 4
>>> f = lambda z: hermite(n, z)
>>> z = 1.5
>>> chop(diff(f,z,2) - 2*z*diff(f,z) + 2*n*f(z))
0.0
Verifying orthogonality::
>>> chop(quad(lambda t: hermite(2,t)*hermite(4,t)*exp(-t**2), [-inf,inf]))
0.0
"""
jacobi = r"""
``jacobi(n, a, b, x)`` evaluates the Jacobi polynomial
`P_n^{(a,b)}(x)`. The Jacobi polynomials are a special
case of the hypergeometric function `\,_2F_1` given by:
.. math ::
P_n^{(a,b)}(x) = {n+a \choose n}
\,_2F_1\left(-n,1+a+b+n,a+1,\frac{1-x}{2}\right).
Note that this definition generalizes to nonintegral values
of `n`. When `n` is an integer, the hypergeometric series
terminates after a finite number of terms, giving
a polynomial in `x`.
**Evaluation of Jacobi polynomials**
A special evaluation is `P_n^{(a,b)}(1) = {n+a \choose n}`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> jacobi(4, 0.5, 0.25, 1)
2.4609375
>>> binomial(4+0.5, 4)
2.4609375
A Jacobi polynomial of degree `n` is equal to its
Taylor polynomial of degree `n`. The explicit
coefficients of Jacobi polynomials can therefore
be recovered easily using :func:`~mpmath.taylor`::
>>> for n in range(5):
... nprint(taylor(lambda x: jacobi(n,1,2,x), 0, n))
...
[1.0]
[-0.5, 2.5]
[-0.75, -1.5, 5.25]
[0.5, -3.5, -3.5, 10.5]
[0.625, 2.5, -11.25, -7.5, 20.625]
For nonintegral `n`, the Jacobi "polynomial" is no longer
a polynomial::
>>> nprint(taylor(lambda x: jacobi(0.5,1,2,x), 0, 4))
[0.309983, 1.84119, -1.26933, 1.26699, -1.34808]
**Orthogonality**
The Jacobi polynomials are orthogonal on the interval
`[-1, 1]` with respect to the weight function
`w(x) = (1-x)^a (1+x)^b`. That is,
`w(x) P_n^{(a,b)}(x) P_m^{(a,b)}(x)` integrates to
zero if `m \ne n` and to a nonzero number if `m = n`.
The orthogonality is easy to verify using numerical
quadrature::
>>> P = jacobi
>>> f = lambda x: (1-x)**a * (1+x)**b * P(m,a,b,x) * P(n,a,b,x)
>>> a = 2
>>> b = 3
>>> m, n = 3, 4
>>> chop(quad(f, [-1, 1]), 1)
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.9047619047619
**Differential equation**
The Jacobi polynomials are solutions of the differential
equation
.. math ::
(1-x^2) y'' + (b-a-(a+b+2)x) y' + n (n+a+b+1) y = 0.
We can verify that :func:`~mpmath.jacobi` approximately satisfies
this equation::
>>> from mpmath import *
>>> mp.dps = 15
>>> a = 2.5
>>> b = 4
>>> n = 3
>>> y = lambda x: jacobi(n,a,b,x)
>>> x = pi
>>> A0 = n*(n+a+b+1)*y(x)
>>> A1 = (b-a-(a+b+2)*x)*diff(y,x)
>>> A2 = (1-x**2)*diff(y,x,2)
>>> nprint(A2 + A1 + A0, 1)
4.0e-12
The difference of order `10^{-12}` is as close to zero as
it could be at 15-digit working precision, since the terms
are large::
>>> A0, A1, A2
(26560.2328981879, -21503.7641037294, -5056.46879445852)
"""
legendre = r"""
``legendre(n, x)`` evaluates the Legendre polynomial `P_n(x)`.
The Legendre polynomials are given by the formula
.. math ::
P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n} (x^2 -1)^n.
Alternatively, they can be computed recursively using
.. math ::
P_0(x) = 1
P_1(x) = x
(n+1) P_{n+1}(x) = (2n+1) x P_n(x) - n P_{n-1}(x).
A third definition is in terms of the hypergeometric function
`\,_2F_1`, whereby they can be generalized to arbitrary `n`:
.. math ::
P_n(x) = \,_2F_1\left(-n, n+1, 1, \frac{1-x}{2}\right)
**Plots**
.. literalinclude :: /plots/legendre.py
.. image :: /plots/legendre.png
**Basic evaluation**
The Legendre polynomials assume fixed values at the points
`x = -1` and `x = 1`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint([legendre(n, 1) for n in range(6)])
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
>>> nprint([legendre(n, -1) for n in range(6)])
[1.0, -1.0, 1.0, -1.0, 1.0, -1.0]
The coefficients of Legendre polynomials can be recovered
using degree-`n` Taylor expansion::
>>> for n in range(5):
... nprint(chop(taylor(lambda x: legendre(n, x), 0, n)))
...
[1.0]
[0.0, 1.0]
[-0.5, 0.0, 1.5]
[0.0, -1.5, 0.0, 2.5]
[0.375, 0.0, -3.75, 0.0, 4.375]
The roots of Legendre polynomials are located symmetrically
on the interval `[-1, 1]`::
>>> for n in range(5):
... nprint(polyroots(taylor(lambda x: legendre(n, x), 0, n)[::-1]))
...
[]
[0.0]
[-0.57735, 0.57735]
[-0.774597, 0.0, 0.774597]
[-0.861136, -0.339981, 0.339981, 0.861136]
An example of an evaluation for arbitrary `n`::
>>> legendre(0.75, 2+4j)
(1.94952805264875 + 2.1071073099422j)
**Orthogonality**
The Legendre polynomials are orthogonal on `[-1, 1]` with respect
to the trivial weight `w(x) = 1`. That is, `P_m(x) P_n(x)`
integrates to zero if `m \ne n` and to `2/(2n+1)` if `m = n`::
>>> m, n = 3, 4
>>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
0.0
>>> m, n = 4, 4
>>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
0.222222222222222
**Differential equation**
The Legendre polynomials satisfy the differential equation
.. math ::
((1-x^2) y')' + n(n+1) y = 0.
We can verify this numerically::
>>> n = 3.6
>>> x = 0.73
>>> P = legendre
>>> A = diff(lambda t: (1-t**2)*diff(lambda u: P(n,u), t), x)
>>> B = n*(n+1)*P(n,x)
>>> nprint(A+B,1)
9.0e-16
"""
legenp = r"""
Calculates the (associated) Legendre function of the first kind of
degree *n* and order *m*, `P_n^m(z)`. Taking `m = 0` gives the ordinary
Legendre function of the first kind, `P_n(z)`. The parameters may be
complex numbers.
In terms of the Gauss hypergeometric function, the (associated) Legendre
function is defined as
.. math ::
P_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(1+z)^{m/2}}{(1-z)^{m/2}}
\,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right).
With *type=3* instead of *type=2*, the alternative
definition
.. math ::
\hat{P}_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(z+1)^{m/2}}{(z-1)^{m/2}}
\,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right).
is used. These functions correspond respectively to ``LegendreP[n,m,2,z]``
and ``LegendreP[n,m,3,z]`` in Mathematica.
The general solution of the (associated) Legendre differential equation
.. math ::
(1-z^2) f''(z) - 2zf'(z) + \left(n(n+1)-\frac{m^2}{1-z^2}\right)f(z) = 0
is given by `C_1 P_n^m(z) + C_2 Q_n^m(z)` for arbitrary constants
`C_1`, `C_2`, where `Q_n^m(z)` is a Legendre function of the
second kind as implemented by :func:`~mpmath.legenq`.
**Examples**
Evaluation for arbitrary parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> legenp(2, 0, 10); legendre(2, 10)
149.5
149.5
>>> legenp(-2, 0.5, 2.5)
(1.972260393822275434196053 - 1.972260393822275434196053j)
>>> legenp(2+3j, 1-j, -0.5+4j)
(-3.335677248386698208736542 - 5.663270217461022307645625j)
>>> chop(legenp(3, 2, -1.5, type=2))
28.125
>>> chop(legenp(3, 2, -1.5, type=3))
-28.125
Verifying the associated Legendre differential equation::
>>> n, m = 2, -0.5
>>> C1, C2 = 1, -3
>>> f = lambda z: C1*legenp(n,m,z) + C2*legenq(n,m,z)
>>> deq = lambda z: (1-z**2)*diff(f,z,2) - 2*z*diff(f,z) + \
... (n*(n+1)-m**2/(1-z**2))*f(z)
>>> for z in [0, 2, -1.5, 0.5+2j]:
... chop(deq(mpmathify(z)))
...
0.0
0.0
0.0
0.0
"""
legenq = r"""
Calculates the (associated) Legendre function of the second kind of
degree *n* and order *m*, `Q_n^m(z)`. Taking `m = 0` gives the ordinary
Legendre function of the second kind, `Q_n(z)`. The parameters may be
complex numbers.
The Legendre functions of the second kind give a second set of
solutions to the (associated) Legendre differential equation.
(See :func:`~mpmath.legenp`.)
Unlike the Legendre functions of the first kind, they are not
polynomials of `z` for integer `n`, `m` but rational or logarithmic
functions with poles at `z = \pm 1`.
There are various ways to define Legendre functions of
the second kind, giving rise to different complex structure.
A version can be selected using the *type* keyword argument.
The *type=2* and *type=3* functions are given respectively by
.. math ::
Q_n^m(z) = \frac{\pi}{2 \sin(\pi m)}
\left( \cos(\pi m) P_n^m(z) -
\frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} P_n^{-m}(z)\right)
\hat{Q}_n^m(z) = \frac{\pi}{2 \sin(\pi m)} e^{\pi i m}
\left( \hat{P}_n^m(z) -
\frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} \hat{P}_n^{-m}(z)\right)
where `P` and `\hat{P}` are the *type=2* and *type=3* Legendre functions
of the first kind. The formulas above should be understood as limits
when `m` is an integer.
These functions correspond to ``LegendreQ[n,m,2,z]`` (or ``LegendreQ[n,m,z]``)
and ``LegendreQ[n,m,3,z]`` in Mathematica. The *type=3* function
is essentially the same as the function defined in
Abramowitz & Stegun (eq. 8.1.3) but with `(z+1)^{m/2}(z-1)^{m/2}` instead
of `(z^2-1)^{m/2}`, giving slightly different branches.
**Examples**
Evaluation for arbitrary parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> legenq(2, 0, 0.5)
-0.8186632680417568557122028
>>> legenq(-1.5, -2, 2.5)
(0.6655964618250228714288277 + 0.3937692045497259717762649j)
>>> legenq(2-j, 3+4j, -6+5j)
(-10001.95256487468541686564 - 6011.691337610097577791134j)
Different versions of the function::
>>> legenq(2, 1, 0.5)
0.7298060598018049369381857
>>> legenq(2, 1, 1.5)
(-7.902916572420817192300921 + 0.1998650072605976600724502j)
>>> legenq(2, 1, 0.5, type=3)
(2.040524284763495081918338 - 0.7298060598018049369381857j)
>>> chop(legenq(2, 1, 1.5, type=3))
-0.1998650072605976600724502
"""
chebyt = r"""
``chebyt(n, x)`` evaluates the Chebyshev polynomial of the first
kind `T_n(x)`, defined by the identity
.. math ::
T_n(\cos x) = \cos(n x).
The Chebyshev polynomials of the first kind are a special
case of the Jacobi polynomials, and by extension of the
hypergeometric function `\,_2F_1`. They can thus also be
evaluated for nonintegral `n`.
**Plots**
.. literalinclude :: /plots/chebyt.py
.. image :: /plots/chebyt.png
**Basic evaluation**
The coefficients of the `n`-th polynomial can be recovered
using degree-`n` Taylor expansion::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(chop(taylor(lambda x: chebyt(n, x), 0, n)))
...
[1.0]
[0.0, 1.0]
[-1.0, 0.0, 2.0]
[0.0, -3.0, 0.0, 4.0]
[1.0, 0.0, -8.0, 0.0, 8.0]
**Orthogonality**
The Chebyshev polynomials of the first kind are orthogonal
on the interval `[-1, 1]` with respect to the weight
function `w(x) = 1/\sqrt{1-x^2}`::
>>> f = lambda x: chebyt(m,x)*chebyt(n,x)/sqrt(1-x**2)
>>> m, n = 3, 4
>>> nprint(quad(f, [-1, 1]),1)
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.57079632596448
"""
chebyu = r"""
``chebyu(n, x)`` evaluates the Chebyshev polynomial of the second
kind `U_n(x)`, defined by the identity
.. math ::
U_n(\cos x) = \frac{\sin((n+1)x)}{\sin(x)}.
The Chebyshev polynomials of the second kind are a special
case of the Jacobi polynomials, and by extension of the
hypergeometric function `\,_2F_1`. They can thus also be
evaluated for nonintegral `n`.
**Plots**
.. literalinclude :: /plots/chebyu.py
.. image :: /plots/chebyu.png
**Basic evaluation**
The coefficients of the `n`-th polynomial can be recovered
using degree-`n` Taylor expansion::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(chop(taylor(lambda x: chebyu(n, x), 0, n)))
...
[1.0]
[0.0, 2.0]
[-1.0, 0.0, 4.0]
[0.0, -4.0, 0.0, 8.0]
[1.0, 0.0, -12.0, 0.0, 16.0]
**Orthogonality**
The Chebyshev polynomials of the second kind are orthogonal
on the interval `[-1, 1]` with respect to the weight
function `w(x) = \sqrt{1-x^2}`::
>>> f = lambda x: chebyu(m,x)*chebyu(n,x)*sqrt(1-x**2)
>>> m, n = 3, 4
>>> quad(f, [-1, 1])
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.5707963267949
"""
besselj = r"""
``besselj(n, x, derivative=0)`` gives the Bessel function of the first kind
`J_n(x)`. Bessel functions of the first kind are defined as
solutions of the differential equation
.. math ::
x^2 y'' + x y' + (x^2 - n^2) y = 0
which appears, among other things, when solving the radial
part of Laplace's equation in cylindrical coordinates. This
equation has two solutions for given `n`, where the
`J_n`-function is the solution that is nonsingular at `x = 0`.
For positive integer `n`, `J_n(x)` behaves roughly like a sine
(odd `n`) or cosine (even `n`) multiplied by a magnitude factor
that decays slowly as `x \to \pm\infty`.
Generally, `J_n` is a special case of the hypergeometric
function `\,_0F_1`:
.. math ::
J_n(x) = \frac{x^n}{2^n \Gamma(n+1)}
\,_0F_1\left(n+1,-\frac{x^2}{4}\right)
With *derivative* = `m \ne 0`, the `m`-th derivative
.. math ::
\frac{d^m}{dx^m} J_n(x)
is computed.
**Plots**
.. literalinclude :: /plots/besselj.py
.. image :: /plots/besselj.png
.. literalinclude :: /plots/besselj_c.py
.. image :: /plots/besselj_c.png
**Examples**
Evaluation is supported for arbitrary arguments, and at
arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> besselj(2, 1000)
-0.024777229528606
>>> besselj(4, 0.75)
0.000801070086542314
>>> besselj(2, 1000j)
(-2.48071721019185e+432 + 6.41567059811949e-437j)
>>> mp.dps = 25
>>> besselj(0.75j, 3+4j)
(-2.778118364828153309919653 - 1.5863603889018621585533j)
>>> mp.dps = 50
>>> besselj(1, pi)
0.28461534317975275734531059968613140570981118184947
Arguments may be large::
>>> mp.dps = 25
>>> besselj(0, 10000)
-0.007096160353388801477265164
>>> besselj(0, 10**10)
0.000002175591750246891726859055
>>> besselj(2, 10**100)
7.337048736538615712436929e-51
>>> besselj(2, 10**5*j)
(-3.540725411970948860173735e+43426 + 4.4949812409615803110051e-43433j)
The Bessel functions of the first kind satisfy simple
symmetries around `x = 0`::
>>> mp.dps = 15
>>> nprint([besselj(n,0) for n in range(5)])
[1.0, 0.0, 0.0, 0.0, 0.0]
>>> nprint([besselj(n,pi) for n in range(5)])
[-0.304242, 0.284615, 0.485434, 0.333458, 0.151425]
>>> nprint([besselj(n,-pi) for n in range(5)])
[-0.304242, -0.284615, 0.485434, -0.333458, 0.151425]
Roots of Bessel functions are often used::
>>> nprint([findroot(j0, k) for k in [2, 5, 8, 11, 14]])
[2.40483, 5.52008, 8.65373, 11.7915, 14.9309]
>>> nprint([findroot(j1, k) for k in [3, 7, 10, 13, 16]])
[3.83171, 7.01559, 10.1735, 13.3237, 16.4706]
The roots are not periodic, but the distance between successive
roots asymptotically approaches `\pi`. Bessel functions of
the first kind have the following normalization::
>>> quadosc(j0, [0, inf], period=2*pi)
1.0
>>> quadosc(j1, [0, inf], period=2*pi)
1.0
For `n = 1/2` or `n = -1/2`, the Bessel function reduces to a
trigonometric function::
>>> x = 10
>>> besselj(0.5, x), sqrt(2/(pi*x))*sin(x)
(-0.13726373575505, -0.13726373575505)
>>> besselj(-0.5, x), sqrt(2/(pi*x))*cos(x)
(-0.211708866331398, -0.211708866331398)
Derivatives of any order can be computed (negative orders
correspond to integration)::
>>> mp.dps = 25
>>> besselj(0, 7.5, 1)
-0.1352484275797055051822405
>>> diff(lambda x: besselj(0,x), 7.5)
-0.1352484275797055051822405
>>> besselj(0, 7.5, 10)
-0.1377811164763244890135677
>>> diff(lambda x: besselj(0,x), 7.5, 10)
-0.1377811164763244890135677
>>> besselj(0,7.5,-1) - besselj(0,3.5,-1)
-0.1241343240399987693521378
>>> quad(j0, [3.5, 7.5])
-0.1241343240399987693521378
Differentiation with a noninteger order gives the fractional derivative
in the sense of the Riemann-Liouville differintegral, as computed by
:func:`~mpmath.differint`::
>>> mp.dps = 15
>>> besselj(1, 3.5, 0.75)
-0.385977722939384
>>> differint(lambda x: besselj(1, x), 3.5, 0.75)
-0.385977722939384
"""
besseli = r"""
``besseli(n, x, derivative=0)`` gives the modified Bessel function of the
first kind,
.. math ::
I_n(x) = i^{-n} J_n(ix).
With *derivative* = `m \ne 0`, the `m`-th derivative
.. math ::
\frac{d^m}{dx^m} I_n(x)
is computed.
**Plots**
.. literalinclude :: /plots/besseli.py
.. image :: /plots/besseli.png
.. literalinclude :: /plots/besseli_c.py
.. image :: /plots/besseli_c.png
**Examples**
Some values of `I_n(x)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> besseli(0,0)
1.0
>>> besseli(1,0)
0.0
>>> besseli(0,1)
1.266065877752008335598245
>>> besseli(3.5, 2+3j)
(-0.2904369752642538144289025 - 0.4469098397654815837307006j)
Arguments may be large::
>>> besseli(2, 1000)
2.480717210191852440616782e+432
>>> besseli(2, 10**10)
4.299602851624027900335391e+4342944813
>>> besseli(2, 6000+10000j)
(-2.114650753239580827144204e+2603 + 4.385040221241629041351886e+2602j)
For integers `n`, the following integral representation holds::
>>> mp.dps = 15
>>> n = 3
>>> x = 2.3
>>> quad(lambda t: exp(x*cos(t))*cos(n*t), [0,pi])/pi
0.349223221159309
>>> besseli(n,x)
0.349223221159309
Derivatives and antiderivatives of any order can be computed::
>>> mp.dps = 25
>>> besseli(2, 7.5, 1)
195.8229038931399062565883
>>> diff(lambda x: besseli(2,x), 7.5)
195.8229038931399062565883
>>> besseli(2, 7.5, 10)
153.3296508971734525525176
>>> diff(lambda x: besseli(2,x), 7.5, 10)
153.3296508971734525525176
>>> besseli(2,7.5,-1) - besseli(2,3.5,-1)
202.5043900051930141956876
>>> quad(lambda x: besseli(2,x), [3.5, 7.5])
202.5043900051930141956876
"""
bessely = r"""
``bessely(n, x, derivative=0)`` gives the Bessel function of the second kind,
.. math ::
Y_n(x) = \frac{J_n(x) \cos(\pi n) - J_{-n}(x)}{\sin(\pi n)}.
For `n` an integer, this formula should be understood as a
limit. With *derivative* = `m \ne 0`, the `m`-th derivative
.. math ::
\frac{d^m}{dx^m} Y_n(x)
is computed.
**Plots**
.. literalinclude :: /plots/bessely.py
.. image :: /plots/bessely.png
.. literalinclude :: /plots/bessely_c.py
.. image :: /plots/bessely_c.png
**Examples**
Some values of `Y_n(x)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> bessely(0,0), bessely(1,0), bessely(2,0)
(-inf, -inf, -inf)
>>> bessely(1, pi)
0.3588729167767189594679827
>>> bessely(0.5, 3+4j)
(9.242861436961450520325216 - 3.085042824915332562522402j)
Arguments may be large::
>>> bessely(0, 10000)
0.00364780555898660588668872
>>> bessely(2.5, 10**50)
-4.8952500412050989295774e-26
>>> bessely(2.5, -10**50)
(0.0 + 4.8952500412050989295774e-26j)
Derivatives and antiderivatives of any order can be computed::
>>> bessely(2, 3.5, 1)
0.3842618820422660066089231
>>> diff(lambda x: bessely(2, x), 3.5)
0.3842618820422660066089231
>>> bessely(0.5, 3.5, 1)
-0.2066598304156764337900417
>>> diff(lambda x: bessely(0.5, x), 3.5)
-0.2066598304156764337900417
>>> diff(lambda x: bessely(2, x), 0.5, 10)
-208173867409.5547350101511
>>> bessely(2, 0.5, 10)
-208173867409.5547350101511
>>> bessely(2, 100.5, 100)
0.02668487547301372334849043
>>> quad(lambda x: bessely(2,x), [1,3])
-1.377046859093181969213262
>>> bessely(2,3,-1) - bessely(2,1,-1)
-1.377046859093181969213262
"""
besselk = r"""
``besselk(n, x)`` gives the modified Bessel function of the
second kind,
.. math ::
K_n(x) = \frac{\pi}{2} \frac{I_{-n}(x)-I_{n}(x)}{\sin(\pi n)}
For `n` an integer, this formula should be understood as a
limit.
**Plots**
.. literalinclude :: /plots/besselk.py
.. image :: /plots/besselk.png
.. literalinclude :: /plots/besselk_c.py
.. image :: /plots/besselk_c.png
**Examples**
Evaluation is supported for arbitrary complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> besselk(0,1)
0.4210244382407083333356274
>>> besselk(0, -1)
(0.4210244382407083333356274 - 3.97746326050642263725661j)
>>> besselk(3.5, 2+3j)
(-0.02090732889633760668464128 + 0.2464022641351420167819697j)
>>> besselk(2+3j, 0.5)
(0.9615816021726349402626083 + 0.1918250181801757416908224j)
Arguments may be large::
>>> besselk(0, 100)
4.656628229175902018939005e-45
>>> besselk(1, 10**6)
4.131967049321725588398296e-434298
>>> besselk(1, 10**6*j)
(0.001140348428252385844876706 - 0.0005200017201681152909000961j)
>>> besselk(4.5, fmul(10**50, j, exact=True))
(1.561034538142413947789221e-26 + 1.243554598118700063281496e-25j)
The point `x = 0` is a singularity (logarithmic if `n = 0`)::
>>> besselk(0,0)
+inf
>>> besselk(1,0)
+inf
>>> for n in range(-4, 5):
... print(besselk(n, '1e-1000'))
...
4.8e+4001
8.0e+3000
2.0e+2000
1.0e+1000
2302.701024509704096466802
1.0e+1000
2.0e+2000
8.0e+3000
4.8e+4001
"""
hankel1 = r"""
``hankel1(n,x)`` computes the Hankel function of the first kind,
which is the complex combination of Bessel functions given by
.. math ::
H_n^{(1)}(x) = J_n(x) + i Y_n(x).
**Plots**
.. literalinclude :: /plots/hankel1.py
.. image :: /plots/hankel1.png
.. literalinclude :: /plots/hankel1_c.py
.. image :: /plots/hankel1_c.png
**Examples**
The Hankel function is generally complex-valued::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hankel1(2, pi)
(0.4854339326315091097054957 - 0.0999007139290278787734903j)
>>> hankel1(3.5, pi)
(0.2340002029630507922628888 - 0.6419643823412927142424049j)
"""
hankel2 = r"""
``hankel2(n,x)`` computes the Hankel function of the second kind,
which is the complex combination of Bessel functions given by
.. math ::
H_n^{(2)}(x) = J_n(x) - i Y_n(x).
**Plots**
.. literalinclude :: /plots/hankel2.py
.. image :: /plots/hankel2.png
.. literalinclude :: /plots/hankel2_c.py
.. image :: /plots/hankel2_c.png
**Examples**
The Hankel function is generally complex-valued::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hankel2(2, pi)
(0.4854339326315091097054957 + 0.0999007139290278787734903j)
>>> hankel2(3.5, pi)
(0.2340002029630507922628888 + 0.6419643823412927142424049j)
"""
lambertw = r"""
The Lambert W function `W(z)` is defined as the inverse function
of `w \exp(w)`. In other words, the value of `W(z)` is such that
`z = W(z) \exp(W(z))` for any complex number `z`.
The Lambert W function is a multivalued function with infinitely
many branches `W_k(z)`, indexed by `k \in \mathbb{Z}`. Each branch
gives a different solution `w` of the equation `z = w \exp(w)`.
All branches are supported by :func:`~mpmath.lambertw`:
* ``lambertw(z)`` gives the principal solution (branch 0)
* ``lambertw(z, k)`` gives the solution on branch `k`
The Lambert W function has two partially real branches: the
principal branch (`k = 0`) is real for real `z > -1/e`, and the
`k = -1` branch is real for `-1/e < z < 0`. All branches except
`k = 0` have a logarithmic singularity at `z = 0`.
The definition, implementation and choice of branches
is based on [Corless]_.
**Plots**
.. literalinclude :: /plots/lambertw.py
.. image :: /plots/lambertw.png
.. literalinclude :: /plots/lambertw_c.py
.. image :: /plots/lambertw_c.png
**Basic examples**
The Lambert W function is the inverse of `w \exp(w)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> w = lambertw(1)
>>> w
0.5671432904097838729999687
>>> w*exp(w)
1.0
Any branch gives a valid inverse::
>>> w = lambertw(1, k=3)
>>> w
(-2.853581755409037807206819 + 17.11353553941214591260783j)
>>> w = lambertw(1, k=25)
>>> w
(-5.047020464221569709378686 + 155.4763860949415867162066j)
>>> chop(w*exp(w))
1.0
**Applications to equation-solving**
The Lambert W function may be used to solve various kinds of
equations, such as finding the value of the infinite power
tower `z^{z^{z^{\ldots}}}`::
>>> def tower(z, n):
... if n == 0:
... return z
... return z ** tower(z, n-1)
...
>>> tower(mpf(0.5), 100)
0.6411857445049859844862005
>>> -lambertw(-log(0.5))/log(0.5)
0.6411857445049859844862005
**Properties**
The Lambert W function grows roughly like the natural logarithm
for large arguments::
>>> lambertw(1000); log(1000)
5.249602852401596227126056
6.907755278982137052053974
>>> lambertw(10**100); log(10**100)
224.8431064451185015393731
230.2585092994045684017991
The principal branch of the Lambert W function has a rational
Taylor series expansion around `z = 0`::
>>> nprint(taylor(lambertw, 0, 6), 10)
[0.0, 1.0, -1.0, 1.5, -2.666666667, 5.208333333, -10.8]
Some special values and limits are::
>>> lambertw(0)
0.0
>>> lambertw(1)
0.5671432904097838729999687
>>> lambertw(e)
1.0
>>> lambertw(inf)
+inf
>>> lambertw(0, k=-1)
-inf
>>> lambertw(0, k=3)
-inf
>>> lambertw(inf, k=2)
(+inf + 12.56637061435917295385057j)
>>> lambertw(inf, k=3)
(+inf + 18.84955592153875943077586j)
>>> lambertw(-inf, k=3)
(+inf + 21.9911485751285526692385j)
The `k = 0` and `k = -1` branches join at `z = -1/e` where
`W(z) = -1` for both branches. Since `-1/e` can only be represented
approximately with binary floating-point numbers, evaluating the
Lambert W function at this point only gives `-1` approximately::
>>> lambertw(-1/e, 0)
-0.9999999999998371330228251
>>> lambertw(-1/e, -1)
-1.000000000000162866977175
If `-1/e` happens to round in the negative direction, there might be
a small imaginary part::
>>> mp.dps = 15
>>> lambertw(-1/e)
(-1.0 + 8.22007971483662e-9j)
>>> lambertw(-1/e+eps)
-0.999999966242188
**References**
1. [Corless]_
"""
barnesg = r"""
Evaluates the Barnes G-function, which generalizes the
superfactorial (:func:`~mpmath.superfac`) and by extension also the
hyperfactorial (:func:`~mpmath.hyperfac`) to the complex numbers
in an analogous way to how the gamma function generalizes
the ordinary factorial.
The Barnes G-function may be defined in terms of a Weierstrass
product:
.. math ::
G(z+1) = (2\pi)^{z/2} e^{-[z(z+1)+\gamma z^2]/2}
\prod_{n=1}^\infty
\left[\left(1+\frac{z}{n}\right)^ne^{-z+z^2/(2n)}\right]
For positive integers `n`, we have the relation to superfactorials
`G(n) = \mathrm{sf}(n-2) = 0! \cdot 1! \cdots (n-2)!`.
**Examples**
Some elementary values and limits of the Barnes G-function::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> barnesg(1), barnesg(2), barnesg(3)
(1.0, 1.0, 1.0)
>>> barnesg(4)
2.0
>>> barnesg(5)
12.0
>>> barnesg(6)
288.0
>>> barnesg(7)
34560.0
>>> barnesg(8)
24883200.0
>>> barnesg(inf)
+inf
>>> barnesg(0), barnesg(-1), barnesg(-2)
(0.0, 0.0, 0.0)
Closed-form values are known for some rational arguments::
>>> barnesg('1/2')
0.603244281209446
>>> sqrt(exp(0.25+log(2)/12)/sqrt(pi)/glaisher**3)
0.603244281209446
>>> barnesg('1/4')
0.29375596533861
>>> nthroot(exp('3/8')/exp(catalan/pi)/
... gamma(0.25)**3/sqrt(glaisher)**9, 4)
0.29375596533861
The Barnes G-function satisfies the functional equation
`G(z+1) = \Gamma(z) G(z)`::
>>> z = pi
>>> barnesg(z+1)
2.39292119327948
>>> gamma(z)*barnesg(z)
2.39292119327948
The asymptotic growth rate of the Barnes G-function is related to
the Glaisher-Kinkelin constant::
>>> limit(lambda n: barnesg(n+1)/(n**(n**2/2-mpf(1)/12)*
... (2*pi)**(n/2)*exp(-3*n**2/4)), inf)
0.847536694177301
>>> exp('1/12')/glaisher
0.847536694177301
The Barnes G-function can be differentiated in closed form::
>>> z = 3
>>> diff(barnesg, z)
0.264507203401607
>>> barnesg(z)*((z-1)*psi(0,z)-z+(log(2*pi)+1)/2)
0.264507203401607
Evaluation is supported for arbitrary arguments and at arbitrary
precision::
>>> barnesg(6.5)
2548.7457695685
>>> barnesg(-pi)
0.00535976768353037
>>> barnesg(3+4j)
(-0.000676375932234244 - 4.42236140124728e-5j)
>>> mp.dps = 50
>>> barnesg(1/sqrt(2))
0.81305501090451340843586085064413533788206204124732
>>> q = barnesg(10j)
>>> q.real
0.000000000021852360840356557241543036724799812371995850552234
>>> q.imag
-0.00000000000070035335320062304849020654215545839053210041457588
>>> mp.dps = 15
>>> barnesg(100)
3.10361006263698e+6626
>>> barnesg(-101)
0.0
>>> barnesg(-10.5)
5.94463017605008e+25
>>> barnesg(-10000.5)
-6.14322868174828e+167480422
>>> barnesg(1000j)
(5.21133054865546e-1173597 + 4.27461836811016e-1173597j)
>>> barnesg(-1000+1000j)
(2.43114569750291e+1026623 + 2.24851410674842e+1026623j)
**References**
1. Whittaker & Watson, *A Course of Modern Analysis*,
Cambridge University Press, 4th edition (1927), p.264
2. http://en.wikipedia.org/wiki/Barnes_G-function
3. http://mathworld.wolfram.com/BarnesG-Function.html
"""
superfac = r"""
Computes the superfactorial, defined as the product of
consecutive factorials
.. math ::
\mathrm{sf}(n) = \prod_{k=1}^n k!
For general complex `z`, `\mathrm{sf}(z)` is defined
in terms of the Barnes G-function (see :func:`~mpmath.barnesg`).
**Examples**
The first few superfactorials are (OEIS A000178)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(10):
... print("%s %s" % (n, superfac(n)))
...
0 1.0
1 1.0
2 2.0
3 12.0
4 288.0
5 34560.0
6 24883200.0
7 125411328000.0
8 5.05658474496e+15
9 1.83493347225108e+21
Superfactorials grow very rapidly::
>>> superfac(1000)
3.24570818422368e+1177245
>>> superfac(10**10)
2.61398543581249e+467427913956904067453
Evaluation is supported for arbitrary arguments::
>>> mp.dps = 25
>>> superfac(pi)
17.20051550121297985285333
>>> superfac(2+3j)
(-0.005915485633199789627466468 + 0.008156449464604044948738263j)
>>> diff(superfac, 1)
0.2645072034016070205673056
**References**
1. http://oeis.org/A000178
"""
hyperfac = r"""
Computes the hyperfactorial, defined for integers as the product
.. math ::
H(n) = \prod_{k=1}^n k^k.
The hyperfactorial satisfies the recurrence formula `H(z) = z^z H(z-1)`.
It can be defined more generally in terms of the Barnes G-function (see
:func:`~mpmath.barnesg`) and the gamma function by the formula
.. math ::
H(z) = \frac{\Gamma(z+1)^z}{G(z)}.
The extension to complex numbers can also be done via
the integral representation
.. math ::
H(z) = (2\pi)^{-z/2} \exp \left[
{z+1 \choose 2} + \int_0^z \log(t!)\,dt
\right].
**Examples**
The rapidly-growing sequence of hyperfactorials begins
(OEIS A002109)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(10):
... print("%s %s" % (n, hyperfac(n)))
...
0 1.0
1 1.0
2 4.0
3 108.0
4 27648.0
5 86400000.0
6 4031078400000.0
7 3.3197663987712e+18
8 5.56964379417266e+25
9 2.15779412229419e+34
Some even larger hyperfactorials are::
>>> hyperfac(1000)
5.46458120882585e+1392926
>>> hyperfac(10**10)
4.60408207642219e+489142638002418704309
The hyperfactorial can be evaluated for arbitrary arguments::
>>> hyperfac(0.5)
0.880449235173423
>>> diff(hyperfac, 1)
0.581061466795327
>>> hyperfac(pi)
205.211134637462
>>> hyperfac(-10+1j)
(3.01144471378225e+46 - 2.45285242480185e+46j)
The recurrence property of the hyperfactorial holds
generally::
>>> z = 3-4*j
>>> hyperfac(z)
(-4.49795891462086e-7 - 6.33262283196162e-7j)
>>> z**z * hyperfac(z-1)
(-4.49795891462086e-7 - 6.33262283196162e-7j)
>>> z = mpf(-0.6)
>>> chop(z**z * hyperfac(z-1))
1.28170142849352
>>> hyperfac(z)
1.28170142849352
The hyperfactorial may also be computed using the integral
definition::
>>> z = 2.5
>>> hyperfac(z)
15.9842119922237
>>> (2*pi)**(-z/2)*exp(binomial(z+1,2) +
... quad(lambda t: loggamma(t+1), [0, z]))
15.9842119922237
:func:`~mpmath.hyperfac` supports arbitrary-precision evaluation::
>>> mp.dps = 50
>>> hyperfac(10)
215779412229418562091680268288000000000000000.0
>>> hyperfac(1/sqrt(2))
0.89404818005227001975423476035729076375705084390942
**References**
1. http://oeis.org/A002109
2. http://mathworld.wolfram.com/Hyperfactorial.html
"""
rgamma = r"""
Computes the reciprocal of the gamma function, `1/\Gamma(z)`. This
function evaluates to zero at the poles
of the gamma function, `z = 0, -1, -2, \ldots`.
**Examples**
Basic examples::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> rgamma(1)
1.0
>>> rgamma(4)
0.1666666666666666666666667
>>> rgamma(0); rgamma(-1)
0.0
0.0
>>> rgamma(1000)
2.485168143266784862783596e-2565
>>> rgamma(inf)
0.0
A definite integral that can be evaluated in terms of elementary
integrals::
>>> quad(rgamma, [0,inf])
2.807770242028519365221501
>>> e + quad(lambda t: exp(-t)/(pi**2+log(t)**2), [0,inf])
2.807770242028519365221501
"""
loggamma = r"""
Computes the principal branch of the log-gamma function,
`\ln \Gamma(z)`. Unlike `\ln(\Gamma(z))`, which has infinitely many
complex branch cuts, the principal log-gamma function only has a single
branch cut along the negative half-axis. The principal branch
continuously matches the asymptotic Stirling expansion
.. math ::
\ln \Gamma(z) \sim \frac{\ln(2 \pi)}{2} +
\left(z-\frac{1}{2}\right) \ln(z) - z + O(z^{-1}).
The real parts of both functions agree, but their imaginary
parts generally differ by `2 n \pi` for some `n \in \mathbb{Z}`.
They coincide for `z \in \mathbb{R}, z > 0`.
Computationally, it is advantageous to use :func:`~mpmath.loggamma`
instead of :func:`~mpmath.gamma` for extremely large arguments.
**Examples**
Comparing with `\ln(\Gamma(z))`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> loggamma('13.2'); log(gamma('13.2'))
20.49400419456603678498394
20.49400419456603678498394
>>> loggamma(3+4j)
(-1.756626784603784110530604 + 4.742664438034657928194889j)
>>> log(gamma(3+4j))
(-1.756626784603784110530604 - 1.540520869144928548730397j)
>>> log(gamma(3+4j)) + 2*pi*j
(-1.756626784603784110530604 + 4.742664438034657928194889j)
Note the imaginary parts for negative arguments::
>>> loggamma(-0.5); loggamma(-1.5); loggamma(-2.5)
(1.265512123484645396488946 - 3.141592653589793238462643j)
(0.8600470153764810145109327 - 6.283185307179586476925287j)
(-0.05624371649767405067259453 - 9.42477796076937971538793j)
Some special values::
>>> loggamma(1); loggamma(2)
0.0
0.0
>>> loggamma(3); +ln2
0.6931471805599453094172321
0.6931471805599453094172321
>>> loggamma(3.5); log(15*sqrt(pi)/8)
1.200973602347074224816022
1.200973602347074224816022
>>> loggamma(inf)
+inf
Huge arguments are permitted::
>>> loggamma('1e30')
6.807755278982137052053974e+31
>>> loggamma('1e300')
6.897755278982137052053974e+302
>>> loggamma('1e3000')
6.906755278982137052053974e+3003
>>> loggamma('1e100000000000000000000')
2.302585092994045684007991e+100000000000000000020
>>> loggamma('1e30j')
(-1.570796326794896619231322e+30 + 6.807755278982137052053974e+31j)
>>> loggamma('1e300j')
(-1.570796326794896619231322e+300 + 6.897755278982137052053974e+302j)
>>> loggamma('1e3000j')
(-1.570796326794896619231322e+3000 + 6.906755278982137052053974e+3003j)
The log-gamma function can be integrated analytically
on any interval of unit length::
>>> z = 0
>>> quad(loggamma, [z,z+1]); log(2*pi)/2
0.9189385332046727417803297
0.9189385332046727417803297
>>> z = 3+4j
>>> quad(loggamma, [z,z+1]); (log(z)-1)*z + log(2*pi)/2
(-0.9619286014994750641314421 + 5.219637303741238195688575j)
(-0.9619286014994750641314421 + 5.219637303741238195688575j)
The derivatives of the log-gamma function are given by the
polygamma function (:func:`~mpmath.psi`)::
>>> diff(loggamma, -4+3j); psi(0, -4+3j)
(1.688493531222971393607153 + 2.554898911356806978892748j)
(1.688493531222971393607153 + 2.554898911356806978892748j)
>>> diff(loggamma, -4+3j, 2); psi(1, -4+3j)
(-0.1539414829219882371561038 - 0.1020485197430267719746479j)
(-0.1539414829219882371561038 - 0.1020485197430267719746479j)
The log-gamma function satisfies an additive form of the
recurrence relation for the ordinary gamma function::
>>> z = 2+3j
>>> loggamma(z); loggamma(z+1) - log(z)
(-2.092851753092733349564189 + 2.302396543466867626153708j)
(-2.092851753092733349564189 + 2.302396543466867626153708j)
"""
siegeltheta = r"""
Computes the Riemann-Siegel theta function,
.. math ::
\theta(t) = \frac{
\log\Gamma\left(\frac{1+2it}{4}\right) -
\log\Gamma\left(\frac{1-2it}{4}\right)
}{2i} - \frac{\log \pi}{2} t.
The Riemann-Siegel theta function is important in
providing the phase factor for the Z-function
(see :func:`~mpmath.siegelz`). Evaluation is supported for real and
complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> siegeltheta(0)
0.0
>>> siegeltheta(inf)
+inf
>>> siegeltheta(-inf)
-inf
>>> siegeltheta(1)
-1.767547952812290388302216
>>> siegeltheta(10+0.25j)
(-3.068638039426838572528867 + 0.05804937947429712998395177j)
Arbitrary derivatives may be computed with the optional ``derivative = k`` keyword argument::
>>> siegeltheta(1234, derivative=2)
0.0004051864079114053109473741
>>> diff(siegeltheta, 1234, n=2)
0.0004051864079114053109473741
The Riemann-Siegel theta function has odd symmetry around `t = 0`,
two local extreme points and three real roots including 0 (located
symmetrically)::
>>> nprint(chop(taylor(siegeltheta, 0, 5)))
[0.0, -2.68609, 0.0, 2.69433, 0.0, -6.40218]
>>> findroot(diffun(siegeltheta), 7)
6.28983598883690277966509
>>> findroot(siegeltheta, 20)
17.84559954041086081682634
For large `t`, there is a famous asymptotic formula
for `\theta(t)`, to first order given by::
>>> t = mpf(10**6)
>>> siegeltheta(t)
5488816.353078403444882823
>>> -t*log(2*pi/t)/2-t/2
5488816.745777464310273645
"""
grampoint = r"""
Gives the `n`-th Gram point `g_n`, defined as the solution
to the equation `\theta(g_n) = \pi n` where `\theta(t)`
is the Riemann-Siegel theta function (:func:`~mpmath.siegeltheta`).
The first few Gram points are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> grampoint(0)
17.84559954041086081682634
>>> grampoint(1)
23.17028270124630927899664
>>> grampoint(2)
27.67018221781633796093849
>>> grampoint(3)
31.71797995476405317955149
Checking the definition::
>>> siegeltheta(grampoint(3))
9.42477796076937971538793
>>> 3*pi
9.42477796076937971538793
A large Gram point::
>>> grampoint(10**10)
3293531632.728335454561153
Gram points are useful when studying the Z-function
(:func:`~mpmath.siegelz`). See the documentation of that function
for additional examples.
:func:`~mpmath.grampoint` can solve the defining equation for
nonintegral `n`. There is a fixed point where `g(x) = x`::
>>> findroot(lambda x: grampoint(x) - x, 10000)
9146.698193171459265866198
**References**
1. http://mathworld.wolfram.com/GramPoint.html
"""
siegelz = r"""
Computes the Z-function, also known as the Riemann-Siegel Z function,
.. math ::
Z(t) = e^{i \theta(t)} \zeta(1/2+it)
where `\zeta(s)` is the Riemann zeta function (:func:`~mpmath.zeta`)
and where `\theta(t)` denotes the Riemann-Siegel theta function
(see :func:`~mpmath.siegeltheta`).
Evaluation is supported for real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> siegelz(1)
-0.7363054628673177346778998
>>> siegelz(3+4j)
(-0.1852895764366314976003936 - 0.2773099198055652246992479j)
The first four derivatives are supported, using the
optional *derivative* keyword argument::
>>> siegelz(1234567, derivative=3)
56.89689348495089294249178
>>> diff(siegelz, 1234567, n=3)
56.89689348495089294249178
The Z-function has a Maclaurin expansion::
>>> nprint(chop(taylor(siegelz, 0, 4)))
[-1.46035, 0.0, 2.73588, 0.0, -8.39357]
The Z-function `Z(t)` is equal to `\pm |\zeta(s)|` on the
critical line `s = 1/2+it` (i.e. for real arguments `t`
to `Z`). Its zeros coincide with those of the Riemann zeta
function::
>>> findroot(siegelz, 14)
14.13472514173469379045725
>>> findroot(siegelz, 20)
21.02203963877155499262848
>>> findroot(zeta, 0.5+14j)
(0.5 + 14.13472514173469379045725j)
>>> findroot(zeta, 0.5+20j)
(0.5 + 21.02203963877155499262848j)
Since the Z-function is real-valued on the critical line
(and, unlike `|\zeta(s)|`, analytic), it is useful for
investigating the zeros of the Riemann zeta function.
For example, one can use a root-finding algorithm based
on sign changes::
>>> findroot(siegelz, [100, 200], solver='bisect')
176.4414342977104188888926
To locate roots, Gram points `g_n` which can be computed
by :func:`~mpmath.grampoint` are useful. If `(-1)^n Z(g_n)` is
positive for two consecutive `n`, then `Z(t)` must have
a zero between those points::
>>> g10 = grampoint(10)
>>> g11 = grampoint(11)
>>> (-1)**10 * siegelz(g10) > 0
True
>>> (-1)**11 * siegelz(g11) > 0
True
>>> findroot(siegelz, [g10, g11], solver='bisect')
56.44624769706339480436776
>>> g10, g11
(54.67523744685325626632663, 57.54516517954725443703014)
"""
riemannr = r"""
Evaluates the Riemann R function, a smooth approximation of the
prime counting function `\pi(x)` (see :func:`~mpmath.primepi`). The Riemann
R function gives a fast numerical approximation useful e.g. to
roughly estimate the number of primes in a given interval.
The Riemann R function is computed using the rapidly convergent Gram
series,
.. math ::
R(x) = 1 + \sum_{k=1}^{\infty}
\frac{\log^k x}{k k! \zeta(k+1)}.
From the Gram series, one sees that the Riemann R function is a
well-defined analytic function (except for a branch cut along
the negative real half-axis); it can be evaluated for arbitrary
real or complex arguments.
The Riemann R function gives a very accurate approximation
of the prime counting function. For example, it is wrong by at
most 2 for `x < 1000`, and for `x = 10^9` differs from the exact
value of `\pi(x)` by 79, or less than two parts in a million.
It is about 10 times more accurate than the logarithmic integral
estimate (see :func:`~mpmath.li`), which however is even faster to evaluate.
It is orders of magnitude more accurate than the extremely
fast `x/\log x` estimate.
**Examples**
For small arguments, the Riemann R function almost exactly
gives the prime counting function if rounded to the nearest
integer::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> primepi(50), riemannr(50)
(15, 14.9757023241462)
>>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(100))
1
>>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(300))
2
The Riemann R function can be evaluated for arguments far too large
for exact determination of `\pi(x)` to be computationally
feasible with any presently known algorithm::
>>> riemannr(10**30)
1.46923988977204e+28
>>> riemannr(10**100)
4.3619719871407e+97
>>> riemannr(10**1000)
4.3448325764012e+996
A comparison of the Riemann R function and logarithmic integral estimates
for `\pi(x)` using exact values of `\pi(10^n)` up to `n = 9`.
The fractional error is shown in parentheses::
>>> exact = [4,25,168,1229,9592,78498,664579,5761455,50847534]
>>> for n, p in enumerate(exact):
... n += 1
... r, l = riemannr(10**n), li(10**n)
... rerr, lerr = nstr((r-p)/p,3), nstr((l-p)/p,3)
... print("%i %i %s(%s) %s(%s)" % (n, p, r, rerr, l, lerr))
...
1 4 4.56458314100509(0.141) 6.1655995047873(0.541)
2 25 25.6616332669242(0.0265) 30.1261415840796(0.205)
3 168 168.359446281167(0.00214) 177.609657990152(0.0572)
4 1229 1226.93121834343(-0.00168) 1246.13721589939(0.0139)
5 9592 9587.43173884197(-0.000476) 9629.8090010508(0.00394)
6 78498 78527.3994291277(0.000375) 78627.5491594622(0.00165)
7 664579 664667.447564748(0.000133) 664918.405048569(0.000511)
8 5761455 5761551.86732017(1.68e-5) 5762209.37544803(0.000131)
9 50847534 50847455.4277214(-1.55e-6) 50849234.9570018(3.35e-5)
The derivative of the Riemann R function gives the approximate
probability for a number of magnitude `x` to be prime::
>>> diff(riemannr, 1000)
0.141903028110784
>>> mpf(primepi(1050) - primepi(950)) / 100
0.15
Evaluation is supported for arbitrary arguments and at arbitrary
precision::
>>> mp.dps = 30
>>> riemannr(7.5)
3.72934743264966261918857135136
>>> riemannr(-4+2j)
(-0.551002208155486427591793957644 + 2.16966398138119450043195899746j)
"""
primepi = r"""
Evaluates the prime counting function, `\pi(x)`, which gives
the number of primes less than or equal to `x`. The argument
`x` may be fractional.
The prime counting function is very expensive to evaluate
precisely for large `x`, and the present implementation is
not optimized in any way. For numerical approximation of the
prime counting function, it is better to use :func:`~mpmath.primepi2`
or :func:`~mpmath.riemannr`.
Some values of the prime counting function::
>>> from mpmath import *
>>> [primepi(k) for k in range(20)]
[0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8]
>>> primepi(3.5)
2
>>> primepi(100000)
9592
"""
primepi2 = r"""
Returns an interval (as an ``mpi`` instance) providing bounds
for the value of the prime counting function `\pi(x)`. For small
`x`, :func:`~mpmath.primepi2` returns an exact interval based on
the output of :func:`~mpmath.primepi`. For `x > 2656`, a loose interval
based on Schoenfeld's inequality
.. math ::
|\pi(x) - \mathrm{li}(x)| < \frac{\sqrt x \log x}{8 \pi}
is returned. This estimate is rigorous assuming the truth of
the Riemann hypothesis, and can be computed very quickly.
**Examples**
Exact values of the prime counting function for small `x`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> iv.dps = 15; iv.pretty = True
>>> primepi2(10)
[4.0, 4.0]
>>> primepi2(100)
[25.0, 25.0]
>>> primepi2(1000)
[168.0, 168.0]
Loose intervals are generated for moderately large `x`::
>>> primepi2(10000), primepi(10000)
([1209.0, 1283.0], 1229)
>>> primepi2(50000), primepi(50000)
([5070.0, 5263.0], 5133)
As `x` increases, the absolute error gets worse while the relative
error improves. The exact value of `\pi(10^{23})` is
1925320391606803968923, and :func:`~mpmath.primepi2` gives 9 significant
digits::
>>> p = primepi2(10**23)
>>> p
[1.9253203909477020467e+21, 1.925320392280406229e+21]
>>> mpf(p.delta) / mpf(p.a)
6.9219865355293e-10
A more precise, nonrigorous estimate for `\pi(x)` can be
obtained using the Riemann R function (:func:`~mpmath.riemannr`).
For large enough `x`, the value returned by :func:`~mpmath.primepi2`
essentially amounts to a small perturbation of the value returned by
:func:`~mpmath.riemannr`::
>>> primepi2(10**100)
[4.3619719871407024816e+97, 4.3619719871407032404e+97]
>>> riemannr(10**100)
4.3619719871407e+97
"""
primezeta = r"""
Computes the prime zeta function, which is defined
in analogy with the Riemann zeta function (:func:`~mpmath.zeta`)
as
.. math ::
P(s) = \sum_p \frac{1}{p^s}
where the sum is taken over all prime numbers `p`. Although
this sum only converges for `\mathrm{Re}(s) > 1`, the
function is defined by analytic continuation in the
half-plane `\mathrm{Re}(s) > 0`.
**Examples**
Arbitrary-precision evaluation for real and complex arguments is
supported::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> primezeta(2)
0.452247420041065498506543364832
>>> primezeta(pi)
0.15483752698840284272036497397
>>> mp.dps = 50
>>> primezeta(3)
0.17476263929944353642311331466570670097541212192615
>>> mp.dps = 20
>>> primezeta(3+4j)
(-0.12085382601645763295 - 0.013370403397787023602j)
The prime zeta function has a logarithmic pole at `s = 1`,
with residue equal to the difference of the Mertens and
Euler constants::
>>> primezeta(1)
+inf
>>> extradps(25)(lambda x: primezeta(1+x)+log(x))(+eps)
-0.31571845205389007685
>>> mertens-euler
-0.31571845205389007685
The analytic continuation to `0 < \mathrm{Re}(s) \le 1`
is implemented. In this strip the function exhibits
very complex behavior; on the unit interval, it has poles at
`1/n` for every squarefree integer `n`::
>>> primezeta(0.5) # Pole at s = 1/2
(-inf + 3.1415926535897932385j)
>>> primezeta(0.25)
(-1.0416106801757269036 + 0.52359877559829887308j)
>>> primezeta(0.5+10j)
(0.54892423556409790529 + 0.45626803423487934264j)
Although evaluation works in principle for any `\mathrm{Re}(s) > 0`,
it should be noted that the evaluation time increases exponentially
as `s` approaches the imaginary axis.
For large `\mathrm{Re}(s)`, `P(s)` is asymptotic to `2^{-s}`::
>>> primezeta(inf)
0.0
>>> primezeta(10), mpf(2)**-10
(0.00099360357443698021786, 0.0009765625)
>>> primezeta(1000)
9.3326361850321887899e-302
>>> primezeta(1000+1000j)
(-3.8565440833654995949e-302 - 8.4985390447553234305e-302j)
**References**
Carl-Erik Froberg, "On the prime zeta function",
BIT 8 (1968), pp. 187-202.
"""
bernpoly = r"""
Evaluates the Bernoulli polynomial `B_n(z)`.
The first few Bernoulli polynomials are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(6):
... nprint(chop(taylor(lambda x: bernpoly(n,x), 0, n)))
...
[1.0]
[-0.5, 1.0]
[0.166667, -1.0, 1.0]
[0.0, 0.5, -1.5, 1.0]
[-0.0333333, 0.0, 1.0, -2.0, 1.0]
[0.0, -0.166667, 0.0, 1.66667, -2.5, 1.0]
At `z = 0`, the Bernoulli polynomial evaluates to a
Bernoulli number (see :func:`~mpmath.bernoulli`)::
>>> bernpoly(12, 0), bernoulli(12)
(-0.253113553113553, -0.253113553113553)
>>> bernpoly(13, 0), bernoulli(13)
(0.0, 0.0)
Evaluation is accurate for large `n` and small `z`::
>>> mp.dps = 25
>>> bernpoly(100, 0.5)
2.838224957069370695926416e+78
>>> bernpoly(1000, 10.5)
5.318704469415522036482914e+1769
"""
polylog = r"""
Computes the polylogarithm, defined by the sum
.. math ::
\mathrm{Li}_s(z) = \sum_{k=1}^{\infty} \frac{z^k}{k^s}.
This series is convergent only for `|z| < 1`, so elsewhere
the analytic continuation is implied.
The polylogarithm should not be confused with the logarithmic
integral (also denoted by Li or li), which is implemented
as :func:`~mpmath.li`.
**Examples**
The polylogarithm satisfies a huge number of functional identities.
A sample of polylogarithm evaluations is shown below::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> polylog(1,0.5), log(2)
(0.693147180559945, 0.693147180559945)
>>> polylog(2,0.5), (pi**2-6*log(2)**2)/12
(0.582240526465012, 0.582240526465012)
>>> polylog(2,-phi), -log(phi)**2-pi**2/10
(-1.21852526068613, -1.21852526068613)
>>> polylog(3,0.5), 7*zeta(3)/8-pi**2*log(2)/12+log(2)**3/6
(0.53721319360804, 0.53721319360804)
:func:`~mpmath.polylog` can evaluate the analytic continuation of the
polylogarithm when `s` is an integer::
>>> polylog(2, 10)
(0.536301287357863 - 7.23378441241546j)
>>> polylog(2, -10)
-4.1982778868581
>>> polylog(2, 10j)
(-3.05968879432873 + 3.71678149306807j)
>>> polylog(-2, 10)
-0.150891632373114
>>> polylog(-2, -10)
0.067618332081142
>>> polylog(-2, 10j)
(0.0384353698579347 + 0.0912451798066779j)
Some more examples, with arguments on the unit circle (note that
the series definition cannot be used for computation here)::
>>> polylog(2,j)
(-0.205616758356028 + 0.915965594177219j)
>>> j*catalan-pi**2/48
(-0.205616758356028 + 0.915965594177219j)
>>> polylog(3,exp(2*pi*j/3))
(-0.534247512515375 + 0.765587078525922j)
>>> -4*zeta(3)/9 + 2*j*pi**3/81
(-0.534247512515375 + 0.765587078525921j)
Polylogarithms of different order are related by integration
and differentiation::
>>> s, z = 3, 0.5
>>> polylog(s+1, z)
0.517479061673899
>>> quad(lambda t: polylog(s,t)/t, [0, z])
0.517479061673899
>>> z*diff(lambda t: polylog(s+2,t), z)
0.517479061673899
Taylor series expansions around `z = 0` are::
>>> for n in range(-3, 4):
... nprint(taylor(lambda x: polylog(n,x), 0, 5))
...
[0.0, 1.0, 8.0, 27.0, 64.0, 125.0]
[0.0, 1.0, 4.0, 9.0, 16.0, 25.0]
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
[0.0, 1.0, 1.0, 1.0, 1.0, 1.0]
[0.0, 1.0, 0.5, 0.333333, 0.25, 0.2]
[0.0, 1.0, 0.25, 0.111111, 0.0625, 0.04]
[0.0, 1.0, 0.125, 0.037037, 0.015625, 0.008]
The series defining the polylogarithm is simultaneously
a Taylor series and an L-series. For certain values of `z`, the
polylogarithm reduces to a pure zeta function::
>>> polylog(pi, 1), zeta(pi)
(1.17624173838258, 1.17624173838258)
>>> polylog(pi, -1), -altzeta(pi)
(-0.909670702980385, -0.909670702980385)
Evaluation for arbitrary, nonintegral `s` is supported
for `z` within the unit circle::
>>> polylog(3+4j, 0.25)
(0.24258605789446 - 0.00222938275488344j)
>>> nsum(lambda k: 0.25**k / k**(3+4j), [1,inf])
(0.24258605789446 - 0.00222938275488344j)
It is also supported outside of the unit circle::
>>> polylog(1+j, 20+40j)
(-7.1421172179728 - 3.92726697721369j)
>>> polylog(1+j, 200+400j)
(-5.41934747194626 - 9.94037752563927j)
**References**
1. Richard Crandall, "Note on fast polylogarithm computation"
http://www.reed.edu/physics/faculty/crandall/papers/Polylog.pdf
2. http://en.wikipedia.org/wiki/Polylogarithm
3. http://mathworld.wolfram.com/Polylogarithm.html
"""
bell = r"""
For `n` a nonnegative integer, ``bell(n,x)`` evaluates the Bell
polynomial `B_n(x)`, the first few of which are
.. math ::
B_0(x) = 1
B_1(x) = x
B_2(x) = x^2+x
B_3(x) = x^3+3x^2+x
If `x = 1` or :func:`~mpmath.bell` is called with only one argument, it
gives the `n`-th Bell number `B_n`, which is the number of
partitions of a set with `n` elements. By setting the precision to
at least `\log_{10} B_n` digits, :func:`~mpmath.bell` provides fast
calculation of exact Bell numbers.
In general, :func:`~mpmath.bell` computes
.. math ::
B_n(x) = e^{-x} \left(\mathrm{sinc}(\pi n) + E_n(x)\right)
where `E_n(x)` is the generalized exponential function implemented
by :func:`~mpmath.polyexp`. This is an extension of Dobinski's formula [1],
where the modification is the sinc term ensuring that `B_n(x)` is
continuous in `n`; :func:`~mpmath.bell` can thus be evaluated,
differentiated, etc for arbitrary complex arguments.
**Examples**
Simple evaluations::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> bell(0, 2.5)
1.0
>>> bell(1, 2.5)
2.5
>>> bell(2, 2.5)
8.75
Evaluation for arbitrary complex arguments::
>>> bell(5.75+1j, 2-3j)
(-10767.71345136587098445143 - 15449.55065599872579097221j)
The first few Bell polynomials::
>>> for k in range(7):
... nprint(taylor(lambda x: bell(k,x), 0, k))
...
[1.0]
[0.0, 1.0]
[0.0, 1.0, 1.0]
[0.0, 1.0, 3.0, 1.0]
[0.0, 1.0, 7.0, 6.0, 1.0]
[0.0, 1.0, 15.0, 25.0, 10.0, 1.0]
[0.0, 1.0, 31.0, 90.0, 65.0, 15.0, 1.0]
The first few Bell numbers and complementary Bell numbers::
>>> [int(bell(k)) for k in range(10)]
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147]
>>> [int(bell(k,-1)) for k in range(10)]
[1, -1, 0, 1, 1, -2, -9, -9, 50, 267]
Large Bell numbers::
>>> mp.dps = 50
>>> bell(50)
185724268771078270438257767181908917499221852770.0
>>> bell(50,-1)
-29113173035759403920216141265491160286912.0
Some even larger values::
>>> mp.dps = 25
>>> bell(1000,-1)
-1.237132026969293954162816e+1869
>>> bell(1000)
2.989901335682408421480422e+1927
>>> bell(1000,2)
6.591553486811969380442171e+1987
>>> bell(1000,100.5)
9.101014101401543575679639e+2529
A determinant identity satisfied by Bell numbers::
>>> mp.dps = 15
>>> N = 8
>>> det([[bell(k+j) for j in range(N)] for k in range(N)])
125411328000.0
>>> superfac(N-1)
125411328000.0
**References**
1. http://mathworld.wolfram.com/DobinskisFormula.html
"""
polyexp = r"""
Evaluates the polyexponential function, defined for arbitrary
complex `s`, `z` by the series
.. math ::
E_s(z) = \sum_{k=1}^{\infty} \frac{k^s}{k!} z^k.
`E_s(z)` is constructed from the exponential function analogously
to how the polylogarithm is constructed from the ordinary
logarithm; as a function of `s` (with `z` fixed), `E_s` is an L-series.
It is an entire function of both `s` and `z`.
The polyexponential function provides a generalization of the
Bell polynomials `B_n(x)` (see :func:`~mpmath.bell`) to noninteger orders `n`.
In terms of the Bell polynomials,
.. math ::
E_s(z) = e^z B_s(z) - \mathrm{sinc}(\pi s).
Note that `B_n(x)` and `e^{-x} E_n(x)` are identical if `n`
is a nonzero integer, but not otherwise. In particular, they differ
at `n = 0`.
**Examples**
Evaluating a series::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> nsum(lambda k: sqrt(k)/fac(k), [1,inf])
2.101755547733791780315904
>>> polyexp(0.5,1)
2.101755547733791780315904
Evaluation for arbitrary arguments::
>>> polyexp(-3-4j, 2.5+2j)
(2.351660261190434618268706 + 1.202966666673054671364215j)
Evaluation is accurate for tiny function values::
>>> polyexp(4, -100)
3.499471750566824369520223e-36
If `n` is a nonpositive integer, `E_n` reduces to a special
instance of the hypergeometric function `\,_pF_q`::
>>> n = 3
>>> x = pi
>>> polyexp(-n,x)
4.042192318847986561771779
>>> x*hyper([1]*(n+1), [2]*(n+1), x)
4.042192318847986561771779
"""
cyclotomic = r"""
Evaluates the cyclotomic polynomial `\Phi_n(x)`, defined by
.. math ::
\Phi_n(x) = \prod_{\zeta} (x - \zeta)
where `\zeta` ranges over all primitive `n`-th roots of unity
(see :func:`~mpmath.unitroots`). An equivalent representation, used
for computation, is
.. math ::
\Phi_n(x) = \prod_{d\mid n}(x^d-1)^{\mu(n/d)}
where `\mu(m)` denotes the Moebius function. The cyclotomic
polynomials are integer polynomials, the first of which can be
written explicitly as
.. math ::
\Phi_0(x) = 1
\Phi_1(x) = x - 1
\Phi_2(x) = x + 1
\Phi_3(x) = x^2 + x + 1
\Phi_4(x) = x^2 + 1
\Phi_5(x) = x^4 + x^3 + x^2 + x + 1
\Phi_6(x) = x^2 - x + 1
**Examples**
The coefficients of low-order cyclotomic polynomials can be recovered
using Taylor expansion::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(9):
... p = chop(taylor(lambda x: cyclotomic(n,x), 0, 10))
... print("%s %s" % (n, nstr(p[:10+1-p[::-1].index(1)])))
...
0 [1.0]
1 [-1.0, 1.0]
2 [1.0, 1.0]
3 [1.0, 1.0, 1.0]
4 [1.0, 0.0, 1.0]
5 [1.0, 1.0, 1.0, 1.0, 1.0]
6 [1.0, -1.0, 1.0]
7 [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
8 [1.0, 0.0, 0.0, 0.0, 1.0]
The definition as a product over primitive roots may be checked
by computing the product explicitly (for a real argument, this
method will generally introduce numerical noise in the imaginary
part)::
>>> mp.dps = 25
>>> z = 3+4j
>>> cyclotomic(10, z)
(-419.0 - 360.0j)
>>> fprod(z-r for r in unitroots(10, primitive=True))
(-419.0 - 360.0j)
>>> z = 3
>>> cyclotomic(10, z)
61.0
>>> fprod(z-r for r in unitroots(10, primitive=True))
(61.0 - 3.146045605088568607055454e-25j)
Up to permutation, the roots of a given cyclotomic polynomial
can be checked to agree with the list of primitive roots::
>>> p = taylor(lambda x: cyclotomic(6,x), 0, 6)[:3]
>>> for r in polyroots(p[::-1]):
... print(r)
...
(0.5 - 0.8660254037844386467637232j)
(0.5 + 0.8660254037844386467637232j)
>>>
>>> for r in unitroots(6, primitive=True):
... print(r)
...
(0.5 + 0.8660254037844386467637232j)
(0.5 - 0.8660254037844386467637232j)
"""
meijerg = r"""
Evaluates the Meijer G-function, defined as
.. math ::
G^{m,n}_{p,q} \left( \left. \begin{matrix}
a_1, \dots, a_n ; a_{n+1} \dots a_p \\
b_1, \dots, b_m ; b_{m+1} \dots b_q
\end{matrix}\; \right| \; z ; r \right) =
\frac{1}{2 \pi i} \int_L
\frac{\prod_{j=1}^m \Gamma(b_j+s) \prod_{j=1}^n\Gamma(1-a_j-s)}
{\prod_{j=n+1}^{p}\Gamma(a_j+s) \prod_{j=m+1}^q \Gamma(1-b_j-s)}
z^{-s/r} ds
for an appropriate choice of the contour `L` (see references).
There are `p` elements `a_j`.
The argument *a_s* should be a pair of lists, the first containing the
`n` elements `a_1, \ldots, a_n` and the second containing
the `p-n` elements `a_{n+1}, \ldots a_p`.
There are `q` elements `b_j`.
The argument *b_s* should be a pair of lists, the first containing the
`m` elements `b_1, \ldots, b_m` and the second containing
the `q-m` elements `b_{m+1}, \ldots b_q`.
The implicit tuple `(m, n, p, q)` constitutes the order or degree of the
Meijer G-function, and is determined by the lengths of the coefficient
vectors. Confusingly, the indices in this tuple appear in a different order
from the coefficients, but this notation is standard. The many examples
given below should hopefully clear up any potential confusion.
**Algorithm**
The Meijer G-function is evaluated as a combination of hypergeometric series.
There are two versions of the function, which can be selected with
the optional *series* argument.
*series=1* uses a sum of `m` `\,_pF_{q-1}` functions of `z`
*series=2* uses a sum of `n` `\,_qF_{p-1}` functions of `1/z`
The default series is chosen based on the degree and `|z|` in order
to be consistent with Mathematica's. This definition of the Meijer G-function
has a discontinuity at `|z| = 1` for some orders, which can
be avoided by explicitly specifying a series.
Keyword arguments are forwarded to :func:`~mpmath.hypercomb`.
**Examples**
Many standard functions are special cases of the Meijer G-function
(possibly rescaled and/or with branch cut corrections). We define
some test parameters::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a = mpf(0.75)
>>> b = mpf(1.5)
>>> z = mpf(2.25)
The exponential function:
`e^z = G^{1,0}_{0,1} \left( \left. \begin{matrix} - \\ 0 \end{matrix} \;
\right| \; -z \right)`
>>> meijerg([[],[]], [[0],[]], -z)
9.487735836358525720550369
>>> exp(z)
9.487735836358525720550369
The natural logarithm:
`\log(1+z) = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 0
\end{matrix} \; \right| \; -z \right)`
>>> meijerg([[1,1],[]], [[1],[0]], z)
1.178654996341646117219023
>>> log(1+z)
1.178654996341646117219023
A rational function:
`\frac{z}{z+1} = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 1
\end{matrix} \; \right| \; z \right)`
>>> meijerg([[1,1],[]], [[1],[1]], z)
0.6923076923076923076923077
>>> z/(z+1)
0.6923076923076923076923077
The sine and cosine functions:
`\frac{1}{\sqrt \pi} \sin(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix}
- \\ \frac{1}{2}, 0 \end{matrix} \; \right| \; z \right)`
`\frac{1}{\sqrt \pi} \cos(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix}
- \\ 0, \frac{1}{2} \end{matrix} \; \right| \; z \right)`
>>> meijerg([[],[]], [[0.5],[0]], (z/2)**2)
0.4389807929218676682296453
>>> sin(z)/sqrt(pi)
0.4389807929218676682296453
>>> meijerg([[],[]], [[0],[0.5]], (z/2)**2)
-0.3544090145996275423331762
>>> cos(z)/sqrt(pi)
-0.3544090145996275423331762
Bessel functions:
`J_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left.
\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
\end{matrix} \; \right| \; z \right)`
`Y_a(2 \sqrt z) = G^{2,0}_{1,3} \left( \left.
\begin{matrix} \frac{-a-1}{2} \\ \frac{a}{2}, -\frac{a}{2}, \frac{-a-1}{2}
\end{matrix} \; \right| \; z \right)`
`(-z)^{a/2} z^{-a/2} I_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left.
\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
\end{matrix} \; \right| \; -z \right)`
`2 K_a(2 \sqrt z) = G^{2,0}_{0,2} \left( \left.
\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
\end{matrix} \; \right| \; z \right)`
As the example with the Bessel *I* function shows, a branch
factor is required for some arguments when inverting the square root.
>>> meijerg([[],[]], [[a/2],[-a/2]], (z/2)**2)
0.5059425789597154858527264
>>> besselj(a,z)
0.5059425789597154858527264
>>> meijerg([[],[(-a-1)/2]], [[a/2,-a/2],[(-a-1)/2]], (z/2)**2)
0.1853868950066556941442559
>>> bessely(a, z)
0.1853868950066556941442559
>>> meijerg([[],[]], [[a/2],[-a/2]], -(z/2)**2)
(0.8685913322427653875717476 + 2.096964974460199200551738j)
>>> (-z)**(a/2) / z**(a/2) * besseli(a, z)
(0.8685913322427653875717476 + 2.096964974460199200551738j)
>>> 0.5*meijerg([[],[]], [[a/2,-a/2],[]], (z/2)**2)
0.09334163695597828403796071
>>> besselk(a,z)
0.09334163695597828403796071
Error functions:
`\sqrt{\pi} z^{2(a-1)} \mathrm{erfc}(z) = G^{2,0}_{1,2} \left( \left.
\begin{matrix} a \\ a-1, a-\frac{1}{2}
\end{matrix} \; \right| \; z, \frac{1}{2} \right)`
>>> meijerg([[],[a]], [[a-1,a-0.5],[]], z, 0.5)
0.00172839843123091957468712
>>> sqrt(pi) * z**(2*a-2) * erfc(z)
0.00172839843123091957468712
A Meijer G-function of higher degree, (1,1,2,3):
>>> meijerg([[a],[b]], [[a],[b,a-1]], z)
1.55984467443050210115617
>>> sin((b-a)*pi)/pi*(exp(z)-1)*z**(a-1)
1.55984467443050210115617
A Meijer G-function of still higher degree, (4,1,2,4), that can
be expanded as a messy combination of exponential integrals:
>>> meijerg([[a],[2*b-a]], [[b,a,b-0.5,-1-a+2*b],[]], z)
0.3323667133658557271898061
>>> chop(4**(a-b+1)*sqrt(pi)*gamma(2*b-2*a)*z**a*\
... expint(2*b-2*a, -2*sqrt(-z))*expint(2*b-2*a, 2*sqrt(-z)))
0.3323667133658557271898061
In the following case, different series give different values::
>>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2))
-0.06417628097442437076207337
>>> meijerg([[1],[0.25]],[[3],[0.5]],-2,series=1)
0.1428699426155117511873047
>>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2,series=2))
-0.06417628097442437076207337
**References**
1. http://en.wikipedia.org/wiki/Meijer_G-function
2. http://mathworld.wolfram.com/MeijerG-Function.html
3. http://functions.wolfram.com/HypergeometricFunctions/MeijerG/
4. http://functions.wolfram.com/HypergeometricFunctions/MeijerG1/
"""
clsin = r"""
Computes the Clausen sine function, defined formally by the series
.. math ::
\mathrm{Cl}_s(z) = \sum_{k=1}^{\infty} \frac{\sin(kz)}{k^s}.
The special case `\mathrm{Cl}_2(z)` (i.e. ``clsin(2,z)``) is the classical
"Clausen function". More generally, the Clausen function is defined for
complex `s` and `z`, even when the series does not converge. The
Clausen function is related to the polylogarithm (:func:`~mpmath.polylog`) as
.. math ::
\mathrm{Cl}_s(z) = \frac{1}{2i}\left(\mathrm{Li}_s\left(e^{iz}\right) -
\mathrm{Li}_s\left(e^{-iz}\right)\right)
= \mathrm{Im}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}),
and this representation can be taken to provide the analytic continuation of the
series. The complementary function :func:`~mpmath.clcos` gives the corresponding
cosine sum.
**Examples**
Evaluation for arbitrarily chosen `s` and `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> s, z = 3, 4
>>> clsin(s, z); nsum(lambda k: sin(z*k)/k**s, [1,inf])
-0.6533010136329338746275795
-0.6533010136329338746275795
Using `z + \pi` instead of `z` gives an alternating series::
>>> clsin(s, z+pi)
0.8860032351260589402871624
>>> nsum(lambda k: (-1)**k*sin(z*k)/k**s, [1,inf])
0.8860032351260589402871624
With `s = 1`, the sum can be expressed in closed form
using elementary functions::
>>> z = 1 + sqrt(3)
>>> clsin(1, z)
0.2047709230104579724675985
>>> chop((log(1-exp(-j*z)) - log(1-exp(j*z)))/(2*j))
0.2047709230104579724675985
>>> nsum(lambda k: sin(k*z)/k, [1,inf])
0.2047709230104579724675985
The classical Clausen function `\mathrm{Cl}_2(\theta)` gives the
value of the integral `\int_0^{\theta} -\ln(2\sin(x/2)) dx` for
`0 < \theta < 2 \pi`::
>>> cl2 = lambda t: clsin(2, t)
>>> cl2(3.5)
-0.2465045302347694216534255
>>> -quad(lambda x: ln(2*sin(0.5*x)), [0, 3.5])
-0.2465045302347694216534255
This function is symmetric about `\theta = \pi` with zeros and extreme
points::
>>> cl2(0); cl2(pi/3); chop(cl2(pi)); cl2(5*pi/3); chop(cl2(2*pi))
0.0
1.014941606409653625021203
0.0
-1.014941606409653625021203
0.0
Catalan's constant is a special value::
>>> cl2(pi/2)
0.9159655941772190150546035
>>> +catalan
0.9159655941772190150546035
The Clausen sine function can be expressed in closed form when
`s` is an odd integer (becoming zero when `s` < 0)::
>>> z = 1 + sqrt(2)
>>> clsin(1, z); (pi-z)/2
0.3636895456083490948304773
0.3636895456083490948304773
>>> clsin(3, z); pi**2/6*z - pi*z**2/4 + z**3/12
0.5661751584451144991707161
0.5661751584451144991707161
>>> clsin(-1, z)
0.0
>>> clsin(-3, z)
0.0
It can also be expressed in closed form for even integer `s \le 0`,
providing a finite sum for series such as
`\sin(z) + \sin(2z) + \sin(3z) + \ldots`::
>>> z = 1 + sqrt(2)
>>> clsin(0, z)
0.1903105029507513881275865
>>> cot(z/2)/2
0.1903105029507513881275865
>>> clsin(-2, z)
-0.1089406163841548817581392
>>> -cot(z/2)*csc(z/2)**2/4
-0.1089406163841548817581392
Call with ``pi=True`` to multiply `z` by `\pi` exactly::
>>> clsin(3, 3*pi)
-8.892316224968072424732898e-26
>>> clsin(3, 3, pi=True)
0.0
Evaluation for complex `s`, `z` in a nonconvergent case::
>>> s, z = -1-j, 1+2j
>>> clsin(s, z)
(-0.593079480117379002516034 + 0.9038644233367868273362446j)
>>> extraprec(20)(nsum)(lambda k: sin(k*z)/k**s, [1,inf])
(-0.593079480117379002516034 + 0.9038644233367868273362446j)
"""
clcos = r"""
Computes the Clausen cosine function, defined formally by the series
.. math ::
\mathrm{\widetilde{Cl}}_s(z) = \sum_{k=1}^{\infty} \frac{\cos(kz)}{k^s}.
This function is complementary to the Clausen sine function
:func:`~mpmath.clsin`. In terms of the polylogarithm,
.. math ::
\mathrm{\widetilde{Cl}}_s(z) =
\frac{1}{2}\left(\mathrm{Li}_s\left(e^{iz}\right) +
\mathrm{Li}_s\left(e^{-iz}\right)\right)
= \mathrm{Re}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}).
**Examples**
Evaluation for arbitrarily chosen `s` and `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> s, z = 3, 4
>>> clcos(s, z); nsum(lambda k: cos(z*k)/k**s, [1,inf])
-0.6518926267198991308332759
-0.6518926267198991308332759
Using `z + \pi` instead of `z` gives an alternating series::
>>> s, z = 3, 0.5
>>> clcos(s, z+pi)
-0.8155530586502260817855618
>>> nsum(lambda k: (-1)**k*cos(z*k)/k**s, [1,inf])
-0.8155530586502260817855618
With `s = 1`, the sum can be expressed in closed form
using elementary functions::
>>> z = 1 + sqrt(3)
>>> clcos(1, z)
-0.6720334373369714849797918
>>> chop(-0.5*(log(1-exp(j*z))+log(1-exp(-j*z))))
-0.6720334373369714849797918
>>> -log(abs(2*sin(0.5*z))) # Equivalent to above when z is real
-0.6720334373369714849797918
>>> nsum(lambda k: cos(k*z)/k, [1,inf])
-0.6720334373369714849797918
It can also be expressed in closed form when `s` is an even integer.
For example,
>>> clcos(2,z)
-0.7805359025135583118863007
>>> pi**2/6 - pi*z/2 + z**2/4
-0.7805359025135583118863007
The case `s = 0` gives the renormalized sum of
`\cos(z) + \cos(2z) + \cos(3z) + \ldots` (which happens to be the same for
any value of `z`)::
>>> clcos(0, z)
-0.5
>>> nsum(lambda k: cos(k*z), [1,inf])
-0.5
Also the sums
.. math ::
\cos(z) + 2\cos(2z) + 3\cos(3z) + \ldots
and
.. math ::
\cos(z) + 2^n \cos(2z) + 3^n \cos(3z) + \ldots
for higher integer powers `n = -s` can be done in closed form. They are zero
when `n` is positive and even (`s` negative and even)::
>>> clcos(-1, z); 1/(2*cos(z)-2)
-0.2607829375240542480694126
-0.2607829375240542480694126
>>> clcos(-3, z); (2+cos(z))*csc(z/2)**4/8
0.1472635054979944390848006
0.1472635054979944390848006
>>> clcos(-2, z); clcos(-4, z); clcos(-6, z)
0.0
0.0
0.0
With `z = 0` or `z = \pi`, the series reduces to that of the Riemann zeta function
(more generally, if `z = p \pi/q`, it is a finite sum over Hurwitz zeta
function values)::
>>> clcos(2.5, 0); zeta(2.5)
1.34148725725091717975677
1.34148725725091717975677
>>> clcos(2.5, pi); -altzeta(2.5)
-0.8671998890121841381913472
-0.8671998890121841381913472
Call with ``pi=True`` to multiply `z` by `\pi` exactly::
>>> clcos(-3, 2*pi)
2.997921055881167659267063e+102
>>> clcos(-3, 2, pi=True)
0.008333333333333333333333333
Evaluation for complex `s`, `z` in a nonconvergent case::
>>> s, z = -1-j, 1+2j
>>> clcos(s, z)
(0.9407430121562251476136807 + 0.715826296033590204557054j)
>>> extraprec(20)(nsum)(lambda k: cos(k*z)/k**s, [1,inf])
(0.9407430121562251476136807 + 0.715826296033590204557054j)
"""
whitm = r"""
Evaluates the Whittaker function `M(k,m,z)`, which gives a solution
to the Whittaker differential equation
.. math ::
\frac{d^2f}{dz^2} + \left(-\frac{1}{4}+\frac{k}{z}+
\frac{(\frac{1}{4}-m^2)}{z^2}\right) f = 0.
A second solution is given by :func:`~mpmath.whitw`.
The Whittaker functions are defined in Abramowitz & Stegun, section 13.1.
They are alternate forms of the confluent hypergeometric functions
`\,_1F_1` and `U`:
.. math ::
M(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m}
\,_1F_1(\tfrac{1}{2}+m-k, 1+2m, z)
W(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m}
U(\tfrac{1}{2}+m-k, 1+2m, z).
**Examples**
Evaluation for arbitrary real and complex arguments is supported::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> whitm(1, 1, 1)
0.7302596799460411820509668
>>> whitm(1, 1, -1)
(0.0 - 1.417977827655098025684246j)
>>> whitm(j, j/2, 2+3j)
(3.245477713363581112736478 - 0.822879187542699127327782j)
>>> whitm(2, 3, 100000)
4.303985255686378497193063e+21707
Evaluation at zero::
>>> whitm(1,-1,0); whitm(1,-0.5,0); whitm(1,0,0)
+inf
nan
0.0
We can verify that :func:`~mpmath.whitm` numerically satisfies the
differential equation for arbitrarily chosen values::
>>> k = mpf(0.25)
>>> m = mpf(1.5)
>>> f = lambda z: whitm(k,m,z)
>>> for z in [-1, 2.5, 3, 1+2j]:
... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z))
...
0.0
0.0
0.0
0.0
An integral involving both :func:`~mpmath.whitm` and :func:`~mpmath.whitw`,
verifying evaluation along the real axis::
>>> quad(lambda x: exp(-x)*whitm(3,2,x)*whitw(1,-2,x), [0,inf])
3.438869842576800225207341
>>> 128/(21*sqrt(pi))
3.438869842576800225207341
"""
whitw = r"""
Evaluates the Whittaker function `W(k,m,z)`, which gives a second
solution to the Whittaker differential equation. (See :func:`~mpmath.whitm`.)
**Examples**
Evaluation for arbitrary real and complex arguments is supported::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> whitw(1, 1, 1)
1.19532063107581155661012
>>> whitw(1, 1, -1)
(-0.9424875979222187313924639 - 0.2607738054097702293308689j)
>>> whitw(j, j/2, 2+3j)
(0.1782899315111033879430369 - 0.01609578360403649340169406j)
>>> whitw(2, 3, 100000)
1.887705114889527446891274e-21705
>>> whitw(-1, -1, 100)
1.905250692824046162462058e-24
Evaluation at zero::
>>> for m in [-1, -0.5, 0, 0.5, 1]:
... whitw(1, m, 0)
...
+inf
nan
0.0
nan
+inf
We can verify that :func:`~mpmath.whitw` numerically satisfies the
differential equation for arbitrarily chosen values::
>>> k = mpf(0.25)
>>> m = mpf(1.5)
>>> f = lambda z: whitw(k,m,z)
>>> for z in [-1, 2.5, 3, 1+2j]:
... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z))
...
0.0
0.0
0.0
0.0
"""
ber = r"""
Computes the Kelvin function ber, which for real arguments gives the real part
of the Bessel J function of a rotated argument
.. math ::
J_n\left(x e^{3\pi i/4}\right) = \mathrm{ber}_n(x) + i \mathrm{bei}_n(x).
The imaginary part is given by :func:`~mpmath.bei`.
**Plots**
.. literalinclude :: /plots/ber.py
.. image :: /plots/ber.png
**Examples**
Verifying the defining relation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> n, x = 2, 3.5
>>> ber(n,x)
1.442338852571888752631129
>>> bei(n,x)
-0.948359035324558320217678
>>> besselj(n, x*root(1,8,3))
(1.442338852571888752631129 - 0.948359035324558320217678j)
The ber and bei functions are also defined by analytic continuation
for complex arguments::
>>> ber(1+j, 2+3j)
(4.675445984756614424069563 - 15.84901771719130765656316j)
>>> bei(1+j, 2+3j)
(15.83886679193707699364398 + 4.684053288183046528703611j)
"""
bei = r"""
Computes the Kelvin function bei, which for real arguments gives the
imaginary part of the Bessel J function of a rotated argument.
See :func:`~mpmath.ber`.
"""
ker = r"""
Computes the Kelvin function ker, which for real arguments gives the real part
of the (rescaled) Bessel K function of a rotated argument
.. math ::
e^{-n \pi i/2} K_n\left(x e^{\pi i/4}\right) = \mathrm{ker}_n(x) + i \mathrm{kei}_n(x).
The imaginary part is given by :func:`~mpmath.kei`.
**Plots**
.. literalinclude :: /plots/ker.py
.. image :: /plots/ker.png
**Examples**
Verifying the defining relation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> n, x = 2, 4.5
>>> ker(n,x)
0.02542895201906369640249801
>>> kei(n,x)
-0.02074960467222823237055351
>>> exp(-n*pi*j/2) * besselk(n, x*root(1,8,1))
(0.02542895201906369640249801 - 0.02074960467222823237055351j)
The ker and kei functions are also defined by analytic continuation
for complex arguments::
>>> ker(1+j, 3+4j)
(1.586084268115490421090533 - 2.939717517906339193598719j)
>>> kei(1+j, 3+4j)
(-2.940403256319453402690132 - 1.585621643835618941044855j)
"""
kei = r"""
Computes the Kelvin function kei, which for real arguments gives the
imaginary part of the (rescaled) Bessel K function of a rotated argument.
See :func:`~mpmath.ker`.
"""
struveh = r"""
Gives the Struve function
.. math ::
\,\mathbf{H}_n(z) =
\sum_{k=0}^\infty \frac{(-1)^k}{\Gamma(k+\frac{3}{2})
\Gamma(k+n+\frac{3}{2})} {\left({\frac{z}{2}}\right)}^{2k+n+1}
which is a solution to the Struve differential equation
.. math ::
z^2 f''(z) + z f'(z) + (z^2-n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}.
**Examples**
Evaluation for arbitrary real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> struveh(0, 3.5)
0.3608207733778295024977797
>>> struveh(-1, 10)
-0.255212719726956768034732
>>> struveh(1, -100.5)
0.5819566816797362287502246
>>> struveh(2.5, 10000000000000)
3153915652525200060.308937
>>> struveh(2.5, -10000000000000)
(0.0 - 3153915652525200060.308937j)
>>> struveh(1+j, 1000000+4000000j)
(-3.066421087689197632388731e+1737173 - 1.596619701076529803290973e+1737173j)
A Struve function of half-integer order is elementary; for example:
>>> z = 3
>>> struveh(0.5, 3)
0.9167076867564138178671595
>>> sqrt(2/(pi*z))*(1-cos(z))
0.9167076867564138178671595
Numerically verifying the differential equation::
>>> z = mpf(4.5)
>>> n = 3
>>> f = lambda z: struveh(n,z)
>>> lhs = z**2*diff(f,z,2) + z*diff(f,z) + (z**2-n**2)*f(z)
>>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi
>>> lhs
17.40359302709875496632744
>>> rhs
17.40359302709875496632744
"""
struvel = r"""
Gives the modified Struve function
.. math ::
\,\mathbf{L}_n(z) = -i e^{-n\pi i/2} \mathbf{H}_n(i z)
which solves the modified Struve differential equation
.. math ::
z^2 f''(z) + z f'(z) - (z^2+n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}.
**Examples**
Evaluation for arbitrary real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> struvel(0, 3.5)
7.180846515103737996249972
>>> struvel(-1, 10)
2670.994904980850550721511
>>> struvel(1, -100.5)
1.757089288053346261497686e+42
>>> struvel(2.5, 10000000000000)
4.160893281017115450519948e+4342944819025
>>> struvel(2.5, -10000000000000)
(0.0 - 4.160893281017115450519948e+4342944819025j)
>>> struvel(1+j, 700j)
(-0.1721150049480079451246076 + 0.1240770953126831093464055j)
>>> struvel(1+j, 1000000+4000000j)
(-2.973341637511505389128708e+434290 - 5.164633059729968297147448e+434290j)
Numerically verifying the differential equation::
>>> z = mpf(3.5)
>>> n = 3
>>> f = lambda z: struvel(n,z)
>>> lhs = z**2*diff(f,z,2) + z*diff(f,z) - (z**2+n**2)*f(z)
>>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi
>>> lhs
6.368850306060678353018165
>>> rhs
6.368850306060678353018165
"""
appellf1 = r"""
Gives the Appell F1 hypergeometric function of two variables,
.. math ::
F_1(a,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c)_{m+n}}
\frac{x^m y^n}{m! n!}.
This series is only generally convergent when `|x| < 1` and `|y| < 1`,
although :func:`~mpmath.appellf1` can evaluate an analytic continuation
with respect to either variable, and sometimes both.
**Examples**
Evaluation is supported for real and complex parameters::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf1(1,0,0.5,1,0.5,0.25)
1.154700538379251529018298
>>> appellf1(1,1+j,0.5,1,0.5,0.5j)
(1.138403860350148085179415 + 1.510544741058517621110615j)
For some integer parameters, the F1 series reduces to a polynomial::
>>> appellf1(2,-4,-3,1,2,5)
-816.0
>>> appellf1(-5,1,2,1,4,5)
-20528.0
The analytic continuation with respect to either `x` or `y`,
and sometimes with respect to both, can be evaluated::
>>> appellf1(2,3,4,5,100,0.5)
(0.0006231042714165329279738662 + 0.0000005769149277148425774499857j)
>>> appellf1('1.1', '0.3', '0.2+2j', '0.4', '0.2', 1.5+3j)
(-0.1782604566893954897128702 + 0.002472407104546216117161499j)
>>> appellf1(1,2,3,4,10,12)
-0.07122993830066776374929313
For certain arguments, F1 reduces to an ordinary hypergeometric function::
>>> appellf1(1,2,3,5,0.5,0.25)
1.547902270302684019335555
>>> 4*hyp2f1(1,2,5,'1/3')/3
1.547902270302684019335555
>>> appellf1(1,2,3,4,0,1.5)
(-1.717202506168937502740238 - 2.792526803190927323077905j)
>>> hyp2f1(1,3,4,1.5)
(-1.717202506168937502740238 - 2.792526803190927323077905j)
The F1 function satisfies a system of partial differential equations::
>>> a,b1,b2,c,x,y = map(mpf, [1,0.5,0.25,1.125,0.25,-0.25])
>>> F = lambda x,y: appellf1(a,b1,b2,c,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) +
... y*(1-x)*diff(F,(x,y),(1,1)) +
... (c-(a+b1+1)*x)*diff(F,(x,y),(1,0)) -
... b1*y*diff(F,(x,y),(0,1)) -
... a*b1*F(x,y))
0.0
>>>
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) +
... x*(1-y)*diff(F,(x,y),(1,1)) +
... (c-(a+b2+1)*y)*diff(F,(x,y),(0,1)) -
... b2*x*diff(F,(x,y),(1,0)) -
... a*b2*F(x,y))
0.0
The Appell F1 function allows for closed-form evaluation of various
integrals, such as any integral of the form
`\int x^r (x+a)^p (x+b)^q dx`::
>>> def integral(a,b,p,q,r,x1,x2):
... a,b,p,q,r,x1,x2 = map(mpmathify, [a,b,p,q,r,x1,x2])
... f = lambda x: x**r * (x+a)**p * (x+b)**q
... def F(x):
... v = x**(r+1)/(r+1) * (a+x)**p * (b+x)**q
... v *= (1+x/a)**(-p)
... v *= (1+x/b)**(-q)
... v *= appellf1(r+1,-p,-q,2+r,-x/a,-x/b)
... return v
... print("Num. quad: %s" % quad(f, [x1,x2]))
... print("Appell F1: %s" % (F(x2)-F(x1)))
...
>>> integral('1/5','4/3','-2','3','1/2',0,1)
Num. quad: 9.073335358785776206576981
Appell F1: 9.073335358785776206576981
>>> integral('3/2','4/3','-2','3','1/2',0,1)
Num. quad: 1.092829171999626454344678
Appell F1: 1.092829171999626454344678
>>> integral('3/2','4/3','-2','3','1/2',12,25)
Num. quad: 1106.323225040235116498927
Appell F1: 1106.323225040235116498927
Also incomplete elliptic integrals fall into this category [1]::
>>> def E(z, m):
... if (pi/2).ae(z):
... return ellipe(m)
... return 2*round(re(z)/pi)*ellipe(m) + mpf(-1)**round(re(z)/pi)*\
... sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2)
...
>>> z, m = 1, 0.5
>>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z])
0.9273298836244400669659042
0.9273298836244400669659042
>>> z, m = 3, 2
>>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z])
(1.057495752337234229715836 + 1.198140234735592207439922j)
(1.057495752337234229715836 + 1.198140234735592207439922j)
**References**
1. [WolframFunctions]_ http://functions.wolfram.com/EllipticIntegrals/EllipticE2/26/01/
2. [SrivastavaKarlsson]_
3. [CabralRosetti]_
4. [Vidunas]_
5. [Slater]_
"""
angerj = r"""
Gives the Anger function
.. math ::
\mathbf{J}_{\nu}(z) = \frac{1}{\pi}
\int_0^{\pi} \cos(\nu t - z \sin t) dt
which is an entire function of both the parameter `\nu` and
the argument `z`. It solves the inhomogeneous Bessel differential
equation
.. math ::
f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z)
= \frac{(z-\nu)}{\pi z^2} \sin(\pi \nu).
**Examples**
Evaluation for real and complex parameter and argument::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> angerj(2,3)
0.4860912605858910769078311
>>> angerj(-3+4j, 2+5j)
(-5033.358320403384472395612 + 585.8011892476145118551756j)
>>> angerj(3.25, 1e6j)
(4.630743639715893346570743e+434290 - 1.117960409887505906848456e+434291j)
>>> angerj(-1.5, 1e6)
0.0002795719747073879393087011
The Anger function coincides with the Bessel J-function when `\nu`
is an integer::
>>> angerj(1,3); besselj(1,3)
0.3390589585259364589255146
0.3390589585259364589255146
>>> angerj(1.5,3); besselj(1.5,3)
0.4088969848691080859328847
0.4777182150870917715515015
Verifying the differential equation::
>>> v,z = mpf(2.25), 0.75
>>> f = lambda z: angerj(v,z)
>>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z)
-0.6002108774380707130367995
>>> (z-v)/(pi*z**2) * sinpi(v)
-0.6002108774380707130367995
Verifying the integral representation::
>>> angerj(v,z)
0.1145380759919333180900501
>>> quad(lambda t: cos(v*t-z*sin(t))/pi, [0,pi])
0.1145380759919333180900501
**References**
1. [DLMF]_ section 11.10: Anger-Weber Functions
"""
webere = r"""
Gives the Weber function
.. math ::
\mathbf{E}_{\nu}(z) = \frac{1}{\pi}
\int_0^{\pi} \sin(\nu t - z \sin t) dt
which is an entire function of both the parameter `\nu` and
the argument `z`. It solves the inhomogeneous Bessel differential
equation
.. math ::
f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z)
= -\frac{1}{\pi z^2} (z+\nu+(z-\nu)\cos(\pi \nu)).
**Examples**
Evaluation for real and complex parameter and argument::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> webere(2,3)
-0.1057668973099018425662646
>>> webere(-3+4j, 2+5j)
(-585.8081418209852019290498 - 5033.314488899926921597203j)
>>> webere(3.25, 1e6j)
(-1.117960409887505906848456e+434291 - 4.630743639715893346570743e+434290j)
>>> webere(3.25, 1e6)
-0.00002812518265894315604914453
Up to addition of a rational function of `z`, the Weber function coincides
with the Struve H-function when `\nu` is an integer::
>>> webere(1,3); 2/pi-struveh(1,3)
-0.3834897968188690177372881
-0.3834897968188690177372881
>>> webere(5,3); 26/(35*pi)-struveh(5,3)
0.2009680659308154011878075
0.2009680659308154011878075
Verifying the differential equation::
>>> v,z = mpf(2.25), 0.75
>>> f = lambda z: webere(v,z)
>>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z)
-1.097441848875479535164627
>>> -(z+v+(z-v)*cospi(v))/(pi*z**2)
-1.097441848875479535164627
Verifying the integral representation::
>>> webere(v,z)
0.1486507351534283744485421
>>> quad(lambda t: sin(v*t-z*sin(t))/pi, [0,pi])
0.1486507351534283744485421
**References**
1. [DLMF]_ section 11.10: Anger-Weber Functions
"""
lommels1 = r"""
Gives the Lommel function `s_{\mu,\nu}` or `s^{(1)}_{\mu,\nu}`
.. math ::
s_{\mu,\nu}(z) = \frac{z^{\mu+1}}{(\mu-\nu+1)(\mu+\nu+1)}
\,_1F_2\left(1; \frac{\mu-\nu+3}{2}, \frac{\mu+\nu+3}{2};
-\frac{z^2}{4} \right)
which solves the inhomogeneous Bessel equation
.. math ::
z^2 f''(z) + z f'(z) + (z^2-\nu^2) f(z) = z^{\mu+1}.
A second solution is given by :func:`~mpmath.lommels2`.
**Plots**
.. literalinclude :: /plots/lommels1.py
.. image :: /plots/lommels1.png
**Examples**
An integral representation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> u,v,z = 0.25, 0.125, mpf(0.75)
>>> lommels1(u,v,z)
0.4276243877565150372999126
>>> (bessely(v,z)*quad(lambda t: t**u*besselj(v,t), [0,z]) - \
... besselj(v,z)*quad(lambda t: t**u*bessely(v,t), [0,z]))*(pi/2)
0.4276243877565150372999126
A special value::
>>> lommels1(v,v,z)
0.5461221367746048054932553
>>> gamma(v+0.5)*sqrt(pi)*power(2,v-1)*struveh(v,z)
0.5461221367746048054932553
Verifying the differential equation::
>>> f = lambda z: lommels1(u,v,z)
>>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z)
0.6979536443265746992059141
>>> z**(u+1)
0.6979536443265746992059141
**References**
1. [GradshteynRyzhik]_
2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html
"""
lommels2 = r"""
Gives the second Lommel function `S_{\mu,\nu}` or `s^{(2)}_{\mu,\nu}`
.. math ::
S_{\mu,\nu}(z) = s_{\mu,\nu}(z) + 2^{\mu-1}
\Gamma\left(\tfrac{1}{2}(\mu-\nu+1)\right)
\Gamma\left(\tfrac{1}{2}(\mu+\nu+1)\right) \times
\left[\sin(\tfrac{1}{2}(\mu-\nu)\pi) J_{\nu}(z) -
\cos(\tfrac{1}{2}(\mu-\nu)\pi) Y_{\nu}(z)
\right]
which solves the same differential equation as
:func:`~mpmath.lommels1`.
**Plots**
.. literalinclude :: /plots/lommels2.py
.. image :: /plots/lommels2.png
**Examples**
For large `|z|`, `S_{\mu,\nu} \sim z^{\mu-1}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> lommels2(10,2,30000)
1.968299831601008419949804e+40
>>> power(30000,9)
1.9683e+40
A special value::
>>> u,v,z = 0.5, 0.125, mpf(0.75)
>>> lommels2(v,v,z)
0.9589683199624672099969765
>>> (struveh(v,z)-bessely(v,z))*power(2,v-1)*sqrt(pi)*gamma(v+0.5)
0.9589683199624672099969765
Verifying the differential equation::
>>> f = lambda z: lommels2(u,v,z)
>>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z)
0.6495190528383289850727924
>>> z**(u+1)
0.6495190528383289850727924
**References**
1. [GradshteynRyzhik]_
2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html
"""
appellf2 = r"""
Gives the Appell F2 hypergeometric function of two variables
.. math ::
F_2(a,b_1,b_2,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c_1)_m (c_2)_n}
\frac{x^m y^n}{m! n!}.
The series is generally absolutely convergent for `|x| + |y| < 1`.
**Examples**
Evaluation for real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf2(1,2,3,4,5,0.25,0.125)
1.257417193533135344785602
>>> appellf2(1,-3,-4,2,3,2,3)
-42.8
>>> appellf2(0.5,0.25,-0.25,2,3,0.25j,0.25)
(0.9880539519421899867041719 + 0.01497616165031102661476978j)
>>> chop(appellf2(1,1+j,1-j,3j,-3j,0.25,0.25))
1.201311219287411337955192
>>> appellf2(1,1,1,4,6,0.125,16)
(-0.09455532250274744282125152 - 0.7647282253046207836769297j)
A transformation formula::
>>> a,b1,b2,c1,c2,x,y = map(mpf, [1,2,0.5,0.25,1.625,-0.125,0.125])
>>> appellf2(a,b1,b2,c1,c2,x,y)
0.2299211717841180783309688
>>> (1-x)**(-a)*appellf2(a,c1-b1,b2,c1,c2,x/(x-1),y/(1-x))
0.2299211717841180783309688
A system of partial differential equations satisfied by F2::
>>> a,b1,b2,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,1.5,0.0625,-0.0625])
>>> F = lambda x,y: appellf2(a,b1,b2,c1,c2,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) -
... x*y*diff(F,(x,y),(1,1)) +
... (c1-(a+b1+1)*x)*diff(F,(x,y),(1,0)) -
... b1*y*diff(F,(x,y),(0,1)) -
... a*b1*F(x,y))
0.0
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) -
... x*y*diff(F,(x,y),(1,1)) +
... (c2-(a+b2+1)*y)*diff(F,(x,y),(0,1)) -
... b2*x*diff(F,(x,y),(1,0)) -
... a*b2*F(x,y))
0.0
**References**
See references for :func:`~mpmath.appellf1`.
"""
appellf3 = r"""
Gives the Appell F3 hypergeometric function of two variables
.. math ::
F_3(a_1,a_2,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a_1)_m (a_2)_n (b_1)_m (b_2)_n}{(c)_{m+n}}
\frac{x^m y^n}{m! n!}.
The series is generally absolutely convergent for `|x| < 1, |y| < 1`.
**Examples**
Evaluation for various parameters and variables::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf3(1,2,3,4,5,0.5,0.25)
2.221557778107438938158705
>>> appellf3(1,2,3,4,5,6,0); hyp2f1(1,3,5,6)
(-0.5189554589089861284537389 - 0.1454441043328607980769742j)
(-0.5189554589089861284537389 - 0.1454441043328607980769742j)
>>> appellf3(1,-2,-3,1,1,4,6)
-17.4
>>> appellf3(1,2,-3,1,1,4,6)
(17.7876136773677356641825 + 19.54768762233649126154534j)
>>> appellf3(1,2,-3,1,1,6,4)
(85.02054175067929402953645 + 148.4402528821177305173599j)
>>> chop(appellf3(1+j,2,1-j,2,3,0.25,0.25))
1.719992169545200286696007
Many transformations and evaluations for special combinations
of the parameters are possible, e.g.:
>>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125])
>>> appellf3(a,c-a,b,c-b,c,x,y)
1.093432340896087107444363
>>> (1-y)**(a+b-c)*hyp2f1(a,b,c,x+y-x*y)
1.093432340896087107444363
>>> x**2*appellf3(1,1,1,1,3,x,-x)
0.01568646277445385390945083
>>> polylog(2,x**2)
0.01568646277445385390945083
>>> a1,a2,b1,b2,c,x = map(mpf, [0.5,0.25,0.125,0.5,4.25,0.125])
>>> appellf3(a1,a2,b1,b2,c,x,1)
1.03947361709111140096947
>>> gammaprod([c,c-a2-b2],[c-a2,c-b2])*hyp3f2(a1,b1,c-a2-b2,c-a2,c-b2,x)
1.03947361709111140096947
The Appell F3 function satisfies a pair of partial
differential equations::
>>> a1,a2,b1,b2,c,x,y = map(mpf, [0.5,0.25,0.125,0.5,0.625,0.0625,-0.0625])
>>> F = lambda x,y: appellf3(a1,a2,b1,b2,c,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) +
... y*diff(F,(x,y),(1,1)) +
... (c-(a1+b1+1)*x)*diff(F,(x,y),(1,0)) -
... a1*b1*F(x,y))
0.0
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) +
... x*diff(F,(x,y),(1,1)) +
... (c-(a2+b2+1)*y)*diff(F,(x,y),(0,1)) -
... a2*b2*F(x,y))
0.0
**References**
See references for :func:`~mpmath.appellf1`.
"""
appellf4 = r"""
Gives the Appell F4 hypergeometric function of two variables
.. math ::
F_4(a,b,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b)_{m+n}}{(c_1)_m (c_2)_n}
\frac{x^m y^n}{m! n!}.
The series is generally absolutely convergent for
`\sqrt{|x|} + \sqrt{|y|} < 1`.
**Examples**
Evaluation for various parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf4(1,1,2,2,0.25,0.125)
1.286182069079718313546608
>>> appellf4(-2,-3,4,5,4,5)
34.8
>>> appellf4(5,4,2,3,0.25j,-0.125j)
(-0.2585967215437846642163352 + 2.436102233553582711818743j)
Reduction to `\,_2F_1` in a special case::
>>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125])
>>> appellf4(a,b,c,a+b-c+1,x*(1-y),y*(1-x))
1.129143488466850868248364
>>> hyp2f1(a,b,c,x)*hyp2f1(a,b,a+b-c+1,y)
1.129143488466850868248364
A system of partial differential equations satisfied by F4::
>>> a,b,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,0.0625,-0.0625])
>>> F = lambda x,y: appellf4(a,b,c1,c2,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) -
... y**2*diff(F,(x,y),(0,2)) -
... 2*x*y*diff(F,(x,y),(1,1)) +
... (c1-(a+b+1)*x)*diff(F,(x,y),(1,0)) -
... ((a+b+1)*y)*diff(F,(x,y),(0,1)) -
... a*b*F(x,y))
0.0
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) -
... x**2*diff(F,(x,y),(2,0)) -
... 2*x*y*diff(F,(x,y),(1,1)) +
... (c2-(a+b+1)*y)*diff(F,(x,y),(0,1)) -
... ((a+b+1)*x)*diff(F,(x,y),(1,0)) -
... a*b*F(x,y))
0.0
**References**
See references for :func:`~mpmath.appellf1`.
"""
zeta = r"""
Computes the Riemann zeta function
.. math ::
\zeta(s) = 1+\frac{1}{2^s}+\frac{1}{3^s}+\frac{1}{4^s}+\ldots
or, with `a \ne 1`, the more general Hurwitz zeta function
.. math ::
\zeta(s,a) = \sum_{k=0}^\infty \frac{1}{(a+k)^s}.
Optionally, ``zeta(s, a, n)`` computes the `n`-th derivative with
respect to `s`,
.. math ::
\zeta^{(n)}(s,a) = (-1)^n \sum_{k=0}^\infty \frac{\log^n(a+k)}{(a+k)^s}.
Although these series only converge for `\Re(s) > 1`, the Riemann and Hurwitz
zeta functions are defined through analytic continuation for arbitrary
complex `s \ne 1` (`s = 1` is a pole).
The implementation uses three algorithms: the Borwein algorithm for
the Riemann zeta function when `s` is close to the real line;
the Riemann-Siegel formula for the Riemann zeta function when `s` is
large imaginary, and Euler-Maclaurin summation in all other cases.
The reflection formula for `\Re(s) < 0` is implemented in some cases.
The algorithm can be chosen with ``method = 'borwein'``,
``method='riemann-siegel'`` or ``method = 'euler-maclaurin'``.
The parameter `a` is usually a rational number `a = p/q`, and may be specified
as such by passing an integer tuple `(p, q)`. Evaluation is supported for
arbitrary complex `a`, but may be slow and/or inaccurate when `\Re(s) < 0` for
nonrational `a` or when computing derivatives.
**Examples**
Some values of the Riemann zeta function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> zeta(2); pi**2 / 6
1.644934066848226436472415
1.644934066848226436472415
>>> zeta(0)
-0.5
>>> zeta(-1)
-0.08333333333333333333333333
>>> zeta(-2)
0.0
For large positive `s`, `\zeta(s)` rapidly approaches 1::
>>> zeta(50)
1.000000000000000888178421
>>> zeta(100)
1.0
>>> zeta(inf)
1.0
>>> 1-sum((zeta(k)-1)/k for k in range(2,85)); +euler
0.5772156649015328606065121
0.5772156649015328606065121
>>> nsum(lambda k: zeta(k)-1, [2, inf])
1.0
Evaluation is supported for complex `s` and `a`:
>>> zeta(-3+4j)
(-0.03373057338827757067584698 + 0.2774499251557093745297677j)
>>> zeta(2+3j, -1+j)
(389.6841230140842816370741 + 295.2674610150305334025962j)
The Riemann zeta function has so-called nontrivial zeros on
the critical line `s = 1/2 + it`::
>>> findroot(zeta, 0.5+14j); zetazero(1)
(0.5 + 14.13472514173469379045725j)
(0.5 + 14.13472514173469379045725j)
>>> findroot(zeta, 0.5+21j); zetazero(2)
(0.5 + 21.02203963877155499262848j)
(0.5 + 21.02203963877155499262848j)
>>> findroot(zeta, 0.5+25j); zetazero(3)
(0.5 + 25.01085758014568876321379j)
(0.5 + 25.01085758014568876321379j)
>>> chop(zeta(zetazero(10)))
0.0
Evaluation on and near the critical line is supported for large
heights `t` by means of the Riemann-Siegel formula (currently
for `a = 1`, `n \le 4`)::
>>> zeta(0.5+100000j)
(1.073032014857753132114076 + 5.780848544363503984261041j)
>>> zeta(0.75+1000000j)
(0.9535316058375145020351559 + 0.9525945894834273060175651j)
>>> zeta(0.5+10000000j)
(11.45804061057709254500227 - 8.643437226836021723818215j)
>>> zeta(0.5+100000000j, derivative=1)
(51.12433106710194942681869 + 43.87221167872304520599418j)
>>> zeta(0.5+100000000j, derivative=2)
(-444.2760822795430400549229 - 896.3789978119185981665403j)
>>> zeta(0.5+100000000j, derivative=3)
(3230.72682687670422215339 + 14374.36950073615897616781j)
>>> zeta(0.5+100000000j, derivative=4)
(-11967.35573095046402130602 - 218945.7817789262839266148j)
>>> zeta(1+10000000j) # off the line
(2.859846483332530337008882 + 0.491808047480981808903986j)
>>> zeta(1+10000000j, derivative=1)
(-4.333835494679647915673205 - 0.08405337962602933636096103j)
>>> zeta(1+10000000j, derivative=4)
(453.2764822702057701894278 - 581.963625832768189140995j)
For investigation of the zeta function zeros, the Riemann-Siegel
Z-function is often more convenient than working with the Riemann
zeta function directly (see :func:`~mpmath.siegelz`).
Some values of the Hurwitz zeta function::
>>> zeta(2, 3); -5./4 + pi**2/6
0.3949340668482264364724152
0.3949340668482264364724152
>>> zeta(2, (3,4)); pi**2 - 8*catalan
2.541879647671606498397663
2.541879647671606498397663
For positive integer values of `s`, the Hurwitz zeta function is
equivalent to a polygamma function (except for a normalizing factor)::
>>> zeta(4, (1,5)); psi(3, '1/5')/6
625.5408324774542966919938
625.5408324774542966919938
Evaluation of derivatives::
>>> zeta(0, 3+4j, 1); loggamma(3+4j) - ln(2*pi)/2
(-2.675565317808456852310934 + 4.742664438034657928194889j)
(-2.675565317808456852310934 + 4.742664438034657928194889j)
>>> zeta(2, 1, 20)
2432902008176640000.000242
>>> zeta(3+4j, 5.5+2j, 4)
(-0.140075548947797130681075 - 0.3109263360275413251313634j)
>>> zeta(0.5+100000j, 1, 4)
(-10407.16081931495861539236 + 13777.78669862804508537384j)
>>> zeta(-100+0.5j, (1,3), derivative=4)
(4.007180821099823942702249e+79 + 4.916117957092593868321778e+78j)
Generating a Taylor series at `s = 2` using derivatives::
>>> for k in range(11): print("%s * (s-2)^%i" % (zeta(2,1,k)/fac(k), k))
...
1.644934066848226436472415 * (s-2)^0
-0.9375482543158437537025741 * (s-2)^1
0.9946401171494505117104293 * (s-2)^2
-1.000024300473840810940657 * (s-2)^3
1.000061933072352565457512 * (s-2)^4
-1.000006869443931806408941 * (s-2)^5
1.000000173233769531820592 * (s-2)^6
-0.9999999569989868493432399 * (s-2)^7
0.9999999937218844508684206 * (s-2)^8
-0.9999999996355013916608284 * (s-2)^9
1.000000000004610645020747 * (s-2)^10
Evaluation at zero and for negative integer `s`::
>>> zeta(0, 10)
-9.5
>>> zeta(-2, (2,3)); mpf(1)/81
0.01234567901234567901234568
0.01234567901234567901234568
>>> zeta(-3+4j, (5,4))
(0.2899236037682695182085988 + 0.06561206166091757973112783j)
>>> zeta(-3.25, 1/pi)
-0.0005117269627574430494396877
>>> zeta(-3.5, pi, 1)
11.156360390440003294709
>>> zeta(-100.5, (8,3))
-4.68162300487989766727122e+77
>>> zeta(-10.5, (-8,3))
(-0.01521913704446246609237979 + 29907.72510874248161608216j)
>>> zeta(-1000.5, (-8,3))
(1.031911949062334538202567e+1770 + 1.519555750556794218804724e+426j)
>>> zeta(-1+j, 3+4j)
(-16.32988355630802510888631 - 22.17706465801374033261383j)
>>> zeta(-1+j, 3+4j, 2)
(32.48985276392056641594055 - 51.11604466157397267043655j)
>>> diff(lambda s: zeta(s, 3+4j), -1+j, 2)
(32.48985276392056641594055 - 51.11604466157397267043655j)
**References**
1. http://mathworld.wolfram.com/RiemannZetaFunction.html
2. http://mathworld.wolfram.com/HurwitzZetaFunction.html
3. http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P155.pdf
"""
dirichlet = r"""
Evaluates the Dirichlet L-function
.. math ::
L(s,\chi) = \sum_{k=1}^\infty \frac{\chi(k)}{k^s}.
where `\chi` is a periodic sequence of length `q` which should be supplied
in the form of a list `[\chi(0), \chi(1), \ldots, \chi(q-1)]`.
Strictly, `\chi` should be a Dirichlet character, but any periodic
sequence will work.
For example, ``dirichlet(s, [1])`` gives the ordinary
Riemann zeta function and ``dirichlet(s, [-1,1])`` gives
the alternating zeta function (Dirichlet eta function).
Also the derivative with respect to `s` (currently only a first
derivative) can be evaluated.
**Examples**
The ordinary Riemann zeta function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> dirichlet(3, [1]); zeta(3)
1.202056903159594285399738
1.202056903159594285399738
>>> dirichlet(1, [1])
+inf
The alternating zeta function::
>>> dirichlet(1, [-1,1]); ln(2)
0.6931471805599453094172321
0.6931471805599453094172321
The following defines the Dirichlet beta function
`\beta(s) = \sum_{k=0}^\infty \frac{(-1)^k}{(2k+1)^s}` and verifies
several values of this function::
>>> B = lambda s, d=0: dirichlet(s, [0, 1, 0, -1], d)
>>> B(0); 1./2
0.5
0.5
>>> B(1); pi/4
0.7853981633974483096156609
0.7853981633974483096156609
>>> B(2); +catalan
0.9159655941772190150546035
0.9159655941772190150546035
>>> B(2,1); diff(B, 2)
0.08158073611659279510291217
0.08158073611659279510291217
>>> B(-1,1); 2*catalan/pi
0.5831218080616375602767689
0.5831218080616375602767689
>>> B(0,1); log(gamma(0.25)**2/(2*pi*sqrt(2)))
0.3915943927068367764719453
0.3915943927068367764719454
>>> B(1,1); 0.25*pi*(euler+2*ln2+3*ln(pi)-4*ln(gamma(0.25)))
0.1929013167969124293631898
0.1929013167969124293631898
A custom L-series of period 3::
>>> dirichlet(2, [2,0,1])
0.7059715047839078092146831
>>> 2*nsum(lambda k: (3*k)**-2, [1,inf]) + \
... nsum(lambda k: (3*k+2)**-2, [0,inf])
0.7059715047839078092146831
"""
coulombf = r"""
Calculates the regular Coulomb wave function
.. math ::
F_l(\eta,z) = C_l(\eta) z^{l+1} e^{-iz} \,_1F_1(l+1-i\eta, 2l+2, 2iz)
where the normalization constant `C_l(\eta)` is as calculated by
:func:`~mpmath.coulombc`. This function solves the differential equation
.. math ::
f''(z) + \left(1-\frac{2\eta}{z}-\frac{l(l+1)}{z^2}\right) f(z) = 0.
A second linearly independent solution is given by the irregular
Coulomb wave function `G_l(\eta,z)` (see :func:`~mpmath.coulombg`)
and thus the general solution is
`f(z) = C_1 F_l(\eta,z) + C_2 G_l(\eta,z)` for arbitrary
constants `C_1`, `C_2`.
Physically, the Coulomb wave functions give the radial solution
to the Schrodinger equation for a point particle in a `1/z` potential; `z` is
then the radius and `l`, `\eta` are quantum numbers.
The Coulomb wave functions with real parameters are defined
in Abramowitz & Stegun, section 14. However, all parameters are permitted
to be complex in this implementation (see references).
**Plots**
.. literalinclude :: /plots/coulombf.py
.. image :: /plots/coulombf.png
.. literalinclude :: /plots/coulombf_c.py
.. image :: /plots/coulombf_c.png
**Examples**
Evaluation is supported for arbitrary magnitudes of `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> coulombf(2, 1.5, 3.5)
0.4080998961088761187426445
>>> coulombf(-2, 1.5, 3.5)
0.7103040849492536747533465
>>> coulombf(2, 1.5, '1e-10')
4.143324917492256448770769e-33
>>> coulombf(2, 1.5, 1000)
0.4482623140325567050716179
>>> coulombf(2, 1.5, 10**10)
-0.066804196437694360046619
Verifying the differential equation::
>>> l, eta, z = 2, 3, mpf(2.75)
>>> A, B = 1, 2
>>> f = lambda z: A*coulombf(l,eta,z) + B*coulombg(l,eta,z)
>>> chop(diff(f,z,2) + (1-2*eta/z - l*(l+1)/z**2)*f(z))
0.0
A Wronskian relation satisfied by the Coulomb wave functions::
>>> l = 2
>>> eta = 1.5
>>> F = lambda z: coulombf(l,eta,z)
>>> G = lambda z: coulombg(l,eta,z)
>>> for z in [3.5, -1, 2+3j]:
... chop(diff(F,z)*G(z) - F(z)*diff(G,z))
...
1.0
1.0
1.0
Another Wronskian relation::
>>> F = coulombf
>>> G = coulombg
>>> for z in [3.5, -1, 2+3j]:
... chop(F(l-1,eta,z)*G(l,eta,z)-F(l,eta,z)*G(l-1,eta,z) - l/sqrt(l**2+eta**2))
...
0.0
0.0
0.0
An integral identity connecting the regular and irregular wave functions::
>>> l, eta, z = 4+j, 2-j, 5+2j
>>> coulombf(l,eta,z) + j*coulombg(l,eta,z)
(0.7997977752284033239714479 + 0.9294486669502295512503127j)
>>> g = lambda t: exp(-t)*t**(l-j*eta)*(t+2*j*z)**(l+j*eta)
>>> j*exp(-j*z)*z**(-l)/fac(2*l+1)/coulombc(l,eta)*quad(g, [0,inf])
(0.7997977752284033239714479 + 0.9294486669502295512503127j)
Some test case with complex parameters, taken from Michel [2]::
>>> mp.dps = 15
>>> coulombf(1+0.1j, 50+50j, 100.156)
(-1.02107292320897e+15 - 2.83675545731519e+15j)
>>> coulombg(1+0.1j, 50+50j, 100.156)
(2.83675545731519e+15 - 1.02107292320897e+15j)
>>> coulombf(1e-5j, 10+1e-5j, 0.1+1e-6j)
(4.30566371247811e-14 - 9.03347835361657e-19j)
>>> coulombg(1e-5j, 10+1e-5j, 0.1+1e-6j)
(778709182061.134 + 18418936.2660553j)
The following reproduces a table in Abramowitz & Stegun, at twice
the precision::
>>> mp.dps = 10
>>> eta = 2; z = 5
>>> for l in [5, 4, 3, 2, 1, 0]:
... print("%s %s %s" % (l, coulombf(l,eta,z),
... diff(lambda z: coulombf(l,eta,z), z)))
...
5 0.09079533488 0.1042553261
4 0.2148205331 0.2029591779
3 0.4313159311 0.320534053
2 0.7212774133 0.3952408216
1 0.9935056752 0.3708676452
0 1.143337392 0.2937960375
**References**
1. I.J. Thompson & A.R. Barnett, "Coulomb and Bessel Functions of Complex
Arguments and Order", J. Comp. Phys., vol 64, no. 2, June 1986.
2. N. Michel, "Precise Coulomb wave functions for a wide range of
complex `l`, `\eta` and `z`", http://arxiv.org/abs/physics/0702051v1
"""
coulombg = r"""
Calculates the irregular Coulomb wave function
.. math ::
G_l(\eta,z) = \frac{F_l(\eta,z) \cos(\chi) - F_{-l-1}(\eta,z)}{\sin(\chi)}
where `\chi = \sigma_l - \sigma_{-l-1} - (l+1/2) \pi`
and `\sigma_l(\eta) = (\ln \Gamma(1+l+i\eta)-\ln \Gamma(1+l-i\eta))/(2i)`.
See :func:`~mpmath.coulombf` for additional information.
**Plots**
.. literalinclude :: /plots/coulombg.py
.. image :: /plots/coulombg.png
.. literalinclude :: /plots/coulombg_c.py
.. image :: /plots/coulombg_c.png
**Examples**
Evaluation is supported for arbitrary magnitudes of `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> coulombg(-2, 1.5, 3.5)
1.380011900612186346255524
>>> coulombg(2, 1.5, 3.5)
1.919153700722748795245926
>>> coulombg(-2, 1.5, '1e-10')
201126715824.7329115106793
>>> coulombg(-2, 1.5, 1000)
0.1802071520691149410425512
>>> coulombg(-2, 1.5, 10**10)
0.652103020061678070929794
The following reproduces a table in Abramowitz & Stegun,
at twice the precision::
>>> mp.dps = 10
>>> eta = 2; z = 5
>>> for l in [1, 2, 3, 4, 5]:
... print("%s %s %s" % (l, coulombg(l,eta,z),
... -diff(lambda z: coulombg(l,eta,z), z)))
...
1 1.08148276 0.6028279961
2 1.496877075 0.5661803178
3 2.048694714 0.7959909551
4 3.09408669 1.731802374
5 5.629840456 4.549343289
Evaluation close to the singularity at `z = 0`::
>>> mp.dps = 15
>>> coulombg(0,10,1)
3088184933.67358
>>> coulombg(0,10,'1e-10')
5554866000719.8
>>> coulombg(0,10,'1e-100')
5554866221524.1
Evaluation with a half-integer value for `l`::
>>> coulombg(1.5, 1, 10)
0.852320038297334
"""
# Raw docstring later attached to mpmath's coulombc().  The text below is
# runtime string data (rendered docs); do not reword it in place.
coulombc = r"""
Gives the normalizing Gamow constant for Coulomb wave functions,
.. math ::
C_l(\eta) = 2^l \exp\left(-\pi \eta/2 + [\ln \Gamma(1+l+i\eta) +
\ln \Gamma(1+l-i\eta)]/2 - \ln \Gamma(2l+2)\right),
where the log gamma function with continuous imaginary part
away from the negative half axis (see :func:`~mpmath.loggamma`) is implied.
This function is used internally for the calculation of
Coulomb wave functions, and automatically cached to make multiple
evaluations with fixed `l`, `\eta` fast.
"""
ellipfun = r"""
Computes any of the Jacobi elliptic functions, defined
in terms of Jacobi theta functions as
.. math ::
\mathrm{sn}(u,m) = \frac{\vartheta_3(0,q)}{\vartheta_2(0,q)}
\frac{\vartheta_1(t,q)}{\vartheta_4(t,q)}
\mathrm{cn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_2(0,q)}
\frac{\vartheta_2(t,q)}{\vartheta_4(t,q)}
\mathrm{dn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_3(0,q)}
\frac{\vartheta_3(t,q)}{\vartheta_4(t,q)},
or more generally computes a ratio of two such functions. Here
`t = u/\vartheta_3(0,q)^2`, and `q = q(m)` denotes the nome (see
:func:`~mpmath.nome`). Optionally, you can specify the nome directly
instead of `m` by passing ``q=<value>``, or you can directly
specify the elliptic parameter `k` with ``k=<value>``.
The first argument should be a two-character string specifying the
function using any combination of ``'s'``, ``'c'``, ``'d'``, ``'n'``. These
letters respectively denote the basic functions
`\mathrm{sn}(u,m)`, `\mathrm{cn}(u,m)`, `\mathrm{dn}(u,m)`, and `1`.
The identifier specifies the ratio of two such functions.
For example, ``'ns'`` identifies the function
.. math ::
\mathrm{ns}(u,m) = \frac{1}{\mathrm{sn}(u,m)}
and ``'cd'`` identifies the function
.. math ::
\mathrm{cd}(u,m) = \frac{\mathrm{cn}(u,m)}{\mathrm{dn}(u,m)}.
If called with only the first argument, a function object
evaluating the chosen function for given arguments is returned.
**Examples**
Basic evaluation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellipfun('cd', 3.5, 0.5)
-0.9891101840595543931308394
>>> ellipfun('cd', 3.5, q=0.25)
0.07111979240214668158441418
The sn-function is doubly periodic in the complex plane with periods
`4 K(m)` and `2 i K(1-m)` (see :func:`~mpmath.ellipk`)::
>>> sn = ellipfun('sn')
>>> sn(2, 0.25)
0.9628981775982774425751399
>>> sn(2+4*ellipk(0.25), 0.25)
0.9628981775982774425751399
>>> chop(sn(2+2*j*ellipk(1-0.25), 0.25))
0.9628981775982774425751399
The cn-function is doubly periodic with periods `4 K(m)` and `2 K(m) + 2 i K(1-m)`::
>>> cn = ellipfun('cn')
>>> cn(2, 0.25)
-0.2698649654510865792581416
>>> cn(2+4*ellipk(0.25), 0.25)
-0.2698649654510865792581416
>>> chop(cn(2+2*ellipk(0.25)+2*j*ellipk(1-0.25), 0.25))
-0.2698649654510865792581416
The dn-function is doubly periodic with periods `2 K(m)` and `4 i K(1-m)`::
>>> dn = ellipfun('dn')
>>> dn(2, 0.25)
0.8764740583123262286931578
>>> dn(2+2*ellipk(0.25), 0.25)
0.8764740583123262286931578
>>> chop(dn(2+4*j*ellipk(1-0.25), 0.25))
0.8764740583123262286931578
"""
jtheta = r"""
Computes the Jacobi theta function `\vartheta_n(z, q)`, where
`n = 1, 2, 3, 4`, defined by the infinite series:
.. math ::
\vartheta_1(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty}
(-1)^n q^{n^2+n\,} \sin((2n+1)z)
\vartheta_2(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty}
q^{n^{2\,} + n} \cos((2n+1)z)
\vartheta_3(z,q) = 1 + 2 \sum_{n=1}^{\infty}
q^{n^2\,} \cos(2 n z)
\vartheta_4(z,q) = 1 + 2 \sum_{n=1}^{\infty}
(-q)^{n^2\,} \cos(2 n z)
The theta functions are functions of two variables:
* `z` is the *argument*, an arbitrary real or complex number
* `q` is the *nome*, which must be a real or complex number
in the unit disk (i.e. `|q| < 1`). For `|q| \ll 1`, the
series converge very quickly, so the Jacobi theta functions
can efficiently be evaluated to high precision.
The compact notations `\vartheta_n(q) = \vartheta_n(0,q)`
and `\vartheta_n = \vartheta_n(0,q)` are also frequently
encountered. Finally, Jacobi theta functions are frequently
considered as functions of the half-period ratio `\tau`
and then usually denoted by `\vartheta_n(z|\tau)`.
Optionally, ``jtheta(n, z, q, derivative=d)`` with `d > 0` computes
a `d`-th derivative with respect to `z`.
**Examples and basic properties**
Considered as functions of `z`, the Jacobi theta functions may be
viewed as generalizations of the ordinary trigonometric functions
cos and sin. They are periodic functions::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> jtheta(1, 0.25, '0.2')
0.2945120798627300045053104
>>> jtheta(1, 0.25 + 2*pi, '0.2')
0.2945120798627300045053104
Indeed, the series defining the theta functions are essentially
trigonometric Fourier series. The coefficients can be retrieved
using :func:`~mpmath.fourier`::
>>> mp.dps = 10
>>> nprint(fourier(lambda x: jtheta(2, x, 0.5), [-pi, pi], 4))
([0.0, 1.68179, 0.0, 0.420448, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0])
The Jacobi theta functions are also so-called quasiperiodic
functions of `z` and `\tau`, meaning that for fixed `\tau`,
`\vartheta_n(z, q)` and `\vartheta_n(z+\pi \tau, q)` are the same
except for an exponential factor::
>>> mp.dps = 25
>>> tau = 3*j/10
>>> q = exp(pi*j*tau)
>>> z = 10
>>> jtheta(4, z+tau*pi, q)
(-0.682420280786034687520568 + 1.526683999721399103332021j)
>>> -exp(-2*j*z)/q * jtheta(4, z, q)
(-0.682420280786034687520568 + 1.526683999721399103332021j)
The Jacobi theta functions satisfy a huge number of other
functional equations, such as the following identity (valid for
any `q`)::
>>> q = mpf(3)/10
>>> jtheta(3,0,q)**4
6.823744089352763305137427
>>> jtheta(2,0,q)**4 + jtheta(4,0,q)**4
6.823744089352763305137427
Extensive listings of identities satisfied by the Jacobi theta
functions can be found in standard reference works.
The Jacobi theta functions are related to the gamma function
for special arguments::
>>> jtheta(3, 0, exp(-pi))
1.086434811213308014575316
>>> pi**(1/4.) / gamma(3/4.)
1.086434811213308014575316
:func:`~mpmath.jtheta` supports arbitrary precision evaluation and complex
arguments::
>>> mp.dps = 50
>>> jtheta(4, sqrt(2), 0.5)
2.0549510717571539127004115835148878097035750653737
>>> mp.dps = 25
>>> jtheta(4, 1+2j, (1+j)/5)
(7.180331760146805926356634 - 1.634292858119162417301683j)
Evaluation of derivatives::
>>> mp.dps = 25
>>> jtheta(1, 7, 0.25, 1); diff(lambda z: jtheta(1, z, 0.25), 7)
1.209857192844475388637236
1.209857192844475388637236
>>> jtheta(1, 7, 0.25, 2); diff(lambda z: jtheta(1, z, 0.25), 7, 2)
-0.2598718791650217206533052
-0.2598718791650217206533052
>>> jtheta(2, 7, 0.25, 1); diff(lambda z: jtheta(2, z, 0.25), 7)
-1.150231437070259644461474
-1.150231437070259644461474
>>> jtheta(2, 7, 0.25, 2); diff(lambda z: jtheta(2, z, 0.25), 7, 2)
-0.6226636990043777445898114
-0.6226636990043777445898114
>>> jtheta(3, 7, 0.25, 1); diff(lambda z: jtheta(3, z, 0.25), 7)
-0.9990312046096634316587882
-0.9990312046096634316587882
>>> jtheta(3, 7, 0.25, 2); diff(lambda z: jtheta(3, z, 0.25), 7, 2)
-0.1530388693066334936151174
-0.1530388693066334936151174
>>> jtheta(4, 7, 0.25, 1); diff(lambda z: jtheta(4, z, 0.25), 7)
0.9820995967262793943571139
0.9820995967262793943571139
>>> jtheta(4, 7, 0.25, 2); diff(lambda z: jtheta(4, z, 0.25), 7, 2)
0.3936902850291437081667755
0.3936902850291437081667755
**Possible issues**
For `|q| \ge 1` or `\Im(\tau) \le 0`, :func:`~mpmath.jtheta` raises
``ValueError``. This exception is also raised for `|q|` extremely
close to 1 (or equivalently `\tau` very close to 0), since the
series would converge too slowly::
>>> jtheta(1, 10, 0.99999999 * exp(0.5*j))
Traceback (most recent call last):
...
ValueError: abs(q) > THETA_Q_LIM = 1.000000
"""
eulernum = r"""
Gives the `n`-th Euler number, defined as the `n`-th derivative of
`\mathrm{sech}(t) = 1/\cosh(t)` evaluated at `t = 0`. Equivalently, the
Euler numbers give the coefficients of the Taylor series
.. math ::
\mathrm{sech}(t) = \sum_{n=0}^{\infty} \frac{E_n}{n!} t^n.
The Euler numbers are closely related to Bernoulli numbers
and Bernoulli polynomials. They can also be evaluated in terms of
Euler polynomials (see :func:`~mpmath.eulerpoly`) as `E_n = 2^n E_n(1/2)`.
**Examples**
Computing the first few Euler numbers and verifying that they
agree with the Taylor series::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> [eulernum(n) for n in range(11)]
[1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0]
>>> chop(diffs(sech, 0, 10))
[1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0]
Euler numbers grow very rapidly. :func:`~mpmath.eulernum` efficiently
computes numerical approximations for large indices::
>>> eulernum(50)
-6.053285248188621896314384e+54
>>> eulernum(1000)
3.887561841253070615257336e+2371
>>> eulernum(10**20)
4.346791453661149089338186e+1936958564106659551331
Comparing with an asymptotic formula for the Euler numbers::
>>> n = 10**5
>>> (-1)**(n//2) * 8 * sqrt(n/(2*pi)) * (2*n/(pi*e))**n
3.69919063017432362805663e+436961
>>> eulernum(n)
3.699193712834466537941283e+436961
Pass ``exact=True`` to obtain exact values of Euler numbers as integers::
>>> print(eulernum(50, exact=True))
-6053285248188621896314383785111649088103498225146815121
>>> print(eulernum(200, exact=True) % 10**10)
1925859625
>>> eulernum(1001, exact=True)
0
"""
eulerpoly = r"""
Evaluates the Euler polynomial `E_n(z)`, defined by the generating function
representation
.. math ::
\frac{2e^{zt}}{e^t+1} = \sum_{n=0}^\infty E_n(z) \frac{t^n}{n!}.
The Euler polynomials may also be represented in terms of
Bernoulli polynomials (see :func:`~mpmath.bernpoly`) using various formulas, for
example
.. math ::
E_n(z) = \frac{2}{n+1} \left(
B_n(z)-2^{n+1}B_n\left(\frac{z}{2}\right)
\right).
Special values include the Euler numbers `E_n = 2^n E_n(1/2)` (see
:func:`~mpmath.eulernum`).
**Examples**
Computing the coefficients of the first few Euler polynomials::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> for n in range(6):
... chop(taylor(lambda z: eulerpoly(n,z), 0, n))
...
[1.0]
[-0.5, 1.0]
[0.0, -1.0, 1.0]
[0.25, 0.0, -1.5, 1.0]
[0.0, 1.0, 0.0, -2.0, 1.0]
[-0.5, 0.0, 2.5, 0.0, -2.5, 1.0]
Evaluation for arbitrary `z`::
>>> eulerpoly(2,3)
6.0
>>> eulerpoly(5,4)
423.5
>>> eulerpoly(35, 11111111112)
3.994957561486776072734601e+351
>>> eulerpoly(4, 10+20j)
(-47990.0 - 235980.0j)
>>> eulerpoly(2, '-3.5e-5')
0.000035001225
>>> eulerpoly(3, 0.5)
0.0
>>> eulerpoly(55, -10**80)
-1.0e+4400
>>> eulerpoly(5, -inf)
-inf
>>> eulerpoly(6, -inf)
+inf
Computing Euler numbers::
>>> 2**26 * eulerpoly(26,0.5)
-4087072509293123892361.0
>>> eulernum(26)
-4087072509293123892361.0
Evaluation is accurate for large `n` and small `z`::
>>> eulerpoly(100, 0.5)
2.29047999988194114177943e+108
>>> eulerpoly(1000, 10.5)
3.628120031122876847764566e+2070
>>> eulerpoly(10000, 10.5)
1.149364285543783412210773e+30688
"""
spherharm = r"""
Evaluates the spherical harmonic `Y_l^m(\theta,\phi)`,
.. math ::
Y_l^m(\theta,\phi) = \sqrt{\frac{2l+1}{4\pi}\frac{(l-m)!}{(l+m)!}}
P_l^m(\cos \theta) e^{i m \phi}
where `P_l^m` is an associated Legendre function (see :func:`~mpmath.legenp`).
Here `\theta \in [0, \pi]` denotes the polar coordinate (ranging
from the north pole to the south pole) and `\phi \in [0, 2 \pi]` denotes the
azimuthal coordinate on a sphere. Care should be used since many different
conventions for spherical coordinate variables are used.
Usually spherical harmonics are considered for `l \in \mathbb{N}`,
`m \in \mathbb{Z}`, `|m| \le l`. More generally, `l,m,\theta,\phi`
are permitted to be complex numbers.
.. note ::
:func:`~mpmath.spherharm` returns a complex number, even if the value is
purely real.
**Plots**
.. literalinclude :: /plots/spherharm40.py
`Y_{4,0}`:
.. image :: /plots/spherharm40.png
`Y_{4,1}`:
.. image :: /plots/spherharm41.png
`Y_{4,2}`:
.. image :: /plots/spherharm42.png
`Y_{4,3}`:
.. image :: /plots/spherharm43.png
`Y_{4,4}`:
.. image :: /plots/spherharm44.png
**Examples**
Some low-order spherical harmonics with reference values::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> theta = pi/4
>>> phi = pi/3
>>> spherharm(0,0,theta,phi); 0.5*sqrt(1/pi)*expj(0)
(0.2820947917738781434740397 + 0.0j)
(0.2820947917738781434740397 + 0.0j)
>>> spherharm(1,-1,theta,phi); 0.5*sqrt(3/(2*pi))*expj(-phi)*sin(theta)
(0.1221506279757299803965962 - 0.2115710938304086076055298j)
(0.1221506279757299803965962 - 0.2115710938304086076055298j)
>>> spherharm(1,0,theta,phi); 0.5*sqrt(3/pi)*cos(theta)*expj(0)
(0.3454941494713354792652446 + 0.0j)
(0.3454941494713354792652446 + 0.0j)
>>> spherharm(1,1,theta,phi); -0.5*sqrt(3/(2*pi))*expj(phi)*sin(theta)
(-0.1221506279757299803965962 - 0.2115710938304086076055298j)
(-0.1221506279757299803965962 - 0.2115710938304086076055298j)
With the normalization convention used, the spherical harmonics are orthonormal
on the unit sphere::
>>> sphere = [0,pi], [0,2*pi]
>>> dS = lambda t,p: fp.sin(t) # differential element
>>> Y1 = lambda t,p: fp.spherharm(l1,m1,t,p)
>>> Y2 = lambda t,p: fp.conj(fp.spherharm(l2,m2,t,p))
>>> l1 = l2 = 3; m1 = m2 = 2
>>> fp.chop(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere))
1.0000000000000007
>>> m2 = 1 # m1 != m2
>>> print(fp.chop(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere)))
0.0
Evaluation is accurate for large orders::
>>> spherharm(1000,750,0.5,0.25)
(3.776445785304252879026585e-102 - 5.82441278771834794493484e-102j)
Evaluation works with complex parameter values::
>>> spherharm(1+j, 2j, 2+3j, -0.5j)
(64.44922331113759992154992 + 1981.693919841408089681743j)
"""
scorergi = r"""
Evaluates the Scorer function
.. math ::
\operatorname{Gi}(z) =
\operatorname{Ai}(z) \int_0^z \operatorname{Bi}(t) dt +
\operatorname{Bi}(z) \int_z^{\infty} \operatorname{Ai}(t) dt
which gives a particular solution to the inhomogeneous Airy
differential equation `f''(z) - z f(z) = 1/\pi`. Another
particular solution is given by the Scorer Hi-function
(:func:`~mpmath.scorerhi`). The two functions are related as
`\operatorname{Gi}(z) + \operatorname{Hi}(z) = \operatorname{Bi}(z)`.
**Plots**
.. literalinclude :: /plots/gi.py
.. image :: /plots/gi.png
.. literalinclude :: /plots/gi_c.py
.. image :: /plots/gi_c.png
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> scorergi(0); 1/(power(3,'7/6')*gamma('2/3'))
0.2049755424820002450503075
0.2049755424820002450503075
>>> diff(scorergi, 0); 1/(power(3,'5/6')*gamma('1/3'))
0.1494294524512754526382746
0.1494294524512754526382746
>>> scorergi(+inf); scorergi(-inf)
0.0
0.0
>>> scorergi(1)
0.2352184398104379375986902
>>> scorergi(-1)
-0.1166722172960152826494198
Evaluation for large arguments::
>>> scorergi(10)
0.03189600510067958798062034
>>> scorergi(100)
0.003183105228162961476590531
>>> scorergi(1000000)
0.0000003183098861837906721743873
>>> 1/(pi*1000000)
0.0000003183098861837906715377675
>>> scorergi(-1000)
-0.08358288400262780392338014
>>> scorergi(-100000)
0.02886866118619660226809581
>>> scorergi(50+10j)
(0.0061214102799778578790984 - 0.001224335676457532180747917j)
>>> scorergi(-50-10j)
(5.236047850352252236372551e+29 - 3.08254224233701381482228e+29j)
>>> scorergi(100000j)
(-8.806659285336231052679025e+6474077 + 8.684731303500835514850962e+6474077j)
Verifying the connection between Gi and Hi::
>>> z = 0.25
>>> scorergi(z) + scorerhi(z)
0.7287469039362150078694543
>>> airybi(z)
0.7287469039362150078694543
Verifying the differential equation::
>>> for z in [-3.4, 0, 2.5, 1+2j]:
... chop(diff(scorergi,z,2) - z*scorergi(z))
...
-0.3183098861837906715377675
-0.3183098861837906715377675
-0.3183098861837906715377675
-0.3183098861837906715377675
Verifying the integral representation::
>>> z = 0.5
>>> scorergi(z)
0.2447210432765581976910539
>>> Ai,Bi = airyai,airybi
>>> Bi(z)*(Ai(inf,-1)-Ai(z,-1)) + Ai(z)*(Bi(z,-1)-Bi(0,-1))
0.2447210432765581976910539
**References**
1. [DLMF]_ section 9.12: Scorer Functions
"""
scorerhi = r"""
Evaluates the second Scorer function
.. math ::
\operatorname{Hi}(z) =
\operatorname{Bi}(z) \int_{-\infty}^z \operatorname{Ai}(t) dt -
\operatorname{Ai}(z) \int_{-\infty}^z \operatorname{Bi}(t) dt
which gives a particular solution to the inhomogeneous Airy
differential equation `f''(z) - z f(z) = 1/\pi`. See also
:func:`~mpmath.scorergi`.
**Plots**
.. literalinclude :: /plots/hi.py
.. image :: /plots/hi.png
.. literalinclude :: /plots/hi_c.py
.. image :: /plots/hi_c.png
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> scorerhi(0); 2/(power(3,'7/6')*gamma('2/3'))
0.4099510849640004901006149
0.4099510849640004901006149
>>> diff(scorerhi,0); 2/(power(3,'5/6')*gamma('1/3'))
0.2988589049025509052765491
0.2988589049025509052765491
>>> scorerhi(+inf); scorerhi(-inf)
+inf
0.0
>>> scorerhi(1)
0.9722051551424333218376886
>>> scorerhi(-1)
0.2206696067929598945381098
Evaluation for large arguments::
>>> scorerhi(10)
455641153.5163291358991077
>>> scorerhi(100)
6.041223996670201399005265e+288
>>> scorerhi(1000000)
7.138269638197858094311122e+289529652
>>> scorerhi(-10)
0.0317685352825022727415011
>>> scorerhi(-100)
0.003183092495767499864680483
>>> scorerhi(100j)
(-6.366197716545672122983857e-9 + 0.003183098861710582761688475j)
>>> scorerhi(50+50j)
(-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j)
>>> scorerhi(-1000-1000j)
(0.0001591549432510502796565538 - 0.000159154943091895334973109j)
Verifying the differential equation::
>>> for z in [-3.4, 0, 2, 1+2j]:
... chop(diff(scorerhi,z,2) - z*scorerhi(z))
...
0.3183098861837906715377675
0.3183098861837906715377675
0.3183098861837906715377675
0.3183098861837906715377675
Verifying the integral representation::
>>> z = 0.5
>>> scorerhi(z)
0.6095559998265972956089949
>>> Ai,Bi = airyai,airybi
>>> Bi(z)*(Ai(z,-1)-Ai(-inf,-1)) - Ai(z)*(Bi(z,-1)-Bi(-inf,-1))
0.6095559998265972956089949
"""
stirling1 = r"""
Gives the Stirling number of the first kind `s(n,k)`, defined by
.. math ::
x(x-1)(x-2)\cdots(x-n+1) = \sum_{k=0}^n s(n,k) x^k.
The value is computed using an integer recurrence. The implementation
is not optimized for approximating large values quickly.
**Examples**
Comparing with the generating function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> taylor(lambda x: ff(x, 5), 0, 5)
[0.0, 24.0, -50.0, 35.0, -10.0, 1.0]
>>> [stirling1(5, k) for k in range(6)]
[0.0, 24.0, -50.0, 35.0, -10.0, 1.0]
Recurrence relation::
>>> n, k = 5, 3
>>> stirling1(n+1,k) + n*stirling1(n,k) - stirling1(n,k-1)
0.0
The matrices of Stirling numbers of first and second kind are inverses
of each other::
>>> A = matrix(5, 5); B = matrix(5, 5)
>>> for n in range(5):
... for k in range(5):
... A[n,k] = stirling1(n,k)
... B[n,k] = stirling2(n,k)
...
>>> A * B
[1.0 0.0 0.0 0.0 0.0]
[0.0 1.0 0.0 0.0 0.0]
[0.0 0.0 1.0 0.0 0.0]
[0.0 0.0 0.0 1.0 0.0]
[0.0 0.0 0.0 0.0 1.0]
Pass ``exact=True`` to obtain exact values of Stirling numbers as integers::
>>> stirling1(42, 5)
-2.864498971768501633736628e+50
>>> print(stirling1(42, 5, exact=True))
-286449897176850163373662803014001546235808317440000
"""
# Raw docstring later attached to mpmath's stirling2().  The text below is
# runtime string data containing doctest examples whose expected output must
# stay byte-exact; do not edit it in place.
stirling2 = r"""
Gives the Stirling number of the second kind `S(n,k)`, defined by
.. math ::
x^n = \sum_{k=0}^n S(n,k) x(x-1)(x-2)\cdots(x-k+1)
The value is computed using integer arithmetic to evaluate a power sum.
The implementation is not optimized for approximating large values quickly.
**Examples**
Comparing with the generating function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> taylor(lambda x: sum(stirling2(5,k) * ff(x,k) for k in range(6)), 0, 5)
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
Recurrence relation::
>>> n, k = 5, 3
>>> stirling2(n+1,k) - k*stirling2(n,k) - stirling2(n,k-1)
0.0
Pass ``exact=True`` to obtain exact values of Stirling numbers as integers::
>>> stirling2(52, 10)
2.641822121003543906807485e+45
>>> print(stirling2(52, 10, exact=True))
2641822121003543906807485307053638921722527655
"""
|
JensGrabner/mpmath
|
mpmath/function_docs.py
|
Python
|
bsd-3-clause
| 280,518 | 0.000125 |
# -*- coding: utf-8 -*-
# Keyword extraction experiment:
#  1. segment target_article.txt with jieba and drop stopwords;
#  2. keep only tokens known to the word2vec model;
#  3. score each word by the average pairwise similarity of its "friend"
#     group (words whose similarity to it is >= 0.6);
#  4. print the 20 best-scored words, then the 10 best after collapsing
#     near-duplicates (similarity >= 0.7).
from gensim.models import word2vec
from gensim import models
import jieba
import codecs
import io
from collections import Counter
import operator
import numpy

# Read the article, then load the segmentation dictionary and the model.
f = codecs.open("target_article.txt", 'r', 'utf8')
content = f.readlines()
f.close()  # the handle was previously leaked
jieba.set_dictionary('jieba_dict/dict.txt.big')
model = models.Word2Vec.load_word2vec_format('med250.model.bin', binary=True)

# Import stopwords, one word per line.
stopwordset = set()
with io.open('jieba_dict/stopwords.txt', 'r', encoding='utf-8') as sw:
    for line in sw:
        stopwordset.add(line.strip('\n'))

# Cut the article into tokens, dropping stopwords.
article = []
for line in content:
    for token in jieba.cut(line):
        if token not in stopwordset:
            article.append(token)

# Token frequencies, restricted to words the model knows.
raw_data = Counter(article)
raw_data = {key: raw_data[key] for key in raw_data if key in model.vocab}
words = list(raw_data)

# model.similarity is symmetric, so cache each unordered pair once instead
# of querying the model repeatedly inside the O(n^2) loops below.
_sim_cache = {}
def similarity(a, b):
    # Cached, symmetric wrapper around model.similarity.
    key = (a, b) if a <= b else (b, a)
    if key not in _sim_cache:
        _sim_cache[key] = model.similarity(key[0], key[1])
    return _sim_cache[key]

# For every word collect its friends: all words at similarity >= 0.6.
# A word is always its own friend, since similarity(w, w) == 1.0.
map_words = [[other for other in words if similarity(word, other) >= 0.6]
             for word in words]

# Score each word by the summed pairwise similarity of its friend group,
# normalised by the squared group size.  Diagonal pairs are skipped in the
# sum but deliberately kept in the denominator, matching the original run.
related_word = {}
for word, friends in zip(words, map_words):
    total = 0.0
    for a in friends:
        for b in friends:
            if a != b:
                total += similarity(a, b)
    related_word[word] = total / float(len(friends) * len(friends))

s_imp_words = sorted(related_word.items(), key=operator.itemgetter(1), reverse=True)
for entry in s_imp_words[:20]:
    print(entry[0])
print("-----------------------")

# Greedily drop every word that is a near-duplicate (similarity >= 0.7)
# of a better-scored keyword that was already kept.
keywords = []
fg = numpy.zeros(len(s_imp_words))
for i in range(len(s_imp_words)):
    if fg[i] == 1:
        continue
    for j in range(i + 1, len(s_imp_words)):
        if fg[j] != 1 and similarity(s_imp_words[i][0], s_imp_words[j][0]) >= 0.7:
            fg[j] = 1
    keywords.append(s_imp_words[i])
for entry in keywords[:10]:
    print(entry[0])
|
chunchih/article-matching
|
experiment/find_key_relation.py
|
Python
|
bsd-3-clause
| 2,408 | 0.023671 |
###############################################################################
#PPAM is a pulseaudio interface.
#Copyright (C) 2013 Denis Doria (Thuck)
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; version 2
#of the License.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
###############################################################################
import curses
from curses import KEY_UP, KEY_DOWN
from ui.basic import draw_info_window
from pulse import pulseaudio as pa
from pulse import components as co
class GenericStream(object):
    """Curses tab listing active PulseAudio streams of one kind.

    Renders one row per stream and maps single-key commands (volume,
    mute, kill, info overlays) onto the currently selected stream via a
    proxy object looked up on the ``pulse.components`` module.
    """
    def __init__(self, win, stream_type, name):
        # win: curses window to draw into; stream_type: class name on the
        # components module (e.g. 'Playback'); name: translated tab title.
        self.win = win
        self.height, self.width = self.win.getmaxyx()
        self.name = name
        # Help lines shown in the overlay when the user presses 'H'.
        # NOTE(review): "steram" is a typo, but it is a live gettext msgid;
        # fixing it also requires updating the translation catalogs.
        self.help = [_("+/- to Increase and decrease volume"),
                     _("./, to Increase and decrease right volume"),
                     _("</> to Increase and decrease left volume"),
                     _("m to Mute"),
                     _("K to kill the steram")]
        self.selected_item = 0
        self.max_item = 0
        self.playback = getattr(co, stream_type)()
        self.streams = []
        # type_of_info: 'p' (properties), 'i' (info), 'H' (help) or None.
        self.type_of_info = None
        self.info_window_data = None
    def resize_window(self, win):
        """Adopt a new curses window after a terminal resize."""
        self.win = win
        self.height, self.width = self.win.getmaxyx()
    def _update_info_window(self, pid):
        # Refresh the overlay content for the active info mode, if any.
        if self.type_of_info == 'p':
            self.info_window_data = self.playback.properties(pid)
        elif self.type_of_info == 'i':
            self.info_window_data = self.playback.info(pid)
        elif self.type_of_info == 'H':
            self.info_window_data = self.help
    def update(self, char):
        """Dispatch one keypress against the currently selected stream."""
        # Clamp the selection: the stream list may have shrunk since the
        # last draw().
        if self.selected_item > self.max_item:
            self.selected_item = self.max_item
        if char in (ord('H'), ):
            self.type_of_info = 'H'
            self.info_window_data = self.help
        elif char in (ord('c'), ):
            # 'c' closes the info overlay.
            self.type_of_info = None
            self.info_window_data = None
        elif self.streams:
            # Second tuple field of a stream entry is the pid used to
            # address it in the pulse layer.
            pid = self.streams[self.selected_item][1]
            self._update_info_window(pid)
            if char in (ord('+'), ):
                self.playback.increase_volume(pid)
            elif char in (ord('-'), ):
                self.playback.decrease_volume(pid)
            elif char in (ord('m'),):
                self.playback.mute(pid)
            elif char in (ord('>'), ):
                self.playback.increase_left_volume(pid)
            elif char in (ord('.'), ):
                self.playback.increase_right_volume(pid)
            elif char in (ord('<'), ):
                self.playback.decrease_left_volume(pid)
            elif char in (ord(','), ):
                self.playback.decrease_right_volume(pid)
            elif char in (ord('p'), ):
                self.type_of_info = 'p'
                self.info_window_data = self.playback.properties(pid)
            elif char in (ord('i'), ):
                self.type_of_info = 'i'
                self.info_window_data = self.playback.info(pid)
            elif char in (ord('K'), ):
                self.playback.kill(pid)
            elif char in (KEY_UP, ord('k')) and self.selected_item > 0:
                self.selected_item -= 1
            elif (char in (KEY_DOWN, ord('j')) and
                  self.selected_item < self.max_item):
                self.selected_item += 1
    def draw(self):
        """Redraw the stream list and the optional info overlay."""
        self.streams = self.playback.playing()
        line_number = 0
        self.win.erase()
        self.win.box()
        for line_number, stream in enumerate(self.streams):
            # 5-tuples carry separate left/right channel volumes;
            # 4-tuples carry a single mono volume.
            if len(stream) == 5:
                (app_name,
                 app_pid,
                 volume_left,
                 volume_right,
                 mute) = stream
                line = '[%s] L:%i%% R:%i%% (%s)' % (app_name, volume_left,
                                                    volume_right, app_pid)
            else:
                (app_name,
                 app_pid,
                 volume_left,
                 mute) = stream
                line = '[%s] M:%i%% (%s)' % (app_name, volume_left, app_pid)
            if mute:
                line = '%s [M]' % (line)
            if self.selected_item == line_number:
                # Highlight the selected row with color pair 1.
                self.win.addstr(line_number + 1, 1, line, curses.color_pair(1))
            else:
                self.win.addstr(line_number + 1, 1, line)
        self.max_item = line_number
        if self.info_window_data:
            draw_info_window(self.win, self.info_window_data)
        self.win.refresh()
class TabPlayback(GenericStream):
    """Tab presenting the playback (sink input) streams."""
    def __init__(self, win):
        super(TabPlayback, self).__init__(win, 'Playback', _('Playback'))
class TabRecord(GenericStream):
    """Tab presenting the record (source output) streams."""
    def __init__(self, win):
        super(TabRecord, self).__init__(win, 'Record', _('Record'))
class GenericDevice(object):
    """Curses tab listing PulseAudio devices (sinks or sources).

    Mirrors GenericStream, but addresses devices by name instead of
    streams by pid, and additionally shows/changes the active port.
    """
    def __init__(self, win, device_type, name):
        # win: curses window to draw into; device_type: class name on the
        # components module (e.g. 'OutputDevices'); name: translated title.
        self.win = win
        self.height, self.width = self.win.getmaxyx()
        self.name = name
        # Help lines shown in the overlay when the user presses 'H'.
        self.help = [_("+/- to Increase and decrease volume"),
                     _("./, to Increase and decrease right volume"),
                     _("</> to Increase and decrease left volume"),
                     _("m to Mute")]
        self.selected_item = 0
        self.max_item = 0
        self.device = getattr(co, device_type)()
        self.devices = []
        # type_of_info: 'p' (properties), 'i' (info), 'H' (help) or None.
        self.type_of_info = None
        self.info_window_data = None
    def resize_window(self, win):
        """Adopt a new curses window after a terminal resize."""
        self.win = win
        self.height, self.width = self.win.getmaxyx()
    def _update_info_window(self, info):
        # Refresh the overlay content for the active info mode, if any.
        if self.type_of_info == 'p':
            self.info_window_data = self.device.properties(info)
        elif self.type_of_info == 'i':
            self.info_window_data = self.device.info(info)
        elif self.type_of_info == 'H':
            self.info_window_data = self.help
    def update(self, char):
        """Dispatch one keypress against the currently selected device."""
        # Clamp the selection: the device list may have shrunk since the
        # last draw().
        if self.selected_item > self.max_item:
            self.selected_item = self.max_item
        if char in (ord('H'), ):
            self.type_of_info = 'H'
            self.info_window_data = self.help
        elif char in (ord('c'), ):
            # 'c' closes the info overlay.
            self.type_of_info = None
            self.info_window_data = None
        elif self.devices:
            # First tuple field of a device entry is its pulse name.
            name = self.devices[self.selected_item][0]
            self._update_info_window(name)
            if char in (ord('+'), ):
                self.device.increase_volume(name)
            elif char in (ord('-'), ):
                self.device.decrease_volume(name)
            elif char in (ord('m'),):
                self.device.mute(name)
            elif char in (ord('>'), ):
                self.device.increase_left_volume(name)
            elif char in (ord('.'), ):
                self.device.increase_right_volume(name)
            elif char in (ord('<'), ):
                self.device.decrease_left_volume(name)
            elif char in (ord(','), ):
                self.device.decrease_right_volume(name)
            elif char in (ord('p'), ):
                self.type_of_info = 'p'
                self.info_window_data = self.device.properties(name)
            elif char in (ord('i'), ):
                self.type_of_info = 'i'
                self.info_window_data = self.device.info(name)
            elif char in (ord('n'), ):
                self.device.change_port_next(name)
            elif char in (ord('p'), ):
                # NOTE(review): unreachable -- ord('p') is already consumed
                # by the "properties" branch above, so change_port_previous
                # can never fire.  Confirm the intended key binding before
                # remapping it.
                self.device.change_port_previous(name)
            elif char in (KEY_UP, ord('k')) and self.selected_item > 0:
                self.selected_item -= 1
            elif (char in (KEY_DOWN, ord('j')) and
                  self.selected_item < self.max_item):
                self.selected_item += 1
    def draw(self):
        """Redraw the device list and the optional info overlay."""
        self.devices = self.device.get_devices()
        line_number = 0
        self.win.erase()
        self.win.box()
        for line_number, device in enumerate(self.devices):
            # 5-tuples carry separate left/right channel volumes;
            # 4-tuples carry a single mono volume.
            if len(device) == 5:
                (device_name,
                 volume_left,
                 volume_right,
                 mute,
                 port) = device
                line = '[%s] L:%i%% R:%i%%' % (
                    device_name.split('.')[-1].capitalize(),
                    volume_left, volume_right)
            else:
                (device_name,
                 volume,
                 mute,
                 port) = device
                line = '[%s] M:%i%%' % (
                    device_name.split('.')[-1].capitalize(),
                    volume)
            if port:
                # port is a sequence of (is_active, port_name) pairs; the
                # active port is rendered in parentheses.
                str_port = ''
                for i in port:
                    if i[0] == True:
                        str_port = '%s (%s)' % (
                            str_port, i[1].split('-')[-1].capitalize())
                    else:
                        str_port = '%s %s' % (
                            str_port, i[1].split('-')[-1].capitalize())
                line = '%s [%s]' % (line, str_port.strip())
            if mute:
                line = '%s [M]' % (line)
            if self.selected_item == line_number:
                # Highlight the selected row with color pair 1.
                self.win.addstr(line_number + 1, 1, line, curses.color_pair(1))
            else:
                self.win.addstr(line_number + 1, 1, line)
        self.max_item = line_number
        if self.info_window_data:
            draw_info_window(self.win, self.info_window_data)
        self.win.refresh()
class TabOutputDevices(GenericDevice):
    """Tab presenting the output devices (sinks)."""
    def __init__(self, win):
        super(TabOutputDevices, self).__init__(win, 'OutputDevices',
                                               _('Output Devices'))
class TabInputDevices(GenericDevice):
    """Tab presenting the input devices (sources)."""
    def __init__(self, win):
        super(TabInputDevices, self).__init__(win, 'InputDevices',
                                              _('Input Devices'))
class TabCards(object):
    """Curses tab listing sound cards and their profiles."""
    def __init__(self, win):
        self.win = win
        self.height, self.width = self.win.getmaxyx()
        self.name = _('Cards')
        self.help = [_("Nothing here")]
        # Talks to PulseAudio over D-Bus, unlike the stream/device tabs.
        self.conn = pa.dbus_connection()
        self.core = pa.Core(self.conn)
        self.card = co.Cards()
        self.cards = []
        self.selected_item = 0
        self.max_item = 0
        # type_of_info: 'p' (properties), 'i' (info), 'H' (help) or None.
        self.type_of_info = None
        self.info_window_data = None
    def resize_window(self, win):
        """Adopt a new curses window after a terminal resize."""
        self.win = win
        self.height, self.width = self.win.getmaxyx()
    def _update_info_window(self, info):
        # Refresh the overlay content for the active info mode, if any.
        if self.type_of_info == 'p':
            self.info_window_data = self.card.properties(info)
        elif self.type_of_info == 'i':
            self.info_window_data = self.card.info(info)
        elif self.type_of_info == 'H':
            self.info_window_data = self.help
    def update(self, char):
        """Dispatch one keypress against the currently selected card."""
        if self.selected_item > self.max_item:
            self.selected_item = self.max_item
        if char in (ord('H'), ):
            self.type_of_info = 'H'
            self.info_window_data = self.help
        elif char in (ord('c'), ):
            self.type_of_info = None
            self.info_window_data = None
        elif self.cards:
            info = self.cards[self.selected_item]
            self._update_info_window(info)
            if char == ord('a'):
                # 'a' (activate profile) is intentionally a no-op stub.
                #testing code - Doesn't work
                # self.cards[0].active_profile =
                # self.profiles[self.selected_item].profile_name
                pass
            elif char in (ord('p'), ):
                self.type_of_info = 'p'
                self.info_window_data = self.card.properties(info)
            elif char in (ord('i'), ):
                self.type_of_info = 'i'
                self.info_window_data = self.card.info(info)
            elif char in (KEY_UP, ord('k')) and self.selected_item > 0:
                self.selected_item -= 1
            elif (char in (KEY_DOWN, ord('j')) and
                  self.selected_item < self.max_item):
                self.selected_item += 1
    def draw(self):
        """Redraw the card/profile list and the optional info overlay."""
        self.cards = self.card.get_cards()
        line_number = 0
        self.win.erase()
        self.win.box()
        for line_number, card in enumerate(self.cards):
            (card_name,
             profile_name,
             active) = card
            # Abbreviate the profile description to keep rows short.
            line = '[%s] %s' % (card_name, profile_name.replace('output', _('Out')).replace('input', _('In')).replace('+', ' '))
            if active:
                line = '%s [A]' % (line)
            if self.selected_item == line_number:
                self.win.addstr(line_number + 1, 1, line, curses.color_pair(1))
            else:
                self.win.addstr(line_number + 1, 1, line)
        self.max_item = line_number
        if self.info_window_data:
            draw_info_window(self.win, self.info_window_data)
        self.win.refresh()
|
thuck/ppam
|
ui/tabs.py
|
Python
|
gpl-2.0
| 13,310 | 0.003681 |
# -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import os
# pip install sphinx_rtd_theme
# import sphinx_rtd_theme
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.append(os.path.abspath('some/directory'))
#
sys.path.insert(0, os.path.join('ansible', 'lib'))
sys.path.append(os.path.abspath(os.path.join('..', '_extensions')))
# We want sphinx to document the ansible modules contained in this repository,
# not those that may happen to be installed in the version
# of Python used to run sphinx. When sphinx loads in order to document,
# the repository version needs to be the one that is loaded:
sys.path.insert(0, os.path.abspath(os.path.join('..', '..', '..', 'lib')))
VERSION = 'devel'
AUTHOR = 'Ansible, Inc'
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# TEST: 'sphinxcontrib.fulltoc'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments_lexer', 'notfound.extension']
# Later on, add 'sphinx.ext.viewcode' to the list if you want to have
# colorized code generated too for references.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Ansible Documentation'
copyright = "2013-2018 Ansible, Inc"
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
# exclude_dirs = []
# A list of glob-style patterns that should be excluded when looking
# for source files.
# OBSOLETE - removing this - dharmabumstead 2018-02-06
# exclude_patterns = ['modules']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'YAML+Jinja'
# Substitutions, variables, entities, & shortcuts for text which do not need to link to anything.
# For titles which should be a link, use the intersphinx anchors set at the index, chapter, and section levels, such as qi_start_:
# |br| is useful for formatting fields inside of tables
# |_| is a nonbreaking space; similarly useful inside of tables
rst_epilog = """
.. |br| raw:: html
   <br>
.. |_| unicode:: 0xA0
    :trim:
"""
# Options for HTML output
# -----------------------
html_theme_path = ['../_themes']
html_theme = 'sphinx_rtd_theme'
html_short_title = 'Ansible Documentation'
html_theme_options = {
    'canonical_url': "https://docs.ansible.com/ansible/latest/",
    'collapse_navigation': "True",
    'vcs_pageview_mode': 'edit'
}
html_context = {
    'display_github': 'True',
    'github_user': 'ansible',
    'github_repo': 'ansible',
    'github_version': 'devel/docs/docsite/rst/',
    'github_module_version': 'devel/lib/ansible/modules/',
    'current_version': version,
    'latest_version': '2.8',
    # list specifically out of order to make latest work
    'available_versions': ('latest', '2.7', '2.6', 'devel'),
    'css_files': ('_static/ansible.css',  # overrides to the standard theme
                  ),
}
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'solar.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Ansible Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'https://docs.ansible.com/ansible/latest'
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poseidodoc'
# Configuration for sphinx-notfound-pages
# with no 'notfound_template' and no 'notfound_context' set,
# the extension builds 404.rst into a location-agnostic 404 page
#
# default is `en` - using this for the sub-site:
notfound_default_language = "ansible"
# default is `latest`:
# setting explicitly - docsite serves up /ansible/latest/404.html
# so keep this set to `latest` even on the `devel` branch
# then no maintenance is needed when we branch a new stable_x.x
notfound_default_version = "latest"
# makes default setting explicit:
notfound_no_urls_prefix = False
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
# Derive the title from `release` so it cannot go stale again (it was
# previously hard-coded to "Ansible 2.2 Documentation" while the tree
# had long since moved on to `devel`).
latex_documents = [
    ('index', 'ansible.tex', 'Ansible %s Documentation' % release, AUTHOR,
     'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
autoclass_content = 'both'
intersphinx_mapping = {'python': ('https://docs.python.org/2/', (None, '../python2-2.7.13.inv')),
                       'python3': ('https://docs.python.org/3/', (None, '../python3-3.6.2.inv')),
                       'jinja2': ('http://jinja.pocoo.org/docs/', (None, '../jinja2-2.9.7.inv'))}
|
aperigault/ansible
|
docs/docsite/rst/conf.py
|
Python
|
gpl-3.0
| 8,952 | 0.00067 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
# type: ignore
"""Interface to MTG-LI L2 product NetCDF files
The reader is based on preliminary test data provided by EUMETSAT.
The data description is described in the
"LI L2 Product User Guide [LIL2PUG] Draft version" documentation.
"""
import logging
from datetime import datetime
import h5netcdf
import numpy as np
from pyresample import geometry
# FIXME: This is not xarray/dask compatible
# TODO: Once migrated to xarray/dask, remove ignored path in setup.cfg
from satpy.dataset import Dataset
from satpy.readers.file_handlers import BaseFileHandler
logger = logging.getLogger(__name__)
class LIFileHandler(BaseFileHandler):
    """MTG LI L2 file reader.

    Reads lightning products from an LI L2 NetCDF container and scatters
    the sparse row/column samples onto a dense reference grid.
    """

    def __init__(self, filename, filename_info, filetype_info):
        super(LIFileHandler, self).__init__(filename, filename_info, filetype_info)
        self.nc = h5netcdf.File(self.filename, 'r')
        # Grid geometry is stored in the 'grid_position' variable:
        # index 2 holds the number of rows, index 3 the number of columns.
        refdim = self.nc['grid_position'][:]
        self.nlines = int(refdim[2])
        self.ncols = int(refdim[3])
        self.cache = {}
        logger.debug('Dimension : {}'.format(refdim))
        logger.debug('Row/Cols: {} / {}'.format(self.nlines, self.ncols))
        logger.debug('Reading: {}'.format(self.filename))
        logger.debug('Start: {}'.format(self.start_time))
        logger.debug('End: {}'.format(self.end_time))

    @property
    def start_time(self):
        """Start of the sensing period, parsed from the file attributes."""
        return datetime.strptime(self.nc.attrs['sensing_start'], '%Y%m%d%H%M%S')

    @property
    def end_time(self):
        """End of the sensing period, parsed from the file attributes."""
        # NOTE(review): start reads 'sensing_start' but end reads 'end_time';
        # confirm this asymmetry against the LI L2 test data attributes.
        return datetime.strptime(self.nc.attrs['end_time'], '%Y%m%d%H%M%S')

    def get_dataset(self, key, info=None, out=None):
        """Load dataset *key* into *out*, returning any cached value first.

        The product values are sparse (one value per row/column sample);
        they are scattered onto a dense NaN-filled grid, flipped/rotated to
        match the reference grid orientation, and masked where empty.
        """
        if key in self.cache:
            return self.cache[key]
        # Map satpy dataset short names onto NetCDF variable names.
        typedict = {"af": "flash_accumulation",
                    "afa": "accumulated_flash_area",
                    "afr": "flash_radiance",
                    "lgr": "radiance",
                    "lef": "radiance",
                    "lfl": "radiance"}
        logger.debug("Key: {}".format(key['name']))
        # Create the dense reference grid.  Use np.nan, not the np.NaN
        # alias, which was removed in NumPy 2.0.
        grid = np.full((self.nlines, self.ncols), np.nan)
        # Get product values and their grid coordinates.
        values = self.nc[typedict[key['name']]]
        rows = self.nc['row']
        cols = self.nc['column']
        logger.debug('[ Number of values ] : {}'.format(len(values)))
        logger.debug('[Min/Max] : <{}> / <{}>'.format(np.min(values),
                                                      np.max(values)))
        # Scatter the sparse samples into the dense grid.
        ids = np.ravel_multi_index([rows, cols], grid.shape)
        np.put(grid, ids, values)
        # Correct for bottom left origin in LI row/column indices.
        rotgrid = np.flipud(grid)
        # Rotate the grid by 90 degree clockwise.
        rotgrid = np.rot90(rotgrid, 3)
        logger.warning("LI data has been rotated to fit to reference grid. "
                       "Works only for test dataset")
        # Mask the cells that received no lightning sample.
        ds = np.ma.masked_where(np.isnan(rotgrid), rotgrid)
        out.data[:] = np.ma.getdata(ds)
        out.mask[:] = np.ma.getmask(ds)
        out.info.update(key.to_dict())
        return out

    def get_area_def(self, key, info=None):
        """Create AreaDefinition for specified product.

        Projection information are hard coded for 0 degree geos projection;
        the test dataset doesn't provide the values in the file container.
        """
        # TODO Get projection information from input file
        a = 6378169.
        h = 35785831.
        b = 6356583.8
        lon_0 = 0.
        # area_extent = (-5432229.9317116784, -5429229.5285458621,
        #                5429229.5285458621, 5432229.9317116784)
        area_extent = (-5570248.4773392612, -5567248.074173444,
                       5567248.074173444, 5570248.4773392612)
        proj_dict = {'a': float(a),
                     'b': float(b),
                     'lon_0': float(lon_0),
                     'h': float(h),
                     'proj': 'geos',
                     'units': 'm'}
        area = geometry.AreaDefinition(
            'LI_area_name',
            "LI area",
            'geosli',
            proj_dict,
            self.ncols,
            self.nlines,
            area_extent)
        self.area = area
        logger.debug("Dataset area definition: \n {}".format(area))
        return area
|
pytroll/satpy
|
satpy/readers/li_l2.py
|
Python
|
gpl-3.0
| 5,368 | 0.000373 |
#!/usr/bin/env python3
import unittest
import tempfile
from lofar.common.dbcredentials import *
def setUpModule():
    # No module-wide fixtures needed; hook kept for unittest symmetry.
    pass
def tearDownModule():
    # No module-wide teardown needed; hook kept for unittest symmetry.
    pass
class TestCredentials(unittest.TestCase):
    """Defaults and option mapping of a bare Credentials object."""
    def test_default_values(self):
        # A fresh Credentials should target a local postgres with empty
        # password/database.
        c = Credentials()
        self.assertEqual(c.type, "postgres")
        self.assertEqual(c.host, "localhost")
        self.assertEqual(c.port, 0)
        #self.assertEqual(c.user, "")
        self.assertEqual(c.password, "")
        self.assertEqual(c.database, "")
    def test_pg_connect_options(self):
        c = Credentials()
        # NOTE(review): the default port 0 is expected to surface as -1 in
        # the psycopg-style options dict -- confirm this mapping is the
        # documented contract of pg_connect_options().
        self.assertEqual(
            c.pg_connect_options(),
            { "host": "localhost",
              "port": -1,
              "user": c.user,
              "passwd": "",
              "dbname": "",
            })
class TestDBCredentials(unittest.TestCase):
    """Round-tripping and file parsing of the DBCredentials store."""
    def test_set_get(self):
        # set() followed by get() must return an equivalent Credentials.
        dbc = DBCredentials(filepatterns=[])
        c_in = Credentials()
        c_in.host = "example.com"
        c_in.port = 1234
        c_in.user = "root"
        c_in.password = "secret"
        c_in.database = "mydb"
        dbc.set("DATABASE", c_in)
        c_out = dbc.get("DATABASE")
        self.assertEqual(str(c_out), str(c_in))
    def test_get_non_existing(self):
        # Unknown sections must raise, not return a default.
        dbc = DBCredentials(filepatterns=[])
        with self.assertRaises(DBCredentials.NoSectionError):
            dbc.get("UNKNOWN")
    def test_list(self):
        dbc = DBCredentials(filepatterns=[])
        c = Credentials()
        c.host = "foo"
        dbc.set("FOO", c)
        c = Credentials()
        c.host = "bar"
        dbc.set("BAR", c)
        self.assertEqual(sorted(dbc.list()), ["BAR", "FOO"])
    def test_config(self):
        # Parse a credentials file from disk.
        f = tempfile.NamedTemporaryFile()
        f.write(b"""
[database:DATABASE]
type = postgres
host = example.com
port = 1234
user = root
password = secret
database = mydb
""")
        f.flush()  # don't close since that will delete the TemporaryFile
        # test if DATABASE is there
        dbc = DBCredentials(filepatterns=[f.name])
        self.assertEqual(dbc.list(), ["DATABASE"])
        # test if credentials match with what we've written
        c_in = Credentials()
        c_in.host = "example.com"
        c_in.port = 1234
        c_in.user = "root"
        c_in.password = "secret"
        c_in.database = "mydb"
        c_out = dbc.get("DATABASE")
        self.assertEqual(str(c_out), str(c_in))
    def test_freeform_config_option(self):
        # Unknown keys must be preserved verbatim in Credentials.config.
        f = tempfile.NamedTemporaryFile()
        f.write(b"""
[database:DATABASE]
foo = bar
test = word word
""")
        f.flush()  # don't close since that will delete the TemporaryFile
        # extract our config
        dbc = DBCredentials(filepatterns=[f.name])
        c_out = dbc.get("DATABASE")
        # test if the free-form config options got through
        self.assertEqual(c_out.config["foo"], "bar")
        self.assertEqual(c_out.config["test"], "word word")
def main(argv):
    # argv is accepted for symmetry with other runners but unused;
    # unittest.main() parses sys.argv itself.
    unittest.main()
if __name__ == "__main__":
    # run all tests
    import sys
    main(sys.argv[1:])
|
kernsuite-debian/lofar
|
LCS/PyCommon/test/t_dbcredentials.py
|
Python
|
gpl-3.0
| 2,789 | 0.008964 |
exec(open("tmp<caret>.txt").read())
|
asedunov/intellij-community
|
python/testData/refactoring/introduceConstant/py1840.py
|
Python
|
apache-2.0
| 35 | 0.028571 |
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The maximum value a signed INT type may have
DB_MAX_INT = 0x7FFFFFFF
# The cinder services binaries and topics' names
API_BINARY = "cinder-api"
SCHEDULER_BINARY = "cinder-scheduler"
VOLUME_BINARY = "cinder-volume"
BACKUP_BINARY = "cinder-backup"
# RPC topics mirror the binary names one-to-one.
SCHEDULER_TOPIC = SCHEDULER_BINARY
VOLUME_TOPIC = VOLUME_BINARY
BACKUP_TOPIC = BACKUP_BINARY
# Services whose logs can be addressed collectively.
LOG_BINARIES = (SCHEDULER_BINARY, VOLUME_BINARY, BACKUP_BINARY, API_BINARY)
# The encryption key ID used by the legacy fixed-key ConfKeyMgr
FIXED_KEY_ID = '00000000-0000-0000-0000-000000000000'
|
openstack/cinder
|
cinder/common/constants.py
|
Python
|
apache-2.0
| 1,167 | 0 |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from __future__ import absolute_import
from testutil.dott import feature, sh, testtmp # noqa: F401
# Load extensions
(
sh % "cat"
<< r"""
[extensions]
arcconfig=$TESTDIR/../edenscm/hgext/extlib/phabricator/arcconfig.py
arcdiff=
"""
>> "$HGRCPATH"
)
# Diff with no revision
sh % "hg init repo"
sh % "cd repo"
sh % "touch foo"
sh % "hg add foo"
sh % "hg ci -qm 'No rev'"
sh % "hg diff --since-last-submit" == r"""
abort: local changeset is not associated with a differential revision
[255]"""
sh % "hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: local changeset is not associated with a differential revision
[255]"""
# Fake a diff
sh % "echo bleet" > "foo"
sh % "hg ci -qm 'Differential Revision: https://phabricator.fb.com/D1'"
sh % "hg diff --since-last-submit" == r"""
abort: no .arcconfig found
[255]"""
sh % "hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: no .arcconfig found
[255]"""
# Prep configuration
sh % "echo '{}'" > ".arcrc"
sh % 'echo \'{"config" : {"default" : "https://a.com/api"}, "hosts" : {"https://a.com/api/" : { "user" : "testuser", "oauth" : "garbage_cert"}}}\'' > ".arcconfig"
# Now progressively test the response handling for variations of missing data
sh % "cat" << r"""
[{}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
Error calling graphql: Unexpected graphql response format
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
Error calling graphql: Unexpected graphql response format
abort: unable to determine previous changeset hash
[255]"""
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"differential_diffs": {"count": 3},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: unable to determine previous changeset hash
[255]"""
# This is the case when the diff is up to date with the current commit;
# there is no diff since what was landed.
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"latest_active_diff": {
"local_commit_info": {
"nodes": [
{"property_value": "{\"lolwut\": {\"time\": 0, \"commit\": \"2e6531b7dada2a3e5638e136de05f51e94a427f4\"}}"}
]
}
},
"differential_diffs": {"count": 1},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == "2e6531b7dada2a3e5638e136de05f51e94a427f4 Differential Revision: https://phabricator.fb.com/D1"
# This is the case when the diff points at our parent commit, we expect to
# see the bleet text show up. There's a fake hash that I've injected into
# the commit list returned from our mocked phabricator; it is present to
# assert that we order the commits consistently based on the time field.
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"latest_active_diff": {
"local_commit_info": {
"nodes": [
{"property_value": "{\"lolwut\": {\"time\": 0, \"commit\": \"88dd5a13bf28b99853a24bddfc93d4c44e07c6bd\"}}"}
]
}
},
"differential_diffs": {"count": 1},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit --nodates" == r"""
diff -r 88dd5a13bf28 -r 2e6531b7dada foo
--- a/foo
+++ b/foo
@@ -0,0 +1,1 @@
+bleet"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == "88dd5a13bf28b99853a24bddfc93d4c44e07c6bd No rev"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit-2o" == r"""
Phabricator rev: 88dd5a13bf28b99853a24bddfc93d4c44e07c6bd
Local rev: 2e6531b7dada2a3e5638e136de05f51e94a427f4 (.)
Changed: foo
| ...
| +bleet"""
# Make a new commit on top, and then use -r to look at the previous commit
sh % "echo other" > "foo"
sh % "hg commit -m 'Other commmit'"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit --nodates -r 2e6531b" == r"""
diff -r 88dd5a13bf28 -r 2e6531b7dada foo
--- a/foo
+++ b/foo
@@ -0,0 +1,1 @@
+bleet"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(2e6531b)' -T '{node} {desc}\\n'" == "88dd5a13bf28b99853a24bddfc93d4c44e07c6bd No rev"
|
facebookexperimental/eden
|
eden/scm/tests/test-fb-hgext-diff-since-last-submit-t.py
|
Python
|
gpl-2.0
| 6,148 | 0.00244 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Implement standard (and unused) TCP protocols.
These protocols are either provided by inetd, or are not provided at all.
"""
from __future__ import absolute_import, division
import time
import struct
from zope.interface import implementer
from twisted.internet import protocol, interfaces
from twisted.python.compat import _PY3
class Echo(protocol.Protocol):
    """As soon as any data is received, write it back (RFC 862)"""
    def dataReceived(self, data):
        # Echo service: mirror every received byte straight back to the peer.
        self.transport.write(data)
class Discard(protocol.Protocol):
    """Discard any received data (RFC 863)"""
    def dataReceived(self, data):
        # I'm ignoring you, nyah-nyah
        # Discard service: a pure data sink; all input is dropped on purpose.
        pass
@implementer(interfaces.IProducer)
class Chargen(protocol.Protocol):
    """Generate repeating noise (RFC 864)"""
    # The 72-character chargen pattern written over and over.
    noise = r'@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~ !"#$%&?'
    def connectionMade(self):
        # Registered with streaming=0, i.e. as a *pull* producer: the
        # transport calls resumeProducing() whenever it wants more data,
        # which throttles output to what the peer can consume.
        self.transport.registerProducer(self, 0)
    def resumeProducing(self):
        self.transport.write(self.noise)
    def pauseProducing(self):
        # Nothing to do: we only write when asked via resumeProducing().
        pass
    def stopProducing(self):
        pass
class QOTD(protocol.Protocol):
    """Return a quote of the day (RFC 865)"""

    def connectionMade(self):
        # Quote-of-the-day service: push one quote at the client, then
        # hang up (RFC 865).
        quote = self.getQuote()
        self.transport.write(quote)
        self.transport.loseConnection()

    def getQuote(self):
        """Return a quote. May be overridden in subclasses."""
        return "An apple a day keeps the doctor away.\r\n"
class Who(protocol.Protocol):
    """Return list of active users (RFC 866)"""

    def connectionMade(self):
        # Active-users service: send the user list once, then disconnect.
        users = self.getUsers()
        self.transport.write(users)
        self.transport.loseConnection()

    def getUsers(self):
        """Return active users. Override in subclasses."""
        return "root\r\n"
class Daytime(protocol.Protocol):
    """Send back the daytime in ASCII form (RFC 867)"""

    def connectionMade(self):
        # One human-readable UTC timestamp line, then hang up (RFC 867).
        now = time.asctime(time.gmtime(time.time()))
        self.transport.write(now + '\r\n')
        self.transport.loseConnection()
class Time(protocol.Protocol):
    """Send back the time in machine readable form (RFC 868)"""

    # RFC 868 counts seconds from 1900-01-01T00:00:00 UTC, while
    # time.time() counts from the Unix epoch (1970-01-01).  This constant
    # converts between the two epochs.
    EPOCH_OFFSET = 2208988800

    def connectionMade(self):
        # Bug fix: the old code packed the raw Unix timestamp as a *signed*
        # 32-bit int, which is off by 70 years relative to the RFC 868 wire
        # format and would overflow in 2038.  RFC 868 specifies a big-endian
        # unsigned 32-bit count of seconds since 1900.
        result = struct.pack("!I", int(time.time()) + self.EPOCH_OFFSET)
        self.transport.write(result)
        self.transport.loseConnection()
__all__ = ["Echo", "Discard", "Chargen", "QOTD", "Who", "Daytime", "Time"]
if _PY3:
    # Only Echo has been ported to Python 3 so far: prune everything else
    # from both __all__ and the module globals so unported classes cannot
    # be imported at all.
    __all3__ = ["Echo"]
    for name in __all__[:]:
        if name not in __all3__:
            __all__.remove(name)
            del globals()[name]
    del name, __all3__
|
Architektor/PySnip
|
venv/lib/python2.7/site-packages/twisted/protocols/wire.py
|
Python
|
gpl-3.0
| 2,659 | 0.003009 |
# Time: O(n) ~ O(n^2)
# Space: O(1)
from random import randint
class Solution:
    # @param {integer[]} nums
    # @param {integer} k
    # @return {integer}
    def findKthLargest(self, nums, k):
        """Return the k-th largest element of nums via randomized quickselect.

        Average O(n) time, O(1) extra space.  NOTE: partitions ``nums`` in
        place, so the caller's list order is disturbed.
        """
        left, right = 0, len(nums) - 1
        while left <= right:
            pivot_idx = randint(left, right)
            new_pivot_idx = self.PartitionAroundPivot(left, right, pivot_idx, nums)
            if new_pivot_idx == k - 1:
                return nums[new_pivot_idx]
            elif new_pivot_idx > k - 1:
                right = new_pivot_idx - 1
            else:  # new_pivot_idx < k - 1.
                left = new_pivot_idx + 1

    def PartitionAroundPivot(self, left, right, pivot_idx, nums):
        """Partition nums[left..right] so elements > pivot come first.

        Returns the pivot's final index in this descending-order partition.
        """
        pivot_value = nums[pivot_idx]
        new_pivot_idx = left
        nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
        # Bug fix: ``xrange`` is Python-2-only and raises NameError on
        # Python 3; ``range`` behaves identically here on both versions.
        for i in range(left, right):
            if nums[i] > pivot_value:
                nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
                new_pivot_idx += 1
        nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
        return new_pivot_idx
|
kamyu104/LeetCode
|
Python/kth-largest-element-in-an-array.py
|
Python
|
mit
| 1,142 | 0.002627 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, traceback, Ice, threading, time, os
import IceStorm
# Ctrl+c handling
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Qt interface
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSvg import *
# Check that RoboComp has been correctly detected
ROBOCOMP = ''
try:
ROBOCOMP = os.environ['ROBOCOMP']
except:
pass
if len(ROBOCOMP)<1:
print 'ROBOCOMP environment variable not set! Exiting.'
sys.exit()
Ice.loadSlice("-I"+ROBOCOMP+"/interfaces/ --all "+ROBOCOMP+"/interfaces/ASRPublish.ice")
import RoboCompASRPublish
Ice.loadSlice("-I"+ROBOCOMP+"/interfaces/ --all "+ROBOCOMP+"/interfaces/ASRCommand.ice")
import RoboCompASRCommand
Ice.loadSlice("-I"+ROBOCOMP+"/interfaces/ --all "+ROBOCOMP+"/interfaces/ASRComprehension.ice")
import RoboCompASRComprehension
class MainClass(object):
    # Bridges recognized speech (ASRPublish) to robot commands (ASRCommand):
    # splits incoming text into an action word plus optional complements and
    # publishes the result on the command topic.
    def __init__(self, commandTopic):
        # commandTopic: oneway ASRCommand publisher proxy used by newText().
        print 'Esta clase podria ser la clase principal del programa'
        self.commandTopic = commandTopic
    def newText(self, text, current=None):
        # Parse "<action> [complement ...]" and publish it as an ASRCommand.
        # `current` is the (unused) Ice invocation context.
        print 'Nos ha llegado', text
        command = RoboCompASRCommand.Command()
        partes = text.split()
        if len(partes) > 0:
            command.action = partes[0]
            if len(partes) > 1:
                command.complements = partes[1:]
                print 'Action', command.action, '(', command.complements,')'
            else:
                print 'Action', command.action
            self.commandTopic.newCommand(command)
        else:
            # Empty utterance: nothing to publish.
            print 'Comando vacio?'
    def mode(self, text):
        # Callback for the ASRComprehension interface; currently log-only.
        print 'Nos llega por la interfaz ASRComprehension', text
class ASRPublishTopicI (RoboCompASRPublish.ASRPublish):
    # Ice servant for the ASRPublish topic; forwards every recognized text
    # to the application handler (MainClass.newText).
    def __init__(self, _handler):
        self.handler = _handler
    def newText(self, text, current=None):
        self.handler.newText(text)
class ASRComprehensionI (RoboCompASRComprehension.ASRComprehension):
    # Ice servant for the ASRComprehension interface; delegates mode changes
    # to the application handler (MainClass.mode).
    def __init__(self, _handler):
        self.handler = _handler
    def mode(self, text, current=None):
        self.handler.mode(text)
class Server (Ice.Application):
    # Ice application wiring:
    #  1. obtain/create the "ASRCommand" IceStorm topic and keep a oneway
    #     publisher proxy to it,
    #  2. subscribe a servant to the "ASRPublishTopic" topic (incoming ASR),
    #  3. expose the ASRComprehension interface, then block until shutdown.
    def run (self, argv):
        status = 0
        # NOTE(review): `status` is set on errors but run() never returns it;
        # Ice.Application normally uses run()'s return value as the exit
        # code -- confirm a `return status` is not missing here.
        try:
            # Proxy to publish ASRCommand
            proxy = self.communicator().getProperties().getProperty("IceStormProxy")
            obj = self.communicator().stringToProxy(proxy)
            topicManager = IceStorm.TopicManagerPrx.checkedCast(obj)
            # First opportunistic retrieve; failures fall through to the
            # retrieve-or-create loop below.
            try:
                topic = False
                topic = topicManager.retrieve("ASRCommand")
            except:
                pass
            while not topic:
                try:
                    topic = topicManager.retrieve("ASRCommand")
                except IceStorm.NoSuchTopic:
                    try:
                        topic = topicManager.create("ASRCommand")
                    except:
                        # Lost the create race with another client; the next
                        # retrieve() will succeed.
                        print 'Another client created the ASRCommand topic... ok'
            pub = topic.getPublisher().ice_oneway()
            commandTopic = RoboCompASRCommand.ASRCommandPrx.uncheckedCast(pub)
            mainObject = MainClass(commandTopic)
            # Subscribe to ASRPublishTopic
            proxy = self.communicator().getProperties().getProperty( "IceStormProxy")
            topicManager = IceStorm.TopicManagerPrx.checkedCast(self.communicator().stringToProxy(proxy))
            adapterT = self.communicator().createObjectAdapter("ASRPublishTopic")
            asrTopic = ASRPublishTopicI(mainObject)
            proxyT = adapterT.addWithUUID(asrTopic).ice_oneway()
            ASRPublishTopic_subscription = False
            while not ASRPublishTopic_subscription:
                try:
                    topic = topicManager.retrieve("ASRPublishTopic")
                    qos = {}
                    topic.subscribeAndGetPublisher(qos, proxyT)
                    adapterT.activate()
                    ASRPublishTopic_subscription = True
                except IceStorm.NoSuchTopic:
                    # Publisher side not up yet; poll once a second.
                    print "Error! No topic found! Sleeping for a while..."
                    time.sleep(1)
            print 'ASRPublishTopic subscription ok'
            # Implement ASRComprehension
            asrcomprehensionI = ASRComprehensionI(mainObject)
            adapterASRComprehension = self.communicator().createObjectAdapter('ASRComprehension')
            adapterASRComprehension.add(asrcomprehensionI, self.communicator().stringToIdentity('asrcomprehension'))
            adapterASRComprehension.activate()
            self.communicator().waitForShutdown()
        except:
            traceback.print_exc()
            status = 1
        if self.communicator():
            try:
                self.communicator().destroy()
            except:
                traceback.print_exc()
                status = 1
Server( ).main(sys.argv)
|
robocomp/robocomp-ursus-rockin
|
components/comprehension/comprehension.py
|
Python
|
gpl-3.0
| 4,066 | 0.029759 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.client import Client
class CinderHealth(object):
    """Thin health-check wrapper around the Cinder volume API.

    Every method returns a ``(status_code, message, payload)`` tuple instead
    of raising: ``(200, "success", result)`` on success, ``(404, <error
    text>, [])`` on any failure, so callers can treat outcomes uniformly.
    """

    def __init__(self, creds):
        # creds: keyword arguments accepted by cinderclient.client.Client
        # (auth URL, username, password, ...).
        self.cinderclient = Client(**creds)

    def cinder_list(self):
        """List all volumes visible to the configured credentials."""
        try:
            cinder_list = self.cinderclient.volumes.list()
        except Exception as e:
            # Bug fix: Exception.message was removed in Python 3 (and
            # deprecated since 2.6); str(e) yields the same text on both.
            return (404, str(e), [])
        return (200, "success", cinder_list)

    def cinder_volume_create(self, volume_name, volume_size):
        """Create a volume of ``volume_size`` GB named ``volume_name``."""
        try:
            cinder_ret = self.cinderclient.volumes.create(volume_size,
                                                          name=volume_name)
        except Exception as e:
            return (404, str(e), [])
        return (200, "success", cinder_ret)

    def cinder_volume_delete(self, volume_id):
        """Delete the volume identified by ``volume_id``."""
        try:
            cinder_ret = self.cinderclient.volumes.delete(volume_id)
        except Exception as e:
            return (404, str(e), [])
        return (200, "success", cinder_ret)
anand1712/cloudpulse
|
cloudpulse/openstack/api/cinder_api.py
|
Python
|
apache-2.0
| 1,489 | 0 |
'''
Created on 30-07-2014
@author: mateusz
'''
from threading import Thread
import gumtreeofferparser as Parser
from injectdependency import Inject, InjectDependency
@InjectDependency('urlfetcher')
class OfferFetcher(Thread):
    """Worker thread: takes offer URLs from ``inQueue``, downloads and
    parses each one, and puts the resulting offer dict on ``outQueue``."""

    urlfetcher = Inject  # filled in by the InjectDependency decorator

    def __init__(self, inQueue, outQueue):
        Thread.__init__(self, name="OfferFetcher")
        self.inQueue = inQueue
        self.outQueue = outQueue

    def run(self):
        # Endless consume/produce loop; acceptable for a daemon thread.
        while True:
            url = self.inQueue.get()
            self.outQueue.put(self.getOffer(url))
            self.inQueue.task_done()

    def getOffer(self, url):
        """Fetch *url*, parse it into an offer dict, and tag it with its URL."""
        document = self.urlfetcher.fetchDocument(url)
        offer = Parser.extractOffer(document)
        offer["url"] = url
        return offer
|
mateuszmidor/GumtreeOnMap
|
src/offerfetcher.py
|
Python
|
gpl-2.0
| 848 | 0.008255 |
from syslog import syslog
module_name = "Syslog"
config = {
"prefix": "Default Prefix"
}
def handle_alert(message):
    """Forward *message* to the system log, prefixed per module config."""
    prefix = config["prefix"]
    syslog("{} - {}".format(prefix, message))
|
camerongray1515/Prophasis
|
application/prophasis_common/prophasis_common/alert_modules/syslog.py
|
Python
|
bsd-2-clause
| 179 | 0.005587 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 20:42:20 2016
@author: haell
"""
def dicho(f, a, b, epsilon):
    """Locate a root of ``f`` on [a, b] by bisection.

    Requires f(a) and f(b) to bracket a root (opposite signs) and
    epsilon > 0.  Returns ``(x, n)`` where ``x`` approximates the root to
    within epsilon and ``n`` is the number of bisection steps performed.
    """
    assert f(a) * f(b) <= 0 and epsilon > 0
    g, d = a, b
    fg, fd = f(g), f(d)
    n = 0
    while d - g > 2 * epsilon:
        n += 1
        m = (g + d) / 2.
        fm = f(m)
        # Keep the half-interval whose endpoints still bracket the root.
        if fg * fm <= 0:
            d, fd = m, fm
        else:
            g, fg = m, fm
    # Bug fix: removed a leftover debug print of (d, g, fd, fg) that ran on
    # every call and polluted stdout.
    return (g + d) / 2., n
print(dicho(lambda x : x*x*10**(-8) - 4*x / 5 + 10**(-8), 7*10**7, 9*10**7, 10**-8))
|
Haellsigh/travaux-pratiques
|
TP/TP8-Exo2.py
|
Python
|
mit
| 532 | 0.011278 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pyarrow as pa
import pyarrow.types as types
def test_is_boolean():
    """Only the boolean type is reported as boolean."""
    for ty, expected in [(pa.bool_(), True), (pa.int8(), False)]:
        assert types.is_boolean(ty) == expected
def test_is_integer():
    """All fixed-width ints are integers; signedness is classified correctly."""
    signed = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
    unsigned = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]

    for ty in signed:
        assert types.is_integer(ty)
        assert types.is_signed_integer(ty)
        assert not types.is_unsigned_integer(ty)

    for ty in unsigned:
        assert types.is_integer(ty)
        assert types.is_unsigned_integer(ty)
        assert not types.is_signed_integer(ty)

    # Floats are neither integers nor signed integers.
    assert not types.is_integer(pa.float32())
    assert not types.is_signed_integer(pa.float32())
def test_is_floating():
    """Half, single and double precision all count as floating point."""
    for ty in (pa.float16(), pa.float32(), pa.float64()):
        assert types.is_floating(ty)
    assert not types.is_floating(pa.int32())
def test_is_null():
    """The null type is null; a list type is not."""
    for ty, expected in [(pa.null(), True), (pa.list_(pa.int32()), False)]:
        assert types.is_null(ty) == expected
def test_is_decimal():
    """A decimal(precision, scale) type is decimal; plain ints are not."""
    for ty, expected in [(pa.decimal(19, 4), True), (pa.int32(), False)]:
        assert types.is_decimal(ty) == expected
def test_is_list():
    """A list type is detected; its value type alone is not a list."""
    for ty, expected in [(pa.list_(pa.int32()), True), (pa.int32(), False)]:
        assert types.is_list(ty) == expected
def test_is_dictionary():
    """A dictionary-encoded type is detected; plain ints are not."""
    dict_type = pa.dictionary(pa.int32(),
                              pa.array(['a', 'b', 'c']))
    assert types.is_dictionary(dict_type)
    assert not types.is_dictionary(pa.int32())
def test_is_nested_or_struct():
    """Structs and lists are nested types; only structs are structs."""
    fields = [pa.field('a', pa.int32()),
              pa.field('b', pa.int8()),
              pa.field('c', pa.string())]
    struct_ex = pa.struct(fields)

    assert types.is_struct(struct_ex)
    assert not types.is_struct(pa.list_(pa.int32()))

    assert types.is_nested(struct_ex)
    assert types.is_nested(pa.list_(pa.int32()))
    assert not types.is_nested(pa.int32())
# TODO(wesm): Union types not yet implemented in pyarrow
# def test_is_union():
# assert types.is_union(pa.union([pa.field('a', pa.int32()),
# pa.field('b', pa.int8()),
# pa.field('c', pa.string())]))
# assert not types.is_union(pa.list_(pa.int32()))
# TODO(wesm): is_map, once implemented
def test_is_binary_string():
    """binary vs string (unicode) vs fixed-size binary classification."""
    for ty, expected in [(pa.binary(), True), (pa.string(), False)]:
        assert types.is_binary(ty) == expected

    for ty, expected in [(pa.string(), True), (pa.binary(), False)]:
        assert types.is_string(ty) == expected
    # is_unicode is an alias-style check for string types.
    assert types.is_unicode(pa.string())

    for ty, expected in [(pa.binary(5), True), (pa.binary(), False)]:
        assert types.is_fixed_size_binary(ty) == expected
def test_is_temporal_date_time_timestamp():
    """Dates, times and timestamps are all temporal and mutually exclusive."""
    dates = [pa.date32(), pa.date64()]
    times = [pa.time32('s'), pa.time64('ns')]
    stamps = [pa.timestamp('ms')]

    for ty in dates + times + stamps:
        assert types.is_temporal(ty)

    for ty in dates:
        assert types.is_date(ty)
        assert not types.is_time(ty)
        assert not types.is_timestamp(ty)

    for ty in times:
        assert types.is_time(ty)
        assert not types.is_date(ty)
        assert not types.is_timestamp(ty)

    for ty in stamps:
        assert types.is_timestamp(ty)
        assert not types.is_date(ty)
        assert not types.is_time(ty)

    assert not types.is_temporal(pa.int32())
def test_timestamp_type():
    """Regression test for ARROW-1683: timestamp() yields a TimestampType."""
    ts = pa.timestamp('ns')
    assert isinstance(ts, pa.TimestampType)
|
NonVolatileComputing/arrow
|
python/pyarrow/tests/test_types.py
|
Python
|
apache-2.0
| 4,240 | 0 |
import unittest
from word_treasure import *
class WordTreasureTestCase(unittest.TestCase):
    """Smoke tests for the word treasure display functions.

    The main aim is to catch unexpected crashes: each display_* helper
    should return True for a known word and None for a nonsense word.
    The validity of the displayed content is not checked.
    """

    KNOWN_WORD = "hello"
    UNKNOWN_WORD = "somenonexistantword"
    LIMIT = 10

    def test_definition_call(self):
        self.assertEqual(display_definitions(self.KNOWN_WORD), True)
        self.assertEqual(display_definitions(self.UNKNOWN_WORD), None)

    def test_random_words(self):
        self.assertEqual(display_random_words(self.LIMIT), True)

    def test_display_examples(self):
        self.assertEqual(display_examples(self.KNOWN_WORD, self.LIMIT), True)
        self.assertEqual(display_examples(self.UNKNOWN_WORD, self.LIMIT), None)

    def test_display_top_examples(self):
        self.assertEqual(display_top_examples(self.KNOWN_WORD), True)
        self.assertEqual(display_top_examples(self.UNKNOWN_WORD), None)

    def test_display_related_words(self):
        self.assertEqual(display_related_words(self.KNOWN_WORD), True)
        self.assertEqual(display_related_words(self.UNKNOWN_WORD), None)

    def test_display_compact(self):
        self.assertEqual(display_compact(self.KNOWN_WORD), True)
        self.assertEqual(display_compact(self.UNKNOWN_WORD), None)

    def test_help_display(self):
        self.assertEqual(display_help(), True)
if __name__=='__main__':
unittest.main()
|
harsimrans/word-treasure
|
word_treasure/test_wordt.py
|
Python
|
gpl-3.0
| 1,648 | 0.005461 |
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial operations for the Brocade plugin
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the Brocade plugin tables (monolithic plugin and ML2 driver).

    Alembic migration: DDL statement order and column definitions below are
    intentionally kept byte-stable.
    """
    # Monolithic Brocade plugin: one row per network, keyed by network UUID.
    op.create_table(
        'brocadenetworks',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('vlan', sa.String(length=10), nullable=True),
        sa.PrimaryKeyConstraint('id'))
    # Ports belonging to the networks above (FK on network_id).
    op.create_table(
        'brocadeports',
        sa.Column('port_id', sa.String(length=36), nullable=False,
                  server_default=''),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('physical_interface', sa.String(length=36), nullable=True),
        sa.Column('vlan_id', sa.String(length=36), nullable=True),
        sa.Column('tenant_id', sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['brocadenetworks.id'], ),
        sa.PrimaryKeyConstraint('port_id'))
    # ML2 mechanism driver variants: add segment/network-type info and an
    # indexed tenant_id.
    op.create_table(
        'ml2_brocadenetworks',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('vlan', sa.String(length=10), nullable=True),
        sa.Column('segment_id', sa.String(length=36), nullable=True),
        sa.Column('network_type', sa.String(length=10), nullable=True),
        sa.Column('tenant_id', sa.String(length=255), nullable=True,
                  index=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_table(
        'ml2_brocadeports',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('physical_interface', sa.String(length=36), nullable=True),
        sa.Column('vlan_id', sa.String(length=36), nullable=True),
        sa.Column('tenant_id', sa.String(length=255), nullable=True,
                  index=True),
        sa.PrimaryKeyConstraint('id'),
        sa.ForeignKeyConstraint(['network_id'], ['ml2_brocadenetworks.id']))
|
wolverineav/neutron
|
neutron/db/migration/alembic_migrations/brocade_init_ops.py
|
Python
|
apache-2.0
| 2,634 | 0 |
# -*- encoding: utf8 -*-
# A daemon to keep SSH forwarding connected
from __future__ import print_function, absolute_import
import os
import sys
import time
import socket
import logging
class Daemon(object):
    """Keep an SSH port-forwarding tunnel alive.

    Periodically probes the local forwarded port and respawns the ssh
    tunnel whenever the connection check fails.
    """

    # Local endpoint the ssh tunnel is expected to keep open.
    CHECK_ADDR = ('localhost', 3366)

    def __init__(self):
        # Seconds between connectivity checks.
        self.heartbeat = 50

    def run(self):
        """Daemonize, then poll forever, reconnecting when the tunnel drops."""
        logging.basicConfig(filename='daemon.log')
        logging.error('daemon started')
        self.daemonize()
        while True:
            if not self.check_connection():
                self.reconnect()
                logging.warn('reconnecting')
            time.sleep(self.heartbeat)

    def check_connection(self):
        """Return True if the forwarded local port accepts a TCP connection."""
        c = socket.socket()
        try:
            c.connect(self.CHECK_ADDR)
            return True
        except socket.error:
            return False
        finally:
            # Bug fix: the original leaked the socket descriptor whenever
            # connect() raised; always close it.
            c.close()

    def daemonize(self):
        """Detach from the parent process via a single fork.

        NOTE(review): not a full POSIX daemonization (no setsid(),
        chdir('/') or stdio redirection) -- confirm that is acceptable.
        """
        pid = os.fork()
        if pid:
            # Parent: reap (non-blocking) and exit; child carries on.
            os.waitpid(pid, os.WNOHANG)
            sys.exit(0)
        return

    def reconnect(self):
        """Fork and exec a fresh ssh process that re-creates the tunnel."""
        pid = os.fork()
        if pid == 0:  # child
            err = os.execlp('/usr/bin/ssh', 'ssh', '-i',
                            '/home/xu/.ssh/id_rsa', '-L',
                            '3366:127.0.0.1:3306', '-p', '42022', 'xu@abc.com')
            # execlp only returns on failure.
            if err:
                logging.error("error to execlp")
            sys.exit(1)
        elif pid > 0:
            # Parent: wait for the ssh child to terminate before returning.
            os.waitpid(pid, 0)
        else:
            logging.error('error to fork')
            sys.exit(2)
if __name__ == '__main__':
Daemon().run()
|
dlutxx/memo
|
python/daemon.py
|
Python
|
mit
| 1,497 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Search through the subfolders of the current folder. For each subfolder found,
chdir() to it, then run all executable scripts ending in .SH in that folder.
Does not exhaustively search for subfolders of subfolders, or subfolders of
subfolders of subfolders, etc.; it only does exactly what was described in that
first sentence, without recursion.
Note that this calls scripts in an insecure way:
subprocess.call(script_name, shell=True)
so it should only be called on scripts that are trusted completely.
This script is copyright 2017-20 by Patrick Mooney. It is licensed under the GNU
GPL, either version 3 or (at your option) any later version. See the file
LICENSE.md for details.
"""
import glob, os, subprocess
from pprint import pprint
def main():
    """For each immediate subfolder, run its executable *.sh / *.SH scripts.

    chdir()s into each direct subfolder of the current folder, runs every
    executable script ending in 'sh'/'SH' (insecurely, via the shell --
    scripts must be fully trusted, see the module docstring), then strips
    the execute bit so a script is only ever run once.  Not recursive.
    """
    the_dirs = [d for d in glob.glob("*") if os.path.isdir(d)]
    for which_dir in the_dirs:
        olddir = os.getcwd()
        try:
            os.chdir(which_dir)
            print("changed directory to %s" % os.getcwd())
            # sorted() makes the run order deterministic (the original
            # list(set(...)) order was arbitrary).
            exec_scripts = [which_script for which_script
                            in sorted(set(glob.glob('*SH') + glob.glob('*sh')))
                            if os.access(which_script, os.X_OK)]
            pprint("exec_scripts are: %s" % exec_scripts)
            for which_script in exec_scripts:
                print("About to call script: %s" % which_script)
                subprocess.call('./' + which_script, shell=True)
                # Bug fix: the original passed 'chmod a-x NAME' as a single
                # string *without* shell=True, so subprocess looked for a
                # program literally named "chmod a-x NAME" and the execute
                # bit was never removed.  Pass an argument list instead.
                subprocess.call(['chmod', 'a-x', which_script])
        except BaseException as e:
            # NOTE(review): BaseException also swallows KeyboardInterrupt /
            # SystemExit; kept for behavioral parity, but Exception would
            # usually be more appropriate.
            print('Something went wrong; the system said %s' % e)
        finally:
            os.chdir(olddir)


if __name__ == "__main__":
    main()
|
patrick-brian-mooney/python-personal-library
|
run_subfolder_scripts.py
|
Python
|
gpl-3.0
| 1,560 | 0.005128 |
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 4/26/14
###Function: Incidence per 100,000 vs. week number for flu weeks (wks 40-20). Incidence is per 100,000 for the US population in the second calendar year of the flu season.
###Import data: SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop.csv
###Command Line: python F2_incid_time.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions as fxn
### data structures ###
### functions ###
### data files ###
# NOTE(review): absolute, machine-specific input/output paths; consider
# making them configurable before running elsewhere.
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
fw = fxn.gp_fluweeks
sl = fxn.gp_seasonlabels
colvec = fxn.gp_colors
wklab = fxn.gp_weeklabels
fs = 24      # axis-label font size
fssml = 16   # small font size (declared for consistency; unused below)
### program ###
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# plot values
# One incidence-vs-week curve per season, restricted to the flu weeks (40-20).
for s in ps:
	plt.plot(xrange(fw), d_incid53ls[s][:fw], marker = 'o', color = colvec[s-2], label = sl[s-2], linewidth = 2)
plt.xlim([0, fw-1])
plt.xticks(range(fw)[::5], wklab[:fw:5])
plt.ylim([0, 60])
plt.xlabel('Week Number', fontsize=fs)
plt.ylabel('Incidence per 100,000', fontsize=fs)
plt.legend(loc='upper left')
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/F2/incid_time.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
# 7/28/14: does 'week' variable in SDI refer to week before or after referenced date? Thanksgiving week does not correpond with correct week number for dip in incidence plot
print [d_incid[wk] for wk in sorted(d_wk) if d_wk[wk]==2]
print [wk for wk in sorted(d_wk) if d_wk[wk]==2]
|
eclee25/flu-SDI-exploratory-age
|
scripts/create_fluseverity_figs/F2_incid_time.py
|
Python
|
mit
| 2,579 | 0.01551 |
from functools import wraps
from threading import RLock
import traceback
def Synchronized(lock=None):
    """Decorator factory that serializes calls to the wrapped function.

    :param lock: lock to acquire around each call; if None (or falsy), a
        fresh private RLock is created and shared by every call to the
        decorated function
    :return: the decorator
    """
    if not lock:
        lock = RLock()

    def decorator(fn):
        @wraps(fn)
        def wrapped(*args, **kwargs):
            # Idiomatic fix: 'with' replaces the manual acquire/try/finally
            # and still guarantees release even when fn raises.
            with lock:
                return fn(*args, **kwargs)
        return wrapped
    return decorator
|
shaddyx/simpleDecorators
|
simpledecorators/Synchronized.py
|
Python
|
mit
| 511 | 0.005871 |
import json
from django import template
register = template.Library()
@register.filter
def jsonify(value):
    """Template filter: serialize *value* to a JSON string.

    NOTE(review): the output is not marked safe, so Django templates will
    autoescape it; confirm that is intended before using it inside
    <script> blocks.
    """
    return json.dumps(value)
|
Code4SA/municipal-data
|
municipal_finance/templatetags/jsonify.py
|
Python
|
mit
| 140 | 0 |
from PIL import Image
from math import ceil, floor
def load_img(src):
    """Open the image file at *src* (lazy PIL load)."""
    return Image.open(src)
def create_master(width, height):
    """Create a blank, fully transparent RGBA canvas of the given pixel size."""
    return Image.new("RGBA", (width, height))
def closest_power_two(num):
    """Return the smallest power of two >= num (minimum 2)."""
    power = 2
    while power < num:
        power <<= 1
    return power
def create_matrix(cols, rows, images):
    """Paste *images* into a cols x rows sprite sheet.

    Each cell is the smallest power-of-two box that fits one image; all
    images are assumed to share the size of images[0].
    """
    x, y = images[0].size # We assume that all images are same size
    width = closest_power_two(x)
    height = closest_power_two(y)
    print("Width: {0} Height: {1}".format(width, height))
    # Padding that would center an image inside its power-of-two cell.
    offset_x = int((width - x) / 2)
    offset_y = int((height - y) / 2)
    master = create_master(width * cols, height * rows)
    for index, img in enumerate(images):
        row = floor(index / cols)
        col = index % cols
        # NOTE(review): x uses '+ offset_x' but y uses '- offset_y', which
        # shifts sprites *up* rather than centering them vertically --
        # confirm this asymmetry is intentional.
        master.paste(img, (width * col + offset_x, height * row - offset_y))
    return master
def hero_sprites(name, action, frames):
    """Load the sprite frames for *action* in all four facing directions."""

    def frames_for(position):
        # Multi-frame actions are numbered "ActionPos (1).png", ...;
        # single-frame actions are just "ActionPos.png".
        if frames > 1:
            return [load_img("img/png/1x/{0}/{1}{2} ({3}).png".format(name, action, position, frame))
                    for frame in range(1, frames + 1)]
        return [load_img("img/png/1x/{0}/{1}{2}.png".format(name, action, position))]

    imgs = []
    for position in ["Back", "Front", "Left", "Right"]:
        imgs.extend(frames_for(position))
    return imgs
if __name__ == "__main__":
matrix = create_matrix(4, 4, hero_sprites("hero1", "Dead", 3))
matrix.save("img/hero1_dead.png", "PNG")
|
zaibacu/DamnedQuest
|
sprite_creator.py
|
Python
|
mit
| 1,532 | 0.00718 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module is deprecated.
Please use :mod:`airflow.providers.amazon.aws.transfers.redshift_to_s3`.
"""
import warnings
from airflow.providers.amazon.aws.transfers.redshift_to_s3 import RedshiftToS3Operator
# Emitted once when this legacy module is imported; stacklevel=2 points the
# warning at the importing module.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.redshift_to_s3`.",
    DeprecationWarning,
    stacklevel=2,
)


class RedshiftToS3Transfer(RedshiftToS3Operator):
    """
    This class is deprecated.
    Please use: :class:`airflow.providers.amazon.aws.transfers.redshift_to_s3.RedshiftToS3Operator`.
    """

    def __init__(self, **kwargs):
        # Back-compat shim: warn, then delegate everything to the new
        # operator.  stacklevel=3 points the warning at the caller that
        # constructed this operator.
        warnings.warn(
            """This class is deprecated.
            Please use
            `airflow.providers.amazon.aws.transfers.redshift_to_s3.RedshiftToS3Operator`.""",
            DeprecationWarning,
            stacklevel=3,
        )
        super().__init__(**kwargs)
|
apache/incubator-airflow
|
airflow/operators/redshift_to_s3_operator.py
|
Python
|
apache-2.0
| 1,680 | 0.002381 |
from django.db import models
from .bleachfield import BleachField
class BleachCharField(BleachField, models.CharField):
    """CharField that bleaches (sanitizes) its value just before saving."""

    def pre_save(self, model_instance, add):
        # Sanitize the current value *on the instance* so the cleaned text
        # is what gets saved and what any later pre_save logic sees.
        raw_value = getattr(model_instance, self.attname)
        setattr(model_instance, self.attname, self.clean_text(raw_value))
        return super(BleachCharField, self).pre_save(model_instance, add)
|
BetterWorks/django-bleachfields
|
bleachfields/bleachchar.py
|
Python
|
mit
| 409 | 0 |
# Copyright 2012 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
class Controller(object):
    # v2 image-tags controller: create or delete a single tag on an image.
    def __init__(self, db=None):
        # 'db' lets tests inject a fake DB API; defaults to the configured one.
        self.db_api = db or glance.db.get_api()
        self.db_api.configure_db()

    @utils.mutating
    def update(self, req, image_id, tag_value):
        context = req.context
        # Idempotent create: only insert the tag if it is not already present.
        # NOTE(review): check-then-insert is racy under concurrent requests;
        # confirm the DB layer tolerates a duplicate tag create.
        if tag_value not in self.db_api.image_tag_get_all(context, image_id):
            self.db_api.image_tag_create(context, image_id, tag_value)

    @utils.mutating
    def delete(self, req, image_id, tag_value):
        try:
            self.db_api.image_tag_delete(req.context, image_id, tag_value)
        except exception.NotFound:
            # Missing tag is surfaced to the client as HTTP 404.
            raise webob.exc.HTTPNotFound()
class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Serializer mapping successful tag mutations to 204 No Content."""

    def _no_content(self, response):
        # Both operations reply with an empty body on success.
        response.status_int = 204

    def update(self, response, result):
        self._no_content(response)

    def delete(self, response, result):
        self._no_content(response)
def create_resource():
    """Images resource factory method"""
    return wsgi.Resource(Controller(), serializer=ResponseSerializer())
|
tylertian/Openstack
|
openstack F/glance/glance/api/v2/image_tags.py
|
Python
|
apache-2.0
| 1,791 | 0 |
# Copyright 2014-2017 by Akira Yoshiyama <akirayoshiyama@gmail.com>.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Resource class and its manager for servers in Compute API v2
"""
import time
from osclient2 import base
from osclient2 import exception
from osclient2 import mapper
from osclient2 import utils
from . import volume_attachment
from . import interface_attachment
from osclient2.neutron.v2.network import Resource as Network
from osclient2.neutron.v2.port import Resource as Port
from osclient2.cinder.v2.volume import Resource as Volume
from osclient2.cinder.v2.snapshot import Resource as Snapshot
from osclient2.glance.v2.image import Resource as Image
# (local attribute name, wire/API attribute name, value mapper) triples used
# by the Resource machinery to translate between Python attributes and the
# Compute API's JSON fields.
ATTRIBUTE_MAPPING = [
    ('id', 'id', mapper.Noop),
    ('name', 'name', mapper.Noop),
    ('access_ipv4', 'accessIPv4', mapper.Noop),
    ('access_ipv6', 'accessIPv6', mapper.Noop),
    ('addresses', 'addresses', mapper.Noop),
    ('host', 'OS-EXT-SRV-ATTR:host', mapper.Noop),
    ('networks', 'networks', mapper.Noop),
    ('disks', 'block_device_mapping_v2', mapper.Noop),
    ('user_data', 'user_data', mapper.Base64),
    ('progress', 'progress', mapper.Noop),
    ('status', 'status', mapper.Noop),
    ('task_state', 'OS-EXT-STS:task_state', mapper.Noop),
    ('created_at', 'created', mapper.DateTime),
    ('updated_at', 'updated', mapper.DateTime),
    ('flavor', 'flavorRef', mapper.Resource('nova.flavor')),
    ('image', 'imageRef', mapper.Resource('image')),
    ('project', 'tenant_id', mapper.Resource('project')),
    ('user', 'user_id', mapper.Resource('user')),
    ('key_pair', 'key_name', mapper.Resource('nova.key_pair')),
    ('error_reason', 'fault', mapper.Noop),
]
class Resource(base.Resource):
"""Resource class for servers in Compute API v2"""
_sub_manager_list = {
'volume': volume_attachment.Manager,
'interface': interface_attachment.Manager,
}
    def wait_for_finished(self, count=10, interval=10):
        """
        Wait for task finished

        @keyword count: Maximum polling time
        @type count: int
        @keyword interval: Polling interval in seconds
        @type interval: int
        @rtype: None
        """
        for i in range(count):
            time.sleep(interval)
            try:
                self.reload()
            except exception.NotFound:
                # The server disappeared (e.g. it was deleted); treat the
                # task as finished.
                return
            if not self.task_state:
                # No active task any more; done.
                return
        # NOTE(review): falls through silently after `count` polls even if
        # the task is still running -- confirm callers do not need a
        # timeout error here.
def start(self):
"""
Start a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("os-start"))
def stop(self):
"""
Stop a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("os-stop"))
def reboot(self, force=False):
"""
Reboot a server
@keyword force: Whether reboot type is hard or soft. force=True means
hard reboot.
@type type: bool
@rtype: None
"""
if force:
type = "HARD"
else:
type = "SOFT"
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("reboot", type=type))
def pause(self):
"""
Pause a server (save to RAM if server is a VM)
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("pause"))
def unpause(self):
"""
Unpause a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("unpause"))
def suspend(self):
"""
Suspend a server (save to disk if server is a VM)
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("suspend"))
def resume(self):
"""
Resume a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("resume"))
def reset_network(self):
"""
Reset networking of a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("resetNetwork"))
def inject_network_info(self):
"""
Inject network information to a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("injectNetworkInfo"))
def lock(self):
"""
Lock a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("lock"))
def unlock(self):
"""
Unlock a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("unlock"))
def force_delete(self):
"""
Force to delete a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("forceDelete"))
def restore(self):
"""
Restore a defered-deleted server if available
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("restore"))
def rescue(self, password=None):
"""
Create rescue environment for the server
@keyword password: password of the rescue OS
@type password: str
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("rescue", dminPass=password))
def unrescue(self):
"""
Terminate the rescue environment
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("unrescue"))
def shelve(self):
"""
Shelve a running server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("shelve"))
def unshelve(self):
"""
Restore a shelved server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("unshelve"))
def delete_shelve(self):
"""
Delete a shelved server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("shelveOffload"))
def create_image(self, name=None, metadata=None):
"""
Create server image
@keyword name: Image name
@type name: str
@keyword metadata: Metadata
@type metadata: dict
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"createImage",
name=name,
metadata=metadata))
def backup(self, name=None, backup_type=None, rotation=None):
"""
Create server backup
@keyword name: name of the backup data
@type name: str
@keyword backup_type: 'daily' or 'weekly'
@type backup_type: str
@keyword rotation: number of backups to maintain
@type rotation: int
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"createBackup",
name=name,
backup_type=backup_type,
rotation=rotation))
def live_migration(self, host=None, disk_over_commit=False):
"""
Move a server to another host without rebooting
@keyword host: Destination host
@type host: str
@keyword disk_over_commit: do disk over commit or not
@type disk_over_commit: bool
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"os-migrateLive",
host=host,
block_migration=False,
disk_over_commit=disk_over_commit))
def block_migration(self, host=None, disk_over_commit=False):
"""
Move a server to another host without rebooting, with disk copy
@keyword host: Destination host
@type host: str
@keyword disk_over_commit: do disk over commit or not
@type disk_over_commit: bool
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"os-migrateLive",
host=host,
block_migration=True,
disk_over_commit=disk_over_commit))
def evacuate(self, host=None, password=None, shared=True):
"""
Move a server to another host without rebooting, with disk copy
@keyword host: Destination host
@type host: str
@keyword password: new administrator password
@type password: str
@keyword shared: whether the vm is on the shared storage
@type shared: bool
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"evacuate",
host=host,
adminPass=password,
onSharedStorage=shared))
def reset_status(self, status=None):
"""
Move a server to another host
@keyword status: new status of the server ('active', 'pause', ...)
@type status: str
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"os-resetState", state=status))
def get_vnc_console(self, type='novnc'):
"""
Get VNC console
@keyword type: 'novnc' or 'xvpvnc' (required)
@type type: str
@return: Console information
@rtype: dict
"""
ret = self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"os-getVNCConsole",
type=type))
return ret.get('console')
def get_console_log(self, lines=50):
"""
Get console output
@keyword lines: number of lines
@type lines: int
@return: Console logs
@rtype: dict
"""
ret = self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"os-getConsoleOutput",
length=lines))
return ret.get('output')
def get_diagnostics(self):
"""
Get diagnostics
@return: Diagnostics
@rtype: dict
"""
return self._http.get(self._url_resource_path, self._id, 'diagnostics')
def resize(self, flavor=None, disk_config='AUTO'):
"""
Get console output
@keyword flavor: Flavor (required)
@type flavor: osclient2.nova.v2.flavor.Resource
@keyword disk_config: disk configuration ('AUTO')
@type disk_config: str
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data={"resize": {
"flavorRef": flavor.id,
"OS-DCF:diskConfig": disk_config}})
def confirm_resize(self):
"""
Confirm resizing of a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("confirmResize"))
def revert_resize(self):
"""
Revert resizing of a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("revertResize"))
def rebuild(self, image=None, disk_config='AUTO', password=None,
ipv4=None, ipv6=None, personality=None):
"""
Rebuild a server
@keyword image: Image
@type image: osclient2.image.Resource
@keyword disk_config: disk configuration ('AUTO')
@type disk_config: str
@keyword password: admin password
@type password: str
@keyword ipv4: IPv4 address
@type ipv4: str
@keyword ipv6: IPv6 address
@type ipv6: str
@keyword persoality: personality data
@type persoality: [str]
@rtype: None
"""
json_body = utils.get_json_body(
"rebuild",
imageRef=image.id,
adminPass=password,
accessIPv4=ipv4,
accessIPv6=ipv6,
personality=personality)
if disk_config is not None:
json_body['rebuild']['OS-DCF:diskConfig'] = disk_config
self._http.post(self._url_resource_path, self._id, 'action',
data=json_body)
def get_actions(self):
"""
Get instance actions
@rtype: dict
"""
ret = self._http.get(self._url_resource_path, self._id,
'os-instance-actions')
return ret.get("instanceActions")
def get_password(self):
"""
Get instance password
@rtype: dict
"""
ret = self._http.get(self._url_resource_path, self._id,
'os-server-password')
return ret.get("password")
def clear_password(self):
"""
Clear instance password
@rtype: None
"""
self._http.delete(self._url_resource_path, self._id,
'os-server-password')
def get_security_groups(self):
"""
Get security group list for a server
@return: Security group list
@rtype: [str]
"""
ret = self._http.get(self._url_resource_path, self._id,
'os-security-groups')
return [self._client.security_group_nova.
get_empty(x.get('id'))
for x in ret.get('security_groups', [])]
def metadata_get(self):
"""
Get instance metadata
@return: Metadata
@rtype: dict
"""
ret = self._http.get(self._url_resource_path, self._id, 'metadata')
return ret.get('metadata')
def metadata_replace(self, metadata=None):
"""
Replace instance metadata
@keyword metadata: metadata with key=value
@type metadata: dict
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'metadata',
data={'metadata': metadata})
def metadata_update(self, metadata=None):
"""
Update instance metadata
@keyword metadata: metadata with key=value
@type metadata: dict
@rtype: None
"""
for key, value in metadata.items():
self._http.put(self._url_resource_path, self._id, 'metadata', key,
data={'meta': {key: value}})
def metadata_delete(self, key):
"""
Delete instance metadata
@param key: key of the metadata
@type key: str
@rtype: None
"""
self._http.delete(self._url_resource_path, self._id, 'metadata', key)
class Manager(base.Manager):
    """Manager class for servers in Compute API v2"""
    resource_class = Resource
    service_type = 'compute'
    _attr_mapping = ATTRIBUTE_MAPPING
    _hidden_methods = ["update"]
    _json_resource_key = 'server'
    _json_resources_key = 'servers'
    _url_resource_path = '/servers'
    _url_resource_list_path = '/servers/detail'

    def create(self, name=None, image=None, flavor=None,
               personality=None, disks=None, max_count=None,
               min_count=None, networks=None, security_groups=None,
               config_drive=False, key_pair=None, user_data=None):
        """Create a new server
        @keyword name: name of the new server (required)
        @type name: str
        @keyword flavor: Flavor object to use (required)
        @type flavor: osclient2.nova.v2.flavor.Resource
        @keyword image: Image object to use for ephemeral disk
        @type image: osclient2.image.Resource
        @keyword key_pair: KeyPair object to use
        @type key_pair: osclient2.nova.v2.key_pair.Resource
        @keyword networks: list of networks or ones with tag and/or fixed IP
        @type networks: [osclient2.network.Resource]
        @keyword security_groups: list of SecurityGroup object(s) to use
        @type security_groups: [osclient2.nova.v2.security_group.Resource]
        @keyword disks: block device mapping
        @type disks: [dict]
        @keyword personality: file path and the content to embed
        @type personality: dict
        @keyword max_count: the maximum number of server(s) to create
        @type max_count: int
        @keyword min_count: the minimun number of server(s) to create
        @type min_count: int
        @keyword config_drive: config drive exists or not (bool)
        @type config_drive: bool
        @keyword user_data: content of a batch file (str)
        @type user_data: str
        @return: Created server
        @rtype: osclient2.nova.v2.server.Resource
        """
        # Normalize network entries (Network/Port objects or dicts with
        # optional tag / fixed_ip) into API "networks" dictionaries.
        normalized_networks = []
        for entry in (networks or []):
            net_spec = {}
            if isinstance(entry, dict):
                for option in ('tag', 'fixed_ip'):
                    if option in entry:
                        net_spec[option] = entry[option]
                entry = entry.get('network', entry.get('port'))
            if isinstance(entry, Network):
                net_spec['uuid'] = entry.get_id()
            if isinstance(entry, Port):
                net_spec['port'] = entry.get_id()
            normalized_networks.append(net_spec)

        # Normalize disk entries into block_device_mapping_v2 dictionaries;
        # the boot index follows the order of the given list.
        normalized_disks = []
        for boot_index, disk in enumerate(disks or []):
            mapping = {}
            if 'tag' in disk:
                mapping['tag'] = disk['tag']
            if 'size' in disk:
                mapping['volume_size'] = disk['size']
            if 'source' in disk:
                source = disk['source']
                mapping['uuid'] = source.get_id()
                if isinstance(source, Volume):
                    mapping['source_type'] = 'volume'
                    mapping['destination_type'] = 'volume'
                elif isinstance(source, Snapshot):
                    mapping['source_type'] = 'snapshot'
                    mapping['destination_type'] = 'volume'
                elif isinstance(source, Image):
                    mapping['source_type'] = 'image'
                    mapping['destination_type'] = disk.get(
                        'destination_type', 'volume')
            else:
                mapping['source_type'] = 'blank'
                mapping['destination_type'] = disk.get(
                    'destination_type', 'volume')
            if 'delete_on_termination' in disk:
                mapping['delete_on_termination'] = disk['delete_on_termination']
            if 'guest_format' in disk:
                mapping['guest_format'] = disk['guest_format']
            mapping['boot_index'] = boot_index
            normalized_disks.append(mapping)

        return super(Manager, self).create(name=name, image=image,
                                           flavor=flavor,
                                           personality=personality,
                                           disks=normalized_disks,
                                           max_count=max_count,
                                           min_count=min_count,
                                           networks=normalized_networks,
                                           security_groups=security_groups,
                                           config_drive=config_drive,
                                           key_pair=key_pair,
                                           user_data=user_data)
|
yosshy/osclient2
|
osclient2/nova/v2/server.py
|
Python
|
apache-2.0
| 21,602 | 0 |
from django.db import models
from jsonfield import JSONField
class Sensor(models.Model):
    """A sensor registered with the application."""
    # Human-readable sensor name.
    name = models.CharField(max_length=25)
    # Whether the sensor is currently active; new sensors start disabled.
    activated = models.BooleanField(default=False)
    # Sensor type discriminator — semantics defined by callers; TODO confirm.
    type = models.CharField(max_length=10)
    # Arbitrary sensor-specific data stored as JSON.
    meta = JSONField()
|
skitoo/chac
|
chac/sensors/models.py
|
Python
|
gpl-3.0
| 251 | 0 |
import functools
from . import (
constants,
utils,
)
class Card():
    """A generic Tichu card.

    Cards order by ``strength`` when both sides have one; a card with a
    strength always ranks above a card without one, and two strength-less
    cards fall back to comparing their ``kind`` strings.
    """

    def __init__(self, kind=None, strength=None, value=None, verbose=None, **kwargs):
        """Create a card; ``kind`` is mandatory."""
        if kind is None:
            raise TypeError("Missing required 'kind' argument.")
        self.kind = kind
        self.strength = strength
        self.value = value
        # Fall back to the kind as the human-readable name.
        self.verbose = kind if verbose is None else verbose
        super().__init__(**kwargs)

    def __valid_comparision(self, other):
        """Return True when *other* carries the attributes we compare on."""
        return hasattr(other, "kind") and hasattr(other, "strength")
    _valid_comparision = __valid_comparision

    def __lt__(self, other):
        if not self.__valid_comparision(other):
            return NotImplemented
        mine, theirs = self.strength, other.strength
        if mine is None and theirs is None:
            # Neither card has a strength: order lexically by kind.
            return self.kind < other.kind
        if mine is None:
            return True
        if theirs is None:
            return False
        return mine < theirs

    def __str__(self):
        return self.kind
class SimpleCard(Card):
    """A coloured, numbered Tichu card.

    Two simple cards order by strength first, then by colour; comparisons
    with cards lacking a colour or strength fall back to Card's ordering.
    """

    def __init__(self, colour=None, kind=None, strength=None, **kwargs):
        """Create a simple card; ``colour`` is mandatory.

        When ``kind`` is omitted it is derived from ``strength``.
        """
        if colour is None:
            raise TypeError("Missing required 'colour' argument.")
        self.colour = colour
        if kind is None:
            if strength is not None:
                kind = str(strength)
        super().__init__(kind=kind, strength=strength, **kwargs)

    def __valid_comparision(self, arg):
        """Return True when *arg* has a colour and a strength to compare on.

        Bug fix: the original fell through and returned ``None`` instead of
        ``False`` when the base check failed; truthiness was unaffected, but
        the boolean contract was broken.
        """
        if not super()._valid_comparision(arg):
            return False
        return (hasattr(arg, "colour") and arg.colour is not None
                and arg.strength is not None)
    _valid_comparision = __valid_comparision

    def __lt__(self, value):
        if not self.__valid_comparision(value):
            # Not fully comparable as a simple card; defer to Card ordering.
            return super().__lt__(value)
        if self.strength < value.strength:
            return True
        if self.strength == value.strength:
            # Equal strength: break the tie by colour.
            return self.colour < value.colour
        return False

    def __eq__(self, value):
        # Bug fix: return an explicit bool. The original returned None on
        # mismatch, which breaks identity checks such as (a == b) is False.
        if not self._valid_comparision(value):
            return False
        return (self.strength == value.strength) and (self.colour == value.colour)

    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable (Python 3 sets __hash__ to None) — presumably intentional;
    # confirm before adding a __hash__.

    def __str__(self):
        return self.kind + self.colour[0]
class MahJongg(Card):
    """The Mah Jongg special card: kind '1' with the lowest strength (1)."""
    def __init__(self):
        super().__init__(kind='1', strength=1)
class Dragon(Card):
    """The Dragon special card: worth 25 points, no strength assigned."""
    def __init__(self):
        super().__init__(kind='R', value=25, verbose="Dragon")
class Pheonix(Card):
    """The Phoenix special card: worth -25 points, no strength assigned.

    NOTE(review): the class name misspells "Phoenix"; renaming would break
    importers, so it is kept as-is.
    """
    def __init__(self):
        super().__init__(kind='P', value=-25, verbose="Pheonix")
class Dog(Card):
    """The Dog special card: no strength and no point value."""
    def __init__(self):
        super().__init__(kind="D", verbose="Dog")
|
julka2010/games
|
games/tichu/cards.py
|
Python
|
mpl-2.0
| 2,702 | 0.002591 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper classes for Falken observation and action specs."""
# pylint: disable=g-bad-import-order
import collections
import common.generate_protos # pylint: disable=unused-import
import action_pb2
import brain_pb2
import observation_pb2
import primitives_pb2
# Optional fields of ObservationData and ObservationSpec protobufs.
# These are also the only entity names joystick actions may reference
# (see BrainSpec.validate_joystick_references).
OBSERVATION_OPTIONAL_ENTITIES = ['player', 'camera']
# Optional fields of EntityType and Entity protobufs; custom entity fields
# may not reuse these reserved names.
ENTITY_OPTIONAL_FIELDS = ['position', 'rotation']
class InvalidSpecError(Exception):
  """Raised when the spec is invalid.

  Raised by spec validation (e.g. ProtobufValidator.check_spec and
  BrainSpec construction) when a spec protobuf is malformed or
  internally inconsistent.
  """
class TypingError(Exception):
  """Raised when data doesn't match the spec.

  Raised by the data checks (e.g. ProtobufValidator._check_category /
  _check_number) when a data proto's type or value falls outside its spec.
  """
class BrainSpec:
  """A wrapper class for an observation and action spec proto."""

  def __init__(self, brain_spec_pb, spec_base_class=None,
               action_spec_class=None, observation_spec_class=None):
    """Parse and validate the provided spec proto.

    Args:
      brain_spec_pb: BrainSpec protobuf to parse and validate.
      spec_base_class: SpecBase class to use for brain validation.
      action_spec_class: SpecBase class to use for action validation and
        conversion.
      observation_spec_class: SpecBase class to use for observation
        validation and conversion.
    """
    assert isinstance(brain_spec_pb, brain_pb2.BrainSpec)
    spec_base_class = spec_base_class if spec_base_class else SpecBase
    action_spec_class = action_spec_class if action_spec_class else ActionSpec
    observation_spec_class = (observation_spec_class
                              if observation_spec_class else ObservationSpec)
    # Constructing the base wrapper validates the whole brain spec as a side
    # effect; the instance itself is not needed afterwards.
    _ = spec_base_class(brain_spec_pb)
    self.action_spec = action_spec_class(brain_spec_pb.action_spec)
    self.observation_spec = observation_spec_class(
        brain_spec_pb.observation_spec)
    # Cross-spec check: joystick actions may only reference entities that
    # exist in the observation spec.
    self.validate_joystick_references()

  def validate_joystick_references(self):
    """Validate joystick actions reference existing entities.

    Raises:
      InvalidSpecError: If invalid references are found or referenced entities
        do not have positions or rotations.
    """
    # Gather the set of entity names referenced by each joystick action.
    references_by_joystick_name = collections.defaultdict(set)
    for node in self.action_spec.proto_node.children:
      action = node.proto
      if isinstance(action, action_pb2.JoystickType):
        if action.control_frame:
          references_by_joystick_name[node.name].add(action.control_frame)
        if action.controlled_entity:
          references_by_joystick_name[node.name].add(action.controlled_entity)

    # Invert the mapping (reference --> joystick names), collecting any
    # references to entity names that can never exist.
    joystick_names_by_reference = collections.defaultdict(set)
    invalid_references_by_joystick = []
    for joystick_name, references in sorted(
        references_by_joystick_name.items()):
      invalid_references = []
      for reference in references:
        # In future, we may support named entities as well in addition to the
        # fixed player and camera entities.
        if reference in OBSERVATION_OPTIONAL_ENTITIES:
          joystick_names_by_reference[reference].add(joystick_name)
        else:
          invalid_references.append(reference)
      if invalid_references:
        invalid_references_by_joystick.append(
            f'{joystick_name} --> {sorted(invalid_references)}')
    # Report all invalid entity references by joysticks.
    if invalid_references_by_joystick:
      msg = ', '.join(invalid_references_by_joystick)
      raise InvalidSpecError(f'Joystick(s) reference invalid entities: {msg}.')

    # Get all entities by name.
    observation_node = self.observation_spec.proto_node
    entities_by_name = {}
    for optional_field in OBSERVATION_OPTIONAL_ENTITIES:
      entity_node = observation_node.child_by_proto_field_name(optional_field)
      if entity_node:
        entities_by_name[entity_node.name] = entity_node
    global_entities = observation_node.child_by_proto_field_name(
        'global_entities')
    if global_entities:
      for entity_node in global_entities.children:
        entities_by_name[entity_node.name] = entity_node

    # Check that all referenced entities exist and have positions and rotations.
    for reference, joystick_names in joystick_names_by_reference.items():
      joystick_names = sorted(joystick_names)
      entity_node = entities_by_name.get(reference)
      if not entity_node:
        raise InvalidSpecError(f'Missing entity {reference} referenced by '
                               f'joysticks {joystick_names}.')
      if not entity_node.child_by_proto_field_name('position'):
        raise InvalidSpecError(f'Entity {reference} referenced by joysticks '
                               f'{joystick_names} has no position.')
      if not entity_node.child_by_proto_field_name('rotation'):
        raise InvalidSpecError(f'Entity {reference} referenced by joysticks '
                               f'{joystick_names} has no rotation.')
class SpecBase:
  """Base class for an action or observation spec."""

  def __init__(self, spec):
    """Wrap, traverse and validate the provided spec proto.

    Args:
      spec: Spec protobuf to parse and validate.
    """
    super().__init__()
    self._spec_proto = spec
    # Building the ProtobufNode tree traverses and validates the spec.
    self._spec_proto_node = ProtobufNode.from_spec(spec)
    self._node_nest = self._spec_proto_node.as_nest(include_self=False)

  @property
  def proto(self):
    """The wrapped spec protocol buffer."""
    return self._spec_proto

  @property
  def proto_node(self):
    """ProtobufNode tree referencing the wrapped protocol buffer."""
    return self._spec_proto_node

  def __str__(self):
    """Return the string form of the wrapped proto."""
    return str(self._spec_proto)
class ObservationSpec(SpecBase):
  """A wrapper class for an ObservationSpec proto.

  ObservationSpec proto defines the observation space for an agent. This class
  is a helper class used to translate spec information and value to a TF Agents
  compatible format.
  """

  def __init__(self, spec_proto):
    # Reject anything other than an ObservationSpec proto up front.
    assert isinstance(spec_proto, observation_pb2.ObservationSpec)
    super().__init__(spec_proto)
class ActionSpec(SpecBase):
  """A wrapper class for an ActionSpec proto.

  An ActionSpec proto defines the action space for an agent. This class
  is a helper class used to translate spec information and value to a TF Agents
  compatible format.
  """

  def __init__(self, spec_proto):
    # Reject anything other than an ActionSpec proto up front.
    assert isinstance(spec_proto, action_pb2.ActionSpec)
    super().__init__(spec_proto)
def _concat_path(prefix, component):
"""Add a component to a path.
Args:
prefix: Prefix of the path. If this is empty, it isn't included in the
returned path.
component: Component to add to the path.
Returns:
Concatenated path string.
"""
return f'{prefix}/{component}' if prefix else component
def _get_optional_fields_from_proto(proto, field_names):
"""Get optional fields from the specified proto.
Args:
proto: Proto to query.
field_names: Names of the fields to find.
Returns:
List of (field_name, field_proto) tuples where field_name is the
name of the field and field_proto is the sub-message proto.
"""
return [(f, getattr(proto, f)) for f in field_names if proto.HasField(f)]
def _label_repeated_field(proto, field_name):
"""Label elements of a repeated proto field.
Args:
proto: Proto to query.
field_name: Repeated field name to enumerate.
Yields:
(index, name, field_proto) tuples where index is the index of the field,
name is `field_name[index]` and field_proto is the proto in the
repeated field at the index index.
"""
repeated_field = getattr(proto, field_name)
for i in range(len(repeated_field)):
yield (i, f'{field_name}[{i}]', repeated_field[i])
return
def _get_proto_name(proto):
"""Get the value of a proto's name field or return the proto type name.
Args:
proto: Proto to query.
Returns:
Human readable name of the proto.
"""
if 'name' in proto.DESCRIPTOR.fields_by_name:
return proto.name
return proto .DESCRIPTOR.name
class ProtobufValidator:
  """Validates a protobuf.

  Stateless; all checks are static methods dispatched by proto class.
  """

  # Lazily initialized by check_spec(): maps spec proto class --> validator.
  _SPEC_PROTO_CLASS_TO_VALIDATOR = None
  # Lazily initialized by check_data(): maps data proto class --> validator.
  _DATA_PROTO_CLASS_TO_VALIDATOR = None
@staticmethod
def _check_category_type(spec, path_prefix):
"""Validate a CategoryType proto.
Args:
spec: CategoryType proto to validate.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
InvalidSpecError: If there are less than 2 categories.
"""
if len(spec.enum_values) < 2:
raise InvalidSpecError(
f'{path_prefix} has less than two categories: {spec.enum_values}.')
@staticmethod
def _check_number_type(spec, path_prefix):
"""Validate a NumberType proto.
Args:
spec: NumberType proto to validate.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
InvalidSpecError: If the minimum value is the same as or exceeds the
maximum.
"""
if spec.minimum >= spec.maximum:
raise InvalidSpecError(
f'{path_prefix} has invalid or missing range: '
f'[{spec.minimum}, {spec.maximum}].')
@staticmethod
def _check_feeler_type(spec, path_prefix):
"""Validate a FeelerType proto.
Args:
spec: FeelerType proto to validate.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
InvalidSpecError: If the count is less than 1, yaw angles don't match
the count or distance and experimental data ranges are invalid.
"""
if spec.count < 1:
raise InvalidSpecError(
f'{path_prefix} has feeler count {spec.count}, requires at least 1.')
ProtobufValidator._check_number_type(spec.distance,
_concat_path(path_prefix, 'distance'))
if len(spec.yaw_angles) != spec.count:
raise InvalidSpecError(
f'{path_prefix} has {len(spec.yaw_angles)} yaw_angles that '
f'mismatch feeler count {spec.count}.')
for i, experimental_data in enumerate(spec.experimental_data):
ProtobufValidator._check_number_type(
experimental_data,
_concat_path(path_prefix, f'experimental_data[{i}]'))
@staticmethod
def _check_joystick_type(spec, path_prefix):
"""Validate a JoystickType proto.
Args:
spec: JoystickType proto to validate.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
InvalidSpecError: If the axes mode isn't set, a controlled entity isn't
set or the control frame is set with an incompatible axes mode.
"""
if spec.axes_mode == action_pb2.UNDEFINED:
raise InvalidSpecError(f'{path_prefix} has undefined axes_mode.')
if not spec.controlled_entity:
raise InvalidSpecError(f'{path_prefix} has no controlled_entity.')
if spec.control_frame and spec.axes_mode != action_pb2.DIRECTION_XZ:
raise InvalidSpecError(
f'{path_prefix} has invalid control frame "{spec.control_frame}". '
f'control_frame should only be set if axes_mode is DIRECTION_XZ, '
'axes_mode is currently '
f'{action_pb2.JoystickAxesMode.Name(spec.axes_mode)}.')
@staticmethod
def _check_oneof_field_set(proto, path, oneof_name):
"""Validate a proto oneof field is set.
Args:
proto: Proto to check.
path: Human readable path to the proto.
oneof_name: "oneof" field name to check to see whether it's set.
Raises:
InvalidSpecError: If the proto does not contain one of the specified
fields.
"""
if not proto.WhichOneof(oneof_name):
expected_fields = ', '.join([
f.name for f in proto.DESCRIPTOR.oneofs_by_name[oneof_name].fields])
raise InvalidSpecError(
f'{path} {oneof_name} must have one of [{expected_fields}] set.')
@staticmethod
def _check_entity_field_type(spec, path_prefix):
"""Validate an EntityFieldType proto.
Args:
spec: EntityType proto to validate.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
InvalidSpecError: If the proto does not contain a required type field.
"""
ProtobufValidator._check_oneof_field_set(spec, path_prefix, 'type')
@staticmethod
def _check_entity_type(spec, path_prefix):
"""Validate an EntityType proto.
Args:
spec: EntityType proto to validate.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
InvalidSpecError: If any custom entity fields have no name, have a
reserved name or duplicate name.
"""
existing_names = set()
for i, entity_field_type in enumerate(spec.entity_fields):
path = _concat_path(path_prefix, f'entity_field[{i}]')
if not entity_field_type.name:
raise InvalidSpecError(f'{path} has no name.')
if entity_field_type.name in ENTITY_OPTIONAL_FIELDS:
raise InvalidSpecError(
f'{path} has reserved name "{entity_field_type.name}".')
if entity_field_type.name in existing_names:
raise InvalidSpecError(
f'{path} has name "{entity_field_type.name}" '
f'that already exists in {path_prefix}')
existing_names.add(entity_field_type.name)
@staticmethod
def _check_action_type(spec, path_prefix):
"""Validate an ActionType proto.
Args:
spec: ActionType proto to validate.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
InvalidSpecError: If a required type isn't present.
"""
ProtobufValidator._check_oneof_field_set(spec, path_prefix, 'action_types')
@staticmethod
def _check_action_spec(spec, path_prefix):
"""Validate an ActionSpec proto.
Args:
spec: ActionSpec proto to validate.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
InvalidSpecError: If any actions have no names or duplicate names.
"""
existing_names = set()
if not spec.actions:
raise InvalidSpecError(f'{path_prefix} is empty.')
for i, action_type in enumerate(spec.actions):
path = _concat_path(path_prefix, f'actions[{i}]')
if not action_type.name:
raise InvalidSpecError(f'{path} has no name.')
if action_type.name in existing_names:
raise InvalidSpecError(
f'{path} has duplicate name "{action_type.name}".')
existing_names.add(action_type.name)
@staticmethod
def _check_observation_spec(spec, path_prefix):
"""Validate a ObservationSpec proto.
Args:
spec: ObservationSpec proto to validate.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
InvalidSpecError: If any actions have no names or duplicate names.
"""
if not spec.HasField('player') and not spec.global_entities:
raise InvalidSpecError(
f'{path_prefix} must contain at least one non-camera entity.')
@staticmethod
def _check_brain_spec(spec, path_prefix):
"""Validate a BrainSpec proto.
Args:
spec: BrainSpec proto to validate.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
InvalidSpecError: If any actions have no names or duplicate names.
"""
if not spec.HasField(
'observation_spec') or not spec.HasField('action_spec'):
raise InvalidSpecError(
f'{path_prefix} must have an observation spec and action spec.')
  @staticmethod
  def _null_spec_check(unused_spec, unused_path_prefix):
    """Do not perform any validation of the specified proto.

    Used as the check_spec() validator for proto types that need no
    field-level validation (PositionType and RotationType).

    Args:
      unused_spec: Ignored.
      unused_path_prefix: Ignored.
    """
    # Intentionally a no-op.
    pass
  @staticmethod
  def check_spec(spec, path_prefix):
    """Validate a spec proto.

    To keep traversal of the spec proto separate from validation, this method
    does not recursively validate the provided spec proto. To traverse and
    validate a spec proto use ProtobufNode.from_spec().

    Args:
      spec: Spec proto to validate.
      path_prefix: Prefix to add to the path when reporting errors.

    Raises:
      InvalidSpecError: If validation fails or no validator is found for the
        specified proto.
    """
    # Build the proto-class --> validator dispatch table lazily on first
    # call and cache it on the class for subsequent calls.
    if not ProtobufValidator._SPEC_PROTO_CLASS_TO_VALIDATOR:
      ProtobufValidator._SPEC_PROTO_CLASS_TO_VALIDATOR = {
          brain_pb2.BrainSpec:
              ProtobufValidator._check_brain_spec,
          action_pb2.ActionSpec:
              ProtobufValidator._check_action_spec,
          action_pb2.ActionType:
              ProtobufValidator._check_action_type,
          action_pb2.JoystickType:
              ProtobufValidator._check_joystick_type,
          observation_pb2.EntityFieldType:
              ProtobufValidator._check_entity_field_type,
          observation_pb2.EntityType:
              ProtobufValidator._check_entity_type,
          observation_pb2.FeelerType:
              ProtobufValidator._check_feeler_type,
          observation_pb2.ObservationSpec:
              ProtobufValidator._check_observation_spec,
          primitives_pb2.CategoryType:
              ProtobufValidator._check_category_type,
          primitives_pb2.NumberType:
              ProtobufValidator._check_number_type,
          # Position and rotation types require no field-level validation.
          primitives_pb2.PositionType:
              ProtobufValidator._null_spec_check,
          primitives_pb2.RotationType:
              ProtobufValidator._null_spec_check
      }
    validator = ProtobufValidator._SPEC_PROTO_CLASS_TO_VALIDATOR.get(type(spec))
    if not validator:
      raise InvalidSpecError(
          f'Validator not found for {type(spec).__qualname__} at '
          f'"{path_prefix}".')
    validator(spec, path_prefix)
@staticmethod
def _check_spec_proto_class(data, spec, spec_proto_class, path_prefix):
"""Check that a spec proto matches the expected protobuf type.
Args:
data: Data proto to report if the spec doesn't match.
spec: Spec proto instance to check.
spec_proto_class: Expected proto class / message type.
path_prefix: Path to the proto to report if the spec doesn't match.
Raises:
TypingError: If the spec doesn't match the expected class.
"""
# isinstance() is a lot slower than checking directly against the class.
# pylint: disable=unidiomatic-typecheck
if type(spec) != spec_proto_class:
raise TypingError(f'{path_prefix} data does not match spec. data is '
f'{type(data).__qualname__}, spec is '
f'{type(spec).__qualname__}')
@staticmethod
def _check_category(data, spec, path_prefix, check_spec_class):
"""Check that a Category proto conforms to the provided spec proto.
Args:
data: Category protobuf.
spec: CategoryType protobuf to check data with.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data is out of range.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, primitives_pb2.CategoryType, path_prefix)
number_of_values = len(spec.enum_values) - 1
if data.value < 0 or data.value > number_of_values:
enum_items = ', '.join(spec.enum_values)
raise TypingError(
f'{path_prefix} category has value {data.value} that is out of the '
f'specified range [0, {number_of_values}] ({enum_items}).')
@staticmethod
def _check_number(data, spec, path_prefix, check_spec_class):
"""Check that a Number proto conforms to the provided spec proto.
Args:
data: Number protobuf.
spec: NumberType protobuf to check data with.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data is out of range.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, primitives_pb2.NumberType, path_prefix)
if data.value < spec.minimum or data.value > spec.maximum:
raise TypingError(
f'{path_prefix} number has value {data.value} that is out of the '
f'specified range [{spec.minimum}, {spec.maximum}].')
@staticmethod
def _check_repeated_count_matches(data_repeated_field,
data_repeated_fieldname,
data_typename,
spec_repeated_field,
spec_repeated_fieldname, path_prefix):
"""Check the length of two repeated proto fields match.
Args:
data_repeated_field: Proto repeated field to check.
data_repeated_fieldname: Name of the repeated field in the data proto.
data_typename: String type to report in the exception.
spec_repeated_field: Proto repeated field to validate against.
spec_repeated_fieldname: Name of the expected repeated field.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
TypingError: If the length of the two repeated fields do not match.
"""
if len(data_repeated_field) != len(spec_repeated_field):
raise TypingError(
f'{path_prefix} {data_typename} contains '
f'{len(data_repeated_field)} {data_repeated_fieldname} vs. '
f'expected {len(spec_repeated_field)} {spec_repeated_fieldname}.')
  @staticmethod
  def _check_feeler(data, spec, path_prefix, check_spec_class):
    """Check that a Feeler proto conforms to the provided spec proto.

    Args:
      data: Feeler protobuf.
      spec: FeelerType protobuf.
      path_prefix: Prefix to add to the path when reporting errors.
      check_spec_class: Whether this method should check the spec proto class.

    Raises:
      TypingError: If the number of feelers mismatches the spec, the number of
        experimental_data measures for each feeler differs from the spec,
        the distance of each feeler or experimental_data are out of range.
    """
    if check_spec_class:
      ProtobufValidator._check_spec_proto_class(
          data, spec, observation_pb2.FeelerType, path_prefix)
    measurements = data.measurements
    measurements_count = len(data.measurements)
    # The spec fixes the number of feeler measurements.
    if measurements_count != spec.count:
      raise TypingError(
          f'{path_prefix} feeler has an invalid number of measurements '
          f'{measurements_count} vs. expected {spec.count}.')
    for i in range(measurements_count):
      measurement = measurements[i]
      measurement_path = _concat_path(path_prefix, f'measurements[{i}]')
      # Every measurement's distance shares the single spec.distance range.
      # check_spec_class=False: the spec class was already validated above.
      ProtobufValidator._check_number(measurement.distance, spec.distance,
                                      _concat_path(measurement_path,
                                                   'distance'), False)
      experimental_data = measurement.experimental_data
      ProtobufValidator._check_repeated_count_matches(
          experimental_data, 'experimental_data', 'feeler',
          spec.experimental_data, 'experimental_data', measurement_path)
      # Each experimental_data entry is range-checked against its
      # positionally-matching spec entry.
      for j in range(len(experimental_data)):
        experimental_data_path = _concat_path(measurement_path,
                                              f'experimental_data[{j}]')
        ProtobufValidator._check_number(
            experimental_data[j], spec.experimental_data[j],
            experimental_data_path, False)
@staticmethod
def _check_matching_oneof(data, data_oneof, data_typename, spec, spec_oneof,
path_prefix):
"""Check that a oneof field of data is present in spec.
Args:
data: Data protobuf.
data_oneof: Name of the data oneof field to check.
data_typename: Name of the data type to report in the exception.
spec: Spec protobuf.
spec_oneof: Name of the spec oneof field to check.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
TypingError: If the data doesn't have the expected type.
"""
spec_type = spec.WhichOneof(spec_oneof)
data_type = data.WhichOneof(data_oneof)
if spec_type != data_type:
raise TypingError(f'{path_prefix}/{data_oneof} {data_typename} '
f'"{data_type}" does not match the spec {spec_oneof} '
f'"{spec_type}".')
@staticmethod
def _check_entity_field(data, spec, path_prefix, check_spec_class):
"""Check that an EntityField proto conforms to the provided spec proto.
Args:
data: EntityField protobuf.
spec: EntityFieldType protobuf.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data doesn't have the expected type.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, observation_pb2.EntityFieldType, path_prefix)
ProtobufValidator._check_matching_oneof(
data, 'value', 'entity field', spec, 'type', path_prefix)
@staticmethod
def _check_optional_fields(data, optional_fields, spec, path_prefix):
"""Check that a data proto contains the same optional fields as a spec.
Args:
data: Data protobuf.
optional_fields: List of optional field names.
spec: Spec protobuf.
path_prefix: Prefix to add to the path when reporting errors.
Raises:
TypingError: If the optional fields in the data don't conform to the spec.
"""
has_field_message = (
lambda has, name: f'has "{name}"' if has else f'does not have "{name}"')
for field in optional_fields:
data_has_field = data.HasField(field)
spec_has_field = spec.HasField(field)
if data_has_field != spec_has_field:
raise TypingError(f'{path_prefix} entity '
f'{has_field_message(data_has_field, field)} but '
f'spec {has_field_message(spec_has_field, field)}.')
  @staticmethod
  def _check_entity(data, spec, path_prefix, unused_check_spec_class):
    """Check that an Entity proto conforms to the provided spec proto.

    Args:
      data: Entity protobuf.
      spec: EntityType protobuf.
      path_prefix: Prefix to add to the path when reporting errors.
      unused_check_spec_class: Not used.

    Raises:
      TypingError: If the optional fields in the data don't conform to the spec
        or the number of entity fields mismatch.
    """
    # Built-in optional fields (e.g. position/rotation) must be present in
    # the data exactly when they are present in the spec.
    ProtobufValidator._check_optional_fields(data, ENTITY_OPTIONAL_FIELDS,
                                             spec, path_prefix)
    # Custom fields are matched positionally, so the counts must agree.
    ProtobufValidator._check_repeated_count_matches(
        data.entity_fields, 'entity_fields', 'entity',
        spec.entity_fields, 'entity_fields', path_prefix)
@staticmethod
def _check_action(data, spec, path_prefix, check_spec_class):
"""Check that an Action proto conforms to the provided spec proto.
Args:
data: Action protobuf.
spec: ActionType protobuf.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data doesn't have the expected type.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, action_pb2.ActionType, path_prefix)
ProtobufValidator._check_matching_oneof(
data, 'action', 'action', spec, 'action_types', path_prefix)
@staticmethod
def _check_position(data, spec, path_prefix, check_spec_class):
"""Check that a Position proto conforms to the provided spec proto.
Args:
data: Position protobuf.
spec: PositionType protobuf.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data doesn't have the expected type.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, primitives_pb2.PositionType, path_prefix)
@staticmethod
def _check_rotation(data, spec, path_prefix, check_spec_class):
"""Check that a Rotation proto conforms to the provided spec proto.
Args:
data: Rotation protobuf.
spec: RotationType protobuf.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data doesn't have the expected type.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, primitives_pb2.RotationType, path_prefix)
@staticmethod
def _check_action_data(data, spec, path_prefix, check_spec_class):
"""Check that an ActionData proto conforms to the provided spec proto.
Args:
data: ActionData protobuf.
spec: ActionSpec protobuf.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data has a unexpected number of actions or the action
source is unknown.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, action_pb2.ActionSpec, path_prefix)
if data.source == action_pb2.ActionData.SOURCE_UNKNOWN:
raise TypingError(f'{path_prefix} action data\'s source is unknown.')
ProtobufValidator._check_repeated_count_matches(
data.actions, 'actions', 'actions', spec.actions, 'actions',
path_prefix)
@staticmethod
def _check_joystick(data, spec, path_prefix, check_spec_class):
"""Check that a Joystick proto conforms to the provided spec proto.
Args:
data: Joystick protobuf.
spec: JoystickType protobuf.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If an axis value is out of range.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, action_pb2.JoystickType, path_prefix)
def check_axis(value, axis):
"""Check that a joystick axis value is within range.
Args:
value: Value to check.
axis: Name of the axis.
Raises:
TypingError: If an axis value is out of range.
"""
if value < -1.0 or value > 1.0:
raise TypingError(f'{path_prefix} joystick {axis} value {value} is out '
'of range [-1.0, 1.0].')
check_axis(data.x_axis, 'x_axis')
check_axis(data.y_axis, 'y_axis')
  @staticmethod
  def _check_observation(data, spec, path_prefix, check_spec_class):
    """Check that an Observation proto conforms to the provided spec proto.

    Args:
      data: Observation protobuf.
      spec: ObservationSpec protobuf.
      path_prefix: Prefix to add to the path when reporting errors.
      check_spec_class: Whether this method should check the spec proto class.

    Raises:
      TypingError: If the data contains an unexpected number of entities.
    """
    if check_spec_class:
      ProtobufValidator._check_spec_proto_class(
          data, spec, observation_pb2.ObservationSpec, path_prefix)
    # Built-in entities (e.g. player/camera) must be present in the data
    # exactly when present in the spec.
    ProtobufValidator._check_optional_fields(
        data, OBSERVATION_OPTIONAL_ENTITIES, spec, path_prefix)
    # Global entities are matched positionally, so counts must agree.
    ProtobufValidator._check_repeated_count_matches(
        data.global_entities, 'global_entities', 'observations',
        spec.global_entities, 'global_entities', path_prefix)
  @staticmethod
  def check_data(data, spec, path_prefix, check_spec_class):
    """Check that a data proto conforms to the provided spec proto.

    To keep traversal of the data and spec protos separate from validation,
    this method does not recursively validate the provided data proto. To
    traverse and validate a data proto use ProtobufNode.data_to_proto_nest().

    Args:
      data: Data protobuf.
      spec: Spec protobuf.
      path_prefix: String path to the protobuf.
      check_spec_class: Whether this method should check the spec proto class.

    Raises:
      TypingError: If the data protobuf does not conform the spec protobuf or
        the data proto is not supported.
    """
    # Lazily build the dispatch table on first use and cache it on the class
    # so subsequent calls only pay for a dict lookup.
    if not ProtobufValidator._DATA_PROTO_CLASS_TO_VALIDATOR:
      ProtobufValidator._DATA_PROTO_CLASS_TO_VALIDATOR = {
          action_pb2.ActionData: ProtobufValidator._check_action_data,
          action_pb2.Action: ProtobufValidator._check_action,
          action_pb2.Joystick: ProtobufValidator._check_joystick,
          observation_pb2.EntityField: ProtobufValidator._check_entity_field,
          observation_pb2.Entity: ProtobufValidator._check_entity,
          observation_pb2.Feeler: ProtobufValidator._check_feeler,
          observation_pb2.ObservationData: ProtobufValidator._check_observation,
          primitives_pb2.Category: ProtobufValidator._check_category,
          primitives_pb2.Number: ProtobufValidator._check_number,
          primitives_pb2.Position: ProtobufValidator._check_position,
          primitives_pb2.Rotation: ProtobufValidator._check_rotation,
      }
    # Dispatch on the exact data proto class.
    validator = (
        ProtobufValidator._DATA_PROTO_CLASS_TO_VALIDATOR.get(type(data)))
    if not validator:
      raise TypingError(f'Validator not found for {type(data).__qualname__} at '
                        f'"{path_prefix}".')
    validator(data, spec, path_prefix, check_spec_class)
class ProtobufDataValidationOptions:
  """Options that control validation of data protos against spec protos."""

  def __init__(self, check_feeler_data_with_spec=True):
    """Initialize options.

    Args:
      check_feeler_data_with_spec: Whether to check each feeler proto against
        the spec. This operation is very expensive with large numbers of
        feelers. If the data has already been validated it is preferable to
        disable feeler validation.
    """
    # Controls whether Feeler protos are re-validated during traversal.
    self.check_feeler_data_with_spec = check_feeler_data_with_spec
class ProtobufNode:
"""Class that references a node in a protobuf.
Attributes:
proto: Referenced protobuf instance.
"""
# Lazily initialized by _from_spec().
_SPEC_PROTOCLASS_TO_PARSER = None
  def __init__(self, name, proto, proto_field_name):
    """Initialize this instance.

    Args:
      name: Human readable name of the node.
      proto: Protobuf instance.
      proto_field_name: Name of this field in the parent proto.
    """
    self._proto = proto
    self._name = name
    self._proto_field_name = proto_field_name
    # Maps child proto field names to ProtobufNode instances; ordered so
    # children enumerate deterministically in insertion order.
    self._children_by_proto_field_name = collections.OrderedDict()
    # Set by add_children() when this node is attached to a parent.
    self._parent = None
    # Cache the path string; with no parent this is just the node's name.
    self._update_path()
@property
def children(self):
"""Get the sequence of children of this node."""
return tuple(self._children_by_proto_field_name.values())
@property
def proto(self):
"""Get the protobuf owned by this node."""
return self._proto
@property
def proto_field_name(self):
"""Get the name of this field in the parent proto."""
return self._proto_field_name
def child_by_proto_field_name(self, proto_field_name):
"""Get a child node by proto_field_name.
Args:
proto_field_name: Name of the child field.
Returns:
ProtobufNode instance if found, None otherwise.
"""
return self._children_by_proto_field_name.get(proto_field_name)
@property
def parent(self):
"""Get the parent node of this node."""
return self._parent
@property
def name(self):
"""Name of this node."""
return self._name
@property
def path(self):
"""Human readable path of this node relative to its' ancestors."""
return self._path
def _update_path(self):
"""Update the human readable path of this node relative to ancestors."""
parent = self.parent
if parent:
parent_path = parent._path # pylint: disable=protected-access
self._path = '/'.join([parent_path, self.name])
else:
self._path = self.name
for child in self.children:
child._update_path() # pylint: disable=protected-access
def add_children(self, children):
"""Add children node to this instance.
Args:
children: Sequence of ProtobufNode instances to add as children of this
node.
Returns:
Reference to this node.
"""
for child in children:
assert child.proto_field_name not in self._children_by_proto_field_name
# pylint: disable=protected-access
child._remove_from_parent(update_path=False)
child._parent = self # pylint: disable=protected-access
self._children_by_proto_field_name[child.proto_field_name] = child
child._update_path() # pylint: disable=protected-access
return self
  def remove_from_parent(self):
    """Remove this node from its parent."""
    # Public wrapper; the default update_path=True also refreshes this
    # node's cached path after detaching.
    self._remove_from_parent()
def _remove_from_parent(self, update_path=True):
"""Remove this node from its parent.
Args:
update_path: Whether to update this node's cached path.
"""
if self._parent:
# pylint: disable=protected-access
del self._parent._children_by_proto_field_name[self.proto_field_name]
self._parent = None
if update_path:
self._update_path()
def __eq__(self, other):
"""Compare for equality with another ProtobufNode instance.
Args:
other: ProtobufNode instance to compare with.
Returns:
True if they're equivalent, False otherwise.
"""
if not (other and issubclass(type(other), type(self))):
return False
if self.name != other.name:
return False
if self.proto_field_name != other.proto_field_name:
return False
if self.proto != other.proto:
return False
if len(self.children) != len(other.children):
return False
for this_child, other_child in zip(self.children, other.children):
if this_child == other_child:
continue
return False
return True
def __ne__(self, other):
"""Compare for inequality with another ProtobufNode instance.
Args:
other: ProtobufNode instance to compare with.
Returns:
True if they're not equivalent, False otherwise.
"""
return not self.__eq__(other)
def __str__(self):
"""Construct a string representation.
Returns:
String representation of this instance.
"""
children_string = ', '.join([str(child) for child in self.children])
return (f'{self.name}: '
f'(proto=({type(self.proto).__qualname__}, '
f'{self.proto_field_name}: {self.proto}), '
f'children=[{children_string}])')
def as_nest(self, include_self=True):
"""Generate a nested dictionary from this node.
Args:
include_self: Whether to include this node in the returned nest.
Returns:
Nested dictionary with leaf values referencing ProtobufNode instances that
correspond to leaves in this tree.
"""
children = self.children
if children:
child_nest = {}
nest = {self.name: child_nest}
for child in self.children:
child_nest[child.name] = child.as_nest(include_self=False)
else:
nest = {self.name: self}
return nest if include_self else nest[self.name]
@staticmethod
def _infer_path_components_from_spec(spec, name, parent_path):
"""Infer path components from a spec proto.
Args:
spec: Spec proto to query.
name: Override for the proto name.
parent_path: String path to the proto.
Returns:
(name, parent_path, path) where:
* name is the overridden name or the name derived from the spec proto.
* parent_path is the supplied parent_path or an empty string if it was
None.
* path is the constructed path to the proto
"""
parent_path = parent_path if parent_path else ''
name = name if name else _get_proto_name(spec)
path = _concat_path(parent_path, name)
return (name, parent_path, path)
  @staticmethod
  def _from_leaf_spec(spec, name, unused_parent_path, proto_field_name):
    """Parse a leaf spec protobuf into a ProtobufNode instance.

    Args:
      spec: Protobuf to wrap in a ProtobufNode instance.
      name: Name of the node.
      unused_parent_path: Ignored.
      proto_field_name: Name of this proto field in the parent proto.

    Returns:
      ProtobufNode instance.
    """
    # Leaf specs have no children, so simply wrap the proto in a node.
    return ProtobufNode(name, spec, proto_field_name)
@staticmethod
def _from_entity_field_type(spec, name, parent_path, proto_field_name):
"""Parse an EntityFieldType protobuf into a ProtobufNode.
Args:
spec: Protobuf to wrap in a ProtobufNode instance.
name: Name of the node.
parent_path: String path to this protobuf.
proto_field_name: Name of this proto field in the parent proto.
Returns:
ProtobufNode instance.
"""
spec_field = getattr(spec, spec.WhichOneof('type'))
return ProtobufNode._from_spec(spec_field, name, parent_path,
proto_field_name)
  @staticmethod
  def _from_entity_type(spec, name, parent_path, proto_field_name):
    """Parse an EntityType protobuf into a ProtobufNode.

    Args:
      spec: Protobuf to wrap in a ProtobufNode instance.
      name: Name of the node.
      parent_path: String path to this protobuf.
      proto_field_name: Name of this proto field in the parent proto.

    Returns:
      ProtobufNode instance.
    """
    name, _, path = ProtobufNode._infer_path_components_from_spec(
        spec, name, parent_path)
    # Add top level "entity" node that is assigned the name of the entity.
    node = ProtobufNode(name, spec, proto_field_name)
    # Add built-in fields and custom named fields as children.
    # Each entry is (node_name, field_proto, proto_field_name).
    fields = []
    for field_name, field_proto in _get_optional_fields_from_proto(
        spec, ENTITY_OPTIONAL_FIELDS):
      # Built-in fields are named after their proto field.
      fields.append((field_name, field_proto, field_name))
    for _, field_name, field_proto in _label_repeated_field(
        spec, 'entity_fields'):
      # Custom fields take their node name from the spec's "name" field.
      fields.append((field_proto.name, field_proto, field_name))
    for node_name, field_proto, field_name in fields:
      node.add_children([ProtobufNode._from_spec(field_proto, node_name, path,
                                                 field_name)])
    return node
@staticmethod
def _from_brain_spec(spec, name, parent_path, proto_field_name):
"""Parse an BrainSpec protobuf into a ProtobufNode.
Args:
spec: Protobuf to wrap in a ProtobufNode instance.
name: Name of the node.
parent_path: String path to this protobuf.
proto_field_name: Name of this proto field in the parent proto.
Returns:
ProtobufNode instance.
"""
name, _, path = ProtobufNode._infer_path_components_from_spec(
spec, name, parent_path)
# Add top level "brain spec" node.
node = ProtobufNode(name, spec, proto_field_name)
# Add observation and action specs.
node.add_children([
ProtobufNode._from_observation_spec(spec.observation_spec,
'observation_spec', path,
'observation_spec'),
ProtobufNode._from_action_spec(spec.action_spec, 'action_spec', path,
'action_spec')
])
return node
  @staticmethod
  def _from_observation_spec(spec, name, parent_path, proto_field_name):
    """Parse an ObservationSpec protobuf into a ProtobufNode.

    Args:
      spec: Protobuf to wrap in a ProtobufNode instance.
      name: Name of the node.
      parent_path: String path to this protobuf.
      proto_field_name: Name of this proto field in the parent proto.

    Returns:
      ProtobufNode instance.
    """
    name, _, path = ProtobufNode._infer_path_components_from_spec(spec, name,
                                                                  parent_path)
    # Add top level "observations" node.
    node = ProtobufNode(name, spec, proto_field_name)
    # Add observations/{entity_name} for each of the built-in entities.
    for field_name, field_proto in _get_optional_fields_from_proto(
        spec, OBSERVATION_OPTIONAL_ENTITIES):
      node.add_children([ProtobufNode._from_spec(field_proto, field_name, path,
                                                 field_name)])
    # Add observations/global_entities/{i} for each of the global entities.
    if spec.global_entities:
      global_entities_node = ProtobufNode('global_entities',
                                          spec.global_entities,
                                          'global_entities')
      global_entities_node = ProtobufNode('global_entities',
                                          spec.global_entities,
                                          'global_entities')
      node.add_children([global_entities_node])
      for i, global_entity_spec in enumerate(spec.global_entities):
        # NOTE(review): `path` here is the observations path, not the
        # global_entities path; the final node path is recomputed by
        # add_children(), so this only affects validation error messages —
        # confirm this is intended.
        global_entities_node.add_children([
            ProtobufNode._from_spec(global_entity_spec, str(i), path,
                                    f'global_entities[{i}]')])
    return node
@staticmethod
def _from_action_type(spec, name, parent_path, proto_field_name):
"""Parse an ActionType protobuf into a ProtobufNode.
Args:
spec: Protobuf to wrap in a ProtobufNode instance.
name: Name of the node.
parent_path: String path to this protobuf.
proto_field_name: Name of this proto field in the parent proto.
Returns:
ProtobufNode instance.
"""
# Create a node for the action using the name supplied by the caller.
spec_field = getattr(spec, spec.WhichOneof('action_types'))
return ProtobufNode._from_spec(spec_field, name, parent_path,
proto_field_name)
@staticmethod
def _from_action_spec(spec, name, parent_path, proto_field_name):
"""Parse an ActionSpec protobuf into a ProtobufNode.
Args:
spec: Protobuf to wrap in a ProtobufNode instance.
name: Name of the node.
parent_path: String path to this protobuf.
proto_field_name: Name of this proto field in the parent proto.
Returns:
ProtobufNode instance.
"""
name, _, path = ProtobufNode._infer_path_components_from_spec(spec, name,
parent_path)
# Add top level "actions" node.
node = ProtobufNode(name, spec, proto_field_name)
# Add actions/{actions_type.name} node for each named action.
for i, action_type in enumerate(spec.actions):
node.add_children([
ProtobufNode._from_spec(action_type, action_type.name, path,
f'actions[{i}]')])
return node
  @staticmethod
  def _from_spec(spec, name, parent_path, proto_field_name):
    """Parse a spec protobuf into a tree of ProtobufNode instances.

    Args:
      spec: Protobuf to parse and validate.
      name: Name of the top level node to create. If this isn't specified it's
        derived from spec.name or the name of the protobuf type.
      parent_path: String path to the current node.
      proto_field_name: Name of this proto field in the parent proto.

    Returns:
      ProtobufNode instance that references the spec protobuf and its'
      children.

    Raises:
      InvalidSpecError: If the spec is missing required fields, observations
        or actions are missing names, use reserved names or have duplicate
        names.
    """
    _, _, path = ProtobufNode._infer_path_components_from_spec(spec, name,
                                                               parent_path)
    # Validate this spec node before recursing into it.
    ProtobufValidator.check_spec(spec, path)
    # Lazily build the parser dispatch table on first use and cache it on
    # the class.
    if not ProtobufNode._SPEC_PROTOCLASS_TO_PARSER:
      ProtobufNode._SPEC_PROTOCLASS_TO_PARSER = {
          action_pb2.ActionSpec: ProtobufNode._from_action_spec,
          action_pb2.ActionType: ProtobufNode._from_action_type,
          action_pb2.JoystickType: ProtobufNode._from_leaf_spec,
          brain_pb2.BrainSpec: ProtobufNode._from_brain_spec,
          observation_pb2.EntityFieldType: ProtobufNode._from_entity_field_type,
          observation_pb2.EntityType: ProtobufNode._from_entity_type,
          observation_pb2.FeelerType: ProtobufNode._from_leaf_spec,
          observation_pb2.ObservationSpec: ProtobufNode._from_observation_spec,
          primitives_pb2.CategoryType: ProtobufNode._from_leaf_spec,
          primitives_pb2.NumberType: ProtobufNode._from_leaf_spec,
          primitives_pb2.PositionType: ProtobufNode._from_leaf_spec,
          primitives_pb2.RotationType: ProtobufNode._from_leaf_spec
      }
    # Dispatch on the exact spec proto class.
    parser = ProtobufNode._SPEC_PROTOCLASS_TO_PARSER.get(type(spec))
    if not parser:
      raise InvalidSpecError(
          f'Unknown spec type: {type(spec).__qualname__} ({spec} at '
          f'"{parent_path}")')
    return parser(spec, name, parent_path, proto_field_name)
  @staticmethod
  def from_spec(spec, name=None, parent_path=None):
    """Parse a spec protobuf into a tree of ProtobufNode instances.

    Args:
      spec: Protobuf to parse and validate.
      name: Name of the top level node to create. If this isn't specified it's
        derived from spec.name or the name of the protobuf type.
      parent_path: String path to the current node.

    Returns:
      ProtobufNode instance that references the spec protobuf and its'
      children.

    Raises:
      InvalidSpecError: If the spec is missing required fields, observations
        or actions are missing names, use reserved names or have duplicate
        names.
    """
    # Public entry point; the top level node has no parent proto field name.
    return ProtobufNode._from_spec(spec, name, parent_path, '')
def _leaf_data_to_proto_nest(self, data, mapper, check_spec_class,
unused_options):
"""Wrap a data proto in a nest.
Args:
data: Data proto to wrap in a dictionary..
mapper: Optional (proto, path) callable to execute for each leaf proto.
check_spec_class: Whether this method should check the spec proto class.
unused_options: Unused.
Returns:
Dictionary containing the supplied data keyed by the node name.
"""
ProtobufValidator.check_data(data, self.proto, self.path,
check_spec_class)
return {self.name: mapper(data, self.path)}
def _feeler_to_proto_nest(self, data, mapper, check_spec_class, options):
"""Wrap a feeler data proto in a nest.
Args:
data: Data proto to wrap in a dictionary.
mapper: Optional (proto, path) callable to execute for each leaf proto.
check_spec_class: Whether this method should check the spec proto class.
options: ProtobufDataValidationOptions instance.
Returns:
Dictionary containing the supplied data keyed by the node name.
"""
if options.check_feeler_data_with_spec:
ProtobufValidator.check_data(data, self.proto, self.path,
check_spec_class)
return {self.name: mapper(data, self.path)}
  def _observation_data_to_proto_nest(self, data, mapper, check_spec_class,
                                      options):
    """Convert an ObservationData proto into a nest of protos.

    Args:
      data: ObservationData proto.
      mapper: Optional (proto, path) callable to execute for each leaf proto.
      check_spec_class: Whether this method should check the spec proto class.
      options: ProtobufDataValidationOptions instance.

    Returns:
      Nest of data protos.
    """
    ProtobufValidator.check_data(data, self.proto, self.path, check_spec_class)
    child_nest = {}
    nest = {self.name: child_nest}
    # Recurse into each populated built-in entity (e.g. player/camera).
    # check_spec_class=False below: check_data() above already validated
    # that data and spec classes correspond.
    for data_field_name, data_field_proto in _get_optional_fields_from_proto(
        data, OBSERVATION_OPTIONAL_ENTITIES):
      spec_node = self.child_by_proto_field_name(data_field_name)
      # pylint: disable=protected-access
      child_nest.update(spec_node._entity_to_proto_nest(data_field_proto,
                                                        mapper, False, options))
    # Global entities live under an intermediate "global_entities" node,
    # with children keyed by index (e.g. "global_entities[0]").
    if data.global_entities:
      global_entities_spec_node = self.child_by_proto_field_name(
          'global_entities')
      global_entities_nest = {}
      child_nest['global_entities'] = global_entities_nest
      for _, data_field_name, data_field_proto in _label_repeated_field(
          data, 'global_entities'):
        spec_node = global_entities_spec_node.child_by_proto_field_name(
            data_field_name)
        # pylint: disable=protected-access
        global_entities_nest.update(spec_node._entity_to_proto_nest(
            data_field_proto, mapper, False, options))
    return nest
  def _entity_to_proto_nest(self, data, mapper, check_spec_class, options):
    """Convert an Entity proto into a nest of protos.

    Args:
      data: Entity proto.
      mapper: Optional (proto, path) callable to execute for each leaf proto.
      check_spec_class: Whether this method should check the spec proto class.
      options: ProtobufDataValidationOptions instance.

    Returns:
      Nest of data protos.
    """
    ProtobufValidator.check_data(data, self.proto, self.path, check_spec_class)
    child_nest = {}
    nest = {self.name: child_nest}
    # Recurse into each populated built-in field (e.g. position/rotation).
    for data_field_name, data_field_proto in _get_optional_fields_from_proto(
        data, ENTITY_OPTIONAL_FIELDS):
      spec_node = self.child_by_proto_field_name(data_field_name)
      # pylint: disable=protected-access
      child_nest.update(spec_node._data_to_proto_nest(
          data_field_proto, mapper, False, options))
    # Custom entity fields are oneof wrappers: validate the wrapper against
    # the positionally-matching spec entry, then recurse into the populated
    # value.
    for i, data_field_name, data_field_proto in _label_repeated_field(
        data, 'entity_fields'):
      spec_node = self.child_by_proto_field_name(data_field_name)
      ProtobufValidator.check_data(data_field_proto,
                                   self.proto.entity_fields[i],
                                   spec_node.path, False)
      # pylint: disable=protected-access
      child_nest.update(spec_node._data_to_proto_nest(
          getattr(data_field_proto, data_field_proto.WhichOneof('value')),
          mapper, False, options))
    return nest
  def _action_data_to_proto_nest(self, data, mapper, check_spec_class, options):
    """Convert an ActionData proto into a nest of protos.

    Args:
      data: ActionData proto.
      mapper: Optional (proto, path) callable to execute for each leaf proto.
      check_spec_class: Whether this method should check the spec proto class.
      options: ProtobufDataValidationOptions instance.

    Returns:
      Nest of data protos.
    """
    ProtobufValidator.check_data(data, self.proto, self.path, check_spec_class)
    child_nest = {}
    nest = {self.name: child_nest}
    # Actions are oneof wrappers: validate each wrapper against the
    # positionally-matching spec entry, then recurse into the populated
    # value.
    for i, data_field_name, data_field_proto in _label_repeated_field(
        data, 'actions'):
      spec_node = self.child_by_proto_field_name(data_field_name)
      ProtobufValidator.check_data(data_field_proto,
                                   self.proto.actions[i],
                                   spec_node.path, False)
      # pylint: disable=protected-access
      child_nest.update(spec_node._data_to_proto_nest(
          getattr(data_field_proto, data_field_proto.WhichOneof('action')),
          mapper, False, options))
    return nest
  def _data_to_proto_nest(self, data, mapper, check_spec_class, options):
    """Convert the provided data into a dictionary tree (nest) of protos.

    Args:
      data: Data protobuf to parse, validate against the provided spec and
        split into a nest.
      mapper: Optional (proto, path) callable to execute for each leaf proto.
        proto is the leaf data proto and path is the ProtobufNode.path.
        The return value of this method replaces the leaf proto in the nest.
      check_spec_class: Whether this method should check the spec proto class.
      options: ProtobufDataValidationOptions instance.

    Returns:
      Nest of data protos.

    Raises:
      TypingError: If the data protobuf does not conform this node's spec
        protobuf.
    """
    # Default mapper is the identity, i.e. nest leaves are the data protos.
    mapper = mapper if mapper else lambda data, path: data
    # NOTE(review): this dispatch dict is rebuilt on every call (it binds
    # instance methods, so it can't trivially be cached on the class like
    # _SPEC_PROTOCLASS_TO_PARSER) — consider caching unbound handlers if
    # this shows up in profiles.
    proto_class_to_parser = {
        primitives_pb2.Category: self._leaf_data_to_proto_nest,
        primitives_pb2.Number: self._leaf_data_to_proto_nest,
        primitives_pb2.Position: self._leaf_data_to_proto_nest,
        primitives_pb2.Rotation: self._leaf_data_to_proto_nest,
        observation_pb2.Feeler: self._feeler_to_proto_nest,
        action_pb2.Joystick: self._leaf_data_to_proto_nest,
        observation_pb2.ObservationData: self._observation_data_to_proto_nest,
        observation_pb2.Entity: self._entity_to_proto_nest,
        action_pb2.ActionData: self._action_data_to_proto_nest,
    }
    parser = proto_class_to_parser.get(type(data))
    if not parser:
      raise TypingError(f'Data proto {type(data).__qualname__} is not '
                        'supported.')
    return parser(data, mapper, check_spec_class, options)
def data_to_proto_nest(self, data, mapper=None, options=None):
  """Convert the provided data into a dictionary tree (nest) of protos.

  Args:
    data: Data protobuf to parse, validate against the provided spec and
      split into a nest.
    mapper: Optional (proto, path) callable to execute for each leaf proto.
      proto is the leaf data proto and path is the ProtobufNode.path.
      The return value of this method replaces the leaf proto in the nest.
    options: Optional ProtobufDataValidationOptions instance; a default
      instance is created when omitted.

  Returns:
    Nest of data protos.

  Raises:
    TypingError: If the data protobuf does not conform this node's spec
      protobuf.
  """
  validation_options = options if options else ProtobufDataValidationOptions()
  # Public entry point always checks the spec class.
  return self._data_to_proto_nest(data, mapper, True, validation_options)
|
google-research/falken
|
service/learner/brains/specs.py
|
Python
|
apache-2.0
| 58,084 | 0.005113 |
# coding=utf-8
"""
The NetworkCollector class collects metrics on network interface usage
using /proc/net/dev.
#### Dependencies
* /proc/net/dev
"""
import diamond.collector
from diamond.collector import str_to_bool
import diamond.convertor
import os
import re
try:
import psutil
except ImportError:
psutil = None
class NetworkCollector(diamond.collector.Collector):
    """Collects per-interface network I/O counters.

    Reads /proc/net/dev when it is accessible and falls back to psutil
    otherwise.  Byte counters are converted to the configured byte units
    before publishing; all counters are published as derivatives (rates).

    NOTE(review): uses the Python 2-only ``long`` builtin in collect(),
    so this collector targets Python 2.
    """

    # Kernel-provided per-interface counter file.
    PROC = '/proc/net/dev'

    def get_default_config_help(self):
        # Help strings for the extra options this collector understands.
        config_help = super(NetworkCollector, self).get_default_config_help()
        config_help.update({
            'interfaces': 'List of interface types to collect',
            'greedy': 'Greedy match interfaces',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(NetworkCollector, self).get_default_config()
        config.update({
            'path': 'network',
            # Interface name prefixes to match (eth0, bond1, enp0s3, ...).
            'interfaces': ['eth', 'bond', 'em', 'p1p', 'eno', 'enp', 'ens',
                           'enx'],
            'byte_unit': ['bit', 'byte'],
            'greedy': 'true',
        })
        return config

    def collect(self):
        """
        Collect network interface stats.
        """
        # Initialize results
        results = {}

        if os.access(self.PROC, os.R_OK):

            # Open File
            file = open(self.PROC)
            # Build Regular Expression
            greed = ''
            if str_to_bool(self.config['greedy']):
                # Greedy mode also matches any suffix after the prefix
                # (e.g. 'eth' matches 'eth0.100').
                greed = '\S*'

            # One named group per /proc/net/dev column, rx columns first.
            exp = (('^(?:\s*)((?:%s)%s):(?:\s*)' +
                    '(?P<rx_bytes>\d+)(?:\s*)' +
                    '(?P<rx_packets>\w+)(?:\s*)' +
                    '(?P<rx_errors>\d+)(?:\s*)' +
                    '(?P<rx_drop>\d+)(?:\s*)' +
                    '(?P<rx_fifo>\d+)(?:\s*)' +
                    '(?P<rx_frame>\d+)(?:\s*)' +
                    '(?P<rx_compressed>\d+)(?:\s*)' +
                    '(?P<rx_multicast>\d+)(?:\s*)' +
                    '(?P<tx_bytes>\d+)(?:\s*)' +
                    '(?P<tx_packets>\w+)(?:\s*)' +
                    '(?P<tx_errors>\d+)(?:\s*)' +
                    '(?P<tx_drop>\d+)(?:\s*)' +
                    '(?P<tx_fifo>\d+)(?:\s*)' +
                    '(?P<tx_colls>\d+)(?:\s*)' +
                    '(?P<tx_carrier>\d+)(?:\s*)' +
                    '(?P<tx_compressed>\d+)(?:.*)$') %
                   (('|'.join(self.config['interfaces'])), greed))
            reg = re.compile(exp)
            # Match Interfaces
            for line in file:
                match = reg.match(line)
                if match:
                    device = match.group(1)
                    results[device] = match.groupdict()
            # Close File
            file.close()
        else:
            # /proc/net/dev unreadable (e.g. non-Linux): fall back to psutil.
            if not psutil:
                self.log.error('Unable to import psutil')
                self.log.error('No network metrics retrieved')
                return None

            # NOTE(review): network_io_counters is the legacy psutil API
            # name; newer psutil calls this net_io_counters.
            network_stats = psutil.network_io_counters(True)
            for device in network_stats.keys():
                network_stat = network_stats[device]
                results[device] = {}
                results[device]['rx_bytes'] = network_stat.bytes_recv
                results[device]['tx_bytes'] = network_stat.bytes_sent
                results[device]['rx_packets'] = network_stat.packets_recv
                results[device]['tx_packets'] = network_stat.packets_sent

        for device in results:
            stats = results[device]
            for s, v in stats.items():
                # Get Metric Name
                metric_name = '.'.join([device, s])
                # Get Metric Value
                metric_value = self.derivative(metric_name,
                                               long(v),
                                               diamond.collector.MAX_COUNTER)

                # Convert rx_bytes and tx_bytes
                if s == 'rx_bytes' or s == 'tx_bytes':
                    convertor = diamond.convertor.binary(value=metric_value,
                                                         unit='byte')

                    for u in self.config['byte_unit']:
                        # Public Converted Metric
                        self.publish(metric_name.replace('bytes', u),
                                     convertor.get(unit=u), 2)
                else:
                    # Publish Metric Derivative
                    self.publish(metric_name, metric_value)

        return None
|
dcsquared13/Diamond
|
src/collectors/network/network.py
|
Python
|
mit
| 4,536 | 0.007496 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.utils import flt, get_datetime, getdate, date_diff, cint, nowdate
from frappe.model.document import Document
from erpnext.manufacturing.doctype.bom.bom import validate_bom_no, get_bom_items_as_dict
from dateutil.relativedelta import relativedelta
from erpnext.stock.doctype.item.item import validate_end_of_life
from erpnext.manufacturing.doctype.workstation.workstation import WorkstationHolidayError
from erpnext.projects.doctype.timesheet.timesheet import OverlapError
from erpnext.stock.doctype.stock_entry.stock_entry import get_additional_costs
from erpnext.manufacturing.doctype.manufacturing_settings.manufacturing_settings import get_mins_between_operations
from erpnext.stock.stock_balance import get_planned_qty, update_bin_qty
from frappe.utils.csvutils import getlink
from erpnext.stock.utils import get_bin, validate_warehouse_company, get_latest_stock_qty
from erpnext.utilities.transaction_base import validate_uom_is_integer
# Raised when a Work Order would produce more than the Sales Order allows
# (beyond the configured over-production allowance).
class OverProductionError(frappe.ValidationError): pass
# Raised when stock entries exceed the planned quantity for a Work Order.
class StockOverProductionError(frappe.ValidationError): pass
# Raised when an operation's duration is invalid.
class OperationTooLongError(frappe.ValidationError): pass
# Raised when a Work Order targets a template item that has variants.
class ItemHasVariantError(frappe.ValidationError): pass
from six import string_types
# Custom client-side grid template used to render the operations child table.
form_grid_templates = {
    "operations": "templates/form_grid/work_order_grid.html"
}
class WorkOrder(Document):
    """Production order for manufacturing a BOM item.

    Optionally linked to a Sales Order, Material Request or Production
    Plan.  Tracks required raw materials, operations, transferred and
    consumed quantities, and the produced quantity.
    """

    def onload(self):
        # Expose manufacturing settings to the client-side form.
        ms = frappe.get_doc("Manufacturing Settings")
        self.set_onload("material_consumption", ms.material_consumption)
        self.set_onload("backflush_raw_materials_based_on", ms.backflush_raw_materials_based_on)

    def validate(self):
        """Run all document-level validations and derive status/required items."""
        self.validate_production_item()
        if self.bom_no:
            validate_bom_no(self.production_item, self.bom_no)

        self.validate_sales_order()
        self.set_default_warehouse()
        self.validate_warehouse_belongs_to_company()
        self.calculate_operating_cost()
        self.validate_qty()
        self.validate_operation_time()
        self.status = self.get_status()

        validate_uom_is_integer(self, "stock_uom", ["qty", "produced_qty"])

        # When the items table is already populated only refresh quantities,
        # otherwise rebuild it from the BOM.
        self.set_required_items(reset_only_qty = len(self.get("required_items")))

    def validate_sales_order(self):
        """Check the linked Sales Order is submitted and actually covers this
        production item (directly, via a product bundle, or as a packed item)."""
        if self.sales_order:
            self.check_sales_order_on_hold_or_close()

            so = frappe.db.sql("""
                select so.name, so_item.delivery_date, so.project
                from `tabSales Order` so
                inner join `tabSales Order Item` so_item on so_item.parent = so.name
                left join `tabProduct Bundle Item` pk_item on so_item.item_code = pk_item.parent
                where so.name=%s and so.docstatus = 1 and (
                    so_item.item_code=%s or
                    pk_item.item_code=%s )
            """, (self.sales_order, self.production_item, self.production_item), as_dict=1)

            if not so:
                # Fall back: the item may only appear as a packed (bundle child) item.
                so = frappe.db.sql("""
                    select
                        so.name, so_item.delivery_date, so.project
                    from
                        `tabSales Order` so, `tabSales Order Item` so_item, `tabPacked Item` packed_item
                    where so.name=%s
                        and so.name=so_item.parent
                        and so.name=packed_item.parent
                        and so_item.item_code = packed_item.parent_item
                        and so.docstatus = 1 and packed_item.item_code=%s
                """, (self.sales_order, self.production_item), as_dict=1)

            if len(so):
                if not self.expected_delivery_date:
                    self.expected_delivery_date = so[0].delivery_date

                if so[0].project:
                    self.project = so[0].project

                if not self.material_request:
                    self.validate_work_order_against_so()
            else:
                frappe.throw(_("Sales Order {0} is not valid").format(self.sales_order))

    def check_sales_order_on_hold_or_close(self):
        # A closed or on-hold Sales Order must not receive new Work Orders.
        status = frappe.db.get_value("Sales Order", self.sales_order, "status")
        if status in ("Closed", "On Hold"):
            frappe.throw(_("Sales Order {0} is {1}").format(self.sales_order, status))

    def set_default_warehouse(self):
        # Fill missing WIP/FG warehouses from Manufacturing Settings.
        if not self.wip_warehouse:
            self.wip_warehouse = frappe.db.get_single_value("Manufacturing Settings", "default_wip_warehouse")
        if not self.fg_warehouse:
            self.fg_warehouse = frappe.db.get_single_value("Manufacturing Settings", "default_fg_warehouse")

    def validate_warehouse_belongs_to_company(self):
        """Every warehouse used (FG, WIP and all source warehouses) must
        belong to this Work Order's company."""
        warehouses = [self.fg_warehouse, self.wip_warehouse]
        for d in self.get("required_items"):
            if d.source_warehouse not in warehouses:
                warehouses.append(d.source_warehouse)

        for wh in warehouses:
            validate_warehouse_company(wh, self.company)

    def calculate_operating_cost(self):
        """Recompute planned/actual operating cost from the operations table
        and the total including additional operating cost."""
        self.planned_operating_cost, self.actual_operating_cost = 0.0, 0.0
        for d in self.get("operations"):
            # hour_rate is per hour; time is stored in minutes.
            d.planned_operating_cost = flt(d.hour_rate) * (flt(d.time_in_mins) / 60.0)
            d.actual_operating_cost = flt(d.hour_rate) * (flt(d.actual_operation_time) / 60.0)

            self.planned_operating_cost += flt(d.planned_operating_cost)
            self.actual_operating_cost += flt(d.actual_operating_cost)

        # Prefer actual cost once any actual time has been recorded.
        variable_cost = self.actual_operating_cost if self.actual_operating_cost \
            else self.planned_operating_cost
        self.total_operating_cost = flt(self.additional_operating_cost) + flt(variable_cost)

    def validate_work_order_against_so(self):
        """Block over-production: total Work Order qty against the Sales Order
        may not exceed the ordered qty plus the configured allowance."""
        # already ordered qty
        ordered_qty_against_so = frappe.db.sql("""select sum(qty) from `tabWork Order`
            where production_item = %s and sales_order = %s and docstatus < 2 and name != %s""",
            (self.production_item, self.sales_order, self.name))[0][0]

        total_qty = flt(ordered_qty_against_so) + flt(self.qty)

        # get qty from Sales Order Item table
        so_item_qty = frappe.db.sql("""select sum(stock_qty) from `tabSales Order Item`
            where parent = %s and item_code = %s""",
            (self.sales_order, self.production_item))[0][0]
        # get qty from Packing Item table
        dnpi_qty = frappe.db.sql("""select sum(qty) from `tabPacked Item`
            where parent = %s and parenttype = 'Sales Order' and item_code = %s""",
            (self.sales_order, self.production_item))[0][0]
        # total qty in SO
        so_qty = flt(so_item_qty) + flt(dnpi_qty)

        allowance_percentage = flt(frappe.db.get_single_value("Manufacturing Settings",
            "overproduction_percentage_for_sales_order"))

        if total_qty > so_qty + (allowance_percentage/100 * so_qty):
            frappe.throw(_("Cannot produce more Item {0} than Sales Order quantity {1}")
                .format(self.production_item, so_qty), OverProductionError)

    def update_status(self, status=None):
        '''Update status of work order if unknown'''
        if status != "Stopped":
            status = self.get_status(status)

        if status != self.status:
            self.db_set("status", status)

        self.update_required_items()

        return status

    def get_status(self, status=None):
        '''Return the status based on stock entries against this work order'''
        if not status:
            status = self.status

        if self.docstatus==0:
            status = 'Draft'
        elif self.docstatus==1:
            if status != 'Stopped':
                # Sum completed FG qty per stock-entry purpose to decide progress.
                stock_entries = frappe._dict(frappe.db.sql("""select purpose, sum(fg_completed_qty)
                    from `tabStock Entry` where work_order=%s and docstatus=1
                    group by purpose""", self.name))

                status = "Not Started"
                if stock_entries:
                    status = "In Process"
                    produced_qty = stock_entries.get("Manufacture")
                    if flt(produced_qty) >= flt(self.qty):
                        status = "Completed"
        else:
            status = 'Cancelled'

        return status

    def update_work_order_qty(self):
        """Update **Manufactured Qty** and **Material Transferred for Qty** in Work Order
        based on Stock Entry"""

        allowance_percentage = flt(frappe.db.get_single_value("Manufacturing Settings",
            "overproduction_percentage_for_work_order"))

        for purpose, fieldname in (("Manufacture", "produced_qty"),
            ("Material Transfer for Manufacture", "material_transferred_for_manufacturing")):
            # Transfers are tracked per Job Card instead when configured so.
            if (purpose == 'Material Transfer for Manufacture' and
                self.operations and self.transfer_material_against == 'Job Card'):
                continue

            qty = flt(frappe.db.sql("""select sum(fg_completed_qty)
                from `tabStock Entry` where work_order=%s and docstatus=1
                and purpose=%s""", (self.name, purpose))[0][0])

            # Planned qty plus the over-production allowance is the hard cap.
            completed_qty = self.qty + (allowance_percentage/100 * self.qty)
            if qty > completed_qty:
                frappe.throw(_("{0} ({1}) cannot be greater than planned quantity ({2}) in Work Order {3}").format(\
                    self.meta.get_label(fieldname), qty, completed_qty, self.name), StockOverProductionError)

            self.db_set(fieldname, qty)

        if self.production_plan:
            self.update_production_plan_status()

    def update_production_plan_status(self):
        # Push the produced qty back to the linked Production Plan row.
        production_plan = frappe.get_doc('Production Plan', self.production_plan)
        production_plan.run_method("update_produced_qty", self.produced_qty, self.production_plan_item)

    def on_submit(self):
        """On submit: require warehouses, then update linked documents,
        bin quantities and create job cards."""
        if not self.wip_warehouse:
            frappe.throw(_("Work-in-Progress Warehouse is required before Submit"))
        if not self.fg_warehouse:
            frappe.throw(_("For Warehouse is required before Submit"))

        self.update_work_order_qty_in_so()
        self.update_reserved_qty_for_production()
        self.update_completed_qty_in_material_request()
        self.update_planned_qty()
        self.update_ordered_qty()
        self.create_job_card()

    def on_cancel(self):
        """On cancel: reverse all side effects of submission."""
        self.validate_cancel()

        frappe.db.set(self,'status', 'Cancelled')
        self.update_work_order_qty_in_so()
        self.delete_job_card()
        self.update_completed_qty_in_material_request()
        self.update_planned_qty()
        self.update_ordered_qty()
        self.update_reserved_qty_for_production()

    def create_job_card(self):
        # One auto-created Job Card per operation row; workstation is mandatory.
        for row in self.operations:
            if not row.workstation:
                frappe.throw(_("Row {0}: select the workstation against the operation {1}")
                    .format(row.idx, row.operation))

            create_job_card(self, row, auto_create=True)

    def validate_cancel(self):
        """A stopped Work Order, or one with submitted stock entries,
        cannot be cancelled."""
        if self.status == "Stopped":
            frappe.throw(_("Stopped Work Order cannot be cancelled, Unstop it first to cancel"))

        # Check whether any stock entry exists against this Work Order
        stock_entry = frappe.db.sql("""select name from `tabStock Entry`
            where work_order = %s and docstatus = 1""", self.name)

        if stock_entry:
            frappe.throw(_("Cannot cancel because submitted Stock Entry {0} exists").format(stock_entry[0][0]))

    def update_planned_qty(self):
        # Refresh the FG warehouse bin's planned qty, and the Material
        # Request's requested qty when one is linked.
        update_bin_qty(self.production_item, self.fg_warehouse, {
            "planned_qty": get_planned_qty(self.production_item, self.fg_warehouse)
        })

        if self.material_request:
            mr_obj = frappe.get_doc("Material Request", self.material_request)
            mr_obj.update_requested_qty([self.material_request_item])

    def update_ordered_qty(self):
        # Sync ordered qty and status back to the linked Production Plan.
        if self.production_plan and self.production_plan_item:
            qty = self.qty if self.docstatus == 1 else 0
            frappe.db.set_value('Production Plan Item',
                self.production_plan_item, 'ordered_qty', qty)

            doc = frappe.get_doc('Production Plan', self.production_plan)
            doc.set_status()
            doc.db_set('status', doc.status)

    def update_work_order_qty_in_so(self):
        """Recompute work_order_qty on the linked Sales Order Item, scaling
        by the product bundle qty when the order is for a bundle."""
        if not self.sales_order and not self.sales_order_item:
            return

        total_bundle_qty = 1
        if self.product_bundle_item:
            total_bundle_qty = frappe.db.sql(""" select sum(qty) from
                `tabProduct Bundle Item` where parent = %s""", (frappe.db.escape(self.product_bundle_item)))[0][0]

            if not total_bundle_qty:
                # product bundle is 0 (product bundle allows 0 qty for items)
                total_bundle_qty = 1

        cond = "product_bundle_item = %s" if self.product_bundle_item else "production_item = %s"

        qty = frappe.db.sql(""" select sum(qty) from
            `tabWork Order` where sales_order = %s and docstatus = 1 and {0}
            """.format(cond), (self.sales_order, (self.product_bundle_item or self.production_item)), as_list=1)

        work_order_qty = qty[0][0] if qty and qty[0][0] else 0
        frappe.db.set_value('Sales Order Item',
            self.sales_order_item, 'work_order_qty', flt(work_order_qty/total_bundle_qty, 2))

    def update_completed_qty_in_material_request(self):
        # Keep the source Material Request's completed qty in sync.
        if self.material_request:
            frappe.get_doc("Material Request", self.material_request).update_completed_qty([self.material_request_item])

    def set_work_order_operations(self):
        """Fetch operations from BOM and set in 'Work Order'"""
        self.set('operations', [])

        # Nothing to do without a BOM or when capacity planning is disabled.
        if not self.bom_no \
            or cint(frappe.db.get_single_value("Manufacturing Settings", "disable_capacity_planning")):
                return

        if self.use_multi_level_bom:
            bom_list = frappe.get_doc("BOM", self.bom_no).traverse_tree()
        else:
            bom_list = [self.bom_no]

        operations = frappe.db.sql("""
            select
                operation, description, workstation, idx,
                base_hour_rate as hour_rate, time_in_mins,
                "Pending" as status, parent as bom
            from
                `tabBOM Operation`
            where
                parent in (%s) order by idx
        """ % ", ".join(["%s"]*len(bom_list)), tuple(bom_list), as_dict=1)

        self.set('operations', operations)

        # Also add operations referenced only by raw-material rows.
        if self.use_multi_level_bom and self.get('operations') and self.get('items'):
            raw_material_operations = [d.operation for d in self.get('items')]
            operations = [d.operation for d in self.get('operations')]

            for operation in raw_material_operations:
                if operation not in operations:
                    self.append('operations', {
                        'operation': operation
                    })

        self.calculate_time()

    def calculate_time(self):
        # Scale BOM operation times from the BOM's base qty to this order's qty.
        bom_qty = frappe.db.get_value("BOM", self.bom_no, "quantity")

        for d in self.get("operations"):
            d.time_in_mins = flt(d.time_in_mins) / flt(bom_qty) * flt(self.qty)

        self.calculate_operating_cost()

    def get_holidays(self, workstation):
        """Return the list of holiday dates for a workstation's holiday list."""
        holiday_list = frappe.db.get_value("Workstation", workstation, "holiday_list")

        # NOTE(review): `holidays` is rebuilt on every call, so this
        # membership check is always true; it caches nothing across calls.
        holidays = {}

        if holiday_list not in holidays:
            holiday_list_days = [getdate(d[0]) for d in frappe.get_all("Holiday", fields=["holiday_date"],
                filters={"parent": holiday_list}, order_by="holiday_date", limit_page_length=0, as_list=1)]

            holidays[holiday_list] = holiday_list_days

        return holidays[holiday_list]

    def update_operation_status(self):
        # Derive each operation row's status from its completed qty.
        for d in self.get("operations"):
            if not d.completed_qty:
                d.status = "Pending"
            elif flt(d.completed_qty) < flt(self.qty):
                d.status = "Work in Progress"
            elif flt(d.completed_qty) == flt(self.qty):
                d.status = "Completed"
            else:
                frappe.throw(_("Completed Qty can not be greater than 'Qty to Manufacture'"))

    def set_actual_dates(self):
        # Actual start/end span the earliest start and latest end of all operations.
        self.actual_start_date = None
        self.actual_end_date = None

        if self.get("operations"):
            actual_start_dates = [d.actual_start_time for d in self.get("operations") if d.actual_start_time]
            if actual_start_dates:
                self.actual_start_date = min(actual_start_dates)

            actual_end_dates = [d.actual_end_time for d in self.get("operations") if d.actual_end_time]
            if actual_end_dates:
                self.actual_end_date = max(actual_end_dates)

    def delete_job_card(self):
        # Remove all Job Cards created for this Work Order.
        for d in frappe.get_all("Job Card", ["name"], {"work_order": self.name}):
            frappe.delete_doc("Job Card", d.name)

    def validate_production_item(self):
        # Template items (with variants) and end-of-life items cannot be produced.
        if frappe.db.get_value("Item", self.production_item, "has_variants"):
            frappe.throw(_("Work Order cannot be raised against a Item Template"), ItemHasVariantError)

        if self.production_item:
            validate_end_of_life(self.production_item)

    def validate_qty(self):
        if not self.qty > 0:
            frappe.throw(_("Quantity to Manufacture must be greater than 0."))

    def validate_operation_time(self):
        # NOTE(review): .format() runs before _() here, which defeats
        # translation lookup for this message.
        for d in self.operations:
            if not d.time_in_mins > 0:
                frappe.throw(_("Operation Time must be greater than 0 for Operation {0}".format(d.operation)))

    def update_required_items(self):
        '''
        update bin reserved_qty_for_production
        called from Stock Entry for production, after submit, cancel
        '''
        if self.docstatus==1:
            # calculate transferred qty based on submitted stock entries
            self.update_transaferred_qty_for_required_items()

            # update in bin
            self.update_reserved_qty_for_production()

            # calculate consumed qty based on submitted stock entries
            self.update_consumed_qty_for_required_items()

    def update_reserved_qty_for_production(self, items=None):
        '''update reserved_qty_for_production in bins'''
        # NOTE(review): the *items* parameter is accepted but unused here;
        # kept for caller compatibility.
        for d in self.required_items:
            if d.source_warehouse:
                stock_bin = get_bin(d.item_code, d.source_warehouse)
                stock_bin.update_reserved_qty_for_production()

    def get_items_and_operations_from_bom(self):
        """Rebuild required items and operations from the BOM; returns the
        scrap-warehouse-mandatory flag for the client."""
        self.set_required_items()
        self.set_work_order_operations()

        return check_if_scrap_warehouse_mandatory(self.bom_no)

    def set_available_qty(self):
        # Snapshot current stock at source and WIP warehouses per item.
        for d in self.get("required_items"):
            if d.source_warehouse:
                d.available_qty_at_source_warehouse = get_latest_stock_qty(d.item_code, d.source_warehouse)

            if self.wip_warehouse:
                d.available_qty_at_wip_warehouse = get_latest_stock_qty(d.item_code, self.wip_warehouse)

    def set_required_items(self, reset_only_qty=False):
        '''set required_items for production to keep track of reserved qty'''
        if not reset_only_qty:
            self.required_items = []

        if self.bom_no and self.qty:
            item_dict = get_bom_items_as_dict(self.bom_no, self.company, qty=self.qty,
                fetch_exploded = self.use_multi_level_bom)

            if reset_only_qty:
                for d in self.get("required_items"):
                    if item_dict.get(d.item_code):
                        d.required_qty = item_dict.get(d.item_code).get("qty")
            else:
                # Attribute a big number (999) to idx for sorting putpose in case idx is NULL
                # For instance in BOM Explosion Item child table, the items coming from sub assembly items
                for item in sorted(item_dict.values(), key=lambda d: d['idx'] or 9999):
                    self.append('required_items', {
                        'operation': item.operation,
                        'item_code': item.item_code,
                        'item_name': item.item_name,
                        'description': item.description,
                        'allow_alternative_item': item.allow_alternative_item,
                        'required_qty': item.qty,
                        'source_warehouse': item.source_warehouse or item.default_warehouse,
                        'include_item_in_manufacturing': item.include_item_in_manufacturing
                    })

            self.set_available_qty()

    def update_transaferred_qty_for_required_items(self):
        '''update transferred qty from submitted stock entries for that item against
            the work order'''

        for d in self.required_items:
            transferred_qty = frappe.db.sql('''select sum(qty)
                from `tabStock Entry` entry, `tabStock Entry Detail` detail
                where
                    entry.work_order = %(name)s
                    and entry.purpose = "Material Transfer for Manufacture"
                    and entry.docstatus = 1
                    and detail.parent = entry.name
                    and (detail.item_code = %(item)s or detail.original_item = %(item)s)''', {
                        'name': self.name,
                        'item': d.item_code
                    })[0][0]

            d.db_set('transferred_qty', flt(transferred_qty), update_modified = False)

    def update_consumed_qty_for_required_items(self):
        '''update consumed qty from submitted stock entries for that item against
            the work order'''

        for d in self.required_items:
            consumed_qty = frappe.db.sql('''select sum(qty)
                from `tabStock Entry` entry, `tabStock Entry Detail` detail
                where
                    entry.work_order = %(name)s
                    and (entry.purpose = "Material Consumption for Manufacture"
                    or entry.purpose = "Manufacture")
                    and entry.docstatus = 1
                    and detail.parent = entry.name
                    and (detail.item_code = %(item)s or detail.original_item = %(item)s)''', {
                        'name': self.name,
                        'item': d.item_code
                    })[0][0]

            d.db_set('consumed_qty', flt(consumed_qty), update_modified = False)

    def make_bom(self):
        """Build a new (unsaved) BOM from the materials actually consumed by
        this Work Order's Manufacture stock entries."""
        # Consumed rows are those with no target warehouse.
        data = frappe.db.sql(""" select sed.item_code, sed.qty, sed.s_warehouse
            from `tabStock Entry Detail` sed, `tabStock Entry` se
            where se.name = sed.parent and se.purpose = 'Manufacture'
            and (sed.t_warehouse is null or sed.t_warehouse = '') and se.docstatus = 1
            and se.work_order = %s""", (self.name), as_dict=1)

        bom = frappe.new_doc("BOM")
        bom.item = self.production_item
        bom.conversion_rate = 1

        for d in data:
            bom.append('items', {
                'item_code': d.item_code,
                'qty': d.qty,
                'source_warehouse': d.s_warehouse
            })

        if self.operations:
            bom.set('operations', self.operations)
            bom.with_operations = 1

        bom.set_bom_material_details()
        return bom
@frappe.whitelist()
def get_item_details(item, project = None):
    """Return stock UOM, description and the default BOM for *item*.

    BOM lookup order: project-specific BOM (when *project* is given),
    the item's default BOM, then the template item's default BOM for
    variants.  Falls back to a project-less lookup with a message, or
    throws when no default BOM exists at all.
    """
    res = frappe.db.sql("""
        select stock_uom, description
        from `tabItem`
        where disabled=0
            and (end_of_life is null or end_of_life='0000-00-00' or end_of_life > %s)
            and name=%s
    """, (nowdate(), item), as_dict=1)

    if not res:
        return {}

    res = res[0]

    filters = {"item": item, "is_default": 1}

    if project:
        filters = {"item": item, "project": project}

    res["bom_no"] = frappe.db.get_value("BOM", filters = filters)

    if not res["bom_no"]:
        # Variants inherit the template item's default BOM.
        variant_of= frappe.db.get_value("Item", item, "variant_of")

        if variant_of:
            res["bom_no"] = frappe.db.get_value("BOM", filters={"item": variant_of, "is_default": 1})

    if not res["bom_no"]:
        if project:
            # Retry without the project filter before giving up.
            res = get_item_details(item)
            frappe.msgprint(_("Default BOM not found for Item {0} and Project {1}").format(item, project), alert=1)
        else:
            frappe.throw(_("Default BOM for {0} not found").format(item))

    bom_data = frappe.db.get_value('BOM', res['bom_no'],
        ['project', 'allow_alternative_item', 'transfer_material_against', 'item_name'], as_dict=1)

    res['project'] = project or bom_data.pop("project")
    res.update(bom_data)
    res.update(check_if_scrap_warehouse_mandatory(res["bom_no"]))

    return res
@frappe.whitelist()
def check_if_scrap_warehouse_mandatory(bom_no):
    """Return whether a scrap warehouse must be set for the given BOM.

    The flag is True exactly when the BOM defines scrap items.
    """
    result = {"set_scrap_wh_mandatory": False}
    if bom_no:
        bom_doc = frappe.get_doc("BOM", bom_no)
        result["set_scrap_wh_mandatory"] = len(bom_doc.scrap_items) > 0
    return result
@frappe.whitelist()
def set_work_order_ops(name):
    """Refresh the operations table of a Work Order from its BOM and save."""
    work_order = frappe.get_doc('Work Order', name)
    work_order.set_work_order_operations()
    work_order.save()
@frappe.whitelist()
def make_stock_entry(work_order_id, purpose, qty=None):
    """Build an unsaved Stock Entry dict for a Work Order.

    *purpose* is either "Material Transfer for Manufacture" (raw material
    into the WIP warehouse) or "Manufacture" (WIP into the FG warehouse,
    with additional operating costs attached).
    """
    work_order = frappe.get_doc("Work Order", work_order_id)
    # Group WIP warehouses cannot hold stock; skip WIP when transfer is skipped.
    if not frappe.db.get_value("Warehouse", work_order.wip_warehouse, "is_group") \
            and not work_order.skip_transfer:
        wip_warehouse = work_order.wip_warehouse
    else:
        wip_warehouse = None

    stock_entry = frappe.new_doc("Stock Entry")
    stock_entry.purpose = purpose
    stock_entry.work_order = work_order_id
    stock_entry.company = work_order.company
    stock_entry.from_bom = 1
    stock_entry.bom_no = work_order.bom_no
    stock_entry.use_multi_level_bom = work_order.use_multi_level_bom
    # Default to the remaining (planned minus produced) quantity.
    stock_entry.fg_completed_qty = qty or (flt(work_order.qty) - flt(work_order.produced_qty))
    if work_order.bom_no:
        stock_entry.inspection_required = frappe.db.get_value('BOM',
            work_order.bom_no, 'inspection_required')

    if purpose=="Material Transfer for Manufacture":
        stock_entry.to_warehouse = wip_warehouse
        stock_entry.project = work_order.project
    else:
        stock_entry.from_warehouse = wip_warehouse
        stock_entry.to_warehouse = work_order.fg_warehouse
        stock_entry.project = work_order.project
        if purpose=="Manufacture":
            additional_costs = get_additional_costs(work_order, fg_qty=stock_entry.fg_completed_qty)
            stock_entry.set("additional_costs", additional_costs)

    stock_entry.set_stock_entry_type()
    stock_entry.get_items()
    return stock_entry.as_dict()
@frappe.whitelist()
def get_default_warehouse():
    """Return the default WIP and FG warehouses from Manufacturing Settings."""
    settings = "Manufacturing Settings"
    return {
        "wip_warehouse": frappe.db.get_single_value(settings, "default_wip_warehouse"),
        "fg_warehouse": frappe.db.get_single_value(settings, "default_fg_warehouse"),
    }
@frappe.whitelist()
def stop_unstop(work_order, status):
    """ Called from client side on Stop/Unstop event"""
    if not frappe.has_permission("Work Order", "write"):
        frappe.throw(_("Not permitted"), frappe.PermissionError)

    order_doc = frappe.get_doc("Work Order", work_order)
    order_doc.update_status(status)
    order_doc.update_planned_qty()
    frappe.msgprint(_("Work Order has been {0}").format(status))
    order_doc.notify_update()

    return order_doc.status
@frappe.whitelist()
def query_sales_order(production_item):
    """Return names of submitted Sales Orders that contain *production_item*
    either as an order line or as a packed (bundle child) item."""
    out = frappe.db.sql_list("""
        select distinct so.name from `tabSales Order` so, `tabSales Order Item` so_item
        where so_item.parent=so.name and so_item.item_code=%s and so.docstatus=1
        union
        select distinct so.name from `tabSales Order` so, `tabPacked Item` pi_item
        where pi_item.parent=so.name and pi_item.item_code=%s and so.docstatus=1
    """, (production_item, production_item))

    return out
@frappe.whitelist()
def make_job_card(work_order, operation, workstation, qty=0):
    """Create a Job Card for the matching operation row of a Work Order."""
    wo_doc = frappe.get_doc('Work Order', work_order)
    matching_row = get_work_order_operation_data(wo_doc, operation, workstation)
    if matching_row:
        return create_job_card(wo_doc, matching_row, qty)
def create_job_card(work_order, row, qty=0, auto_create=False):
    """Insert a Job Card for one operation row of a Work Order.

    When *auto_create* is set (submission path) mandatory-field checks
    are skipped and a confirmation message is shown.
    """
    doc = frappe.new_doc("Job Card")
    doc.update({
        'work_order': work_order.name,
        'operation': row.operation,
        'workstation': row.workstation,
        'posting_date': nowdate(),
        'for_quantity': qty or work_order.get('qty', 0),
        'operation_id': row.name,
        'bom_no': work_order.bom_no,
        'project': work_order.project,
        'company': work_order.company,
        'wip_warehouse': work_order.wip_warehouse
    })

    # Pull required items onto the card when transfers are tracked per Job Card.
    if work_order.transfer_material_against == 'Job Card' and not work_order.skip_transfer:
        doc.get_required_items()

    if auto_create:
        doc.flags.ignore_mandatory = True

    doc.insert()
    frappe.msgprint(_("Job card {0} created").format(doc.name))

    return doc
def get_work_order_operation_data(work_order, operation, workstation):
    """Return the first operation row matching both *operation* and
    *workstation*, or None when no row matches."""
    matches = (row for row in work_order.operations
               if row.operation == operation and row.workstation == workstation)
    return next(matches, None)
|
brownharryb/erpnext
|
erpnext/manufacturing/doctype/work_order/work_order.py
|
Python
|
gpl-3.0
| 25,289 | 0.025031 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CPSM.Equipment.Core.IdentifiedObject import IdentifiedObject
class GeographicalRegion(IdentifiedObject):
    """A geographical region of a power system network model.
    """

    def __init__(self, Regions=None, *args, **kw_args):
        """Initialises a new 'GeographicalRegion' instance.

        @param Regions: The association is used in the naming hierarchy.
        """
        # Backing list for the Regions association; assignment below goes
        # through the property setter so links are established.
        self._Regions = []
        self.Regions = Regions if Regions is not None else []

        super(GeographicalRegion, self).__init__(*args, **kw_args)

    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["Regions"]
    _many_refs = ["Regions"]

    def getRegions(self):
        """The association is used in the naming hierarchy.
        """
        return self._Regions

    def setRegions(self, value):
        # Detach every currently linked sub-region via its public property,
        # then link the new ones directly and store the new list.
        for detached in self._Regions:
            detached.Region = None
        for attached in value:
            attached._Region = self
        self._Regions = value

    Regions = property(getRegions, setRegions)

    def addRegions(self, *Regions):
        for region in Regions:
            region.Region = self

    def removeRegions(self, *Regions):
        for region in Regions:
            region.Region = None
|
rwl/PyCIM
|
CIM14/CPSM/Equipment/Core/GeographicalRegion.py
|
Python
|
mit
| 2,316 | 0.000864 |
import argparse
from pdnssync.database import Database
from pdnssync.parse import Parser
from pdnssync.error import get_warn, get_err
parser = Parser()
def validate():
    """Cross-validate every parsed domain against the full domain set."""
    all_domains = parser.get_domains()
    for domain_name in sorted(all_domains):
        all_domains[domain_name].validate(all_domains)
def sync(db):
    """Create missing domains, delete stale ones, then sync every domain's
    records into the database."""
    db_domains = db.get_domains()
    parsed_domains = parser.get_domains()

    wanted = parsed_domains.keys()
    existing = db_domains.keys()

    # Set difference decides what must be added/removed in the database.
    db.create_domains(list(set(wanted) - set(existing)))
    db.delete_domains(list(set(existing) - set(wanted)))

    for domain_name in sorted(wanted):
        parsed_domains[domain_name].sync_domain(db)
def export(db):
    """Dump all domains from the database in the tool's input file format."""
    all_db_domain = db.get_domains()
    for d in all_db_domain:
        print('# %s' % d)
        records = db.get_records(d)
        # SOA data is "primary-ns hostmaster ..." — emit the first two fields.
        soa = records[(d, 'SOA')][0].data.split(' ')
        print('D %s %s %s' % (d, soa[0], soa[1]))
        if (d, 'NS') in records:
            ns = records[(d, 'NS')]
            ns_names = []
            for i in ns:
                ns_names.append(i.data)
            print('N %s' % ' '.join(ns_names))
        if (d, 'MX') in records:
            mx = records[(d, 'MX')]
            mx_names = []
            for i in mx:
                # MX entries carry priority before the target host.
                mx_names.append("%s %s" % (i.prio, i.data))
            print('M %s' % ' '.join(mx_names))
        # One line per remaining record, keyed by record type.
        for i in records:
            if i[1] == 'A':
                for j in records[i]:
                    print('%s %s' % (j.data, i[0]))
            if i[1] == 'AAAA':
                for j in records[i]:
                    print('%s %s' % (j.data, i[0]))
            if i[1] == 'CNAME':
                for j in records[i]:
                    print('C %s %s' % (i[0], j.data))
            if i[1] == 'SRV':
                for j in records[i]:
                    print('S %s %s %s' % (i[0], j.prio, j.data))
            if i[1] == 'TXT':
                for j in records[i]:
                    print('X %s %s' % (i[0], j.data))
        # Blank line separates domains.
        print()
def do_sync():
    """CLI entry point: parse the given zone files, validate them, and sync
    to the database unless errors (or warnings with -w) were found."""
    aparser = argparse.ArgumentParser()
    # NOTE(review): -v is parsed but never read below.
    aparser.add_argument("-v", "--verbose", action="count", default=0, help="increase output verbosity")
    aparser.add_argument("-w", "--werror", action="store_true", help="also break on warnings")
    aparser.add_argument('files', metavar='file', nargs='+', help='the files to parse')
    args = aparser.parse_args()

    for fname in args.files:
        parser.parse(fname)
    parser.assign()
    validate()

    err = get_err()
    warn = get_warn()
    print('%d error(s) and %d warning(s)' % (err, warn))
    # Sync only on a clean run; -w additionally treats warnings as fatal.
    if err == 0 and (not args.werror or warn == 0):
        db = Database()
        sync(db)
    else:
        print('Errors found, not syncing')
def do_export():
    """Command-line entry point: dump all zone data from the database to
    stdout in the parser's text format."""
    export(Database())
|
Quiphius/pdns-sync
|
pdnssync/main.py
|
Python
|
mit
| 2,853 | 0.001052 |
from __future__ import absolute_import
from .base import *
from bundle_config import config
# Database settings come from ep.io's bundle_config, which exposes the
# platform-provisioned PostgreSQL credentials at deploy time.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': config['postgres']['database'],
        'USER': config['postgres']['username'],
        'PASSWORD': config['postgres']['password'],
        'HOST': config['postgres']['host'],
    }
}
# Cache backend: the platform-provisioned Redis instance.  The cache VERSION
# is tied to the deployed application version so entries written by an older
# deploy are not reused after an upgrade.
CACHES = {
    'default': {
        'BACKEND': 'redis_cache.RedisCache',
        'LOCATION': '{host}:{port}'.format(
            host=config['redis']['host'],
            port=config['redis']['port']),
        'OPTIONS': {
            'PASSWORD': config['redis']['password'],
        },
        'VERSION': config['core']['version'],
    },
}
# Production deployment: debugging must stay off.
DEBUG = False
|
almet/whiskerboard
|
settings/epio.py
|
Python
|
mit
| 751 | 0.001332 |
import numpy as np
import canal as canal
from .util import NumpyTestCase
class FromJSONTestCase(NumpyTestCase):
    """Tests for Measurement.from_json(): decoding an InfluxDB-style JSON
    response into per-field numpy arrays."""
    # Measurement under test: one field of every supported type, a field
    # whose database column name differs from its attribute name, and two
    # tags.
    class Measurement(canal.Measurement):
        int_field = canal.IntegerField()
        alternate_db_name = canal.IntegerField(db_name="something_else")
        float_field = canal.FloatField()
        bool_field = canal.BooleanField()
        string_field = canal.StringField()
        tag_1 = canal.Tag()
        tag_2 = canal.Tag()
    def test_from_json_iso_time(self):
        """A well-formed response with ISO-8601 timestamps decodes into one
        numpy array per field/tag, with db_name columns mapped back to
        their attribute names."""
        # Two distinct rows repeated five times -> ten points in total.
        test_data = 5*[
            [
                "2015-01-29T21:55:43.702900257Z",
                1,
                2,
                1.2,
                True,
                "some content",
                "1",
                "2"
            ],
            [
                "2015-01-29T21:55:43.702900345Z",
                2,
                3,
                2.3,
                False,
                "some other content",
                "1",
                "2"
            ]
        ]
        # Shape of a raw InfluxDB query response; note the column named
        # "something_else" must land in the alternate_db_name attribute.
        json_data = dict(
            results=[dict(
                series=[dict(
                    name="Measurement",
                    columns=[
                        "time",
                        "int_field",
                        "something_else",
                        "float_field",
                        "bool_field",
                        "string_field",
                        "tag_1",
                        "tag_2"
                    ],
                    values=test_data
                )]
            )]
        )
        test_series = self.Measurement.from_json(json_data)
        self.assertndArrayEqual(
            test_series.time,
            np.array(
                5*[
                    "2015-01-29T21:55:43.702900257Z",
                    "2015-01-29T21:55:43.702900345Z"
                ],
                dtype='datetime64'
            )
        )
        self.assertndArrayEqual(
            test_series.int_field,
            np.array(5*[1, 2])
        )
        self.assertndArrayEqual(
            test_series.alternate_db_name,
            np.array(5*[2, 3])
        )
        self.assertndArrayEqual(
            test_series.float_field,
            np.array(5*[1.2, 2.3])
        )
        self.assertndArrayEqual(
            test_series.bool_field,
            np.array(5*[True, False])
        )
        self.assertndArrayEqual(
            test_series.string_field,
            np.array(5*["some content", "some other content"])
        )
        self.assertndArrayEqual(
            test_series.tag_1,
            np.array(10*["1"])
        )
        self.assertndArrayEqual(
            test_series.tag_2,
            np.array(10*["2"])
        )
    def test_from_json_bad_input(self):
        """JSON lacking the results/series structure raises ValueError."""
        with self.assertRaises(ValueError):
            list(self.Measurement.from_json({"bad": "input"}))
    def test_empty_json(self):
        """A completely empty document raises ValueError."""
        content = dict()
        with self.assertRaises(ValueError):
            self.Measurement.from_json(content)
    def test_from_json_wrong_measurement(self):
        """A series for a different measurement name raises ValueError."""
        test_json = dict(
            results=[dict(
                series=[dict(
                    name="SomeOtherMeasurement",
                    columns=[
                        "time",
                        "int_field",
                        "float_field",
                        "bool_field",
                        "string_field",
                        "tag_1",
                        "tag_2"
                    ],
                    values=[]
                )]
            )]
        )
        with self.assertRaises(ValueError):
            self.Measurement.from_json(test_json)
|
dan-stone/canal
|
canal/tests/test_from_json.py
|
Python
|
mit
| 3,665 | 0 |
import os
import sys
def main(args):
    """Compare two project directory trees and print their differences.

    Reports files unique to each project, then a line-level diff of every
    file present in both, and finally a summary of files that differed.
    """
    if len(args) != 2:
        print("Usage: python project-diff.py [path-to-project-1] [path-to-project-2]")
        return
    project1 = collect_text_files(args[0])
    project2 = collect_text_files(args[1])
    only_in_1 = []
    only_in_2 = []
    in_both = []
    perform_venn_analysis(set(project1.keys()), set(project2.keys()),
                          only_in_1, only_in_2, in_both)
    if only_in_1:
        print("The following files are only in Project 1:")
        for name in only_in_1:
            print(" " + name)
        print("")
    if only_in_2:
        print("The following files are only in Project 2:")
        for name in only_in_2:
            print(" " + name)
        print("")
    print(str(len(in_both)) + " files in both projects.")
    print("")
    diff_names = []
    for name in sorted(in_both):
        delta = perform_diff(project1[name], project2[name])
        if delta:
            diff_names.append(name)
            print("There's a difference in " + name)
            print("\n".join(delta))
            print("")
    if not diff_names:
        print("No files with text differences.")
    else:
        print("Diffs were in the following files:")
        print("\n".join(diff_names))
        print("")
def perform_venn_analysis(set_a, set_b, only_in_a_out, only_in_b_out, in_both_out):
    """Partition two sets into A-only, B-only and shared items.

    Results are appended to the three caller-supplied output lists.
    """
    for item in set_a:
        target = in_both_out if item in set_b else only_in_a_out
        target.append(item)
    only_in_b_out.extend(item for item in set_b if item not in set_a)
def collect_text_files(root):
    """Return {relative_path: content} for every file under *root*.

    Backslashes are normalised to '/' and a single trailing slash is
    stripped before the recursive walk begins.
    """
    normalized = root.replace('\\', '/')
    if normalized.endswith('/'):
        normalized = normalized[:-1]
    collected = {}
    collect_text_files_impl(normalized, '', collected)
    return collected
def get_file_extension(file):
    """Return the lowercase text after the last '.', or '' if there is none."""
    return file.rpartition('.')[2].lower() if '.' in file else ''


# Extensions treated as binary: compared by fingerprint, not as text.
FILE_EXTENSION_IGNORE_LIST = {
    'png',
    'jpg',
    'xcuserstate',
}


def is_text_file(path):
    """Return True unless the file's extension marks it as binary."""
    return get_file_extension(path) not in FILE_EXTENSION_IGNORE_LIST
def collect_text_files_impl(root, current_dir, output):
    """Recursively walk root/current_dir, filling *output* with
    {relative_path: comparable_content}.

    Text files contribute their full contents; binary files contribute a
    small fingerprint (size plus first/last 20 bytes) so they can still be
    compared cheaply without diffing raw binary data.
    """
    full_dir = root
    if current_dir != '':
        full_dir += '/' + current_dir
    for entry in os.listdir(full_dir.replace('/', os.sep)):
        full_path = full_dir + '/' + entry
        rel_path = entry if current_dir == '' else (current_dir + '/' + entry)
        os_path = full_path.replace('/', os.sep)
        if os.path.isdir(os_path):
            collect_text_files_impl(root, rel_path, output)
        elif is_text_file(rel_path):
            # 'with' guarantees the handle is closed even if read() raises.
            with open(os_path, 'rt') as f:
                output[rel_path] = f.read()
        else:
            # Binary fingerprint: previously these fields were TODO
            # placeholders, which made every pair of binary files with the
            # same relative path compare equal.
            size = os.path.getsize(os_path)
            with open(os_path, 'rb') as f:
                head = f.read(20)
                if size > 20:
                    f.seek(max(20, size - 20))
                    tail = f.read(20)
                else:
                    tail = b''
            output[rel_path] = '\n'.join([
                "Binary file:",
                "size %d" % size,
                "first 20 bytes: %s" % repr(head),
                "last 20 bytes: %s" % repr(tail),
            ])
def perform_diff(text_1, text_2):
    """Return a human-readable line diff between two texts.

    The result is a list of strings: '+ [n] line' for lines only in
    *text_2*, '- [n] line' for lines only in *text_1*, and '...' marking a
    stretch of unchanged lines.  An empty list means the texts are
    identical.

    Bug fixes versus the original version:
    * when no common suffix existed, ``lines[:-0]`` emptied both line
      lists, so differing texts were reported as identical;
    * the backtracking loop stopped as soon as either index reached 0,
      silently dropping the remaining additions/removals along the grid
      edge.
    """
    if text_1 == text_2:
        return []
    lines_1 = text_1.split('\n')
    lines_2 = text_2.split('\n')
    # Strip the common prefix and suffix so the O(n*m) edit-distance grid
    # only covers the region that actually changed.
    trimmed_front = 0
    while (len(lines_1) > trimmed_front and len(lines_2) > trimmed_front
           and lines_1[trimmed_front] == lines_2[trimmed_front]):
        trimmed_front += 1
    lines_1 = lines_1[trimmed_front:]
    lines_2 = lines_2[trimmed_front:]
    trimmed_back = 0
    while (len(lines_1) > trimmed_back and len(lines_2) > trimmed_back
           and lines_1[-1 - trimmed_back] == lines_2[-1 - trimmed_back]):
        trimmed_back += 1
    if trimmed_back > 0:
        # Guard the slice: lines[:-0] would wrongly produce an empty list.
        lines_1 = lines_1[:-trimmed_back]
        lines_2 = lines_2[:-trimmed_back]
    length_1 = len(lines_1)
    length_2 = len(lines_2)
    grid = []
    for x in range(length_2 + 1):
        column = []
        for y in range(length_1 + 1):
            column.append(None)
        grid.append(column)
    # Perform levenshtein difference
    # each grid cell will consist of a tuple: (diff-size, previous-path: up|left|diag)
    # Each step to the right indicates taking a line from lines 2
    # Each step downwards indicates taking a line from lines 1
    # Prepopulate the left and top rows indicating starting the diff by removing all
    # lines from lines 1 and adding all lines from lines 2.
    for x in range(length_2 + 1):
        grid[x][0] = (x, 'left')
    for y in range(length_1 + 1):
        grid[0][y] = (y, 'up')
    grid[0][0] = (0, 'diag')
    # Populate the grid. Figure out the minimum diff to get to each point.
    for y in range(1, length_1 + 1):
        for x in range(1, length_2 + 1):
            if lines_1[y - 1] == lines_2[x - 1]:
                grid[x][y] = (grid[x - 1][y - 1][0], 'diag')
            elif (grid[x - 1][y][0] <= grid[x][y - 1][0]):
                grid[x][y] = (grid[x - 1][y][0] + 1, 'left')
            else:
                grid[x][y] = (grid[x][y - 1][0] + 1, 'up')
    # Start from the bottom right corner and walk backwards to the origin.
    # Continue while EITHER index is non-zero so edge-hugging paths (pure
    # insertions or deletions at the start) are fully reported.
    x = length_2
    y = length_1
    diff_chain = []
    ellipsis_used = False
    while x != 0 or y != 0:
        node = grid[x][y]
        if node[1] == 'diag':
            if not ellipsis_used:
                diff_chain.append('...')
                ellipsis_used = True
            x -= 1
            y -= 1
        elif node[1] == 'left':
            diff_chain.append('+ [' + str(trimmed_front + x) + '] ' + lines_2[x - 1])
            x -= 1
            ellipsis_used = False
        else:
            diff_chain.append('- [' + str(trimmed_front + y) + '] ' + lines_1[y - 1])
            y -= 1
            ellipsis_used = False
    diff_chain.reverse()
    return diff_chain
main(sys.argv[1:])
|
blakeohare/crayon
|
Scripts/project-diff.py
|
Python
|
mit
| 5,511 | 0.020323 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
import os
import json
from weblab.util import data_filename
from voodoo.gen.caller_checker import caller_check
from voodoo.log import logged
from voodoo.override import Override
import experiments.ud_xilinx.server as UdXilinxExperiment
import weblab.data.server_type as ServerType
import weblab.experiment.util as ExperimentUtil
# Package directory of this module, used to locate the bundled demo bitstreams.
module_directory = os.path.join(*__name__.split('.')[:-1])
class UdDemoXilinxExperiment(UdXilinxExperiment.UdXilinxExperiment):
    """Demo variant of the Xilinx experiment.

    On startup it programs the board with a fixed, pre-packaged program
    (chosen by the configured device type) and rejects user file uploads,
    so visitors can try the experiment without sending their own design.
    """
    # Pre-packaged demo program shipped next to this module, per device type.
    FILES = {
        'PLD' : 'cpld.jed',
        'FPGA' : 'fpga.bit',
    }
    def __init__(self, coord_address, locator, cfg_manager, *args, **kwargs):
        super(UdDemoXilinxExperiment,self).__init__(coord_address, locator, cfg_manager, *args, **kwargs)
        # Read and serialize the demo file once at construction time so
        # do_start_experiment() can program the device without disk access.
        file_path = data_filename(os.path.join(module_directory, self.FILES[self._xilinx_device]))
        self.file_content = ExperimentUtil.serialize(open(file_path, "rb").read())
    @Override(UdXilinxExperiment.UdXilinxExperiment)
    @caller_check(ServerType.Laboratory)
    @logged("info")
    def do_start_experiment(self, *args, **kwargs):
        """
        Handles experiment startup, returning certain initial configuration parameters.
        (Thus makes use of the API version 2.)  Programs the device with the
        pre-loaded demo file before returning the webcam URL and expected
        programming time to the client.
        """
        super(UdDemoXilinxExperiment, self).do_send_file_to_device(self.file_content, "program")
        return json.dumps({ "initial_configuration" : """{ "webcam" : "%s", "expected_programming_time" : %s }""" % (self.webcam_url, self._programmer_time), "batch" : False })
    @Override(UdXilinxExperiment.UdXilinxExperiment)
    @caller_check(ServerType.Laboratory)
    @logged("info")
    def do_dispose(self):
        """Delegate cleanup to the base experiment implementation."""
        super(UdDemoXilinxExperiment, self).do_dispose()
        return "ok"
    @Override(UdXilinxExperiment.UdXilinxExperiment)
    @caller_check(ServerType.Laboratory)
    @logged("info",except_for='file_content')
    def do_send_file_to_device(self, file_content, file_info):
        """User uploads are disabled in the demo; the fixed program is used."""
        return "sending file not possible in demo"
    @logged("info")
    @Override(UdXilinxExperiment.UdXilinxExperiment)
    @caller_check(ServerType.Laboratory)
    def do_send_command_to_device(self, command):
        """Commands are forwarded unchanged to the real experiment."""
        return super(UdDemoXilinxExperiment, self).do_send_command_to_device(command)
|
zstars/weblabdeusto
|
server/src/experiments/ud_demo_xilinx/server.py
|
Python
|
bsd-2-clause
| 2,623 | 0.006484 |
__author__ = 'roman'
from django.utils.functional import SimpleLazyObject
from . import get_card as _get_card
def get_card(request):
    """Return the card for *request*, resolving it at most once.

    The resolved card is memoised on the request object itself, so repeated
    accesses within one request never hit the underlying lookup again.
    """
    try:
        return request._cached_card
    except AttributeError:
        request._cached_card = _get_card(request)
        return request._cached_card
class CardAuthMiddleware(object):
    """Old-style Django middleware that attaches a lazily-resolved card to
    every incoming request as ``request.card``."""
    def process_request(self, request):
        # Fail loudly during development if session middleware is missing or
        # ordered after this one.
        assert hasattr(request, 'session'), (
            "The Card authentication middleware requires session middleware "
            "to be installed. Edit your MIDDLEWARE_CLASSES setting to insert "
            "'django.contrib.sessions.middleware.SessionMiddleware' before "
            "'card.middleware.CardAuthMiddleware'."
        )
        # SimpleLazyObject defers the lookup until request.card is first
        # touched, so requests that never use it pay no cost.
        request.card = SimpleLazyObject(lambda: get_card(request))
|
suprotkin/atm
|
atm/card/middleware.py
|
Python
|
gpl-2.0
| 751 | 0.001332 |
# Step 1: Make all the "turtle" commands available to us.
import turtle
# Step 2: create a new turtle, we'll call him simon
# Step 2: create a new turtle; we'll call him simon.
simon = turtle.Turtle()

# A square is four equal sides with a 90-degree left turn after each one.
for _ in range(4):
    simon.forward(200)
    simon.left(90)
|
SimonDevon/simple-python-shapes
|
square1.py
|
Python
|
mit
| 236 | 0 |
#!/usr/bin/env python3
import os
import shutil
import subprocess
import gettext
# Version string passed to Sphinx and baked into the output PDF file names.
version = '4.4.0'
# One build per translation: the babel language name and paper size handed
# to the LaTeX toolchain.
builds = [
	{ 'language': 'de', 'paper': 'a4paper', 'babel': 'ngerman' },
	{ 'language': 'en', 'paper': 'letterpaper', 'babel': 'USenglish' },
	{ 'language': 'es', 'paper': 'a4paper', 'babel': 'spanish' },
	{ 'language': 'fr', 'paper': 'a4paper', 'babel': 'french' },
	{ 'language': 'hu', 'paper': 'a4paper', 'babel': 'magyar' },
	{ 'language': 'it', 'paper': 'a4paper', 'babel': 'italian' },
	{ 'language': 'sl', 'paper': 'a4paper', 'babel': 'slovene' },
	{ 'language': 'uk', 'paper': 'a4paper', 'babel': 'ukrainian' },
]
for i in builds:
    for manual in [ 'admin', 'user' ]:
        language = i['language']
        print( 'Building for language "%s"' % ( language ) )
        # Compile the gettext catalog (.po -> .mo) for this manual/language.
        subprocess.Popen( ['msgfmt', 'locale/%s/LC_MESSAGES/%s.po' % ( language, manual ), '-o',
                           'locale/%s/LC_MESSAGES/%s.mo' % ( language, manual ) ] ).wait()
        env = os.environ.copy()
        # The manual's title is the first line of its index.rst, translated
        # through the freshly built catalog.
        with open('%s/index.rst' % (manual)) as f:
            title = f.readline().rstrip()
        title = gettext.translation(manual, 'locale', [language], None, True).gettext(title)
        # Configuration is passed to the Sphinx build via environment vars.
        env['TITLE'] = title;
        env['LANGUAGE'] = language
        env['PAPER'] = i['paper']
        env['INDEX'] = '%s/index' % ( manual )
        env['BABEL'] = i['babel']
        env['VERSION'] = version
        env['SPHINXOPTS'] = '-j%s' % ( os.cpu_count()+1 )
        # Remove stale build artefacts, build the PDF, and copy it out under
        # a versioned per-language file name.
        shutil.rmtree('_build', True)
        subprocess.Popen( ['make', 'latexpdf' ], env=env ).wait()
        shutil.copyfile('_build/latex/veyon.pdf', 'veyon-%s-manual-%s_%s.pdf' % ( manual, language, version ))
|
iTALC/documentation
|
build-manuals.py
|
Python
|
gpl-3.0
| 1,538 | 0.046164 |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
    """Return one scripted woodpecker test path.

    The path drives a sequence of VM/volume/snapshot/backup actions on a
    ceph-backed deployment; the module-level string at the end of the file
    records the expected final resource state after checking_point steps.
    """
    return dict(initial_formation="template5", checking_point=8, path_list=[
		[TestAction.create_vm, 'vm1', 'flag=ceph'],
		[TestAction.create_volume, 'volume1', 'flag=ceph,scsi'],
		[TestAction.attach_volume, 'vm1', 'volume1'],
		[TestAction.create_volume, 'volume2', 'flag=ceph,scsi'],
		[TestAction.attach_volume, 'vm1', 'volume2'],
		[TestAction.create_volume, 'volume3', 'flag=ceph,scsi'],
		[TestAction.attach_volume, 'vm1', 'volume3'],
		[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
		[TestAction.clone_vm, 'vm1', 'vm2'],
		[TestAction.create_volume_backup, 'volume2', 'volume2-backup1'],
		[TestAction.stop_vm, 'vm1'],
		[TestAction.use_volume_backup, 'volume2-backup1'],
		[TestAction.start_vm, 'vm1'],
		[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
		[TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
		[TestAction.create_vm_snapshot, 'vm2', 'vm2-snapshot9'],
		[TestAction.clone_vm, 'vm1', 'vm3', 'full'],
		[TestAction.delete_volume_snapshot, 'vm1-snapshot5'],
		[TestAction.stop_vm, 'vm2'],
		[TestAction.change_vm_image, 'vm2'],
		[TestAction.delete_vm_snapshot, 'vm2-snapshot9'],
	])
'''
The final status:
Running:['vm1', 'vm3']
Stopped:['vm2']
Enadbled:['volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'volume2-backup1']
attached:['volume1', 'volume2', 'volume3', 'clone@volume1', 'clone@volume2', 'clone@volume3']
Detached:[]
Deleted:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1', 'vm1-snapshot5', 'vm2-snapshot9']
Expunged:[]
Ha:[]
Group:
'''
|
zstackio/zstack-woodpecker
|
integrationtest/vm/multihosts/vm_snapshots/paths/xc_path8.py
|
Python
|
apache-2.0
| 1,642 | 0.017052 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from flask import Blueprint
# Blueprint for the "first" demo app: serves its own templates/static
# directories and mounts all of its views at the site root.
blueprint = Blueprint('first', __name__, url_prefix='/',
                      template_folder='templates', static_folder='static')
|
MSusik/invenio
|
invenio/testsuite/test_apps/first/views.py
|
Python
|
gpl-2.0
| 951 | 0.01367 |
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class STARInstaller(ClusterSetup):
    """StarCluster plugin that installs the STAR aligner 2.4.0g1 on every
    cluster node and registers it as an environment module."""

    def run(self, nodes, master, user, user_shell, volumes):
        # Same command sequence on every node: download, unpack, build,
        # then write the modulefile for version 2.4.0g1.
        commands = (
            'wget -c -P /opt/software/star https://github.com/alexdobin/STAR/archive/STAR_2.4.0g1.tar.gz',
            'tar -xzf /opt/software/star/STAR_2.4.0g1.tar.gz -C /opt/software/star',
            'make STAR -C /opt/software/star/STAR-STAR_2.4.0g1/source',
            'mkdir -p /usr/local/Modules/applications/star/;touch /usr/local/Modules/applications/star/2.4.0g1',
            'echo "#%Module" >> /usr/local/Modules/applications/star/2.4.0g1',
            'echo "set root /opt/software/star/STAR-STAR_2.4.0g1" >> /usr/local/Modules/applications/star/2.4.0g1',
            'echo -e "prepend-path\tPATH\t\$root/bin/Linux_x86_64" >> /usr/local/Modules/applications/star/2.4.0g1',
        )
        for node in nodes:
            log.info("Installing STAR 2.4.0g1 on %s" % (node.alias))
            for command in commands:
                node.ssh.execute(command)
|
meissnert/StarCluster-Plugins
|
STAR_2_4_0g1.py
|
Python
|
mit
| 1,005 | 0.018905 |
#! /bin/python2
import numpy
import cv2
import os
import struct
# Single-channel grayscale intensities for OpenCV drawing calls.
BLACK = (0,)
WHITE = (255,)
# Output directory for the generated dataset files.
DIR_OUT = "./img/"
SIZE_CANVAS = 50    # working canvas, larger than the final crop
SIZE_FEATURE = 28   # final sample size written to the IDX files (28x28)
SIZE_BLOCK = 32     # cell size used to scale the digit and draw grid lines
# Class labels: digits '0'..'9' plus one empty-cell class ("").
DIGITS = tuple([chr(ord("0") + i) for i in range(10)] + [""])
# Fonts sampled at random for each generated digit.
FONTS = (cv2.FONT_HERSHEY_SIMPLEX, cv2.FONT_HERSHEY_PLAIN,
         cv2.FONT_HERSHEY_DUPLEX, cv2.FONT_HERSHEY_COMPLEX,
         cv2.FONT_HERSHEY_TRIPLEX, cv2.FONT_HERSHEY_COMPLEX_SMALL,
         cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
def clear_path():
    """Create the output directory if it does not exist yet."""
    directory_missing = not os.path.isdir(DIR_OUT)
    if directory_missing:
        os.mkdir(DIR_OUT)
def get_tf(angle, center, offset):
    """Build a 3x3 homogeneous transform: rotate by *angle* degrees about
    *center*, then translate by *offset*.

    Returned as a numpy.matrix so callers can slice out the affine 2x3 part.
    """
    theta = numpy.radians(angle)
    cos_t = numpy.cos(theta)
    sin_t = numpy.sin(theta)
    cx, cy = center
    ox, oy = offset
    to_origin = numpy.matrix([[1.0, 0.0, -cx], [0.0, 1.0, -cy], [0.0, 0.0, 1.0]])
    rotate = numpy.matrix([[cos_t, -sin_t, 0.0], [sin_t, cos_t, 0.0], [0.0, 0.0, 1.0]])
    back_shifted = numpy.matrix([[1.0, 0.0, (cx + ox)], [0.0, 1.0, (cy + oy)], [0.0, 0.0, 1.0]])
    return back_shifted * rotate * to_origin
os.system("rm -rf " + DIR_OUT + "*")
def create_dataset(fn_f, fn_l, num_sample):
    """Generate num_sample synthetic printed-digit images and write them to
    an IDX feature file (fn_f) and a matching IDX label file (fn_l).

    Each sample is a SIZE_FEATURE x SIZE_FEATURE binarised crop of a digit
    (or an empty cell) rendered with a random font, scale and thickness,
    framed by grid lines and given a small random rotation and offset.
    """
    fl = open(fn_l, "wb")
    ff = open(fn_f, "wb")
    # headers
    # IDX magic numbers (big-endian): 2049 = label file, 2051 = image file.
    fl.write(struct.pack(">i", 2049))
    fl.write(struct.pack(">i", num_sample))
    ff.write(struct.pack(">i", 2051))
    ff.write(struct.pack(">i", num_sample))
    ff.write(struct.pack(">i", SIZE_FEATURE))
    ff.write(struct.pack(">i", SIZE_FEATURE))
    # White base canvas reused (copied) for every sample.
    canvas = numpy.ones((SIZE_CANVAS, SIZE_CANVAS), dtype = numpy.uint8) * 255
    # cv2.imwrite(dir_img + "canvas.png", canvas)
    for id_img in range(num_sample):
        copy = numpy.copy(canvas)
        # Random class (digit or empty cell), font and stroke thickness.
        id_digit = numpy.random.randint(0, len(DIGITS))
        id_font = numpy.random.randint(0, len(FONTS))
        thickness = numpy.random.randint(1, 3)
        base_line = cv2.getTextSize(DIGITS[id_digit], FONTS[id_font], 1.0, thickness)[1] + 1
        # Scale the glyph so it fills a random 40-60% of the cell, then
        # centre it on the canvas.
        scale_font = float(numpy.random.randint(40, 60)) / 100.0
        scale = float(SIZE_BLOCK) * 0.5 * scale_font / float(base_line)
        shift = float(SIZE_CANVAS) / 2.0 - float(SIZE_BLOCK) * 0.5 * scale_font
        cv2.putText(copy, DIGITS[id_digit], (0, 2 * base_line + 1),
                    FONTS[id_font], 1.0, BLACK, thickness)
        copy = cv2.warpAffine(copy, numpy.matrix([[scale, 0.0, shift], [0.0, scale, shift]]),
                              copy.shape, borderValue = WHITE)
        # draw lines
        # Grid lines around the SIZE_BLOCK cell, mimicking a printed grid.
        thickness_line = numpy.random.randint(1, 3)
        cv2.line(copy, (0, (SIZE_CANVAS - SIZE_BLOCK) / 2 - thickness_line),
                 (SIZE_CANVAS - 1, (SIZE_CANVAS - SIZE_BLOCK) / 2 - thickness_line),
                 BLACK, thickness_line)
        cv2.line(copy, (0, (SIZE_CANVAS + SIZE_BLOCK) / 2 + thickness_line),
                 (SIZE_CANVAS - 1, (SIZE_CANVAS + SIZE_BLOCK) / 2 + thickness_line),
                 BLACK, thickness_line)
        cv2.line(copy, ((SIZE_CANVAS - SIZE_BLOCK) / 2 - thickness_line, 0),
                 ((SIZE_CANVAS - SIZE_BLOCK) / 2 - thickness_line, SIZE_CANVAS - 1),
                 BLACK, thickness_line)
        cv2.line(copy, ((SIZE_CANVAS + SIZE_BLOCK) / 2 + thickness_line, 0),
                 ((SIZE_CANVAS + SIZE_BLOCK) / 2 + thickness_line, SIZE_CANVAS - 1),
                 BLACK, thickness_line)
        # rotation
        # Small random rotation (-10..10 degrees) and offset (-3..3 px).
        copy = cv2.warpAffine(copy, get_tf(float(numpy.random.randint(-10,11)), (float(SIZE_CANVAS) / 2.0, float(SIZE_CANVAS) / 2.0),
                                           (numpy.random.randint(-3, 4), numpy.random.randint(-3, 4)))[0:2, :],
                              copy.shape, borderValue = WHITE)
        # Crop the central SIZE_FEATURE x SIZE_FEATURE patch.
        copy = copy[(SIZE_CANVAS - SIZE_FEATURE) / 2:(SIZE_CANVAS + SIZE_FEATURE) / 2,
                    (SIZE_CANVAS - SIZE_FEATURE) / 2:(SIZE_CANVAS + SIZE_FEATURE) / 2]
        # cv2.imwrite(DIR_OUT + "{}.png".format(id_img), copy)
        # Binarise with a fixed threshold before writing the raw bytes.
        copy[copy < 192] = 0
        copy[copy >= 192] = 255
        copy = copy.astype(numpy.uint8)
        ff.write(copy.data)
        fl.write(numpy.uint8(id_digit))
        if id_img % 1000 == 0:
            print id_img, num_sample
    fl.close()
    ff.close()
# Generate a large training set and a smaller validation set in IDX format.
create_dataset(DIR_OUT + "printed_feature_train", DIR_OUT + "printed_label_train", 100000)
print "training data complete"
create_dataset(DIR_OUT + "printed_feature_valid", DIR_OUT + "printed_label_valid", 10000)
print "test data complete"
|
t-lou/JSudokuSolver
|
digit_generator/digit_gen.py
|
Python
|
apache-2.0
| 4,207 | 0.013311 |
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers, serializers, viewsets
from .views import HomePageView
urlpatterns = [
url(r'^$', HomePageView.as_view(), name='home'),
url(r'^admin/', admin.site.urls),
url(r'^', include('slackdata.urls')),
]
|
jbarciauskas/slack-stats
|
openedxstats/urls.py
|
Python
|
bsd-3-clause
| 319 | 0 |
from django.apps import AppConfig
class BugReportsConfig(AppConfig):
name = "bug_reports"
|
DemocracyClub/UK-Polling-Stations
|
polling_stations/apps/bug_reports/apps.py
|
Python
|
bsd-3-clause
| 96 | 0 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from osv import osv
from osv import fields
class oehealth_patient_medication(osv.Model):
_name = 'oehealth.patient.medication'
_columns = {
'patient_id': fields.many2one('oehealth.patient', string='Patient',),
#'doctor': fields.many2one('oehealth.physician', string='Physician',
# help='Physician who prescribed the medicament'),
'adverse_reaction': fields.text(string='Adverse Reactions',
help='Side effects or adverse reactions that the patient experienced'),
'notes': fields.text(string='Extra Info'),
'is_active': fields.boolean(string='Active',
help='Check if the patient is currently taking the medication'),
'course_completed': fields.boolean(string='Course Completed'),
'template': fields.many2one('oehealth.medication.template',
string='Medication Template', ),
'discontinued_reason': fields.char(size=256,
string='Reason for discontinuation',
help='Short description for discontinuing the treatment'),
'discontinued': fields.boolean(string='Discontinued'),
}
oehealth_patient_medication()
|
CLVsol/oehealth
|
oehealth_prescription/oehealth_patient_medication.py
|
Python
|
agpl-3.0
| 2,721 | 0.009555 |
import argparse
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from perf.errsim import *
def plot_x_vs_pmf(params, show=True, fpath=None):
    """Plot the symbol-error PMF p_X(x) for each parameter set in *params*.

    Each entry of *params* is a dict of errpmf() keyword arguments; pb=None
    labels the curve as a BSC, otherwise as a GBMM burst channel.  The
    figure is saved to *fpath* when given and shown when *show* is True.
    """
    def plot(ax, x, param, **plotargs):
        # pb=None selects the memoryless BSC case, where pb == pe.
        if param['pb'] is None:
            param['pb'] = param['pe']
            label = 'BSC pe={pe} m={m} n={n}'.format(**param)
        else:
            label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
        pmf = errpmf(**param)
        if 'label' not in plotargs:
            plotargs['label'] = label
        ax.plot(x, pmf[x], **plotargs)
    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
    t = np.arange(11)
    for param in params:
        # Pass a copy: plot() mutates the dict when it fills in pb.
        plot(ax, t, param.copy(), lw=1.5)
    ax.axhline(1e-15, color='black', linestyle='dashed')
    ax.set_ylim(1e-25, 1e-1)
    ax.set_ylabel('PMF, $p_X(x)$')
    ax.set_yscale('log')
    ax.grid(True)
    ax.set_xticks(t)
    ax.set_xlabel('Number of Symbols, $x$')
    ax.set_title('Symbol Error PMF (Prob. of x errors in n digits)')
    ax.legend(fontsize=12)
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
def plot_x_vs_pndc(params, show=True, fpath=None):
    """Plot the probability of not-decoding-correctly versus the number of
    correctable symbols, for each errpmf() parameter set in *params*."""
    def plot(ax, x, param, **plotargs):
        # pb=None selects the memoryless BSC case, where pb == pe.
        if param['pb'] is None:
            param['pb'] = param['pe']
            label = 'BSC pe={pe} m={m} n={n}'.format(**param)
        else:
            label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
        pmf = errpmf(**param)
        pndc = prob_ndc(pmf)
        if 'label' not in plotargs:
            plotargs['label'] = label
        ax.plot(x, pndc[x], **plotargs)
    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
    t = np.arange(11)
    for param in params:
        # Pass a copy: plot() mutates the dict when it fills in pb.
        plot(ax, t, param.copy(), lw=1.5)
    ax.axhline(1e-15, color='black', linestyle='dashed')
    ax.set_ylim(1e-25, 1e-1)
    ax.set_ylabel('$P_{ndc}(t)$')
    ax.set_yscale('log')
    ax.grid(True)
    ax.set_xticks(t)
    ax.set_xlabel('Number of Symbols, $x$')
    ax.set_title('Probability of not-decoding-correctly')
    ax.legend(fontsize=12)
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
def plot_t_vs_ober(params, show=True, fpath=None):
    """Plot output BER versus the number of corrected symbols t, for each
    errpmf() parameter set in *params*."""
    def plot(ax, t, param, **plotargs):
        # pb=None selects the memoryless BSC case, where pb == pe.
        if param['pb'] is None:
            param['pb'] = param['pe']
            label = 'BSC pe={pe} m={m} n={n}'.format(**param)
        else:
            label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
        pmf = errpmf(**param)
        ober = ber_out(param['pe'], param['pb'], pmf)
        if 'label' not in plotargs:
            plotargs['label'] = label
        ax.plot(t, ober[t], **plotargs)
    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
    t = np.arange(11)
    for param in params:
        # Pass a copy: plot() mutates the dict when it fills in pb.
        plot(ax, t, param.copy(), lw=1.5)
    ax.axhline(1e-15, color='black', linestyle='dashed')
    ax.set_ylim(1e-25, 1e-5)
    ax.set_ylabel('Output BER, $BER_o$')
    ax.set_yscale('log')
    ax.grid(True)
    ax.set_xticks(t)
    ax.set_xlabel('Number of Symbols corrected, $t$')
    ax.set_title('Number of Symbols Corrected vs. Output BER')
    ax.legend(fontsize=12)
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
def plot_r_vs_ober(params, show=True, fpath=None):
    """Two-panel plot of output BER versus (left) the fraction of corrected
    symbols t/n and (right) the coding rate R = (n - 2t)/n."""
    def plot(axes, t, param, **plotargs):
        # pb=None selects the memoryless BSC case, where pb == pe.
        if param['pb'] is None:
            param['pb'] = param['pe']
            label = 'BSC pe={pe} m={m} n={n}'.format(**param)
        else:
            label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
        pmf = errpmf(**param)
        ober = ber_out(param['pe'], param['pb'], pmf)
        if 'label' not in plotargs:
            plotargs['label'] = label
        n = param['n']
        frac_t = 100 * t / n
        # k information symbols remain after 2t parity symbols.
        k = n - 2 * t
        r = k / n
        axes[0].plot(frac_t, ober[t], **plotargs)
        axes[1].plot(r, ober[t], **plotargs)
    plt.close('all')
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=plt.figaspect(1/2))
    t = np.arange(16)
    for param in params:
        # Pass a copy: plot() mutates the dict when it fills in pb.
        plot(axes, t, param.copy(), lw=1.5)
    for ax in axes:
        ax.axhline(1e-15, color='black', linestyle='dashed')
        ax.set_ylim(1e-25, 1e-5)
        ax.set_ylabel('Output BER, $BER_o$')
        ax.set_yscale('log')
        ax.grid(True)
    axes[0].set_xlim(0, 10)
    axes[0].set_xlabel('Fraction of Symbols corrected, $t/n$ [%]')
    axes[0].set_title('Fraction of Symbols corrected vs. Output BER')
    axes[0].legend(loc='upper right', fontsize=12)
    axes[1].set_xlim(0.8, 1.0)
    axes[1].set_xlabel('Coding Rate, $R = k/n = (n - 2t)/n$')
    axes[1].set_title('Coding Rate vs. Output BER')
    axes[1].legend(loc='upper left', fontsize=12)
    plt.tight_layout()
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
def plot_pe_vs_ober(params, show=True, fpath=None):
    """Plot output BER against a sweep of input symbol error rates pe, for
    each pe_vs_ober() parameter set in *params* (which here carry a fixed
    correction capability t instead of a fixed pe)."""
    def plot(ax, pe, param, **plotargs):
        # pb=None selects the memoryless BSC labelling.
        if param['pb'] is None:
            label = 'BSC m={m} n={n} t={t}'.format(**param)
        else:
            label = 'GBMM pb={pb} m={m} n={n} t={t}'.format(**param)
        ober = pe_vs_ober(pe, **param)
        if 'label' not in plotargs:
            plotargs['label'] = label
        ax.plot(pe, ober, **plotargs)
    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
    # Logarithmic sweep of input error probabilities: 1e-15 .. ~1e-1.
    pe = 10.0 ** np.arange(-15, -0.5, 0.5)
    for param in params:
        plot(ax, pe, param.copy(), lw=1.5)
    ax.axhline(1e-15, color='black', linestyle='dashed')
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlim(pe[0], pe[-1])
    ax.set_ylim(1e-25, 1e-1)
    ax.set_xlabel('Input BER, $BER_i$')
    ax.set_ylabel('Output BER, $BER_o$')
    ax.set_title('Input vs. Output BER')
    ax.legend(loc='upper left', fontsize=12)
    ax.grid(True)
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
def plot_ebn0_vs_ober(params, show=True, fpath=None):
    """Plot output BER against Eb/N0 [dB], including uncoded reference
    curves, for each parameter set in *params*."""
    def plot(ax, ebn0, param, **plotargs):
        # pb=None selects the memoryless BSC labelling.
        if param['pb'] is None:
            label = 'BSC m={m} n={n} t={t}'.format(**param)
        else:
            label = 'GBMM pb={pb} m={m} n={n} t={t}'.format(**param)
        n = param['n']
        t = param['t']
        # Account for the rate loss: Es/N0 = Eb/N0 + 10*log10(R).
        R = (n - 2 * t)/n
        esn0 = ebn0 + dB(R)
        pe = esn02pe(esn0)
        ober = pe_vs_ober(pe, **param)
        if 'label' not in plotargs:
            plotargs['label'] = label
        ax.plot(ebn0, ober, **plotargs)
    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
    ebn0 = np.arange(5, 20.5, 0.5)
    # Uncoded (FEC input) for reference
    pe = esn02pe(ebn0)
    iber = ber_in(pe=pe, pb=0.5)
    ax.plot(ebn0, pe, lw=1.5, color='black', label='Uncoded BSC')
    ax.plot(ebn0, iber, lw=1.5, color='black', linestyle='dashed',
            label='Uncoded GBMM(pb=0.5)')
    for param in params:
        plot(ax, ebn0, param.copy(), lw=1.5)
    ax.axhline(1e-15, color='black', linestyle='dashed')
    ax.set_yscale('log')
    ax.set_xlim(ebn0[0], ebn0[-1])
    ax.set_xticks(ebn0[::2])
    ax.set_ylim(1e-25, 1e-1)
    ax.set_xlabel('$E_b/N_0 [dB]$')
    ax.set_ylabel('Output BER, $BER_o$')
    ax.set_title('Eb/N0 vs. Output BER')
    ax.legend(fontsize=10)
    ax.grid(True)
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
if __name__ == '__main__':
    # Generate the full set of performance plots into the given directory.
    argp = argparse.ArgumentParser(description='Create code performance plots.')
    argp.add_argument('dir', metavar='DIR', help='plots directory')
    argp.add_argument('--no-show', dest='show', action='store_false',
                      help='Don\'t show, just save to file.')
    argns = argp.parse_args()
    dirpath = os.path.abspath(argns.dir)
    os.makedirs(dirpath, exist_ok=True)
    # pe vs ober
    # Fixed correction capability t; sweep input error rate / Eb/N0.
    params = [
        # GBMM
        dict(pb=0.5, m=8, n=124, t=4),
        dict(pb=0.5, m=8, n=124, t=6),
        dict(pb=0.5, m=8, n=124, t=8),
        dict(pb=0.5, m=8, n=248, t=4),
        dict(pb=0.5, m=8, n=248, t=6),
        dict(pb=0.5, m=8, n=248, t=8),
        dict(pb=0.5, m=10, n=528, t=7),
        # BSC
        dict(pb=None, m=8, n=124, t=4),
        dict(pb=None, m=8, n=248, t=4)]
    plot_pe_vs_ober(params, argns.show, os.path.join(dirpath, 'pe-vs-ober.png'))
    plot_ebn0_vs_ober(params, argns.show, os.path.join(dirpath, 'ebn0-vs-ober.png'))
    params = [
        # GBMM
        dict(pb=0.5, m=8, n=240//8, t=1),
        dict(pb=0.5, m=8, n=240//8, t=2),
        dict(pb=0.5, m=8, n=240//8, t=3),
        # BSC
        dict(pb=None, m=8, n=240//8, t=1),
        dict(pb=None, m=8, n=240//8, t=2),
        dict(pb=None, m=8, n=240//8, t=3)]
    plot_pe_vs_ober(params, argns.show, os.path.join(dirpath, '240bits-pe-vs-ober.png'))
    plot_ebn0_vs_ober(params, argns.show, os.path.join(dirpath, '240bits-ebn0-vs-ober.png'))
    params = [
        # GBMM
        dict(pb=0.5, m=8, n=120//8, t=1),
        dict(pb=0.5, m=8, n=120//8, t=2),
        dict(pb=0.5, m=8, n=120//8, t=3),
        # BSC
        dict(pb=None, m=8, n=120//8, t=1),
        dict(pb=None, m=8, n=120//8, t=2),
        dict(pb=None, m=8, n=120//8, t=3)]
    plot_pe_vs_ober(params, argns.show, os.path.join(dirpath, '120bits-pe-vs-ober.png'))
    plot_ebn0_vs_ober(params, argns.show, os.path.join(dirpath, '120bits-ebn0-vs-ober.png'))
    #sys.exit()
    # Short codes
    # Fixed pe; sweep the number of corrected symbols / rate.
    params = [
        # GBMM
        dict(pe=1e-12, pb=0.5, m=5, n=240//5),
        dict(pe=1e-12, pb=0.5, m=8, n=240//8),
        # BSC
        dict(pe=1e-12, pb=None, m=5, n=240//5),
        dict(pe=1e-12, pb=None, m=8, n=240//8)]
    plot_x_vs_pmf(params, argns.show, os.path.join(dirpath, '240bits-x-vs-pmf.png'))
    plot_x_vs_pndc(params, argns.show, os.path.join(dirpath, '240bits-x-vs-pndc.png'))
    plot_t_vs_ober(params, argns.show, os.path.join(dirpath, '240bits-t-vs-ober.png'))
    plot_r_vs_ober(params, argns.show, os.path.join(dirpath, '240bits-r-vs-ober.png'))
    # Very short codes
    params = [
        # GBMM
        dict(pe=1e-12, pb=0.5, m=5, n=120//5),
        dict(pe=1e-12, pb=0.5, m=8, n=120//8),
        # BSC
        dict(pe=1e-12, pb=None, m=5, n=120//5),
        dict(pe=1e-12, pb=None, m=8, n=120//8)]
    plot_x_vs_pmf(params, argns.show, os.path.join(dirpath, '120bits-x-vs-pmf.png'))
    plot_x_vs_pndc(params, argns.show, os.path.join(dirpath, '120bits-x-vs-pndc.png'))
    plot_t_vs_ober(params, argns.show, os.path.join(dirpath, '120bits-t-vs-ober.png'))
    plot_r_vs_ober(params, argns.show, os.path.join(dirpath, '120bits-r-vs-ober.png'))
    # Practical codes
    params = [
        # GBMM
        dict(pe=1e-6, pb=0.5, m=8, n=124),
        dict(pe=1e-6, pb=0.5, m=8, n=248),
        dict(pe=1e-6, pb=0.5, m=10, n=264),
        dict(pe=1e-6, pb=0.5, m=10, n=528),
        # BSC
        dict(pe=1e-6, pb=None, m=8, n=124),
        dict(pe=1e-6, pb=None, m=8, n=248)]
    plot_x_vs_pmf(params, argns.show, os.path.join(dirpath, 'x-vs-pmf.png'))
    plot_x_vs_pndc(params, argns.show, os.path.join(dirpath, 'x-vs-pndc.png'))
    plot_t_vs_ober(params, argns.show, os.path.join(dirpath, 't-vs-ober.png'))
    plot_r_vs_ober(params, argns.show, os.path.join(dirpath, 'r-vs-ober.png'))
|
r-rathi/error-control-coding
|
perf/perf_plots.py
|
Python
|
mit
| 11,256 | 0.002221 |
from __future__ import print_function
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
class ImageMenu(Gtk.EventBox):
    """An EventBox wrapping *image* that toggles a borderless popup window
    (containing *child*) next to itself when clicked.

    While open, the popup grabs the pointer/keyboard so that a click
    outside its bounds closes it again.
    """
    def __init__ (self, image, child):
        GObject.GObject.__init__(self)
        self.add(image)
        # The popup: an undecorated, non-resizable dialog-hint window.
        self.subwindow = Gtk.Window()
        self.subwindow.set_decorated(False)
        self.subwindow.set_resizable(False)
        self.subwindow.set_type_hint(Gdk.WindowTypeHint.DIALOG)
        self.subwindow.add(child)
        # connect_after so the border is drawn on top of the child's content.
        self.subwindow.connect_after("draw", self.__sub_onExpose)
        self.subwindow.connect("button_press_event", self.__sub_onPress)
        self.subwindow.connect("motion_notify_event", self.__sub_onMotion)
        self.subwindow.connect("leave_notify_event", self.__sub_onMotion)
        self.subwindow.connect("delete-event", self.__sub_onDelete)
        self.subwindow.connect("focus-out-event", self.__sub_onFocusOut)
        child.show_all()
        self.setOpen(False)
        self.connect("button_press_event", self.__onPress)
    def setOpen (self, isopen):
        """Show/hide the popup; when opening, place it just right of self."""
        self.isopen = isopen
        if isopen:
            # Walk up to the toplevel Gtk.Window to get absolute coordinates.
            topwindow = self.get_parent()
            while not isinstance(topwindow, Gtk.Window):
                topwindow = topwindow.get_parent()
            x, y = topwindow.get_window().get_position()
            x += self.get_allocation().x + self.get_allocation().width
            y += self.get_allocation().y
            self.subwindow.move(x, y)
        self.subwindow.props.visible = isopen
        # Highlight the event box while its popup is open.
        self.set_state(self.isopen and Gtk.StateType.SELECTED or Gtk.StateType.NORMAL)
    def __onPress (self, self_, event):
        # Left single-click on the image toggles the popup.
        if event.button == 1 and event.type == Gdk.EventType.BUTTON_PRESS:
            self.setOpen(not self.isopen)
    def __sub_setGrabbed (self, grabbed):
        # Acquire/release a pointer+keyboard grab on the popup window so we
        # can detect clicks outside of it.  Guarded so we never grab twice.
        if grabbed and not Gdk.pointer_is_grabbed():
            Gdk.pointer_grab(self.subwindow.get_window(), True,
                             Gdk.EventMask.LEAVE_NOTIFY_MASK|
                             Gdk.EventMask.POINTER_MOTION_MASK|
                             Gdk.EventMask.BUTTON_PRESS_MASK,
                             None, None, Gdk.CURRENT_TIME)
            Gdk.keyboard_grab(self.subwindow.get_window(), True, Gdk.CURRENT_TIME)
        elif Gdk.pointer_is_grabbed():
            Gdk.pointer_ungrab(Gdk.CURRENT_TIME)
            Gdk.keyboard_ungrab(Gdk.CURRENT_TIME)
    def __sub_onMotion (self, subwindow, event):
        # Grab only while the pointer is outside the popup's allocation.
        a = subwindow.get_allocation()
        self.__sub_setGrabbed(not (0 <= event.x < a.width and 0 <= event.y < a.height))
    def __sub_onPress (self, subwindow, event):
        # A press outside the popup closes it (and drops the grab).
        a = subwindow.get_allocation()
        if not (0 <= event.x < a.width and 0 <= event.y < a.height):
            Gdk.pointer_ungrab(event.time)
            self.setOpen(False)
    def __sub_onExpose (self, subwindow, ctx):
        # Draw a 2px border around the popup using the theme's "p_dark_color".
        a = subwindow.get_allocation()
        context = subwindow.get_window().cairo_create()
        context.set_line_width(2)
        context.rectangle (a.x, a.y, a.width, a.height)
        sc = self.get_style_context()
        found, color = sc.lookup_color("p_dark_color")
        context.set_source_rgba(*color)
        context.stroke()
        # Re-assert the grab after each draw while the popup is open.
        self.__sub_setGrabbed(self.isopen)
    def __sub_onDelete (self, subwindow, event):
        # Intercept window-manager close: just hide instead of destroying.
        self.setOpen(False)
        return True
    def __sub_onFocusOut (self, subwindow, event):
        self.setOpen(False)
def switchWithImage (image, dialog):
    """Swap *image* out of its container and re-insert it wrapped in an
    ImageMenu whose popup shows *dialog*."""
    container = image.get_parent()
    container.remove(image)
    menu = ImageMenu(image, dialog)
    container.add(menu)
    menu.show()
if __name__ == "__main__":
    # Manual smoke test: a window with a properties icon that, when clicked,
    # pops up a small table of spin buttons via switchWithImage().
    win = Gtk.Window()
    vbox = Gtk.VBox()
    vbox.add(Gtk.Label(label="Her er der en kat"))
    image = Gtk.Image.new_from_icon_name("gtk-properties", Gtk.IconSize.BUTTON)
    vbox.add(image)
    vbox.add(Gtk.Label(label="Her er der ikke en kat"))
    win.add(vbox)
    # The popup content: a 2x2 table of labelled spin buttons.
    table = Gtk.Table(2, 2)
    table.attach(Gtk.Label(label="Minutes:"), 0, 1, 0, 1)
    spin1 = Gtk.SpinButton(Gtk.Adjustment(0,0,100,1))
    table.attach(spin1, 1, 2, 0, 1)
    table.attach(Gtk.Label(label="Gain:"), 0, 1, 1, 2)
    spin2 = Gtk.SpinButton(Gtk.Adjustment(0,0,100,1))
    table.attach(spin2, 1, 2, 1, 2)
    table.set_border_width(6)
    switchWithImage(image, table)
    def onValueChanged (spin):
        # Echo spin-button changes to stdout for manual verification.
        print(spin.get_value())
    spin1.connect("value-changed", onValueChanged)
    spin2.connect("value-changed", onValueChanged)
    win.show_all()
    win.connect("delete-event", Gtk.main_quit)
    Gtk.main()
|
rajrakeshdr/pychess
|
lib/pychess/widgets/ImageMenu.py
|
Python
|
gpl-3.0
| 4,643 | 0.0112 |
# -*- coding: utf-8 -*-
"""
test_sphinx
~~~~~~~~~~~
General Sphinx test and check output.
"""
import sys
import pytest
import sphinx
from ipypublish.sphinx.tests import get_test_source_dir
from ipypublish.tests.utils import HTML2JSONParser
def _build_ok(app, status, warning=None):
    """Build *app* and assert the Sphinx build succeeded.

    If *warning* is given, additionally assert that no warnings were
    emitted during the build.
    """
    app.build()
    assert "build succeeded" in status.getvalue()  # Build succeeded
    if warning is not None:
        assert warning.getvalue().strip() == ""


def _check_parsed_html(app, get_sphinx_app_output, data_regression, basename_root):
    """Parse the built HTML and compare it against stored regression data.

    The regression basename is suffixed by major Sphinx version, since
    Sphinx 1.x and 2.x render slightly different HTML.
    """
    output = get_sphinx_app_output(app, buildername="html")
    parser = HTML2JSONParser()
    parser.feed(output)
    suffix = "_v2" if sphinx.version_info >= (2,) else "_v1"
    data_regression.check(parser.parsed, basename=basename_root + suffix)


@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_basic"))
def test_basic(app, status, warning, get_sphinx_app_output, data_regression):
    """Basic bibglossary source builds cleanly and matches regression data."""
    _build_ok(app, status, warning)
    _check_parsed_html(app, get_sphinx_app_output, data_regression, "test_basic")


@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_sortkeys"))
def test_sortkeys(app, status, warning, get_sphinx_app_output, data_regression):
    """Glossary with explicit sort keys builds cleanly and matches regression data."""
    _build_ok(app, status, warning)
    _check_parsed_html(app, get_sphinx_app_output, data_regression, "test_sortkeys")


@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_unsorted"))
def test_unsorted(app, status, warning, get_sphinx_app_output, data_regression):
    """Unsorted glossary builds cleanly and matches regression data."""
    _build_ok(app, status, warning)
    _check_parsed_html(app, get_sphinx_app_output, data_regression, "test_unsorted")


@pytest.mark.sphinx(
    buildername="html", srcdir=get_test_source_dir("bibgloss_missingref")
)
def test_missingref(app, status, warning, get_sphinx_app_output):
    """A reference to a missing glossary key must emit a warning (not fail)."""
    _build_ok(app, status)
    warnings = warning.getvalue().strip()
    # The warning text differs between Sphinx major versions.
    if (
        "could not relabel bibglossary reference [missingkey]" not in warnings
        and "WARNING: citation not found: missingkey" not in warnings  # sphinx < 2
    ):  # sphinx >= 2
        raise AssertionError(
            "should raise warning for missing citation `missingkey`: {}".format(
                warnings
            )
        )


@pytest.mark.sphinx(
    buildername="html", srcdir=get_test_source_dir("bibgloss_duplicatekey")
)
def test_duplicatekey(app, status, warning, get_sphinx_app_output):
    """Duplicate glossary keys must abort the build with a KeyError."""
    with pytest.raises(KeyError):
        app.build()


@pytest.mark.skipif(
    sys.version_info < (3, 0),
    reason="SyntaxError on import of texsoup/data.py line 135",
)
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_tex"))
def test_load_tex(app, status, warning, get_sphinx_app_output):
    """A glossary loaded from a .tex bibliography builds cleanly."""
    _build_ok(app, status, warning)
|
chrisjsewell/ipypublish
|
ipypublish/sphinx/tests/test_bibgloss.py
|
Python
|
bsd-3-clause
| 3,453 | 0.002317 |
#! /usr/bin/env python
# -*- coding: UTF8 -*-
# Este arquivo é parte do programa Carinhas
# Copyright 2013-2014 Carlo Oliveira <carlo@nce.ufrj.br>,
# `Labase <http://labase.selfip.org/>`__; `GPL <http://is.gd/3Udt>`__.
#
# Carinhas é um software livre; você pode redistribuí-lo e/ou
# modificá-lo dentro dos termos da Licença Pública Geral GNU como
# publicada pela Fundação do Software Livre (FSF); na versão 2 da
# Licença.
#
# Este programa é distribuído na esperança de que possa ser útil,
# mas SEM NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÃO
# a qualquer MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a
# Licença Pública Geral GNU para maiores detalhes.
#
# Você deve ter recebido uma cópia da Licença Pública Geral GNU
# junto com este programa, se não, escreva para a Fundação do Software
# Livre(FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
############################################################
SuperPython - Teste de Funcionalidade Web
############################################################
Verifica a funcionalidade do servidor web.
"""
__author__ = 'carlo'
import unittest
import sys
import bottle
import os
import sys
import os
# Make the project's src/ directory importable relative to this test file.
project_server = os.path.dirname(os.path.abspath(__file__))
project_server = os.path.join(project_server, '../src/')
# print(project_server)
sys.path.insert(0, project_server)
# make sure the default templates directory is known to Bottle
templates_dir = os.path.join(project_server, 'server/views/')
# print(templates_dir)
if templates_dir not in bottle.TEMPLATE_PATH:
    bottle.TEMPLATE_PATH.insert(0, templates_dir)
# Python 2 ships mock separately; Python 3 bundles it in unittest.
if sys.version_info[0] == 2:
    from mock import MagicMock, patch
else:
    from unittest.mock import MagicMock, patch, ANY
from webtest import TestApp
from server.control import application as appbottle
import server.modelo_redis as cs
import server.control as ct
class FunctionalWebTest(unittest.TestCase):
    """Functional tests driving the bottle app through WebTest.

    NOTE(review): several tests scrape the generated record id out of the
    HTML response text ('ver = main("...")'), so they are tightly coupled
    to the exact template output -- verify templates before changing.
    """
    def setUp(self):
        # Point the redis-backed model at a throwaway test database file.
        cs.DBF = '/tmp/redis_test.db'
        pass
    def test_default_page(self):
        """The static index page is served and carries the expected title."""
        app = TestApp(appbottle)
        response = app.get('/static/index.html')
        self.assertEqual('200 OK', response.status)
        self.assertTrue('<title>Jogo Eica - Cadastro</title>' in response.text, response.text[:1000])
    def test_default_redirect(self):
        """The root URL redirects (302) to the static index."""
        app = TestApp(appbottle)
        response = app.get('/')
        self.assertEqual('302 Found', response.status)
    def test_register(self):
        """Registering a user stores a record retrievable from cs.DRECORD."""
        # app = TestApp(appbottle)
        # response = app.get('/static/register?doc_id="10000001"&module=projeto2222')
        rec_id, response = self._get_id('3333')
        self.assertEqual('200 OK', response.status)
        self.assertTrue(rec_id in response, str(response))
        # rec_id = str(response).split('ver = main("')[1].split('e0cb4e39e071")')[0] + 'e0cb4e39e071'
        expected_record = "{'module': 'projeto2222', 'user': 'projeto2222-lastcodename', 'idade': '00015',"
        received_record = cs.DRECORD.get(rec_id)
        assert expected_record in str(received_record),\
            "{}: {}".format(rec_id, received_record)
    def _get_id(self, ref_id='e0cb4e39e071', url='/static/register?doc_id="10000001"&module=projeto2222'):
        """Register a fixed test user; return (record id, response).

        The record id is scraped from the 'ver = main("...")' snippet in
        the returned HTML.
        """
        app = TestApp(appbottle)
        user, idade, ano, sexo = 'projeto2222-lastcodename', '00015', '0009', 'outro'
        user_data = dict(doc_id=ref_id, user=user, idade=idade, ano=ano, sexo=sexo)
        response = app.get(url, params=user_data)
        return str(response).split('ver = main("')[1].split('")')[0], response
    def test_store(self):
        """Posting a game move to /record/store echoes the stored record."""
        app = TestApp(appbottle)
        # response = app.get('/static/register?doc_id="10000001"&module=projeto2222')
        # rec_id = str(response).split('ver = main("')[1].split('e0cb4e39e071")')[0] + 'e0cb4e39e071'
        rec_id, _ = self._get_id()
        response = app.post('/record/store', self._pontua(rec_id))
        self.assertEqual('200 OK', response.status)
        self.assertTrue('", "tempo": "20' in response, str(response))
        # self.assertTrue('{"module": "projeto2222", "jogada": [{"carta": "2222",' in str(response), str(response))
        expected_record = "{'module': 'projeto2222', 'user': 'projeto2222-lastcodename', 'idade': '00015',"
        received_record = str(response)
        assert expected_record.replace("'", '"') in received_record,\
            "{}: {}".format(rec_id, received_record)
    def _pontua(self, ref_id):
        # Build a synthetic scoring payload; also marks ref_id as the
        # "last" session the server should associate further calls with.
        ct.LAST = ref_id
        jogada = {"doc_id": ref_id,
                  "carta": 2222,
                  "casa": 2222,
                  "move": 2222,
                  "ponto": 2222,
                  "tempo": 2222,
                  "valor": 2222}
        return jogada
    def test_pontos(self):
        """The /pontos page renders the registered user's data and scores."""
        rec_id, response = self._get_id()
        app = TestApp(appbottle)
        app.post('/record/store', self._pontua(rec_id))
        ct.LAST = rec_id
        response = app.get('/pontos')
        self.assertEqual('200 OK', response.status)
        self.assertTrue('projeto2222-lastcodename' in response, str(response))
        self.assertTrue('<h3>Idade: 10 Genero: outro Ano Escolar: 9</h3>' in response, str(response))
        self.assertTrue('<td><span>2222<span></td>' in response, str(response))
# Allow running this test module directly: discover and run the tests above.
if __name__ == '__main__':
    unittest.main()
|
labase/eica
|
tests/testwebfunctionaldb.py
|
Python
|
gpl-2.0
| 5,489 | 0.00293 |
from time import time
from pychess.Utils.lutils.lmovegen import genAllMoves
from pychess.Utils.lutils.lmove import toLAN
def do_perft(board, depth, root):
    """Count the leaf nodes of the legal-move tree below *board*.

    Recurses *depth* plies deep; while root > 0, prints a per-move
    breakdown (move in LAN, its subtree count, running total).
    Returns the total number of leaf positions.
    """
    if depth == 0:
        return 1
    total = 0
    for move in genAllMoves(board):
        board.applyMove(move)
        if board.opIsChecked():
            # Illegal move (leaves own king in check): undo and skip.
            board.popMove()
        else:
            subtotal = do_perft(board, depth - 1, root - 1)
            total += subtotal
            board.popMove()
            if root > 0:
                print("%8s %10d %10d" % (toLAN(board, move), subtotal, total))
    return total
def perft(board, depth, root):
    """Run perft at every depth from 1 to *depth*, timing each run.

    Prints one line per depth: depth, node count, elapsed seconds, and
    nodes-per-second throughput.
    """
    for level in range(1, depth + 1):
        started = time()
        nodes = do_perft(board, level, root)
        elapsed = time() - started
        # Guard against a sub-resolution elapsed time of zero.
        nps = nodes / elapsed if elapsed > 0 else nodes
        print("%2d %10d %5.2f %12.2fnps" % (level, nodes, elapsed, nps))
|
leogregianin/pychess
|
lib/pychess/Utils/lutils/perft.py
|
Python
|
gpl-3.0
| 857 | 0 |
#!/usr/bin/python
from __future__ import print_function
import time

import weather

# Time how long loading the hourly dataset for Ottawa takes.
# BUG FIX: with print_function imported, the old statement form
# `print time.time() - a` was a SyntaxError; it must be a function call.
start = time.time()
weather.hourly.load("ottawa")
print(time.time() - start)

# Keep the console open until the user presses Enter.
try:
    raw_input()
except NameError:  # Python 3: raw_input() was renamed to input()
    input()
|
endlisnis/weather-records
|
testHourly.py
|
Python
|
gpl-3.0
| 159 | 0.018868 |
from unittest import mock
from django.conf import settings
from django.db import connection, models
from django.db.models.functions import Lower, Upper
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from .models import Book, ChildModel1, ChildModel2
class SimpleIndexesTests(SimpleTestCase):
    """Tests for models.Index construction, validation, equality, naming
    and deconstruction; none of these touch the database."""
    def test_suffix(self):
        # Auto-generated index names end with this class-level suffix.
        self.assertEqual(models.Index.suffix, 'idx')
    def test_repr(self):
        index = models.Index(fields=['title'])
        multi_col_index = models.Index(fields=['title', 'author'])
        partial_index = models.Index(fields=['title'], name='long_books_idx', condition=models.Q(pages__gt=400))
        covering_index = models.Index(
            fields=['title'],
            name='include_idx',
            include=['author', 'pages'],
        )
        opclasses_index = models.Index(
            fields=['headline', 'body'],
            name='opclasses_idx',
            opclasses=['varchar_pattern_ops', 'text_pattern_ops'],
        )
        func_index = models.Index(Lower('title'), name='book_func_idx')
        self.assertEqual(repr(index), "<Index: fields='title'>")
        self.assertEqual(repr(multi_col_index), "<Index: fields='title, author'>")
        self.assertEqual(repr(partial_index), "<Index: fields='title' condition=(AND: ('pages__gt', 400))>")
        self.assertEqual(
            repr(covering_index),
            "<Index: fields='title' include='author, pages'>",
        )
        self.assertEqual(
            repr(opclasses_index),
            "<Index: fields='headline, body' "
            "opclasses='varchar_pattern_ops, text_pattern_ops'>",
        )
        self.assertEqual(repr(func_index), "<Index: expressions='Lower(F(title))'>")
    def test_eq(self):
        index = models.Index(fields=['title'])
        same_index = models.Index(fields=['title'])
        another_index = models.Index(fields=['title', 'author'])
        index.model = Book
        same_index.model = Book
        another_index.model = Book
        self.assertEqual(index, same_index)
        self.assertEqual(index, mock.ANY)
        self.assertNotEqual(index, another_index)
    def test_eq_func(self):
        # Expression-based indexes compare by their expression list.
        index = models.Index(Lower('title'), models.F('author'), name='book_func_idx')
        same_index = models.Index(Lower('title'), 'author', name='book_func_idx')
        another_index = models.Index(Lower('title'), name='book_func_idx')
        self.assertEqual(index, same_index)
        self.assertEqual(index, mock.ANY)
        self.assertNotEqual(index, another_index)
    def test_index_fields_type(self):
        with self.assertRaisesMessage(ValueError, 'Index.fields must be a list or tuple.'):
            models.Index(fields='title')
    def test_index_fields_strings(self):
        msg = 'Index.fields must contain only strings with field names.'
        with self.assertRaisesMessage(ValueError, msg):
            models.Index(fields=[models.F('title')])
    def test_fields_tuple(self):
        self.assertEqual(models.Index(fields=('title',)).fields, ['title'])
    def test_requires_field_or_expression(self):
        msg = 'At least one field or expression is required to define an index.'
        with self.assertRaisesMessage(ValueError, msg):
            models.Index()
    def test_expressions_and_fields_mutually_exclusive(self):
        msg = "Index.fields and expressions are mutually exclusive."
        with self.assertRaisesMessage(ValueError, msg):
            models.Index(Upper('foo'), fields=['field'])
    def test_opclasses_requires_index_name(self):
        with self.assertRaisesMessage(ValueError, 'An index must be named to use opclasses.'):
            models.Index(opclasses=['jsonb_path_ops'])
    def test_opclasses_requires_list_or_tuple(self):
        with self.assertRaisesMessage(ValueError, 'Index.opclasses must be a list or tuple.'):
            models.Index(name='test_opclass', fields=['field'], opclasses='jsonb_path_ops')
    def test_opclasses_and_fields_same_length(self):
        msg = 'Index.fields and Index.opclasses must have the same number of elements.'
        with self.assertRaisesMessage(ValueError, msg):
            models.Index(name='test_opclass', fields=['field', 'other'], opclasses=['jsonb_path_ops'])
    def test_condition_requires_index_name(self):
        with self.assertRaisesMessage(ValueError, 'An index must be named to use condition.'):
            models.Index(condition=models.Q(pages__gt=400))
    def test_expressions_requires_index_name(self):
        msg = 'An index must be named to use expressions.'
        with self.assertRaisesMessage(ValueError, msg):
            models.Index(Lower('field'))
    def test_expressions_with_opclasses(self):
        msg = (
            'Index.opclasses cannot be used with expressions. Use '
            'django.contrib.postgres.indexes.OpClass() instead.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            models.Index(
                Lower('field'),
                name='test_func_opclass',
                opclasses=['jsonb_path_ops'],
            )
    def test_condition_must_be_q(self):
        with self.assertRaisesMessage(ValueError, 'Index.condition must be a Q instance.'):
            models.Index(condition='invalid', name='long_book_idx')
    def test_include_requires_list_or_tuple(self):
        msg = 'Index.include must be a list or tuple.'
        with self.assertRaisesMessage(ValueError, msg):
            models.Index(name='test_include', fields=['field'], include='other')
    def test_include_requires_index_name(self):
        msg = 'A covering index must be named.'
        with self.assertRaisesMessage(ValueError, msg):
            models.Index(fields=['field'], include=['other'])
    def test_name_auto_generation(self):
        # Generated names combine table, column(s), a hash, and the suffix.
        index = models.Index(fields=['author'])
        index.set_name_with_model(Book)
        self.assertEqual(index.name, 'model_index_author_0f5565_idx')
        # '-' for DESC columns should be accounted for in the index name.
        index = models.Index(fields=['-author'])
        index.set_name_with_model(Book)
        self.assertEqual(index.name, 'model_index_author_708765_idx')
        # fields may be truncated in the name. db_column is used for naming.
        long_field_index = models.Index(fields=['pages'])
        long_field_index.set_name_with_model(Book)
        self.assertEqual(long_field_index.name, 'model_index_page_co_69235a_idx')
        # suffix can't be longer than 3 characters.
        long_field_index.suffix = 'suff'
        msg = 'Index too long for multiple database support. Is self.suffix longer than 3 characters?'
        with self.assertRaisesMessage(AssertionError, msg):
            long_field_index.set_name_with_model(Book)
    @isolate_apps('model_indexes')
    def test_name_auto_generation_with_quoted_db_table(self):
        # Surrounding quotes in db_table must not leak into the index name.
        class QuotedDbTable(models.Model):
            name = models.CharField(max_length=50)
            class Meta:
                db_table = '"t_quoted"'
        index = models.Index(fields=['name'])
        index.set_name_with_model(QuotedDbTable)
        self.assertEqual(index.name, 't_quoted_name_e4ed1b_idx')
    def test_deconstruction(self):
        index = models.Index(fields=['title'], db_tablespace='idx_tbls')
        index.set_name_with_model(Book)
        path, args, kwargs = index.deconstruct()
        self.assertEqual(path, 'django.db.models.Index')
        self.assertEqual(args, ())
        self.assertEqual(
            kwargs,
            {'fields': ['title'], 'name': 'model_index_title_196f42_idx', 'db_tablespace': 'idx_tbls'}
        )
    def test_deconstruct_with_condition(self):
        index = models.Index(
            name='big_book_index',
            fields=['title'],
            condition=models.Q(pages__gt=400),
        )
        index.set_name_with_model(Book)
        path, args, kwargs = index.deconstruct()
        self.assertEqual(path, 'django.db.models.Index')
        self.assertEqual(args, ())
        self.assertEqual(
            kwargs,
            {
                'fields': ['title'],
                'name': 'model_index_title_196f42_idx',
                'condition': models.Q(pages__gt=400),
            }
        )
    def test_deconstruct_with_include(self):
        index = models.Index(
            name='book_include_idx',
            fields=['title'],
            include=['author'],
        )
        index.set_name_with_model(Book)
        path, args, kwargs = index.deconstruct()
        self.assertEqual(path, 'django.db.models.Index')
        self.assertEqual(args, ())
        self.assertEqual(
            kwargs,
            {
                'fields': ['title'],
                'name': 'model_index_title_196f42_idx',
                'include': ('author',),
            },
        )
    def test_deconstruct_with_expressions(self):
        index = models.Index(Upper('title'), name='book_func_idx')
        path, args, kwargs = index.deconstruct()
        self.assertEqual(path, 'django.db.models.Index')
        self.assertEqual(args, (Upper('title'),))
        self.assertEqual(kwargs, {'name': 'book_func_idx'})
    def test_clone(self):
        index = models.Index(fields=['title'])
        new_index = index.clone()
        self.assertIsNot(index, new_index)
        self.assertEqual(index.fields, new_index.fields)
    def test_clone_with_expressions(self):
        index = models.Index(Upper('title'), name='book_func_idx')
        new_index = index.clone()
        self.assertIsNot(index, new_index)
        self.assertEqual(index.expressions, new_index.expressions)
    def test_name_set(self):
        # Names declared (or generated) for Book's Meta.indexes.
        index_names = [index.name for index in Book._meta.indexes]
        self.assertCountEqual(
            index_names,
            [
                'model_index_title_196f42_idx',
                'model_index_isbn_34f975_idx',
                'model_indexes_book_barcode_idx',
            ],
        )
    def test_abstract_children(self):
        # Indexes inherited from an abstract base get per-child names.
        index_names = [index.name for index in ChildModel1._meta.indexes]
        self.assertEqual(
            index_names,
            ['model_index_name_440998_idx', 'model_indexes_childmodel1_idx'],
        )
        index_names = [index.name for index in ChildModel2._meta.indexes]
        self.assertEqual(
            index_names,
            ['model_index_name_b6c374_idx', 'model_indexes_childmodel2_idx'],
        )
)
class IndexesTests(TestCase):
    """Database-backed tests: tablespace handling in generated index SQL."""
    @skipUnlessDBFeature('supports_tablespaces')
    def test_db_tablespace(self):
        editor = connection.schema_editor()
        # Index with db_tablespace attribute.
        for fields in [
            # Field with db_tablespace specified on model.
            ['shortcut'],
            # Field without db_tablespace specified on model.
            ['author'],
            # Multi-column with db_tablespaces specified on model.
            ['shortcut', 'isbn'],
            # Multi-column without db_tablespace specified on model.
            ['title', 'author'],
        ]:
            with self.subTest(fields=fields):
                index = models.Index(fields=fields, db_tablespace='idx_tbls2')
                self.assertIn('"idx_tbls2"', str(index.create_sql(Book, editor)).lower())
        # Indexes without db_tablespace attribute.
        for fields in [['author'], ['shortcut', 'isbn'], ['title', 'author']]:
            with self.subTest(fields=fields):
                index = models.Index(fields=fields)
                # The DEFAULT_INDEX_TABLESPACE setting can't be tested because
                # it's evaluated when the model class is defined. As a
                # consequence, @override_settings doesn't work.
                if settings.DEFAULT_INDEX_TABLESPACE:
                    self.assertIn(
                        '"%s"' % settings.DEFAULT_INDEX_TABLESPACE,
                        str(index.create_sql(Book, editor)).lower()
                    )
                else:
                    self.assertNotIn('TABLESPACE', str(index.create_sql(Book, editor)))
        # Field with db_tablespace specified on the model and an index without
        # db_tablespace.
        index = models.Index(fields=['shortcut'])
        self.assertIn('"idx_tbls"', str(index.create_sql(Book, editor)).lower())
    @skipUnlessDBFeature('supports_tablespaces')
    def test_func_with_tablespace(self):
        # Functional index with db_tablespace attribute.
        index = models.Index(
            Lower('shortcut').desc(),
            name='functional_tbls',
            db_tablespace='idx_tbls2',
        )
        with connection.schema_editor() as editor:
            sql = str(index.create_sql(Book, editor))
            self.assertIn(editor.quote_name('idx_tbls2'), sql)
        # Functional index without db_tablespace attribute.
        index = models.Index(Lower('shortcut').desc(), name='functional_no_tbls')
        with connection.schema_editor() as editor:
            sql = str(index.create_sql(Book, editor))
        # The DEFAULT_INDEX_TABLESPACE setting can't be tested because it's
        # evaluated when the model class is defined. As a consequence,
        # @override_settings doesn't work.
        if settings.DEFAULT_INDEX_TABLESPACE:
            self.assertIn(
                editor.quote_name(settings.DEFAULT_INDEX_TABLESPACE),
                sql,
            )
        else:
            self.assertNotIn('TABLESPACE', sql)
|
wkschwartz/django
|
tests/model_indexes/tests.py
|
Python
|
bsd-3-clause
| 13,530 | 0.0017 |
# -*- coding: utf-8 -*-
#
# pynest_api_template.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""[[ This template demonstrates how to create a docstring for the PyNEST API.
If you have modified an API, please ensure you update the docstring!
The format is based on `NumPy style docstring
<https://numpydoc.readthedocs.io/en/latest/format.html>`_ and uses
reStructuredText markup. Please review the syntax rules if you are
unfamiliar with either reStructuredText or NumPy style docstrings.
Copy this file and replace the sample text with a description of the API.
The double bracketed sections [[ ]], which provide explanations, should be
completely removed from your final version - Including this entire
docstring!
]]
"""
def GetConnections(source=None, target=None, synapse_model=None, synapse_label=None):
    r"""Return a `SynapseCollection` representing the connection identifiers.

    [[ In a single 'summary line', state what the function does ]]
    [[ All functions should have a docstring with at least a summary line ]]
    [[ Below summary line (separated by new line), there should be an extended
    summary section that should be used to clarify functionality.]]

    Any combination of `source`, `target`, `synapse_model` and
    `synapse_label` parameters is permitted.

    [[ Deprecation warnings should appear directly after the extended summary.
    It should state in what version the object was deprecated, when it will
    be removed and what recommend way obtains the same functionality]]

    .. deprecated:: 1.6.0
        `ndobj_old` will be removed in NumPy 2.0.0, it is replaced by
        `ndobj_new` because the latter works also with array subclasses.

    [[ For all headings ensure the underline --- is at least the length of the
    heading ]]

    Parameters
    ----------
    source : NodeCollection, optional
        Source node IDs, only connections from these
        pre-synaptic neurons are returned
    target : NodeCollection, optional
        Target node IDs, only connections to these
        postsynaptic neurons are returned
    synapse_model : str, optional
        Only connections with this synapse type are returned
    synapse_label : int, optional
        (non-negative) only connections with this synapse label are returned

    Returns
    -------
    SynapseCollection:
        Object representing the source-node_id, target-node_id, target-thread, synapse-id, port of connections, see
        :py:class:`.SynapseCollection` for more.

    Raises
    -------
    TypeError

    Notes
    -------
    Details on the connectivity. [[ Here details regarding the code or further
    explanations can be included. This section may include mathematical
    equations, written in LaTeX format. You can include references to relevant
    papers using the reStructuredText syntax. Do not include model formulas ]]

    The discrete-time Fourier time-convolution [1]_ property states that

    .. math::

        x(n) * y(n) \Leftrightarrow X(e^{j\omega } )Y(e^{j\omega } )

    The value of :math:`\omega` is larger than 5.

    [[ The See Also section should include 2 or 3 related functions. ]]

    See Also
    ---------
    func_a : Function a with its description.
    func_b, func_c

    References
    -----------
    [[ Note the format of the reference. No bold nor italics is used. Last name
    of author(s) followed by year, title in sentence case and full name of
    journal followed by volume and page range. Include the doi if
    applicable.]]

    .. [1] Bonewald LF. (2011). The amazing osteocyte. Journal of Bone and
           Mineral Research 26(2):229–238. DOI: 10.1002/jbmr.320.
    """
    # [[ in line comments should be used to explain why this code is here]]
    # This code was included because of bug Y when running X
    # Temporary, I HOPE HOPE HOPE
    # NOTE(review): 'model', 'syn_spec' and 'kernel' are illustrative names
    # borrowed from the real PyNEST implementation; this template body is
    # sample code and is not meant to execute.
    if model is not None and syn_spec is not None:
        raise kernel.NESTerror(
            "'model' is an alias for 'syn_spec' and cannot"
            " be used together with 'syn_spec'.")
|
stinebuu/nest-simulator
|
doc/userdoc/contribute/templates_styleguides/pynest_api_template.py
|
Python
|
gpl-2.0
| 4,742 | 0.001266 |
# Copyright 2019 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vitrage.evaluator.template_functions import function_resolver
from vitrage.evaluator.template_functions import GET_PARAM
from vitrage.evaluator.template_functions.v2.functions import get_param
from vitrage.evaluator.template_validation.base import get_custom_fault_result
from vitrage.evaluator.template_validation.base import ValidationError
from vitrage.evaluator.template_validation.content.base import \
get_content_correct_result
class GetParamValidator(object):
    """Validates every get_param occurrence in a v2 template."""

    @classmethod
    def validate(cls, template, actual_params):
        """Check all get_param calls in *template* against *actual_params*.

        Returns a correct-content result on success, or a custom fault
        result carrying the validation error's code and details.
        """
        func_info = function_resolver.FuncInfo(
            name=GET_PARAM, func=get_param, error_code=0)
        try:
            function_resolver.validate_function(
                func_info,
                template,
                actual_params=actual_params)
        except ValidationError as error:
            return get_custom_fault_result(error.code, error.details)
        return get_content_correct_result()
|
openstack/vitrage
|
vitrage/evaluator/template_validation/content/v2/get_param_validator.py
|
Python
|
apache-2.0
| 1,505 | 0 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lldpparam(base_resource) :
	""" Configuration for lldp params resource. """
	def __init__(self) :
		# Backing fields for the Nitro-exposed properties; zero/empty
		# values indicate "not yet fetched or configured".
		self._holdtimetxmult = 0
		self._timer = 0
		self._mode = ""
	@property
	def holdtimetxmult(self) :
		"""A multiplier for calculating the duration for which the receiving device stores the LLDP information in its database before discarding or removing it. The duration is calculated as the holdtimeTxMult (Holdtime Multiplier) parameter value multiplied by the timer (Timer) parameter value.<br/>Default value: 4<br/>Minimum length = 1<br/>Maximum length = 20.
		"""
		try :
			return self._holdtimetxmult
		except Exception as e:
			raise e
	@holdtimetxmult.setter
	def holdtimetxmult(self, holdtimetxmult) :
		"""A multiplier for calculating the duration for which the receiving device stores the LLDP information in its database before discarding or removing it. The duration is calculated as the holdtimeTxMult (Holdtime Multiplier) parameter value multiplied by the timer (Timer) parameter value.<br/>Default value: 4<br/>Minimum length = 1<br/>Maximum length = 20
		"""
		try :
			self._holdtimetxmult = holdtimetxmult
		except Exception as e:
			raise e
	@property
	def timer(self) :
		"""Interval, in seconds, between LLDP packet data units (LLDPDUs). that the NetScaler ADC sends to a directly connected device.<br/>Default value: 30<br/>Minimum length = 1<br/>Maximum length = 3000.
		"""
		try :
			return self._timer
		except Exception as e:
			raise e
	@timer.setter
	def timer(self, timer) :
		"""Interval, in seconds, between LLDP packet data units (LLDPDUs). that the NetScaler ADC sends to a directly connected device.<br/>Default value: 30<br/>Minimum length = 1<br/>Maximum length = 3000
		"""
		try :
			self._timer = timer
		except Exception as e:
			raise e
	@property
	def mode(self) :
		"""Global mode of Link Layer Discovery Protocol (LLDP) on the NetScaler ADC. The resultant LLDP mode of an interface depends on the LLDP mode configured at the global and the interface levels.<br/>Possible values = NONE, TRANSMITTER, RECEIVER, TRANSCEIVER.
		"""
		try :
			return self._mode
		except Exception as e:
			raise e
	@mode.setter
	def mode(self, mode) :
		"""Global mode of Link Layer Discovery Protocol (LLDP) on the NetScaler ADC. The resultant LLDP mode of an interface depends on the LLDP mode configured at the global and the interface levels.<br/>Possible values = NONE, TRANSMITTER, RECEIVER, TRANSCEIVER
		"""
		try :
			self._mode = mode
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(lldpparam_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# NOTE(review): errorcode 444 appears to signal a stale
				# session, so the cached session is discarded before the
				# error is surfaced -- confirm against the Nitro API docs.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.lldpparam
		except Exception as e :
			raise e
	def _get_object_name(self) :
		""" Returns the value of object identifier argument
		"""
		# lldpparam is a global (singleton) resource, so there is no
		# per-object identifier to return.
		try :
			return None
		except Exception as e :
			raise e
	@classmethod
	def update(cls, client, resource) :
		""" Use this API to update lldpparam.
		"""
		# NOTE(review): only a single (non-list) resource is handled; a
		# list argument falls through and the method returns None.
		try :
			if type(resource) is not list :
				updateresource = lldpparam()
				updateresource.holdtimetxmult = resource.holdtimetxmult
				updateresource.timer = resource.timer
				updateresource.mode = resource.mode
				return updateresource.update_resource(client)
		except Exception as e :
			raise e
	@classmethod
	def unset(cls, client, resource, args) :
		""" Use this API to unset the properties of lldpparam resource.
		Properties that need to be unset are specified in args array.
		"""
		# NOTE(review): as in update(), a list argument is ignored and
		# the method returns None.
		try :
			if type(resource) is not list :
				unsetresource = lldpparam()
				return unsetresource.unset_resource(client, args)
		except Exception as e :
			raise e
	@classmethod
	def get(cls, client, name="", option_="") :
		""" Use this API to fetch all the lldpparam resources that are configured on netscaler.
		"""
		# NOTE(review): a non-empty ``name`` is ignored and the method
		# returns None -- the global resource has no name to look up.
		try :
			if not name :
				obj = lldpparam()
				response = obj.get_resources(client, option_)
			return response
		except Exception as e :
			raise e
	# Permitted values for the ``mode`` property.
	class Mode:
		NONE = "NONE"
		TRANSMITTER = "TRANSMITTER"
		RECEIVER = "RECEIVER"
		TRANSCEIVER = "TRANSCEIVER"
class lldpparam_response(base_response) :
	""" Nitro response wrapper for lldpparam requests.

	Carries the deserialized resource list plus the standard Nitro
	errorcode/message/severity/sessionid fields.
	"""
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate one lldpparam object per expected record.
		# (A redundant ``self.lldpparam = []`` dead store that was
		# immediately overwritten has been removed.)
		self.lldpparam = [lldpparam() for _ in range(length)]
|
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/config/lldp/lldpparam.py
|
Python
|
apache-2.0
| 5,812 | 0.031142 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from flask_script import Manager, Shell, Server
from flask_migrate import MigrateCommand
from gakkgakk.app import create_app
from gakkgakk.models import User
from gakkgakk.settings import DevConfig, ProdConfig
from gakkgakk.database import db
# Python 2 hack: restore sys.setdefaultencoding (hidden by site.py) so
# implicit str/unicode conversions default to UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
# NOTE(review): ProdConfig is always used here even though DevConfig is
# also imported -- confirm this is intentional for local development.
app = create_app(ProdConfig)
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
manager = Manager(app)
def _make_context():
    """Build the default namespace for ``manage.py shell`` sessions.

    Exposes the Flask app, the database handle and the User model so
    they are usable without manual imports.
    """
    return dict(app=app, db=db, User=User)
@manager.command
def test():
    """Run the test suite under pytest and return its exit status."""
    import pytest
    return pytest.main([TEST_PATH, '--verbose'])
# Register CLI commands: a threaded dev server on all interfaces, an
# interactive shell with a pre-populated context, and DB migrations.
manager.add_command('server', Server(host='0.0.0.0', threaded=True))
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
|
jkaberg/GakkGakk
|
manage.py
|
Python
|
mit
| 1,062 | 0 |
from fnmatch import fnmatchcase
from trac.config import Option
from trac.core import *
from trac.perm import IPermissionPolicy
# SVN keyword expansions identifying this plugin's revision and origin.
revision = "$Rev: 11490 $"
url = "$URL: https://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/permissions/public_wiki_policy.py $"
class PublicWikiPolicy(Component):
    """Allow public access to some wiki pages.

    This is a sample permission policy plugin illustrating how to check
    permission on realms.

    Don't forget to integrate that plugin in the appropriate place in the
    list of permission policies:
    {{{
    [trac]
    permission_policies = PublicWikiPolicy, DefaultPermissionPolicy
    }}}

    Then you can configure which pages you want to make public:
    {{{
    [public_wiki]
    view = Public*
    modify = PublicSandbox/*
    }}}

    """

    implements(IPermissionPolicy)

    # Glob patterns are read from trac.ini via the Option descriptors below.
    view = Option('public_wiki', 'view', 'Public*',
        """Case-sensitive glob pattern used for granting view permission on
        all Wiki pages matching it.""")

    modify = Option('public_wiki', 'modify', 'Public*',
        """Case-sensitive glob pattern used for granting modify permissions
        on all Wiki pages matching it.""")

    def check_permission(self, action, username, resource, perm):
        # NOTE(review): returning True grants, and falling off the end
        # (returning None) abstains so the next policy decides -- confirm
        # against the IPermissionPolicy interface documentation.
        if resource: # fine-grained permission check
            if resource.realm == 'wiki': # wiki realm or resource
                if resource.id: # ... it's a resource
                    if action == 'WIKI_VIEW': # (think 'VIEW' here)
                        pattern = self.view
                    else:
                        pattern = self.modify
                    if fnmatchcase(resource.id, pattern):
                        return True
                else: # ... it's a realm
                    return True
                    # this policy ''may'' grant permissions on some wiki pages
        else: # coarse-grained permission check
            #
            # support for the legacy permission checks: no resource specified
            # and realm information in the action name itself.
            #
            if action.startswith('WIKI_'):
                return True
                # this policy ''may'' grant permissions on some wiki pages
|
cghr/cghr-chef-repository
|
cookbooks/trac/files/default/plugins-stock/public_wiki_policy.py
|
Python
|
apache-2.0
| 2,244 | 0.004902 |
#!/usr/bin/env python
from settings import Settings
from scan import Scanner
from logger import Logger
def main():
	"""Load settings, wire up logging, and run the scanner until the
	user interrupts with Ctrl-C.
	"""
	scanner = None
	try:
		#Read config file
		settings = Settings()
		#Set up logger
		logger = Logger(settings)
		#Create scanner
		scanner = Scanner(settings, logger)
		#Begin scanning
		scanner.StartScanning()
	except KeyboardInterrupt:
		# Bug fix: Ctrl-C during start-up used to raise NameError here
		# because ``scanner`` did not exist yet; only stop the scanner
		# when it was actually created.
		if scanner is not None:
			scanner.StopScanning()
# Run the scanner only when executed as a script, not on import.
if __name__ == "__main__":
	main()
|
SteveAbb/Vestigo
|
Vestigo/vestigo.py
|
Python
|
mit
| 405 | 0.081481 |
"""
An estimator for modelling data from a mixture of Gaussians,
using an objective function based on minimum message length.
"""
__all__ = [
"GaussianMixture",
"kullback_leibler_for_multivariate_normals",
"responsibility_matrix",
"split_component", "merge_component", "delete_component",
]
import logging
import numpy as np
import scipy
import scipy.stats as stats
import scipy.optimize as op
from collections import defaultdict
from sklearn.cluster import k_means_ as kmeans
logger = logging.getLogger(__name__)
def _total_parameters(K, D, covariance_type):
r"""
Return the total number of model parameters :math:`Q`, if a full
covariance matrix structure is assumed.
.. math:
Q = \frac{K}{2}\left[D(D+3) + 2\right] - 1
:param K:
The number of Gaussian mixtures.
:param D:
The dimensionality of the data.
:returns:
The total number of model parameters, :math:`Q`.
"""
return (0.5 * D * (D + 3) * K) + (K - 1)
def _responsibility_matrix(y, mean, covariance, weight, covariance_type):
    r"""
    Return the responsibility matrix,

    .. math::

        r_{ij} = \frac{w_{j}f\left(x_i;\theta_j\right)}{\sum_{k=1}^{K}{w_k}f\left(x_i;\theta_k\right)}

    where :math:`r_{ij}` denotes the conditional probability of a datum
    :math:`x_i` belonging to the :math:`j`-th component. The effective
    membership associated with each component is then given by

    .. math::

        n_j = \sum_{i=1}^{N}r_{ij}
        \textrm{and}
        \sum_{j=1}^{M}n_{j} = N

    :param y:
        The data values, :math:`y`.

    :param mean:
        The mean values of the :math:`K` multivariate normal distributions.

    :param covariance:
        The covariance matrices of the :math:`K` multivariate normal
        distributions. The shape of this array will depend on the
        ``covariance_type``.

    :param weight:
        The current estimates of the relative mixing weight.

    :param covariance_type:
        The structure of the covariance matrices (e.g. ``full``, ``diag``).

    :returns:
        A two-length tuple of the responsibility matrix (one row per
        component) and the log likelihood evaluated per observation.
        (The previous docstring documented a ``full_output`` parameter
        that never existed in the signature.)
    """
    # Bug fix: scipy.misc.logsumexp was deprecated and removed in
    # SciPy >= 1.3; scipy.special.logsumexp is the supported location.
    from scipy.special import logsumexp

    precision = _compute_precision_cholesky(covariance, covariance_type)

    weighted_log_prob = np.log(weight) + \
        _estimate_log_gaussian_prob(y, mean, precision, covariance_type)

    # log p(x_i) = logsumexp_k [ log w_k + log f_k(x_i) ]
    log_likelihood = logsumexp(weighted_log_prob, axis=1)

    with np.errstate(under="ignore"):
        # Underflow is expected for components far from a datum;
        # np.exp maps those log responsibilities to zero.
        log_responsibility = weighted_log_prob - log_likelihood[:, np.newaxis]

    responsibility = np.exp(log_responsibility).T

    return (responsibility, log_likelihood)
class BaseGaussianMixture(object):
    r"""
    Model data from (potentially) many multivariate Gaussian distributions,
    using minimum message length (MML) as the objective function.

    :param covariance_type: [optional]
        The structure of the covariance matrix for individual components.
        The available options are: `full` for a free covariance matrix, or
        `diag` for a diagonal covariance matrix (default: ``full``).

    :param covariance_regularization: [optional]
        Regularization strength to add to the diagonal of covariance matrices
        (default: ``0``).

    :param threshold: [optional]
        The relative improvement in message length required before stopping an
        expectation-maximization step (default: ``1e-3``).

    :param max_em_iterations: [optional]
        The maximum number of iterations to run per expectation-maximization
        loop (default: ``10000``).
    """

    # Model parameters accepted by set_parameters() / reported by .parameters.
    parameter_names = ("mean", "covariance", "weight")

    def __init__(self, covariance_type="full", covariance_regularization=0,
        mixture_probability=1e-3, percent_scatter=1, predict_mixtures=3,
        threshold=1e-3, max_em_iterations=10000, **kwargs):

        available = ("full", "diag", )
        covariance_type = covariance_type.strip().lower()
        if covariance_type not in available:
            raise ValueError("covariance type '{}' is invalid. "\
                "Must be one of: {}".format(
                    covariance_type, ", ".join(available)))

        if 0 > covariance_regularization:
            raise ValueError(
                "covariance_regularization must be a non-negative float")

        if 0 >= threshold:
            raise ValueError("threshold must be a positive value")

        if 1 > max_em_iterations:
            raise ValueError("max_em_iterations must be a positive integer")

        self._threshold = threshold
        # mixture_probability / percent_scatter / predict_mixtures are
        # stored for use by subclasses; the base class does not read them.
        self._mixture_probability = mixture_probability
        self._percent_scatter = percent_scatter
        self._predict_mixtures = predict_mixtures
        self._max_em_iterations = max_em_iterations
        self._covariance_type = covariance_type
        self._covariance_regularization = covariance_regularization
        return None

    @property
    def mean(self):
        r""" Return the multivariate means of the Gaussian mixtures. """
        return self._mean

    @property
    def covariance(self):
        r""" Return the covariance matrices of the Gaussian mixtures. """
        return self._covariance

    @property
    def weight(self):
        r""" Return the relative weights of the Gaussian mixtures. """
        return self._weight

    @property
    def covariance_type(self):
        r""" Return the type of covariance stucture assumed. """
        return self._covariance_type

    @property
    def covariance_regularization(self):
        r"""
        Return the regularization applied to diagonals of covariance matrices.
        """
        return self._covariance_regularization

    @property
    def threshold(self):
        r""" Return the threshold improvement required in message length. """
        return self._threshold

    @property
    def max_em_iterations(self):
        r""" Return the maximum number of expectation-maximization steps. """
        return self._max_em_iterations

    def _expectation(self, y, **kwargs):
        r"""
        Perform the expectation step of the expectation-maximization algorithm.

        :param y:
            The data values, :math:`y`.

        :returns:
            A three-length tuple containing the responsibility matrix,
            the per-observation log likelihood, and the message length of
            the current mixture.
        """
        responsibility, log_likelihood = _responsibility_matrix(
            y, self.mean, self.covariance, self.weight, self.covariance_type)

        ll = np.sum(log_likelihood)

        I = _message_length(y, self.mean, self.covariance, self.weight,
            responsibility, -ll, self.covariance_type,
            **kwargs)

        return (responsibility, log_likelihood, I)

    def _maximization(self, y, responsibility, parent_responsibility=1,
        **kwargs):
        r"""
        Perform the maximization step of the expectation-maximization
        algorithm.

        :param y:
            The data values, :math:`y`.

        :param responsibility:
            The responsibility matrix for all :math:`N` observations being
            partially assigned to each :math:`K` component.

        :param parent_responsibility: [optional]
            Responsibility of a parent component, used to weight the
            membership when maximizing a sub-mixture (default: ``1``).
        """

        K = self.weight.size
        N, D = y.shape

        # MML-style weight update: (N_k + 1/2) / (N + K/2).
        effective_membership = np.sum(responsibility, axis=1)
        weight = (effective_membership + 0.5)/(N + K/2.0)

        w_responsibility = parent_responsibility * responsibility
        w_effective_membership = np.sum(w_responsibility, axis=1)

        mean = np.empty(self.mean.shape)
        for k, (R, Nk) in enumerate(zip(w_responsibility, w_effective_membership)):
            mean[k] = np.sum(R * y.T, axis=1) / Nk

        # TODO: Use parent responsibility when initializing?
        covariance = _estimate_covariance_matrix(y, responsibility, mean,
            self.covariance_type, self.covariance_regularization)

        # TODO: callback?
        return self.set_parameters(
            weight=weight, mean=mean, covariance=covariance)

    def _expectation_maximization(self, y, responsibility=None, **kwargs):
        r"""
        Run the expectation-maximization algorithm on the current set of
        multivariate Gaussian mixtures.

        :param y:
            A :math:`N\times{}D` array of the observations :math:`y`,
            where :math:`N` is the number of observations, and :math:`D` is the
            number of dimensions per observation.

        :param responsibility: [optional]
            The responsibility matrix for all :math:`N` observations being
            partially assigned to each :math:`K` component. If ``None`` is given
            then the responsibility matrix will be calculated in the first
            expectation step.

        :returns:
            A two-length tuple of the final responsibility matrix, and a
            metadata dict carrying ``log_likelihood``, ``message_length``
            and a ``warnflag`` set when the iteration limit was hit.
        """

        # Calculate log-likelihood and initial expectation step.
        __init_responsibility, ll, dl = self._expectation(y, **kwargs)
        if responsibility is None:
            responsibility = __init_responsibility

        ll_dl = [(ll.sum(), dl)]
        meta = dict(warnflag=False)

        for iteration in range(self.max_em_iterations):

            # M-step.
            self._maximization(y, responsibility, **kwargs)

            # E-step.
            responsibility, ll, dl = self._expectation(y, **kwargs)

            # Converged when the relative log-likelihood change is small.
            lls = ll.sum()
            prev_ll, prev_dl = ll_dl[-1]
            change = (lls - prev_ll)/prev_ll
            ll_dl.append([lls, dl])

            #print("E-M", iteration, change, self.threshold)

            if abs(change) <= self.threshold:
                break

        else:
            meta.update(warnflag=True)
            # logger.warn is a deprecated alias; use logger.warning.
            logger.warning("Maximum number of E-M iterations reached ({})"\
                .format(self.max_em_iterations))

        meta.update(log_likelihood=lls, message_length=dl)

        return (responsibility, meta)

    @property
    def parameters(self):
        r""" Return a dict of the current model parameters (or ``None``). """
        return dict([(k, getattr(self, k, None)) for k in self.parameter_names])

    def set_parameters(self, **kwargs):
        r"""
        Set specific model parameters.

        :param kwargs:
            Any of ``mean``, ``covariance``, or ``weight``.

        :raises ValueError:
            If a keyword is not a recognised parameter name.
        """
        # Bug fix: reject keywords that are NOT valid parameter names.
        # The previous check was inverted (it flagged *missing* parameter
        # names as "unknown"), which contradicted its own error message
        # and made it impossible to set a subset of parameters.
        invalid_params = set(kwargs).difference(self.parameter_names)
        if invalid_params:
            raise ValueError(
                "unknown parameters: {}".format(", ".join(invalid_params)))

        for parameter_name, value in kwargs.items():
            setattr(self, "_{}".format(parameter_name), value)

        return kwargs
class GaussianMixture(BaseGaussianMixture):
def __init__(self, **kwargs):
super(GaussianMixture, self).__init__(**kwargs)
# For predictions.
self._proposed_mixtures = []
# Store the following summary pieces of information about mixtures.
# (1) Sum of the log of the determinant of the covariance matrices.
# (2) The sum of the log-likelihood.
# (3) The sum of the log of the weights.
# Do we want this from each E-M step, or all steps?
# (K, sum_log_weights, sum_log_likelihood, sum_log_det_covariances)
self._mixture_predictors = []
def _optimize_split_mixture(self, y, responsibility, component_index):
r"""
Split a component from the current mixture, and run partial
expectation-maximization algorithm on the split component.
"""
U, S, V = _svd(self.covariance[component_index], self.covariance_type)
split_mean = self.mean[component_index] \
+ np.vstack([+V[0], -V[0]]) * S[0]**0.5
# Responsibilities are initialized by allocating the data points to
# the closest of the two means.
distance = np.sum((y[:, :, None] - split_mean.T)**2, axis=1).T
N, D = y.shape
split_responsibility = np.zeros((2, N))
split_responsibility[np.argmin(distance, axis=0), np.arange(N)] = 1.0
# Calculate the child covariance matrices.
split_covariance = _estimate_covariance_matrix(
y, split_responsibility, split_mean,
self.covariance_type, self.covariance_regularization)
split_effective_membership = np.sum(split_responsibility, axis=1)
split_weight = split_effective_membership.T \
/ np.sum(split_effective_membership)
# Integrate the split components with the existing mixture.
parent_weight = self.weight[component_index]
parent_responsibility = responsibility[component_index]
mixture = self.__class__(
threshold=self.threshold,
covariance_type=self.covariance_type,
max_em_iterations=self.max_em_iterations,
covariance_regularization=self.covariance_regularization)
# Initialize it.
mixture.set_parameters(mean=split_mean, weight=split_weight,
covariance=split_covariance)
# Run E-M on the partial mixture.
R, meta = mixture._expectation_maximization(
y, parent_responsibility=responsibility[component_index])
if self.weight.size > 1:
# Integrate the partial mixture with the full mixture.
weight = np.hstack([self.weight,
[parent_weight * mixture.weight[1]]])
weight[component_index] = parent_weight * mixture.weight[0]
mean = np.vstack([self.mean, [mixture.mean[1]]])
mean[component_index] = mixture.mean[0]
covariance = np.vstack([self.covariance, [mixture.covariance[1]]])
covariance[component_index] = mixture.covariance[0]
responsibility = np.vstack([responsibility,
[parent_responsibility * R[1]]])
responsibility[component_index] = parent_responsibility * R[0]
mixture.set_parameters(
mean=mean, covariance=covariance, weight=weight)
R, meta = mixture._expectation_maximization(
y, responsibility=responsibility)
# Store the mixture.
slogdet = np.sum(_slogdet(mixture.covariance, mixture.covariance_type))
self._proposed_mixtures.append(mixture)
self._mixture_predictors.append([
mixture.weight.size,
np.sum(np.log(mixture.weight)),
meta["log_likelihood"],
slogdet,
-meta["log_likelihood"] + (D+2)/2.0 * slogdet
])
# TODO: Remove predictors that we don't use.
#self._slogs.append(np.linalg.det(mixture.covariance))
return (len(self._proposed_mixtures) - 1, R, meta)
# Run
kwds = dict(
threshold=self._threshold,
max_em_iterations=self._max_em_iterations,
covariance_type=self._covariance_type,
covariance_regularization=self._covariance_regularization)
# Run E-M on the split mixture, keeping all else fixed.
#(dict(mean=mu, covariance=cov, weight=weight), responsibility, meta, dl)
params, R, meta, dl = _expectation_maximization(y, split_mean, split_covariance,
split_weight, responsibility=split_responsibility,
parent_responsibility=parent_responsibility,
**kwds)
if self.weight.size > 1:
# Integrate the child mixtures back.
weight = np.hstack([self.weight, [parent_weight * params["weight"][1]]])
weight[component_index] = parent_weight * params["weight"][0]
mean = np.vstack([self.mean, [params["mean"][1]]])
mean[component_index] = params["mean"][0]
covariance = np.vstack([self.covariance, [params["covariance"][1]]])
covariance[component_index] = params["covariance"][0]
responsibility = np.vstack([responsibility,
[parent_responsibility * R[1]]])
responsibility[component_index] \
= parent_responsibility * R[0]
return _expectation_maximization(y, mean, covariance, weight,
responsibility=responsibility, **kwds)
else:
return (params, R, meta, dl)
def _initialize_parameters(self, y, **kwargs):
r"""
Return initial estimates of the parameters.
:param y:
The data values, :math:`y`.
# TODO COMMON DOCS
"""
# If you *really* know what you're doing, then you can give your own.
if kwargs.get("__initialize", None) is not None:
logger.warn("Using specified initialization point.")
return self.set_parameters(**kwargs.pop("__initialize"))
weight = np.ones(1)
mean = np.mean(y, axis=0).reshape((1, -1))
N, D = y.shape
covariance = _estimate_covariance_matrix(y, np.ones((1, N)), mean,
self.covariance_type, self.covariance_regularization)
# Set parameters.
return self.set_parameters(
weight=weight, mean=mean, covariance=covariance)
def _predict_message_length_change(self, K, N, lower_bound_sigma=5):
r"""
Predict the minimum message length of a target mixture of K Gaussian
distributions, where K is an integer larger than the current mixture.
:param K:
The target number of Gaussian distributions. This must be an
integer value larger than the current number of Gaussian mixtures.
:returns:
A pdf of some description. #TODO #YOLO
"""
current_K, D = self.mean.shape
#K = current_K + 1 if K is None else int(K)
K = np.atleast_1d(K)
if np.any(current_K >= K):
raise ValueError(
"the target K mixture must contain more Gaussians than the "\
"current mixture ({} > {})".format(K, current_K))
predictors = np.array(self._mixture_predictors)
kwds = dict(target_K=K, predictors=predictors)
dK = K - current_K
slw_expectation, slw_variance, slw_upper \
= self._approximate_sum_log_weights(**kwds)
# Now approximate the sum of the negative log-likelihood, minus the
# sum of the log of the determinant of the covariance matrices.
nll_mslogdetcov_expectation, nll_mslogdetcov_variance \
= self._approximate_nllpslogdetcov(**kwds)
# Calculate the change in message length.
current_ll = np.max(predictors.T[2][predictors.T[0] == current_K])
slogdet = _slogdet(self.covariance, self.covariance_type)
dI_expectation = dK * (
(1 - D/2.0)*np.log(2) + 0.25 * (D*(D+3) + 2)*np.log(N/(2*np.pi))) \
+ 0.5 * (D*(D+3)/2.0 - 1) * (slw_expectation - np.sum(np.log(self.weight))) \
- np.array([np.sum(np.log(current_K + np.arange(_))) for _ in dK])\
+ 0.5 * np.log(_total_parameters(K, D, self.covariance_type)/float(_total_parameters(current_K, D, self.covariance_type))) \
- (D + 2)/2.0 * (np.sum(slogdet)) \
+ current_ll + nll_mslogdetcov_expectation
dI_scatter = nll_mslogdetcov_variance**0.5
dI_lower_bound = dK * (
(1 - D/2.0)*np.log(2) + 0.25 * (D*(D+3) + 2)*np.log(N/(2*np.pi))) \
+ 0.5 * (D*(D+3)/2.0 - 1) * (slw_upper - np.sum(np.log(self.weight))) \
- np.array([np.sum(np.log(current_K + np.arange(_))) for _ in dK])\
+ 0.5 * np.log(_total_parameters(K, D, self.covariance_type)/float(_total_parameters(current_K, D, self.covariance_type))) \
- (D + 2)/2.0 * (np.sum(slogdet)) \
+ current_ll + nll_mslogdetcov_expectation \
- lower_bound_sigma * dI_scatter
result = (dI_expectation, dI_scatter, dI_lower_bound)
return result if np.array(dK).size > 1 \
else tuple([_[0] for _ in result])
def _approximate_sum_log_weights(self, target_K, predictors=None):
r"""
Return an approximate expectation of the function:
.. math:
\sum_{k=1}^{K}\log{w_k}
Where :math:`K` is the number of mixtures, and :math:`w` is a multinomial
distribution. The approximating function is:
.. math:
\sum_{k=1}^{K}\log{w_k} \approx -K\log{K}
:param target_K:
The number of target Gaussian mixtures.
"""
if predictors is None:
predictors = np.array(self._mixture_predictors)
k, slw = (predictors.T[0], predictors.T[1])
# Upper bound.
upper_bound = lambda k, c=0: -k * np.log(k) + c
#upper = -target_K * np.log(target_K)
# Some expectation value.
if 2 > len(k):
# Don't provide an expectation value.
expectation = upper_bound(target_K)
variance \
= np.abs(upper_bound(target_K**2) - upper_bound(target_K)**2)
else:
lower_values = [[k[0], slw[0]]]
for k_, slw_ in zip(k[1:], slw[1:]):
if k_ == lower_values[-1][0] and slw_ < lower_values[-1][1]:
lower_values[-1][1] = slw_
elif k_ > lower_values[-1][0]:
lower_values.append([k_, slw_])
lower_values = np.array(lower_values)
function = lambda x, *p: -x * p[0] * np.log(x) + p[1]
# Expectation, from the best that can be done.
exp_params, exp_cov = op.curve_fit(
function, lower_values.T[0], lower_values.T[1], p0=[1, 0])
expectation = function(target_K, *exp_params)
#exp_params, exp_cov = op.curve_fit(function, k, slw, p0=[1, 0])
#expectation = function(target_K, *exp_params)
variance = 0.0
return (expectation, variance, upper_bound(target_K))
def _approximate_nllpslogdetcov(self, target_K, predictors=None,
draws=100):
r"""
Approximate the function:
.. math:
-\sum_{n=1}^{N}\log\sum_{k=1}^{K+\Delta{}K}w_{k}f_{k}(y_{n}|\mu_k,C_k) + \frac{(D + 2)}{2}\sum_{k=1}^{(K + \Delta{}K)}\log{|C_k|^{(K+\Delta{}K)}}
"""
if predictors is None:
predictors = np.array(self._mixture_predictors)
k, y = (predictors.T[0], predictors.T[-1])
k = np.unique(predictors.T[0])
y = np.empty(k.shape)
yerr = np.empty(k.shape)
for i, k_ in enumerate(k):
match = (predictors.T[0] == k_)
values = np.log(predictors.T[-1][match])
y[i] = np.median(values)
yerr[i] = np.std(values)
# The zero-th entry of yerr occurs when K = 2, and we only have one
# estimate of y, so the std is zero.
#yerr[0] = yerr[1]
yerr[yerr==0] = np.max(yerr)
f = lambda x, *p: np.polyval(p, x)
p0 = np.zeros(2)
#p0 = np.array([-1, y[0]])
#f = lambda x, *p: np.polyval(p, 1.0/x)
#p0 = np.hstack([1, np.zeros(min(k.size - 2, 3))])
op_params, op_cov = op.curve_fit(f, k, y,
p0=p0, sigma=yerr, absolute_sigma=True)
"""
if target_K[0] >= 16:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.scatter(k, y)
ax.fill_between(k, y - yerr, y + yerr, alpha=0.5, zorder=-1)
op_params, op_cov = op.curve_fit(
f, k, y, p0=p0, sigma=yerr, absolute_sigma=True)
ax.plot(k, f(k, *op_params))
for i in range(4, k.size + 1):
op_params, op_cov = op.curve_fit(
f, k[:i], y[:i], p0=p0, sigma=yerr[:i], absolute_sigma=True)
ax.plot(k[i:] + 1, [f(_, *op_params) for _ in k[i:] + 1], c="g")
v = np.array([f(_, *op_params) for _ in k[i:] + 1])
stds = np.array([np.std(f(_, *(np.random.multivariate_normal(op_params, op_cov, size=100).T))) for _ in k[i:] + 1])
assert np.all(np.isfinite(stds))
ax.fill_between(k[i:] + 1, v - stds, v + stds, facecolor="g",
alpha=0.5)
#log_y = np.empty(k.shape)
#log_yerr = np.empty(k.shape)
#for i, k_ in enumerate(k):
# match = (predictors.T[0] == k_)
# values = np.log(predictors.T[-1][match])
# y[i] = np.median(values)
# yerr[i] = np.std(values)
#fig, ax = plt.subplots()
#ax.scatter(k, y)
#ax.scatter(k, y + yerr, facecolor="g")
raise a
"""
exp_f = lambda x, *p: np.exp(f(x, *p))
target_K = np.atleast_1d(target_K)
expectation = np.array([exp_f(tk, *op_params) for tk in target_K])
if not np.all(np.isfinite(op_cov)):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
x = k
ax.scatter(x, y)
ax.scatter(x, y + yerr, facecolor="g")
ax.plot(x, f(x, *op_params), c='r')
fig, ax = plt.subplots()
ax.scatter(x, np.exp(y))
ax.scatter(x, np.exp(y + yerr), facecolor='g')
ax.plot(target_K, expectation, c='r')
ax.plot(x, exp_f(x, *op_params), c='m')
variance = np.array([np.var(exp_f(tk,
*(np.random.multivariate_normal(op_params, op_cov, size=draws).T)))
for tk in target_K])
ax.fill_between(target_K, expectation - variance**0.5, expectation + variance**0.5, facecolor='r', alpha=0.5)
raise a
variance = np.array([np.var(exp_f(tk,
*(np.random.multivariate_normal(op_params, op_cov, size=draws).T)))
for tk in target_K])
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
x = k
ax.scatter(x, np.exp(y))
ax.scatter(x, np.exp(y + yerr), facecolor='g')
ax.plot(target_K, expectation, c='r')
ax.fill_between(target_K, expectation - variance**0.5, expectation + variance**0.5, facecolor='r', alpha=0.5)
raise a
"""
return (expectation, variance)
def _ftl_jump(self, y, K, **kwargs):
r"""
Jump to a totally new mixture of K number of gaussians.
"""
logger.debug("Re-initializing with K-means++ at K = {}".format(K))
# Initialize new centroids by k-means++
mixtures = []
mls = []
for z in range(30):
mean = kmeans._k_init(y, K, kmeans.row_norms(y, squared=True),
kmeans.check_random_state(None))
# Calculate weights by L2 distances to closest centers.
distance = np.sum((y[:, :, None] - mean.T)**2, axis=1).T
N, D = y.shape
responsibility = np.zeros((K, N))
responsibility[np.argmin(distance, axis=0), np.arange(N)] = 1.0
weight = responsibility.sum(axis=1)/N
covariance = _estimate_covariance_matrix(y, responsibility, mean,
self.covariance_type, self.covariance_regularization)
mixture = self.__class__(
threshold=self.threshold,
covariance_type=self.covariance_type,
max_em_iterations=self.max_em_iterations,
covariance_regularization=self.covariance_regularization)
# Initialize it.
mixture.set_parameters(mean=mean, weight=weight, covariance=covariance)
# Run E-M on the partial mixture.
R, meta = mixture._expectation_maximization(
y, parent_responsibility=responsibility)
raise UnsureError
mixtures.append(mixture)
mls.append(meta["message_length"])
print(np.std(mls))
index = np.argmin(mls)
mixture = mixtures[index]
#slogdet = np.sum(np.log(np.linalg.det(mixture.covariance)))
slogdet = np.sum(_slogdet(mixture.covariance, mixture.covariance_type))
self._proposed_mixtures.append(mixture)
self._mixture_predictors.append([
mixture.weight.size,
np.sum(np.log(mixture.weight)),
meta["log_likelihood"],
slogdet,
-meta["log_likelihood"] + (D+2)/2.0 * slogdet
])
# TODO: Remove predictors that we don't use.
#self._slogs.append(np.linalg.det(mixture.covariance))
return mixture, R, meta #(len(self._proposed_mixtures) - 1, R, meta)
raise a
#self.set_parameters(
# weight=weight, mean=mean, covariance=covariance)
#return responsibility
def _merge_component_with_closest_component(self, y, responsibility, index, **kwargs):
R, meta, mixture = _merge_components(y, self.mean, self.covariance, self.weight,
responsibility, index, index_b, **kwargs)
return mixture, R, meta
def _component_kl_distances(self):
r"""
Calculate the K-L distances for all current components.
"""
K = self.weight.size
if K == 1: return ([])
kl = np.inf * np.ones((K, K))
for i in range(K):
for j in range(i + 1, K):
kl[i, j] = kullback_leibler_for_multivariate_normals(
self.mean[i], self.covariance[i],
self.mean[j], self.covariance[j])
kl[j, i] = kullback_leibler_for_multivariate_normals(
self.mean[j], self.covariance[j],
self.mean[i], self.covariance[i])
# Best for each *from*.
indices = list(zip(*(np.arange(K), np.argsort(kl, axis=1).T[0])))
_ = np.array(indices).T
sorted_indices = np.argsort(kl[_[0], _[1]])
return tuple([indices[_] for _ in sorted_indices if indices[_][0] != indices[_][1]])
return foo
def _optimize_merge_mixture(self, y, responsibility, a_index):
b_index = _index_of_most_similar_component(y,
self.mean, self.covariance, a_index)
# Initialize.
weight_k = np.sum(self.weight[[a_index, b_index]])
responsibility_k = np.sum(responsibility[[a_index, b_index]], axis=0)
effective_membership_k = np.sum(responsibility_k)
mean_k = np.sum(responsibility_k * y.T, axis=1) / effective_membership_k
covariance_k = _estimate_covariance_matrix(
y, np.atleast_2d(responsibility_k), np.atleast_2d(mean_k),
self.covariance_type, self.covariance_regularization)
# Delete the b-th component.
del_index = np.max([a_index, b_index])
keep_index = np.min([a_index, b_index])
new_mean = np.delete(self.mean, del_index, axis=0)
new_covariance = np.delete(self.covariance, del_index, axis=0)
new_weight = np.delete(self.weight, del_index, axis=0)
new_responsibility = np.delete(responsibility, del_index, axis=0)
new_mean[keep_index] = mean_k
new_covariance[keep_index] = covariance_k
new_weight[keep_index] = weight_k
new_responsibility[keep_index] = responsibility_k
mixture = self.__class__(
threshold=1e-3, # MAGICself.threshold,
covariance_type=self.covariance_type,
max_em_iterations=self.max_em_iterations,
covariance_regularization=self.covariance_regularization)
mixture.set_parameters(mean=new_mean, weight=new_weight,
covariance=new_covariance)
R, meta = mixture._expectation_maximization(
y, responsibility=new_responsibility)
#R, ll, I = mixture._expectation(y)
#meta = {"log_likelihood": ll.sum(), "message_length": I}
N, D = y.shape
# Store the mixture.
#slogdet = np.sum(np.log(np.linalg.det(mixture.covariance)))
slogdet = np.sum(_slogdet(mixture.covariance, mixture.covariance_type))
self._proposed_mixtures.append(mixture)
self._mixture_predictors.append([
mixture.weight.size,
np.sum(np.log(mixture.weight)),
meta["log_likelihood"],
slogdet,
-meta["log_likelihood"] + (D+2)/2.0 * slogdet
])
# TODO: Remove predictors that we don't use.
#self._slogs.append(np.linalg.det(mixture.covariance))
return (len(self._proposed_mixtures) - 1, R, meta)
raise a
    def _consider_merging_components(self, y, responsibility, current_I):
        r"""
        Cheaply scan candidate component merges (ordered by K-L similarity)
        and return the first merged mixture whose message length improves on
        ``current_I`` after E-M refinement, or ``None`` if no candidate does.

        :param y:
            The (N, D) array of observations.
        :param responsibility:
            The current (K, N) responsibility matrix.
        :param current_I:
            The message length of the current mixture.
        """
        for i, j in self._component_kl_distances():
            # Initialize the merge: the merged component takes the summed
            # weight and responsibility of its two parents.
            weight_k = np.sum(self.weight[[i, j]])
            responsibility_k = np.sum(responsibility[[i, j]], axis=0)
            effective_membership_k = np.sum(responsibility_k)
            mean_k = np.sum(responsibility_k * y.T, axis=1) / effective_membership_k
            covariance_k = _estimate_covariance_matrix(
                y, np.atleast_2d(responsibility_k), np.atleast_2d(mean_k),
                self.covariance_type, self.covariance_regularization)
            # Drop the higher-indexed parent; overwrite the lower-indexed one
            # with the merged parameters.
            del_index = np.max([i, j])
            keep_index = np.min([i, j])
            new_mean = np.delete(self.mean, del_index, axis=0)
            new_covariance = np.delete(self.covariance, del_index, axis=0)
            new_weight = np.delete(self.weight, del_index, axis=0)
            new_responsibility = np.delete(responsibility, del_index, axis=0)
            new_mean[keep_index] = mean_k
            new_covariance[keep_index] = covariance_k
            new_weight[keep_index] = weight_k
            new_responsibility[keep_index] = responsibility_k
            mixture = self.__class__(
                threshold=1e-3, # MAGICself.threshold,
                covariance_type=self.covariance_type,
                max_em_iterations=self.max_em_iterations,
                covariance_regularization=self.covariance_regularization)
            mixture.set_parameters(mean=new_mean, weight=new_weight,
                covariance=new_covariance)
            # Calculate message length of the candidate (single E-step only).
            R, ll, I = mixture._expectation(y)
            logger.debug("Considered merging {} {} --> {}".format(i, j, I))
            if I < current_I:
                logger.debug("omg this is better! ({} < {})".format(
                    I, current_I))
                # Run E-M on this.
                R, meta = mixture._expectation_maximization(
                    y, responsibility=R)
                N, D = y.shape
                # Store the mixture.
                #slogdet = np.sum(np.log(np.linalg.det(mixture.covariance)))
                slogdet = np.sum(_slogdet(mixture.covariance, mixture.covariance_type))
                self._proposed_mixtures.append(mixture)
                # Predictors used by _predict_message_length_change.
                self._mixture_predictors.append([
                    mixture.weight.size,
                    np.sum(np.log(mixture.weight)),
                    meta["log_likelihood"],
                    slogdet,
                    -meta["log_likelihood"] + (D+2)/2.0 * slogdet
                ])
                return mixture
                # (len(self._proposed_mixtures) - 1, R, meta)
        # NOTE(review): in the original (indentation-stripped) source this
        # `else` could also bind to the `if` above, which would return after
        # the first non-improving candidate.  The for-else reading below is
        # the one that makes the loop meaningful -- confirm against upstream.
        else:
            logger.debug(
                "Considered merging, but nothing immediately looked great")
            return None
def lognormal_search(self, y, **kwargs):
N, D = y.shape
dist = scipy.stats.lognorm(1, loc=0, scale=(N/2.0)**0.5)
initial_draws = 10
initial_k = []
while initial_draws > len(initial_k):
K = int(np.round(dist.rvs()))
if K not in initial_k:
initial_k.append(K)
# Instead should we just keep drawing until we have a good prediction?
# TODO
# Propose some mixtures.
row_norms = kmeans.row_norms(y, squared=True)
initial_k = np.repeat(np.arange(1, 21), 10)
initial_k = np.arange(1, 200)
print("INITIALS", initial_k)
mixtures = []
mls = []
for K in initial_k:
logger.debug("Trialling k = {}".format(K))
mean = kmeans._k_init(y, K, row_norms,
kmeans.check_random_state(None))
# Calculate weights by L2 distances to closest centers.
distance = np.sum((y[:, :, None] - mean.T)**2, axis=1).T
N, D = y.shape
responsibility = np.zeros((K, N))
responsibility[np.argmin(distance, axis=0), np.arange(N)] = 1.0
weight = responsibility.sum(axis=1)/N
try:
covariance = _estimate_covariance_matrix(y, responsibility, mean,
self.covariance_type, self.covariance_regularization)
mixture = self.__class__(
threshold=self.threshold,
covariance_type=self.covariance_type,
max_em_iterations=self.max_em_iterations,
covariance_regularization=self.covariance_regularization)
# Initialize it.
mixture.set_parameters(mean=mean, weight=weight, covariance=covariance)
# Run E-M on the partial mixture.
R, meta = mixture._expectation_maximization(y)
except:
print("FAILED")
mls.append(np.nan)
mixtures.append(None)
else:
# y, responsibility=responsibility)
# Record some messag elength
mls.append(meta["message_length"])
mixtures.append(mixture)
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.scatter(initial_k, mls)
raise a
def search(self, y, **kwargs):
r"""
Simultaneously perform model selection and parameter estimation for an
unknown number of multivariate Gaussian distributions.
:param y:
A :math:`N\times{}D` array of the observations :math:`y`,
where :math:`N` is the number of observations, and :math:`D` is
the number of dimensions per observation.
"""
# Initialize.
# --> Start on "splitting_mode"
# --> If we hyperjump, then try merging mode.
N, D = y.shape
# Initialize the mixture.
self._initialize_parameters(y, **kwargs)
R, ll, I = self._expectation(y, **kwargs)
converged, just_jumped = (False, False)
while not converged:
while True:
K = self.weight.size
logger.debug("State: {} {}".format(K, I))
# Do a very fast scan of component merging.
mixture = self._consider_merging_components(y, R, I)
if mixture is not None:
logger.debug("ACCEPTED A FAST MERGE")
self.set_parameters(**mixture.parameters)
R, ll, I = self._expectation(y, **kwargs)
break
if just_jumped or K > 1:
# Try to merge components.
best_merge = []
for k in range(K):
try:
idx, _, meta = self._optimize_merge_mixture(y, R, k)
except:
continue
logger.debug("Merging: {} {} {}".format(K, k, meta))
if k == 0 \
or best_merge[-1] > meta["message_length"]:
best_merge = [idx, meta["message_length"]]
# TODO: Run E-M each time?
if best_merge[-1] < I:
idx, I = best_merge
mixture = self._proposed_mixtures[idx]
self.set_parameters(**mixture.parameters)
R, ll, I = self._expectation(y, **kwargs)
# TODO: Consider hyperjump?
continue
else:
just_jumped = False
else:
# Split all components.
best_split = []
for k in range(K):
idx, _, meta = self._optimize_split_mixture(y, R, k)
logger.debug("Splitting: {} {} {}".format(K, k, meta))
if k == 0 \
or best_split[-1] > meta["message_length"]:
best_split = [idx, meta["message_length"]]
if best_split[-1] < I:
idx, I = best_split
mixture = self._proposed_mixtures[idx]
self.set_parameters(**mixture.parameters)
R, ll, I = self._expectation(y, **kwargs)
else:
converged = True
break
# Consider hyperjump.
if self.weight.size > 2:
K = self.weight.size
K_dK = K + np.arange(1, self._predict_mixtures)
dI, pI_scatter, dI_lower \
= self._predict_message_length_change(K_dK, N)
pI = I + dI
logger.debug("Actual: {}".format(I))
logger.debug("Prediction for next mixture: {}".format(I + dI[0]))
logger.debug("Predicted lower bound for next mixture: {}".format(I + dI_lower[0]))
logger.debug("Predicted delta for next mixture: {} {}".format(dI[0], pI_scatter[0]))
logger.debug("K = {}".format(self.weight.size))
ommp = 1 - self._mixture_probability
acceptable_jump \
= (abs(100 * pI_scatter/pI) < self._percent_scatter) \
* (stats.norm(dI, pI_scatter).cdf(0) > self._mixture_probability)
if any(acceptable_jump):
K_jump = K_dK[np.where(acceptable_jump)[0]]
# If the jumps are noisy, be conservative.
idx = np.where(np.diff(K_jump) > 1)[0]
idx = idx[0] if len(idx) > 0 else -1
K_jump = K_jump[idx]
if K_jump - K > 1:
logger.debug(
"We should JUMP to K = {}!".format(K_jump))
mixture, _, meta = self._ftl_jump(y, K_jump)
logger.debug("New meta: {}".format(meta))
if meta["message_length"] < I:
# Set the current mixture.
self.set_parameters(**mixture.parameters)
R, ll, I = self._expectation(y, **kwargs)
just_jumped = True
else:
#This is a bad jump, so don't accept it.
None
# I think we are converged.
elif best_split[-1] > I:
converged = True
break
import matplotlib.pyplot as plt
logger.debug("Ended at K = {}".format(self.weight.size))
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
fig, ax = plt.subplots()
ax.scatter(y.T[0], y.T[1], facecolor="#666666", alpha=0.5)
K = self.weight.size
for k in range(K):
mean = self.mean[k][:2]
cov = self.covariance[k]
vals, vecs = np.linalg.eigh(cov[:2, :2])
order = vals.argsort()[::-1]
vals = vals[order]
vecs = vecs[:,order]
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
width, height = 2 * 1 * np.sqrt(vals)
ellip = Ellipse(xy=mean, width=width, height=height, angle=theta,
facecolor="r", alpha=0.5)
ax.add_artist(ellip)
ax.scatter([mean[0]], [mean[1]], facecolor="r")
fig, ax = plt.subplots()
K = self.weight.size
K_dK = K + np.arange(1, self._predict_mixtures)
dI, pI_scatter, dI_lower \
= self._predict_message_length_change(K_dK, N)
pI = I + dI
ax.scatter(K_dK, pI)
ax.scatter(K_dK, pI + dI_lower, facecolor="r")
raise a
fig, axes = plt.subplots(2)
axes[0].scatter(y.T[0], y.T[1])
axes[1].scatter(y.T[0], y.T[2])
raise a
"""
# Delete all components.
K = self.weight.size
best_merge = []
if K > 2:
# TODO: Some heuristic just to say only try merge if we
# hyperjumped?
for k in range(K):
idx, _, meta = self._optimize_merge_mixture(y, R, k)
print("k", k, meta)
if k == 0 \
or best_merge[-1] > meta["message_length"]:
best_merge = [idx, meta["message_length"]]
# Split all components, and run partial E-M on each.
K = self.weight.size
best_perturbation = []
hyperjump = False
for k in range(K):
# Split the mixture, run partial E-M then full E-M.
idx, _, meta = self._optimize_split_mixture(y, R, k)
logger.debug(
"partial EM {} {} {} {}".format(K, k, idx, meta))
# FTL jump!
if k > 0 and self.weight.size > 2:
K = self.weight.size
K_dK = K + np.arange(1, self._predict_mixtures)
dI, pI_scatter, dI_lower \
= self._predict_message_length_change(K_dK, N)
pI = I + dI
logger.debug("Actual: {}".format(I))
logger.debug("Prediction for next mixture: {}".format(I + dI[0]))
logger.debug("Predicted lower bound for next mixture: {}".format(I + dI_lower[0]))
logger.debug("Predicted delta for next mixture: {} {}".format(dI[0], pI_scatter[0]))
logger.debug("K = {}".format(self.weight.size))
ommp = 1 - self._mixture_probability
acceptable_jump \
= (abs(100 * pI_scatter/pI) < self._percent_scatter) \
* (stats.norm(dI, pI_scatter).cdf(0) < ommp)
#= (stats.norm(pI, pI_scatter).cdf(I) < ommp) \
if any(acceptable_jump):
K_jump = K_dK[np.where(acceptable_jump)[0]]
# If the jumps are noisy, be conservative.
idx = np.where(np.diff(K_jump) > 1)[0]
idx = idx[0] if idx else -1
K_jump = K_jump[idx]
raise a
if K_jump - K > 1:
logger.debug(
"We should JUMP to K = {}!".format(K_jump))
mixture, R, meta = self._ftl_jump(y, K_jump)
logger.debug("New meta: {}".format(meta))
# Set the current mixture.
self.set_parameters(**mixture.parameters)
R, ll, I = self._expectation(y, **kwargs)
hyperjump = True
break
if k == 0 \
or best_perturbation[-1] > meta["message_length"]:
best_perturbation = [idx, meta["message_length"]]
if hyperjump:
print("Hyperjump EARLY!")
continue
# Is the best perturbation better than the current mixture?
if best_perturbation[-1] < I and (len(best_merge) == 0 or best_perturbation[-1] < best_merge[-1]):
idx, I = best_perturbation
mixture = self._proposed_mixtures[idx]
self.set_parameters(**mixture.parameters)
elif len(best_merge) > 0 and best_merge[-1] < I and best_merge[-1] < best_perturbation[-1]:
idx, I = best_merge
mixture = self._proposed_mixtures[idx]
self.set_parameters(**mixture.parameters)
else:
# All split perturbations had longer message lengths.
converged = True
logger.debug(
"All split perturbations had longer message lengths.")
break
# To update message length, max log likelihood tec
# TODO refactor
R, ll, I = self._expectation(y, **kwargs)
# Only start making predictions when we have some data.
if self.weight.size > 2:
K = self.weight.size
K_dK = K + np.arange(1, self._predict_mixtures)
dI, pI_scatter, dI_lower \
= self._predict_message_length_change(K_dK, N)
pI = I + dI
logger.debug("Actual: {}".format(I))
logger.debug("Prediction for next mixture: {}".format(I + dI[0]))
logger.debug("Predicted lower bound for next mixture: {}".format(I + dI_lower[0]))
logger.debug("Predicted delta for next mixture: {} {}".format(dI[0], pI_scatter[0]))
logger.debug("K = {}".format(self.weight.size))
ommp = 1 - self._mixture_probability
acceptable_jump \
= (abs(100 * pI_scatter/pI) < self._percent_scatter) \
* (stats.norm(dI, pI_scatter).cdf(0) < ommp)
#= (stats.norm(pI, pI_scatter).cdf(I) < ommp) \
if any(acceptable_jump):
K_jump = K_dK[np.where(acceptable_jump)[0]]
# If the jumps are noisy, be conservative.
idx = np.where(np.diff(K_jump) > 1)[0]
idx = idx[0] if idx else -1
K_jump = K_jump[idx]
if K_jump - K > 1:
logger.debug(
"We should JUMP to K = {}!".format(K_jump))
mixture, R, meta = self._ftl_jump(y, K_jump)
logger.debug("New meta: {}".format(meta))
# Set the current mixture.
self.set_parameters(**mixture.parameters)
R, ll, I = self._expectation(y, **kwargs)
else:
# Just split to K+1
continue
if converged:
logger.debug("Skipping final sweep")
break
logger.debug("Doing final sweep")
# Do a final sweep to be sure.
K = self.weight.size
best_perturbation = []
for k in range(K):
perturbation = self._propose_split_mixtures(y, R, k)
if k == 0 \
or best_perturbation[-1] > perturbation[-1]:
best_perturbation = [k] + list(perturbation)
logger.debug("Actual: {}".format(best_perturbation[-1]))
if best_perturbation[-1] < I:
k, params, _R, _meta, I = best_perturbation
self.set_parameters(**params)
R, ll, I = self._expectation(y, **kwargs)
# Make a prediction for the next one either way.
pdf = self._predict_message_length_change(K + 1, N)
logger.debug("Prediction for next mixture: {}".format(pdf))
else:
# Converged.
converged = True
"""
logger.debug("Ended at K = {}".format(self.weight.size))
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
fig, ax = plt.subplots()
ax.scatter(y.T[0], y.T[1], facecolor="#666666", alpha=0.5)
K = self.weight.size
for k in range(K):
mean = self.mean[k][:2]
cov = self.covariance[k]
vals, vecs = np.linalg.eigh(cov[:2, :2])
order = vals.argsort()[::-1]
vals = vals[order]
vecs = vecs[:,order]
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
width, height = 2 * 1 * np.sqrt(vals)
ellip = Ellipse(xy=mean, width=width, height=height, angle=theta,
facecolor="r", alpha=0.5)
ax.add_artist(ellip)
ax.scatter([mean[0]], [mean[1]], facecolor="r")
fig, ax = plt.subplots()
foo = np.array(self._mixture_predictors)
ax.scatter(foo.T[0], -foo.T[2] - foo.T[3])
raise a
def kullback_leibler_for_multivariate_normals(mu_a, cov_a, mu_b, cov_b):
    r"""
    Return the Kullback-Leibler distance from one multivariate normal
    distribution with mean :math:`\mu_a` and covariance :math:`\Sigma_a`,
    to another multivariate normal distribution with mean :math:`\mu_b` and
    covariance matrix :math:`\Sigma_b`. The two distributions are assumed to
    have the same number of dimensions, such that the Kullback-Leibler
    distance is

    .. math::

        D_{\mathrm{KL}}\left(\mathcal{N}_{a}||\mathcal{N}_{b}\right) =
        \frac{1}{2}\left(\mathrm{Tr}\left(\Sigma_{b}^{-1}\Sigma_{a}\right) + \left(\mu_{b}-\mu_{a}\right)^\top\Sigma_{b}^{-1}\left(\mu_{b} - \mu_{a}\right) - k + \ln{\left(\frac{\det{\Sigma_{b}}}{\det{\Sigma_{a}}}\right)}\right)

    where :math:`k` is the number of dimensions and the resulting distance is
    given in units of nats.

    .. warning::

        It is important to remember that
        :math:`D_{\mathrm{KL}}\left(\mathcal{N}_{a}||\mathcal{N}_{b}\right) \neq D_{\mathrm{KL}}\left(\mathcal{N}_{b}||\mathcal{N}_{a}\right)`.

    :param mu_a:
        The mean of the first multivariate normal distribution.

    :param cov_a:
        The covariance matrix of the first multivariate normal distribution.

    :param mu_b:
        The mean of the second multivariate normal distribution.

    :param cov_b:
        The covariance matrix of the second multivariate normal distribution.

    :returns:
        The Kullback-Leibler distance from distribution :math:`a` to :math:`b`
        in units of nats. Dividing the result by :math:`\log_{e}2` will give
        the distance in units of bits.
    """
    # Promote diagonal (vector) covariances to dense matrices.
    if len(cov_a.shape) == 1:
        cov_a = cov_a * np.eye(cov_a.size)
    if len(cov_b.shape) == 1:
        cov_b = cov_b * np.eye(cov_b.size)
    # Invert cov_b via SVD for numerical stability.
    U, S, V = np.linalg.svd(cov_b)
    Cb_inv = np.dot(np.dot(V.T, np.linalg.inv(np.diag(S))), U.T)
    k = mu_a.size
    offset = mu_b - mu_a
    # BUG FIX: the trace term is Tr(cov_b^{-1} cov_a) per the formula above;
    # the original computed Tr(cov_a^{-1} cov_b).  (The now-unused inverse of
    # cov_a is no longer computed.)
    return 0.5 * np.sum([
        np.trace(np.dot(Cb_inv, cov_a)),
        + np.dot(offset.T, np.dot(Cb_inv, offset)),
        - k,
        + np.log(np.linalg.det(cov_b)/np.linalg.det(cov_a))
    ])
def _parameters_per_mixture(D, covariance_type):
r"""
Return the number of parameters per Gaussian component, given the number
of observed dimensions and the covariance type.
:param D:
The number of dimensions per data point.
:param covariance_type:
The structure of the covariance matrix for individual components.
The available options are: `full` for a free covariance matrix, or
`diag` for a diagonal covariance matrix.
:returns:
The number of parameters required to fully specify the multivariate
mean and covariance matrix of a :math:`D`-dimensional Gaussian.
"""
if covariance_type == "full":
return int(D + D*(D + 1)/2.0)
elif covariance_type == "diag":
return 2 * D
else:
raise ValueError("unknown covariance type '{}'".format(covariance_type))
def log_kappa(D):
    """
    Approximate the log of the lattice quantization constant for a
    D-dimensional optimal quantizing lattice, via the standard asymptotic
    expression used in MML codelength calculations.
    """
    c_d = 0.5 * np.log(D * np.pi) - 0.5 * D * np.log(2 * np.pi)
    return 2.0 * c_d / D - 1
def _message_length(y, mu, cov, weight, responsibility, nll,
    covariance_type, eps=0.10, dofail=False, full_output=False, **kwargs):
    r"""
    Compute the MML message length (in bits) of a Gaussian mixture.

    :param y: the (N, D) array of observations.
    :param mu: the (M, D) array of component means.
    :param cov: the component covariances ((M, D, D) for "full",
        (M, D) for "diag").
    :param weight: the (M,) array of component weights.
    :param responsibility: the (M, N) responsibility matrix.
    :param nll: the negative log-likelihood of the data (nats).
    :param covariance_type: "full" or "diag".
    :param full_output: if True, also return a dict of the message-length
        components.
    """
    # THIS IS SO BAD
    N, D = y.shape
    M = weight.size

    # I(M) = M\log{2} + constant  (cost of encoding the number of components)
    I_m = M # [bits]

    # I(w) = \frac{(M - 1)}{2}\log{N} - \frac{1}{2}\sum_{j=1}^{M}\log{w_j} - (M - 1)!
    I_w = (M - 1) / 2.0 * np.log(N) \
        - 0.5 * np.sum(np.log(weight)) \
        - scipy.special.gammaln(M)

    # TODO: why gammaln(M) ~= log(K-1)! or (K-1)!
    #- np.math.factorial(M - 1) \
    #+ 1
    I_w = I_w/np.log(2) # [bits]

    if D == 1:
        # 1-D case untested -- halts deliberately below.
        log_F_m = np.log(2) + (2 * np.log(N)) - 4 * np.log(cov.flatten()[0]**0.5)
        raise UnsureError

    else:
        if covariance_type == "diag":
            # Promote variance vectors to dense diagonal matrices.
            cov_ = np.array([_ * np.eye(D) for _ in cov])
        else:
            # full
            cov_ = cov

        log_det_cov = np.log(np.linalg.det(cov_))

        # Log of the Fisher information determinant per component.
        # TODO: What about for diag.
        log_F_m = 0.5 * D * (D + 3) * np.log(np.sum(responsibility, axis=1))
        log_F_m += -log_det_cov
        log_F_m += -(D * np.log(2) + (D + 1) * log_det_cov)

    # TODO: No prior on h(theta).. thus -\sum_{j=1}^{M}\log{h\left(\theta_j\right)} = 0

    # Data code length: the AOM converts the density to a probability.
    # TODO: bother about including this? -N * D * np.log(eps)
    AOM = 0.001 # MAGIC
    Il = nll - (D * N * np.log(AOM))
    Il = Il/np.log(2) # [bits]

    """
    if D == 1:log_likelihood

        # R1
        R1 = 10 # MAGIC
        R2 = 2 # MAGIC
        log_prior = D * np.log(R1) # mu
        log_prior += np.log(R2)
        log_prior += np.log(cov.flatten()[0]**0.5)

    else:
        R1 = 10
        log_prior = D * np.log(R1) + 0.5 * (D + 1) * log_det_cov
    """

    log_prior = 0

    # Parameter code length, plus the lattice quantization constant for the
    # total number of free parameters Q.
    I_t = (log_prior + 0.5 * log_F_m)/np.log(2)
    sum_It = np.sum(I_t)

    K = M
    Q = _total_parameters(K, D, covariance_type)
    lattice = 0.5 * Q * log_kappa(Q) / np.log(2)

    part1 = I_m + I_w + np.sum(I_t) + lattice
    part2 = Il + (0.5 * Q)/np.log(2)

    I = part1 + part2

    assert I > 0

    if full_output:
        return (I, dict(I_m=I_m, I_w=I_w, log_F_m=log_F_m, nll=nll, I_l=Il, I_t=I_t,
            lattice=lattice, part1=part1, part2=part2))

    return I
def _index_of_most_similar_component(y, mean, covariance, index):
    r"""
    Find the index of the component most similar to the component at
    ``index``, as judged by the Kullback-Leibler divergence.

    :param mean: the (K, D) array of component means.
    :param covariance: the array of component covariances.
    :param index: the index of the reference component.
    """
    K, D = mean.shape
    D_kl = np.inf * np.ones(K)
    for k in range(K):
        if k == index: continue
        # BUG FIX: the original compared the component at `index` with
        # itself for every k (always zero), making the ranking meaningless.
        D_kl[k] = kullback_leibler_for_multivariate_normals(
            mean[index], covariance[index], mean[k], covariance[k])
    return np.nanargmin(D_kl)
def _merge_component_with_closest_component(y, mean, covariance, weight,
    responsibility, index, **kwargs):
    """
    Merge the component at ``index`` with whichever component is closest to
    it (by Kullback-Leibler divergence), delegating to `_merge_components`.
    """
    closest = _index_of_most_similar_component(y, mean, covariance, index)
    return _merge_components(y, mean, covariance, weight, responsibility,
                             index, closest, **kwargs)
def _merge_components(y, mean, covariance, weight, responsibility, index_a,
    index_b, **kwargs):
    r"""
    Merge two components of a mixture into one, re-estimating the merged
    component's weight, mean and covariance from the combined
    responsibilities.

    :param y:
        A :math:`N\times{}D` array of the observations :math:`y`,
        where :math:`N` is the number of observations, and :math:`D` is the
        number of dimensions per observation.
    :param index_a:
        The index of the first component to merge.
    :param index_b:
        The index of the second component to merge.

    .. warning::
        The final E-M refinement step is not implemented; this function
        raises NotImplementedError after preparing the merged parameters.
    """
    # BUG FIX: this function referenced undefined names (`a_index`,
    # `b_index`, `mu`, `cov`); they are mapped onto the actual parameters
    # (`index_a`, `index_b`, `mean`, `covariance`) below.
    logger.debug("Merging component {} (of {}) with {}".format(
        index_a, weight.size, index_b))

    # Initialize the merged component from the combined responsibilities.
    weight_k = np.sum(weight[[index_a, index_b]])
    responsibility_k = np.sum(responsibility[[index_a, index_b]], axis=0)
    effective_membership_k = np.sum(responsibility_k)

    mean_k = np.sum(responsibility_k * y.T, axis=1) / effective_membership_k
    covariance_k = _estimate_covariance_matrix(
        y, np.atleast_2d(responsibility_k), np.atleast_2d(mean_k),
        kwargs["covariance_type"], kwargs["covariance_regularization"])

    # Delete the higher-indexed component and overwrite the lower one.
    del_index = np.max([index_a, index_b])
    keep_index = np.min([index_a, index_b])

    new_mean = np.delete(mean, del_index, axis=0)
    new_covariance = np.delete(covariance, del_index, axis=0)
    new_weight = np.delete(weight, del_index, axis=0)
    new_responsibility = np.delete(responsibility, del_index, axis=0)

    new_mean[keep_index] = mean_k
    new_covariance[keep_index] = covariance_k
    new_weight[keep_index] = weight_k
    new_responsibility[keep_index] = responsibility_k

    # NOTE(review): the original ended with `raise a` (a NameError) and left
    # the E-M call commented out.  Keep failing loudly -- but with an
    # explicit exception -- until that step is completed.
    raise NotImplementedError(
        "merged-mixture E-M is not implemented; see commented-out call below")
    #return _expectation_maximization(y, new_mean, new_covariance, new_weight,
    #    responsibility=new_responsibility, **kwargs)
def _compute_precision_cholesky(covariances, covariance_type):
r"""
Compute the Cholesky decomposition of the precision of the covariance
matrices provided.
:param covariances:
An array of covariance matrices.
:param covariance_type:
The structure of the covariance matrix for individual components.
The available options are: `full` for a free covariance matrix, or
`diag` for a diagonal covariance matrix.
"""
singular_matrix_error = "Failed to do Cholesky decomposition"
if covariance_type in "full":
M, D, _ = covariances.shape
cholesky_precision = np.empty((M, D, D))
for m, covariance in enumerate(covariances):
try:
cholesky_cov = scipy.linalg.cholesky(covariance, lower=True)
except scipy.linalg.LinAlgError:
raise ValueError(singular_matrix_error)
cholesky_precision[m] = scipy.linalg.solve_triangular(
cholesky_cov, np.eye(D), lower=True).T
elif covariance_type in "diag":
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(singular_matrix_error)
cholesky_precision = covariances**(-0.5)
else:
raise NotImplementedError("nope")
return cholesky_precision
def _slogdet(covariance, covariance_type):
if covariance_type == "full":
sign, slogdet = np.linalg.slogdet(covariance)
assert np.all(sign == 1)
return slogdet
elif covariance_type == "diag":
K, D = covariance.shape
cov = np.array([_ * np.eye(D) for _ in covariance])
sign, slogdet = np.linalg.slogdet(cov)
assert np.all(sign == 1)
return slogdet
def _estimate_covariance_matrix_full(y, responsibility, mean,
covariance_regularization=0):
N, D = y.shape
M, N = responsibility.shape
membership = np.sum(responsibility, axis=1)
I = np.eye(D)
cov = np.empty((M, D, D))
for m, (mu, rm, nm) in enumerate(zip(mean, responsibility, membership)):
diff = y - mu
denominator = nm - 1 if nm > 1 else nm
cov[m] = np.dot(rm * diff.T, diff) / denominator \
+ covariance_regularization * I
return cov
def _estimate_covariance_matrix(y, responsibility, mean, covariance_type,
    covariance_regularization):
    """
    Dispatch covariance estimation to the implementation matching the
    requested covariance structure ("full" or "diag").

    :raises ValueError: if the covariance type is not recognised.
    """
    if covariance_type == "full":
        estimator = _estimate_covariance_matrix_full
    elif covariance_type == "diag":
        estimator = _estimate_covariance_matrix_diag
    else:
        raise ValueError("unknown covariance type")
    return estimator(y, responsibility, mean, covariance_regularization)
def _estimate_covariance_matrix_diag(y, responsibility, mean,
covariance_regularization=0):
N, D = y.shape
M, N = responsibility.shape
denominator = np.sum(responsibility, axis=1)
denominator[denominator > 1] = denominator[denominator > 1] - 1
membership = np.sum(responsibility, axis=1)
I = np.eye(D)
cov = np.empty((M, D))
for m, (mu, rm, nm) in enumerate(zip(mean, responsibility, membership)):
diff = y - mu
denominator = nm - 1 if nm > 1 else nm
cov[m] = np.dot(rm, diff**2) / denominator + covariance_regularization
#cov[m] = rm * diff**2 / denominator + covariance_regularization
return cov
#avg_X2 = np.dot(responsibility, y * y) / denominator
#avg_means2 = mean**2
#avg_X_means = mean * np.dot(responsibility, y) / denominator
#return avg_X2 - 2 * avg_X_means + avg_means2 + covariance_regularization
def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
"""Compute the log-det of the cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like,
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
log_det_precision_chol : array-like, shape (n_components,)
The determinant of the precision matrix for each component.
"""
if covariance_type == 'full':
n_components, _, _ = matrix_chol.shape
log_det_chol = (np.sum(np.log(
matrix_chol.reshape(
n_components, -1)[:, ::n_features + 1]), 1))
elif covariance_type == 'tied':
log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))
elif covariance_type == 'diag':
log_det_chol = (np.sum(np.log(matrix_chol), axis=1))
else:
log_det_chol = n_features * (np.log(matrix_chol))
return log_det_chol
def _estimate_log_gaussian_prob(X, means, precision_cholesky, covariance_type):
    r"""
    Compute the (n_samples, n_components) matrix of per-sample, per-component
    log Gaussian densities, given the Cholesky factors of the component
    precisions (adapted from scikit-learn's Gaussian mixture internals).
    """
    n_samples, n_features = X.shape
    n_components, _ = means.shape
    # det(precision_chol) is half of det(precision)
    log_det = _compute_log_det_cholesky(
        precision_cholesky, covariance_type, n_features)

    # BUG FIX: the original used substring tests (`covariance_type in
    # 'full'`), which also match e.g. the empty string; use strict equality
    # and fail loudly on unknown types (previously an UnboundLocalError).
    if covariance_type == 'full':
        log_prob = np.empty((n_samples, n_components))
        for k, (mu, prec_chol) in enumerate(zip(means, precision_cholesky)):
            # Mahalanobis distance via the whitened residual.
            y = np.dot(X, prec_chol) - np.dot(mu, prec_chol)
            log_prob[:, k] = np.sum(np.square(y), axis=1)

    elif covariance_type == 'diag':
        precisions = precision_cholesky**2
        log_prob = (np.sum((means ** 2 * precisions), 1)
                    - 2.0 * np.dot(X, (means * precisions).T)
                    + np.dot(X**2, precisions.T))

    else:
        raise ValueError("unknown covariance type '{}'".format(covariance_type))

    return -0.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
def _svd(covariance, covariance_type):
if covariance_type == "full":
return np.linalg.svd(covariance)
elif covariance_type == "diag":
return np.linalg.svd(covariance * np.eye(covariance.size))
else:
raise ValueError("unknown covariance type")
|
andycasey/snob
|
snob/nips_search.py
|
Python
|
mit
| 67,954 | 0.004709 |
from constants import constants, callback_name_list
from controller import plan_controller, navigable_list_controller, navigable_inline_keyboard_controller, settings_controller
from telepot.namedtuple import ReplyKeyboardRemove
from bot import bot
from decorators.callback import callback_dict as callback_list
"""
callback_list = {
callback_name_list["setting"]: settings_controller.set_settings,
}
"""
def handle_callback_data(msg, action_prefix):
    """
    Route an incoming Telegram callback query to the handler registered for
    its data prefix, and record the handler's action token for the chat.

    :param msg: the raw callback-query update from Telegram.
    :param action_prefix: mutable mapping of chat_id -> pending action token;
        updated in place (" " when no follow-up action is pending).
    """
    callback_data = msg['data']
    # Media messages carry their text in 'caption' rather than 'text'.
    # NOTE(review): `message` is currently unused -- confirm before removing,
    # since this lookup also raises KeyError on malformed updates.
    message = msg['message']['text'] if 'text' in msg['message'] else msg['message']['caption']
    chat_id = msg['message']['chat']['id']
    callback_query_id = msg['id']
    inline_message_id = msg['message']["from"]["id"]
    message_id = msg['message']['message_id']
    for callback in callback_list:
        if callback_data.startswith(callback):
            answer = callback_list[callback](
                callback_query_id, callback_data, chat_id=chat_id,
                message_id=message_id, inline_message_id=inline_message_id)
            # IDIOM FIX: compare against None with `is`, not `==`.
            if answer is None:
                action_prefix[chat_id] = " "
            else:
                action_prefix[chat_id] = answer
            break
    else:
        # No registered prefix matched this callback payload.
        bot.sendMessage(chat_id, constants["callbackNotFound"],
                        reply_markup=ReplyKeyboardRemove())
        action_prefix[chat_id] = " "
    # Always acknowledge the query so the client stops its spinner.
    bot.answerCallbackQuery(callback_query_id)
|
AntoDev96/GuidaSky
|
handlers/callback.py
|
Python
|
gpl-3.0
| 1,351 | 0.005181 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides class BaseCodeEditor; base class for
CodeEditor class in Coder
and CodeBox class in dlgCode (code component)
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import wx
import wx.stc
import sys
from pkg_resources import parse_version
from psychopy.constants import PY3
from psychopy import logging
from psychopy import prefs
from ..themes import ThemeMixin
from psychopy.localization import _translate
class BaseCodeEditor(wx.stc.StyledTextCtrl, ThemeMixin):
"""Provides base class for code editors
See the wxPython demo styledTextCtrl 2.
"""
    def __init__(self, parent, ID, pos, size, style):
        """Create the base code-editor control.

        :param parent: the parent window (stored as ``self.notebook``).
        :param ID: wx window id.
        :param pos, size, style: forwarded to ``wx.stc.StyledTextCtrl``.
        """
        wx.stc.StyledTextCtrl.__init__(self, parent, ID, pos, size, style)
        self.notebook = parent
        self.UNSAVED = False  # buffer differs from the file on disk
        self.filename = ""
        self.fileModTime = None  # was file modified outside of CodeEditor
        self.AUTOCOMPLETE = True
        self.autoCompleteDict = {}
        # Comment token per language.
        # NOTE(review): `'//' or '#'` always evaluates to '//'; if 'Both' was
        # meant to carry both tokens this is a latent bug -- confirm.
        self._commentType = {'Py': '#', 'JS': '//', 'Both': '//' or '#'}
        # doesn't pause strangely
        self.locals = None  # will contain the local environment of the script
        self.prevWord = None
        # remove some annoying stc key commands
        CTRL = wx.stc.STC_SCMOD_CTRL
        self.CmdKeyClear(ord('['), CTRL)
        self.CmdKeyClear(ord(']'), CTRL)
        self.CmdKeyClear(ord('/'), CTRL)
        self.CmdKeyClear(ord('/'), CTRL | wx.stc.STC_SCMOD_SHIFT)
        # 4 means 'tabs are bad'; 1 means 'flag inconsistency'
        self.SetMargins(0, 0)
        # Soft tabs, 4-space indent, LF line endings.
        self.SetUseTabs(False)
        self.SetTabWidth(4)
        self.SetIndent(4)
        self.SetBufferedDraw(False)
        self.SetEOLMode(wx.stc.STC_EOL_LF)
        # setup margins for line numbers
        self.SetMarginType(0, wx.stc.STC_MARGIN_NUMBER)
        self.SetMarginWidth(0, 40)
        # Setup a margin to hold fold markers
        self.SetMarginType(1, wx.stc.STC_MARGIN_SYMBOL)
        self.SetMarginMask(1, wx.stc.STC_MASK_FOLDERS)
        self.SetMarginSensitive(1, True)
        self.SetMarginWidth(1, 12)
        # Marker glyphs for every state of the code-folding margin.
        self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDEROPEN,
                          wx.stc.STC_MARK_BOXMINUS, "white", "#808080")
        self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDER,
                          wx.stc.STC_MARK_BOXPLUS, "white", "#808080")
        self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDERSUB,
                          wx.stc.STC_MARK_VLINE, "white", "#808080")
        self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDERTAIL,
                          wx.stc.STC_MARK_LCORNER, "white", "#808080")
        self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDEREND,
                          wx.stc.STC_MARK_BOXPLUSCONNECTED, "white", "#808080")
        self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDEROPENMID,
                          wx.stc.STC_MARK_BOXMINUSCONNECTED, "white", "#808080")
        self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDERMIDTAIL,
                          wx.stc.STC_MARK_TCORNER, "white", "#808080")
        # Set what kind of events will trigger a modified event
        self.SetModEventMask(wx.stc.STC_MOD_DELETETEXT |
                             wx.stc.STC_MOD_INSERTTEXT)
        # Bind context menu
        self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
    def OnContextMenu(self, event):
        """Sets the context menu for components using code editor base class"""
        if not hasattr(self, "UndoID"):
            # Create a new ID for all items
            # (done lazily, once per widget: subsequent right-clicks reuse
            # the same IDs and event bindings)
            self.UndoID = wx.NewId()
            self.RedoID = wx.NewId()
            self.CutID = wx.NewId()
            self.CopyID = wx.NewId()
            self.PasteID = wx.NewId()
            self.DeleteID = wx.NewId()
            self.SelectAllID = wx.NewId()
            # Bind items to relevant method
            self.Bind(wx.EVT_MENU, self.onUndo, id=self.UndoID)
            self.Bind(wx.EVT_MENU, self.onRedo, id=self.RedoID)
            self.Bind(wx.EVT_MENU, self.onCut, id=self.CutID)
            self.Bind(wx.EVT_MENU, self.onCopy, id=self.CopyID)
            self.Bind(wx.EVT_MENU, self.onPaste, id=self.PasteID)
            self.Bind(wx.EVT_MENU, self.onDelete, id=self.DeleteID)
            self.Bind(wx.EVT_MENU, self.onSelectAll, id=self.SelectAllID)
        # Create menu and menu items (rebuilt fresh on every right-click)
        menu = wx.Menu()
        undoItem = wx.MenuItem(menu, self.UndoID, _translate("Undo"))
        redoItem = wx.MenuItem(menu, self.RedoID, _translate("Redo"))
        cutItem = wx.MenuItem(menu, self.CutID, _translate("Cut"))
        copyItem = wx.MenuItem(menu, self.CopyID, _translate("Copy"))
        pasteItem = wx.MenuItem(menu, self.PasteID, _translate("Paste"))
        deleteItem = wx.MenuItem(menu, self.DeleteID, _translate("Delete"))
        selectItem = wx.MenuItem(menu, self.SelectAllID, _translate("Select All"))
        # Check whether items should be enabled
        undoItem.Enable(self.CanUndo())
        redoItem.Enable(self.CanRedo())
        cutItem.Enable(self.CanCut())
        copyItem.Enable(self.CanCopy())
        pasteItem.Enable(self.CanPaste())
        # Delete is enabled via CanCopy() — presumably because both need an
        # active selection; there is no CanDelete() — TODO confirm intent.
        deleteItem.Enable(self.CanCopy())
        # Append items to menu
        menu.Append(undoItem)
        menu.Append(redoItem)
        menu.AppendSeparator()
        menu.Append(cutItem)
        menu.Append(copyItem)
        menu.Append(pasteItem)
        menu.AppendSeparator()
        menu.Append(deleteItem)
        menu.Append(selectItem)
        # Show modally at the pointer, then free the native menu object.
        self.PopupMenu(menu)
        menu.Destroy()
def onUndo(self, event):
"""For context menu Undo"""
foc = self.FindFocus()
if hasattr(foc, 'Undo'):
foc.Undo()
def onRedo(self, event):
"""For context menu Redo"""
foc = self.FindFocus()
if hasattr(foc, 'Redo'):
foc.Redo()
def onCut(self, event):
"""For context menu Cut"""
foc = self.FindFocus()
if hasattr(foc, 'Cut'):
foc.Cut()
def onCopy(self, event):
"""For context menu Copy"""
foc = self.FindFocus()
if hasattr(foc, 'Copy'):
foc.Copy()
def onPaste(self, event):
"""For context menu Paste"""
foc = self.FindFocus()
if hasattr(foc, 'Paste'):
foc.Paste()
def onSelectAll(self, event):
"""For context menu Select All"""
foc = self.FindFocus()
if hasattr(foc, 'SelectAll'):
foc.SelectAll()
def onDelete(self, event):
"""For context menu Delete"""
foc = self.FindFocus()
if hasattr(foc, 'DeleteBack'):
foc.DeleteBack()
    def OnKeyPressed(self, event):
        """Key-press hook; a no-op here, intended for subclasses to override."""
        pass
def HashtagCounter(self, text, nTags=0):
# Hashtag counter - counts lines beginning with hashtags in selected text
for lines in text.splitlines():
if lines.startswith('#'):
nTags += 1
elif lines.startswith('//'):
nTags += 2
return nTags
    def toggleCommentLines(self):
        """Comment or uncomment the selected lines.

        The decision is majority-based: if the fraction of already-commented
        lines (as weighted by HashtagCounter) is below 0.6 the selection is
        commented, otherwise existing comment prefixes are stripped.
        """
        codeType = "Py"
        if hasattr(self, "codeType"):
            codeType = self.codeType
        startText, endText = self._GetPositionsBoundingSelectedLines()
        nLines = len(self._GetSelectedLineNumbers())
        nHashtags = self.HashtagCounter(self.GetTextRange(startText, endText))
        passDec = False  # pass decision - only pass if line is blank
        # Test decision criteria, and catch division errors
        # when caret starts at line with no text, or at beginning of line...
        try:
            devCrit, decVal = .6, nHashtags / nLines  # Decision criteria and value
        except ZeroDivisionError:
            if self.LineLength(self.GetCurrentLine()) == 1:
                # Blank line: just insert a comment marker and skip the rest.
                self._ReplaceSelectedLines(self._commentType[codeType])
                devCrit, decVal, passDec = 1, 0, True
            else:
                self.CharRightExtend()  # Move caret so line is counted
                devCrit, decVal = .6, nHashtags / len(self._GetSelectedLineNumbers())
        newText = ''
        # Add or remove hashtags/JS comments from selected text,
        # but pass if a marker was already added to a blank line above.
        if decVal < devCrit and passDec == False:
            for lineNo in self._GetSelectedLineNumbers():
                lineText = self.GetLine(lineNo)
                newText = newText + self._commentType[codeType] + lineText
        elif decVal >= devCrit and passDec == False:
            for lineNo in self._GetSelectedLineNumbers():
                lineText = self.GetLine(lineNo)
                if lineText.startswith(self._commentType[codeType]):
                    lineText = lineText[len(self._commentType[codeType]):]
                newText = newText + lineText
        self._ReplaceSelectedLines(newText)
def _GetSelectedLineNumbers(self):
# used for the comment/uncomment machinery from ActiveGrid
selStart, selEnd = self._GetPositionsBoundingSelectedLines()
start = self.LineFromPosition(selStart)
end = self.LineFromPosition(selEnd)
if selEnd == self.GetTextLength():
end += 1
return list(range(start, end))
def _GetPositionsBoundingSelectedLines(self):
# used for the comment/uncomment machinery from ActiveGrid
startPos = self.GetCurrentPos()
endPos = self.GetAnchor()
if startPos > endPos:
startPos, endPos = endPos, startPos
if endPos == self.PositionFromLine(self.LineFromPosition(endPos)):
# If it's at the very beginning of a line, use the line above it
# as the ending line
endPos = endPos - 1
selStart = self.PositionFromLine(self.LineFromPosition(startPos))
selEnd = self.PositionFromLine(self.LineFromPosition(endPos) + 1)
return selStart, selEnd
    def _ReplaceSelectedLines(self, text):
        """Replace the fully-selected lines with *text*.

        Used for the comment/uncomment machinery (from ActiveGrid).
        If multi line selection - keep lines selected.
        For single lines, move to next line and select that line.
        """
        # Empty replacement is a no-op (avoids collapsing the selection).
        if len(text) == 0:
            return
        selStart, selEnd = self._GetPositionsBoundingSelectedLines()
        self.SetSelection(selStart, selEnd)
        self.ReplaceSelection(text)
        if len(text.splitlines()) > 1:
            # Multi-line: re-select the whole replaced span.
            self.SetSelection(selStart, selStart + len(text))
        else:
            # Single line: select from the caret to the end of that line.
            self.SetSelection(
                self.GetCurrentPos(),
                self.GetLineEndPosition(self.GetCurrentLine()))
    def smartIdentThisLine(self):
        """Indent the current line to match the previous line, adding one
        level after a ':' (Python) or '{' (JavaScript) on that line.
        """
        codeType = "Py"
        if hasattr(self, "codeType"):
            codeType = self.codeType
        startLineNum = self.LineFromPosition(self.GetSelectionStart())
        endLineNum = self.LineFromPosition(self.GetSelectionEnd())  # NOTE(review): unused
        prevLine = self.GetLine(startLineNum - 1)
        prevIndent = self.GetLineIndentation(startLineNum - 1)
        # NOTE(review): 'signal' has no 'Both' key while self._commentType
        # does — a codeType of 'Both' would raise KeyError below; confirm.
        signal = {'Py': ':', 'JS': '{'}
        # set the indent
        self.SetLineIndentation(startLineNum, prevIndent)
        self.VCHome()
        # check for a colon (Python) or curly brace (JavaScript) to signal an indent
        prevLogical = prevLine.split(self._commentType[codeType])[0]
        prevLogical = prevLogical.strip()
        if len(prevLogical) > 0 and prevLogical[-1] == signal[codeType]:
            self.CmdKeyExecute(wx.stc.STC_CMD_TAB)
        elif len(prevLogical) > 0 and prevLogical[-1] == '}' and codeType == 'JS':
            # NOTE(review): CmdKeyExecute expects a command id; adding
            # STC_SCMOD_SHIFT to STC_CMD_TAB looks suspect — presumably
            # STC_CMD_BACKTAB was intended. Confirm against wx.stc docs.
            self.CmdKeyExecute(wx.stc.STC_SCMOD_SHIFT + wx.stc.STC_CMD_TAB)
    def smartIndent(self):
        """Re-indent all selected lines relative to the line preceding the
        selection, adding one level (4 spaces) after a trailing ':'.

        NOTE(review): uses a hard-coded '#' comment marker and 4-space step,
        unlike smartIdentThisLine which consults self._commentType — confirm
        whether JS sources should be handled here too.
        """
        # find out about current positions and indentation
        startLineNum = self.LineFromPosition(self.GetSelectionStart())
        endLineNum = self.LineFromPosition(self.GetSelectionEnd())
        prevLine = self.GetLine(startLineNum - 1)
        prevIndent = self.GetLineIndentation(startLineNum - 1)
        startLineIndent = self.GetLineIndentation(startLineNum)
        # calculate how much we need to increment/decrement the current lines
        incr = prevIndent - startLineIndent
        # a trailing colon on the previous line adds one indent level
        prevLogical = prevLine.split('#')[0]
        prevLogical = prevLogical.strip()
        if len(prevLogical) > 0 and prevLogical[-1] == ':':
            incr = incr + 4
        # set each line to the correct indentation, as one undoable action
        self.BeginUndoAction()
        for lineNum in range(startLineNum, endLineNum + 1):
            thisIndent = self.GetLineIndentation(lineNum)
            self.SetLineIndentation(lineNum, thisIndent + incr)
        self.EndUndoAction()
def shouldTrySmartIndent(self):
# used when the user presses tab key: decide whether to insert
# a tab char or whether to smart indent text
# if some text has been selected then use indentation
if len(self.GetSelectedText()) > 0:
return True
# test whether any text precedes current pos
lineText, posOnLine = self.GetCurLine()
textBeforeCaret = lineText[:posOnLine]
if textBeforeCaret.split() == []:
return True
else:
return False
def indentSelection(self, howFar=4):
# Indent or outdent current selection by 'howFar' spaces
# (which could be positive or negative int).
startLineNum = self.LineFromPosition(self.GetSelectionStart())
endLineNum = self.LineFromPosition(self.GetSelectionEnd())
# go through line-by-line
self.BeginUndoAction()
for lineN in range(startLineNum, endLineNum + 1):
newIndent = self.GetLineIndentation(lineN) + howFar
if newIndent < 0:
newIndent = 0
self.SetLineIndentation(lineN, newIndent)
self.EndUndoAction()
    def Paste(self, event=None):
        """Paste clipboard text into the editor, normalising line endings
        to '\\n', then re-analyse the script.
        """
        dataObj = wx.TextDataObject()
        clip = wx.Clipboard().Get()
        clip.Open()
        success = clip.GetData(dataObj)
        clip.Close()
        if success:
            txt = dataObj.GetText()
            # dealing with unicode error in wx3 for Mac
            # (PY3 / parse_version are presumably module-level imports —
            # this branch only runs under Python 2 on darwin)
            if parse_version(wx.__version__) >= parse_version('3') and sys.platform == 'darwin' and not PY3:
                try:
                    # if we can decode from utf-8 then all is good
                    txt.decode('utf-8')
                except Exception as e:
                    logging.error(str(e))
                    # if not then wx conversion broke so get raw data instead
                    txt = dataObj.GetDataHere()
            self.ReplaceSelection(txt.replace("\r\n", "\n").replace("\r", "\n"))
        self.analyseScript()
    def analyseScript(self):
        """Analyse the script.

        A no-op here; called after Paste() and intended for subclasses to
        override with real analysis.
        """
        pass
    @property
    def edgeGuideVisible(self):
        # True when the long-line edge guide is drawn (any mode but NONE).
        return self.GetEdgeMode() != wx.stc.STC_EDGE_NONE

    @edgeGuideVisible.setter
    def edgeGuideVisible(self, value):
        # Note the strict identity test: only `value is True` enables the
        # guide; any other (even truthy) value switches it off.
        if value is True:
            self.SetEdgeMode(wx.stc.STC_EDGE_LINE)
        else:
            self.SetEdgeMode(wx.stc.STC_EDGE_NONE)

    @property
    def edgeGuideColumn(self):
        # Column index at which the edge guide is drawn.
        return self.GetEdgeColumn()

    @edgeGuideColumn.setter
    def edgeGuideColumn(self, value):
        self.SetEdgeColumn(value)
# def _applyAppTheme(self, target=None):
# """Overrides theme change from ThemeMixin.
# Don't call - this is called at the end of theme.setter"""
# # ThemeMixin._applyAppTheme() # only needed for children
# spec = ThemeMixin.codeColors
# base = spec['base']
#
# # Check for language specific spec
# if self.GetLexer() in self.lexers:
# lexer = self.lexers[self.GetLexer()]
# else:
# lexer = 'invlex'
# if lexer in spec:
# # If there is lang specific spec, delete subkey...
# lang = spec[lexer]
# del spec[lexer]
# #...and append spec to root, overriding any generic spec
# spec.update({key: lang[key] for key in lang})
# else:
# lang = {}
#
# # Override base font with user spec if present
# key = 'outputFont' if isinstance(self, wx.py.shell.Shell) else 'codeFont'
# if prefs.coder[key] != "From theme...":
# base['font'] = prefs.coder[key]
#
# # Pythonise the universal data (hex -> rgb, tag -> wx int)
# invalid = []
# for key in spec:
# # Check that key is in tag list and full spec is defined, discard if not
# if key in self.tags \
# and all(subkey in spec[key] for subkey in ['bg', 'fg', 'font']):
# spec[key]['bg'] = self.hex2rgb(spec[key]['bg'], base['bg'])
# spec[key]['fg'] = self.hex2rgb(spec[key]['fg'], base['fg'])
# if not spec[key]['font']:
# spec[key]['font'] = base['font']
# spec[key]['size'] = int(self.prefs['codeFontSize'])
# else:
# invalid += [key]
# for key in invalid:
# del spec[key]
# # Set style for undefined lexers
# for key in [getattr(wx._stc, item) for item in dir(wx._stc) if item.startswith("STC_LEX")]:
# self.StyleSetBackground(key, base['bg'])
# self.StyleSetForeground(key, base['fg'])
# self.StyleSetSpec(key, "face:%(font)s,size:%(size)d" % base)
# # Set style from universal data
# for key in spec:
# if self.tags[key] is not None:
# self.StyleSetBackground(self.tags[key], spec[key]['bg'])
# self.StyleSetForeground(self.tags[key], spec[key]['fg'])
# self.StyleSetSpec(self.tags[key], "face:%(font)s,size:%(size)d" % spec[key])
# # Apply keywords
# for level, val in self.lexkw.items():
# self.SetKeyWords(level, " ".join(val))
#
# # Make sure there's some spec for margins
# if 'margin' not in spec:
# spec['margin'] = base
# # Set margin colours to match linenumbers if set
# if 'margin' in spec:
# mar = spec['margin']['bg']
# else:
# mar = base['bg']
# self.SetFoldMarginColour(True, mar)
# self.SetFoldMarginHiColour(True, mar)
#
# # Make sure there's some spec for caret
# if 'caret' not in spec:
# spec['caret'] = base
# # Set caret colour
# self.SetCaretForeground(spec['caret']['fg'])
# self.SetCaretLineBackground(spec['caret']['bg'])
# self.SetCaretWidth(1 + ('bold' in spec['caret']['font']))
#
# # Make sure there's some spec for selection
# if 'select' not in spec:
# spec['select'] = base
# spec['select']['bg'] = self.shiftColour(base['bg'], 30)
# # Set selection colour
# self.SetSelForeground(True, spec['select']['fg'])
# self.SetSelBackground(True, spec['select']['bg'])
#
# # Set wrap point
# self.edgeGuideColumn = self.prefs['edgeGuideColumn']
# self.edgeGuideVisible = self.edgeGuideColumn > 0
#
# # Set line spacing
# spacing = min(int(self.prefs['lineSpacing'] / 2), 64) # Max out at 64
# self.SetExtraAscent(spacing)
# self.SetExtraDescent(spacing)
|
psychopy/versions
|
psychopy/app/coder/codeEditorBase.py
|
Python
|
gpl-3.0
| 19,323 | 0.001139 |
"""
Created on 05/12/13
@author: zw606
simple example
assumes images and labels files are named the same but in different folders
(one folder for images, one folder for labels)
"""
import glob
from os.path import join, basename
from spatch.image import spatialcontext
from spatch.image.mask import get_boundary_mask
from spatch.segmentation.patchbased import SAPS
from spatch.utilities.io import open_image, get_affine, save_3d_labels_data
from spatch.image.spatialcontext import COORDINATES, GDT
INITIAL_SPATIAL_INFO = COORDINATES
REFINEMENT_SPATIAL_INFO = GDT
def get_subject_id(fileName):
    """Return the subject identifier encoded in *fileName*.

    The id is the text before the first '_' after any extension
    (everything from the first '.') has been removed.
    """
    stem = fileName.partition('.')[0]
    return stem.partition('_')[0]
def initial_saps_segment(trainingSet, targetFile, imagesPath, labelsPath, patchSize, k, spatialWeight,
                         spatialInfoType=INITIAL_SPATIAL_INFO, maskData=None, numProcessors=21):
    """First-pass spatial patch-based segmentation of *targetFile*.

    trainingSet: atlas file names; any atlas sharing the target's subject id
    is excluded (leave-one-subject-out). Returns the label image produced by
    SAPS.label_image.
    """
    targetImage = open_image(join(imagesPath, targetFile))

    # Ensure target subject is not included in atlases
    targetId = get_subject_id(targetFile)
    trainingSet = [x for x in trainingSet if get_subject_id(x) != targetId]

    # initialise the spatial-pbs object (no boundary dilation on first pass)
    saps = SAPS(imagesPath, labelsPath, patchSize, boundaryDilation=None,
                spatialWeight=spatialWeight, minValue=None, maxValue=None,
                spatialInfoType=spatialInfoType)
    # get results
    results = saps.label_image(targetImage, k, trainingSet, queryMaskDict=maskData, numProcessors=numProcessors)
    return results
def refinement_saps_segment(trainingSet, targetFile, imagesPath, labelsPath, patchSize, k, spatialWeight,
                            prevResultsPath, dtLabels, boundaryRefinementSize=2, preDtErosion=None,
                            spatialInfoType=REFINEMENT_SPATIAL_INFO, numProcessors=21):
    """Refine a previous segmentation of *targetFile* around label boundaries.

    Only voxels within *boundaryRefinementSize* dilations of the previous
    result's boundary are re-labelled; spatial context is derived from
    distance transforms of the previous result for labels in *dtLabels*.
    """
    targetImage = open_image(join(imagesPath, targetFile))

    # Ensure target subject is not included in atlases
    targetId = get_subject_id(targetFile)
    trainingSet = [x for x in trainingSet if get_subject_id(x) != targetId]

    # initialise the spatial-pbs object
    saps = SAPS(imagesPath, labelsPath, patchSize, boundaryDilation=boundaryRefinementSize,
                spatialWeight=spatialWeight, minValue=None, maxValue=None,
                spatialInfoType=spatialInfoType)

    # restrict the query to a band around the previous result's boundary
    prevResults = open_image(join(prevResultsPath, targetFile))
    refinementMask = get_boundary_mask(prevResults, boundaryRefinementSize)
    queryMaskDict = {1: refinementMask}

    # erosion of labels before calculating spatial context
    if preDtErosion is None:
        preDtErosion = boundaryRefinementSize

    # get spatial context to use from previous results
    # NOTE(review): .values() order of this dict is unspecified here —
    # presumably SAPS pairs it with dtLabels; confirm ordering assumptions.
    spatialInfo = spatialcontext.get_dt_spatial_context_dict(prevResults, spatialInfoType=spatialInfoType,
                                                             spatialLabels=dtLabels, labelErosion=preDtErosion,
                                                             imageData=targetImage).values()
    # get results
    results = saps.label_image(targetImage, k, trainingSet, queryMaskDict=queryMaskDict, spatialInfo=spatialInfo,
                               dtLabels=dtLabels, preDtErosion=preDtErosion, numProcessors=numProcessors)
    return results
def run_leave_one_out(imagesPath, labelsPath, savePath, patchSize=7, k=15, spatialWeight=400,
                      prevResultsPath=None, dtLabels=None, preDtErosion=None, refinementSize=2,
                      numProcessors=8, fileName="*.nii.gz"):
    """
    Run leave-one-out segmentation over every image matching *fileName*.

    If *prevResultsPath* is given, a boundary refinement pass is run using
    those earlier results; otherwise an initial segmentation pass is run.
    Results are written to *savePath* with the target's affine preserved.

    Assumes images are in common template space,
    otherwise registration (not performed here) will be required for each target image
    """
    files = glob.glob(join(imagesPath, fileName))
    print "Number of files found:", len(files)
    dataset = [basename(x) for x in files]
    # NOTE: the filename-level exclusion below is in addition to the
    # subject-id exclusion done inside the segment functions themselves.
    if prevResultsPath is not None:
        # do refinement
        for targetFile in dataset:
            trainingSet = [x for x in dataset if x != targetFile]
            results = refinement_saps_segment(trainingSet, targetFile, imagesPath, labelsPath,
                                              patchSize, k, spatialWeight,
                                              prevResultsPath, dtLabels, preDtErosion=preDtErosion,
                                              boundaryRefinementSize=refinementSize,
                                              numProcessors=numProcessors)
            save_3d_labels_data(results, get_affine(join(imagesPath, targetFile)),
                                join(savePath, targetFile))
    else:
        # do initial segmentation
        for targetFile in dataset:
            trainingSet = [x for x in dataset if x != targetFile]
            results = initial_saps_segment(trainingSet, targetFile, imagesPath, labelsPath,
                                           patchSize, k, spatialWeight, numProcessors=numProcessors)
            save_3d_labels_data(results, get_affine(join(imagesPath, targetFile)),
                                join(savePath, targetFile))
if __name__ == '__main__':
    # Command-line front end for run_leave_one_out.
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument("--imagesPath", default=None,
                        help="Set path to images (specify folder)")
    parser.add_argument("--labelsPath", default=None,
                        help="Set path to labels (specify folder) ")
    parser.add_argument("--savePath", default=None,
                        help="Set path to save results (specify folder)")
    parser.add_argument("--prevResultsPath", default=None,
                        help="Set path to initial results for refinement (specify folder)")
    parser.add_argument("--fileName", default="*.nii.gz",
                        help="Specify which files to work on (takes regex)")
    parser.add_argument("--patchSize", type=int, default=7, nargs="+",
                        help="Set the patch size to use")
    parser.add_argument("-k", type=int, default=15,
                        help="Set number of nearest neighbours to use")
    # NOTE(review): default here (10) differs from run_leave_one_out's own
    # default (400), and the help text looks copy-pasted — confirm.
    parser.add_argument("--spatialWeight", type=float, default=10,
                        help="Set path to initial results")
    parser.add_argument("--dtLabels", type=int, default=None, nargs="+",
                        help="Set the labels (structures) to use to provide adaptive spatial context")
    parser.add_argument("--preDtErosion", type=int, default=None,
                        help="Set the erosion of labels data to apply prior to any distance transforms")
    parser.add_argument("--refinementSize", type=int, default=2,
                        help="Set boundary size for refinement (number of dilations-erosions used)")
    parser.add_argument("--numProcessors", type=int, default=10,
                        help="Set number of processors to use")

    options = parser.parse_args()

    run_leave_one_out(options.imagesPath, options.labelsPath, options.savePath, patchSize=options.patchSize,
                      k=options.k, prevResultsPath=options.prevResultsPath,
                      dtLabels=options.dtLabels, preDtErosion=options.preDtErosion,
                      spatialWeight=options.spatialWeight, numProcessors=options.numProcessors,
                      fileName=options.fileName, refinementSize=options.refinementSize)
    print "Done!"
|
iZehan/spatial-pbs
|
examples/simplerun.py
|
Python
|
bsd-3-clause
| 7,287 | 0.004117 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from random import random
from math import floor
from .common import InfoExtractor
from ..utils import (
ExtractorError,
remove_end,
sanitized_Request,
)
class IPrimaIE(InfoExtractor):
    """Extractor for play.iprima.cz (RTMP streams)."""

    # Marked non-working: the extractor is disabled in default runs.
    _WORKING = False

    _VALID_URL = r'https?://play\.iprima\.cz/(?:[^/]+/)*(?P<id>[^?#]+)'

    _TESTS = [{
        'url': 'http://play.iprima.cz/particka/particka-92',
        'info_dict': {
            'id': '39152',
            'ext': 'flv',
            'title': 'Partička (92)',
            'description': 'md5:74e9617e51bca67c3ecfb2c6f9766f45',
            'thumbnail': 'http://play.iprima.cz/sites/default/files/image_crops/image_620x349/3/491483_particka-92_image_620x349.jpg',
        },
        'params': {
            'skip_download': True,  # requires rtmpdump
        },
    }, {
        'url': 'http://play.iprima.cz/particka/tchibo-particka-jarni-moda',
        'info_dict': {
            'id': '9718337',
            'ext': 'flv',
            'title': 'Tchibo Partička - Jarní móda',
            'thumbnail': 're:^http:.*\.jpg$',
        },
        'params': {
            'skip_download': True,  # requires rtmpdump
        },
    }, {
        'url': 'http://play.iprima.cz/zpravy-ftv-prima-2752015',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        # Czech "you do not have permission" page means geo/permission block.
        if re.search(r'Nemáte oprávnění přistupovat na tuto stránku\.\s*</div>', webpage):
            raise ExtractorError(
                '%s said: You do not have permission to access this page' % self.IE_NAME, expected=True)

        # Fetch the player embed with two random cache-busting tokens.
        player_url = (
            'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
            (floor(random() * 1073741824), floor(random() * 1073741824))
        )

        req = sanitized_Request(player_url)
        req.add_header('Referer', url)
        playerpage = self._download_webpage(req, video_id)

        # Second findall match holds the stream base URL pieces.
        base_url = ''.join(re.findall(r"embed\['stream'\] = '(.+?)'.+'(\?auth=)'.+'(.+?)';", playerpage)[1])

        zoneGEO = self._html_search_regex(r'"zoneGEO":(.+?),', webpage, 'zoneGEO')
        if zoneGEO != '0':
            # Non-default geo zone uses a zone-specific auth token endpoint.
            base_url = base_url.replace('token', 'token_' + zoneGEO)

        formats = []
        for format_id in ['lq', 'hq', 'hd']:
            filename = self._html_search_regex(
                r'"%s_id":(.+?),' % format_id, webpage, 'filename')

            if filename == 'null':
                continue

            real_id = self._search_regex(
                r'Prima-(?:[0-9]{10}|WEB)-([0-9]+)[-_]',
                filename, 'real video id')

            if format_id == 'lq':
                quality = 0
            elif format_id == 'hq':
                quality = 1
            elif format_id == 'hd':
                quality = 2
                # HD files live under the hq/ RTMP path prefix.
                filename = 'hq/' + filename

            formats.append({
                'format_id': format_id,
                'url': base_url,
                'quality': quality,
                'play_path': 'mp4:' + filename.replace('"', '')[:-4],
                'rtmp_live': True,
                'ext': 'flv',
            })

        self._sort_formats(formats)

        # NOTE(review): if every quality's filename is 'null', real_id is
        # never bound and the return below raises NameError — confirm
        # whether that situation can occur in practice.
        return {
            'id': real_id,
            'title': remove_end(self._og_search_title(webpage), ' | Prima PLAY'),
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': formats,
            'description': self._search_regex(
                r'<p[^>]+itemprop="description"[^>]*>([^<]+)',
                webpage, 'description', default=None),
        }
|
dyn888/youtube-dl
|
youtube_dl/extractor/iprima.py
|
Python
|
unlicense
| 3,713 | 0.00216 |
# -*- coding: utf-8 -*-
#
# This file is covered by the GNU Public Licence v3 licence. See http://www.gnu.org/licenses/gpl.txt
#
'''
List of controllers, with indirections to object loaded by Spring
'''
import springpython.context
from django.http import HttpResponse
from django.template import loader, Context
from os import listdir
from os.path import isdir, isfile, sep
from settings import APPLICATION_CONTEXTS, TEMPLATE_DIRS, DEBUG
import logging
LOGGER = logging.getLogger('app')
class CommonController(object):
    """Base class for controllers: optional pre/post handlers plus URL list."""

    def __init__(self):
        self.prehandler = None
        self.posthandler = None
        self.urls = []

    def _geturls(self):
        # Subclasses must provide their URL mappings.
        raise Exception("No URL defined")

    def prehandle(self, request):
        """Run the pre-handler(s); return the first non-None template, else None."""
        configured = self.prehandler
        handlers = configured if isinstance(configured, list) else [configured]
        tpl = None
        for handler in handlers:
            if isinstance(handler, PreHandler):
                tpl = handler.handle(request)
                if tpl is not None:
                    break
        return tpl

    def posthandle(self, request, tpl):
        """Run the post-handler(s) over the template produced by the controller."""
        configured = self.posthandler
        handlers = configured if isinstance(configured, list) else [configured]
        for handler in handlers:
            if isinstance(handler, PostHandler):
                handler.handle(request, tpl)
class PreHandler(object):
    """Hook run before a controller method; return a template to short-circuit."""

    def handle(self, request):
        # Default: no pre-processing, controller proceeds normally.
        pass
class PostHandler(object):
    """Hook run after a controller method, receiving the produced template."""

    def handle(self, request, tpl):
        # Default: no post-processing.
        pass
# Templates loading
class TemplatesContainer(object):
    """Recursive container of Django templates loaded from a directory.

    Each ``.html`` file becomes an attribute named after the file (without
    extension); each sub-directory becomes a nested TemplatesContainer, so
    templates are addressed as dotted paths (``dir.name``).
    """

    def __init__(self, tpldir=TEMPLATE_DIRS, prefix=''):
        self.__templates = {}
        self.__tpldir = tpldir
        self.__prefix = prefix
        self.__load()

    def after_properties_set(self):
        # Spring lifecycle hook; nothing to do.
        pass

    def set_app_context(self, context):
        # Spring context-injection hook; nothing to do.
        pass

    def __load(self):
        # Load all templates found. Sub-directories become nested containers.
        for fileent in listdir(self.__tpldir):
            if isfile(self.__tpldir + sep + fileent):
                self.__templates[fileent.replace('.html', '')] = loader.get_template(self.__prefix + fileent)
            elif isdir(self.__tpldir + sep + fileent):
                self.__templates[fileent] = TemplatesContainer(self.__tpldir + sep + fileent, self.__prefix + fileent + sep)

    def __getattr__(self, name):
        # In DEBUG, re-scan the template directory so edits show up live.
        if DEBUG:
            self.__load()
        if name not in self.__templates:
            LOGGER.error('Internal error: Template %s is missing' % (name))
            raise Exception('Internal error: Template %s is missing' % (name))
        return self.__templates[name]

    def render(self, name, context=None):
        """Render template *name* (dotted path) with *context* (dict or None)."""
        name_i = name.split('.', 2)
        tpl = self
        while type(tpl) == TemplatesContainer:
            try:
                tpl = tpl.__getattr__(name_i.pop(0))
            except Exception:
                # Was a bare ``except:``; narrowed so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                LOGGER.error('Internal error: Template %s is missing' % (name))
                raise Exception('Internal error: Template %s is missing' % (name))
        # None sentinel replaces the old mutable ``{}`` default argument.
        return tpl.render(Context(context if context is not None else {}))

    def content(self, content):
        # Wrap pre-rendered content in an OK HTML response.
        return HttpResponse(content=content, mimetype="text/html", status=200)

    def response(self, name, context=None, status=200, mimetype="text/html"):
        # Render the named template and wrap it in an HTTP response.
        return HttpResponse(content=self.render(name, context), mimetype=mimetype, status=status)

    def redirect(self, url):
        # Client-side redirect via meta refresh (keeps HTTP status 200).
        return HttpResponse(content='<html><head><meta http-equiv="refresh" content="0; url=%s"/></head></html>' % url, mimetype="text/html", status=200)

    def forbidden(self):
        return self.response('forbidden')

    def empty(self):
        return self.content('')

    def error(self, msg):
        return self.response('message_return', { 'error':msg })

    def underConstruction(self):
        return self.response('under_construction')
# Controllers are entry point of the application, so this is the good place to load the application (lazy loading)
ApplicationContext = springpython.context.ApplicationContext(APPLICATION_CONTEXTS)

'''
Declare controller. This first layer has two purposes :
1/ Check security
2/ Call the IoC managed controller method
'''

# Controllers
# Shared template-container bean, plus a per-process cache of controller
# beans keyed by bean name (filled lazily by run_controller).
templates = ApplicationContext.get_object('templatesContainer')
controllersmap = {}
def run_controller(request, *kargs, **kwargs):
    """Dispatch a request to an IoC-managed controller method.

    Routing kwargs: 'controller' (bean name), 'method' (attribute to call)
    and optional 'right' (session key required for access). Every other
    kwarg is forwarded to the controller method.
    """
    # Initialise the routing variables so a URL conf that omits one of the
    # keys (notably 'right') cannot raise NameError further down — the
    # previous code left them unbound in that case.
    controller = method = right = None
    kwargsremain = {}
    for key, val in kwargs.iteritems():
        if key == 'controller':
            controller = val
        elif key == 'method':
            method = val
        elif key == 'right':
            right = val
        else:
            kwargsremain[key] = val
    # Lazily instantiate and cache controller beans by name.
    if controller not in controllersmap.keys():
        controllersmap[controller] = ApplicationContext.get_object(controller)
    controllerObj = controllersmap[controller]
    try:
        if right is not None and request.session.get(right, default=None) is None:
            # Access right required but not present in the session.
            tpl = templates.forbidden()
        else:
            # Pre-handlers may short-circuit by returning a template.
            tpl = controllerObj.prehandle(request)
            if tpl is None:
                tpl = getattr(controllerObj, method)(request, *kargs, **kwargsremain)
            controllerObj.posthandle(request, tpl)
    except Exception as exc:
        # Broad by design: any controller failure renders the error template.
        tpl = templates.error(exc)
    return tpl
|
smorand/dtol
|
src/core/controllers.py
|
Python
|
gpl-3.0
| 4,697 | 0.031935 |
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
# from django.conf import settings
# Old-style Django URL conf: string view names resolved in www.sight.views.
urlpatterns = patterns('www.sight.views',
    url(r'^$', 'sight_map'),  # root -> sight_map view
    url(r'^(?P<sight_id>\d+)$', 'sight_detail'),  # numeric id -> sight_detail
)
|
lantianlz/qiexing
|
www/sight/urls.py
|
Python
|
gpl-2.0
| 287 | 0 |
# -*- coding: UTF-8 -*-
# Copyright 2016-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from lino_xl.lib.tickets.models import *
from lino.api import _
# Hide the 'closed' element of Ticket for this application variant.
Ticket.hide_elements('closed')
# class Ticket(Ticket):
# class Meta(Ticket.Meta):
# app_label = 'tickets'
# verbose_name = _("Plea")
# verbose_name_plural = _("Pleas")
# abstract = dd.is_abstract_model(__name__, 'Ticket')
# ActiveTickets._label = _("Active pleas")
# UnassignedTickets._label = _("Unassigned pleas")
# PublicTickets._label = _("Public pleas")
# TicketsToTriage._label = _("Pleas to triage")
# TicketsToTalk._label = _("Pleas to talk")
# # TicketsToDo._label = _("Pleas to to")
# AllTickets._label = _("All pleas")
# Relabel Ticket.upgrade_notes as "Solution" for this site.
dd.update_field(
    'tickets.Ticket', 'upgrade_notes', verbose_name=_("Solution"))
# dd.update_field(
# 'tickets.Ticket', 'state', default=TicketStates.todo.as_callable)
class TicketDetail(TicketDetail):
    """Customised ticket detail layout: three tabs (general/history/more)."""

    main = "general history_tab more"

    # First tab: summary panel plus votes, uploads, comments and demands.
    general = dd.Panel("""
    general1:60 votes.VotesByVotable:20 uploads.UploadsByController
    description:30 comments.CommentsByRFC:30 skills.DemandsByDemander #working.SessionsByTicket:20
    """, label=_("General"))

    general1 = """
    summary:40 id:6 deadline
    user:12 end_user:12 #faculty #topic
    site workflow_buttons
    """

    # Change history; visible to triagers only.
    history_tab = dd.Panel("""
    changes.ChangesByMaster:50 #stars.StarsByController:20
    """, label=_("History"), required_roles=dd.login_required(Triager))

    # Extra metadata and links; visible to triagers only.
    more = dd.Panel("""
    more1:60 #skills.AssignableWorkersByTicket:20
    upgrade_notes LinksByTicket skills.OffersByDemander
    """, label=_("More"), required_roles=dd.login_required(Triager))

    more1 = """
    created modified ticket_type:10
    state priority project
    # standby feedback closed
    """
# Install the customised layout on the Tickets table.
Tickets.detail_layout = TicketDetail()
|
lino-framework/book
|
lino_book/projects/anna/lib/tickets/models.py
|
Python
|
bsd-2-clause
| 1,862 | 0.001611 |
def incrementDict(dictKey, dictionary, inc=1):
    """Add *inc* to dictionary[dictKey], treating a missing key as 0.

    The dictionary is modified in place and also returned for convenience.
    """
    # dict.get collapses the present/absent branches into one expression.
    dictionary[dictKey] = dictionary.get(dictKey, 0) + inc
    return dictionary
def nonZeroCount(dictionary):
    """Return True if at least one count in *dictionary* is positive.

    Every value is asserted to be non-negative (they are counts), matching
    the original contract.
    """
    # .values() (instead of the Python-2-only .iteritems()) also drops the
    # unused key variable and works on both Python 2 and 3.
    for count in dictionary.values():
        assert count >= 0
        if count > 0:
            return True
    return False
|
Yagniksuchak/CodeParser
|
src/logChunk/dictUtil.py
|
Python
|
bsd-3-clause
| 602 | 0.031561 |
"""
Tests for uu module.
Nick Mathewson
"""
import unittest
from test.support import os_helper
import os
import stat
import sys
import uu
import io
plaintext = b"The symbols on top of your keyboard are !@#$%^&*()_+|~\n"
encodedtext = b"""\
M5&AE('-Y;6)O;',@;VX@=&]P(&]F('EO=7(@:V5Y8F]A<F0@87)E("% (R0E
*7B8J*"E?*WQ^"@ """
# Stolen from io.py
class FakeIO(io.TextIOWrapper):
    """Text I/O implementation using an in-memory buffer.

    Can be a used as a drop-in replacement for sys.stdin and sys.stdout.
    """

    # XXX This is really slow, but fully functional
    def __init__(self, initial_value="", encoding="utf-8",
                 errors="strict", newline="\n"):
        # Wrap an in-memory bytes buffer so nothing touches the filesystem.
        super(FakeIO, self).__init__(io.BytesIO(),
                                     encoding=encoding,
                                     errors=errors,
                                     newline=newline)
        # Remember codec settings for getvalue() below.
        self._encoding = encoding
        self._errors = errors
        if initial_value:
            if not isinstance(initial_value, str):
                initial_value = str(initial_value)
            self.write(initial_value)
            # Rewind so the pre-seeded value can be read back like stdin.
            self.seek(0)

    def getvalue(self):
        # Flush pending text, then decode the raw buffer back to str.
        self.flush()
        return self.buffer.getvalue().decode(self._encoding, self._errors)
def encodedtextwrapped(mode, filename, backtick=False):
    """Wrap the module-level encoded payload in uuencode begin/end framing.

    With *backtick* the space padding characters are replaced by backticks,
    matching uu's backtick output mode.
    """
    header = bytes("begin %03o %s\n" % (mode, filename), "ascii")
    if backtick:
        body = encodedtext.replace(b' ', b'`')
        trailer = b"\n`\nend\n"
    else:
        body = encodedtext
        trailer = b"\n \nend\n"
    return header + body + trailer
class UUTest(unittest.TestCase):
    """Round-trip and error-handling tests for uu.encode/uu.decode."""

    def test_encode(self):
        # Default mode is 0o666 when none is given.
        inp = io.BytesIO(plaintext)
        out = io.BytesIO()
        uu.encode(inp, out, "t1")
        self.assertEqual(out.getvalue(), encodedtextwrapped(0o666, "t1"))
        inp = io.BytesIO(plaintext)
        out = io.BytesIO()
        uu.encode(inp, out, "t1", 0o644)
        self.assertEqual(out.getvalue(), encodedtextwrapped(0o644, "t1"))
        inp = io.BytesIO(plaintext)
        out = io.BytesIO()
        uu.encode(inp, out, "t1", backtick=True)
        self.assertEqual(out.getvalue(), encodedtextwrapped(0o666, "t1", True))
        # backtick together with mode must be keyword-only here.
        with self.assertRaises(TypeError):
            uu.encode(inp, out, "t1", 0o644, True)

    def test_decode(self):
        # Both space- and backtick-padded encodings must decode identically.
        for backtick in True, False:
            inp = io.BytesIO(encodedtextwrapped(0o666, "t1", backtick=backtick))
            out = io.BytesIO()
            uu.decode(inp, out)
            self.assertEqual(out.getvalue(), plaintext)
            # Leading junk lines (even containing 'begin') must be skipped.
            inp = io.BytesIO(
                b"UUencoded files may contain many lines,\n" +
                b"even some that have 'begin' in them.\n" +
                encodedtextwrapped(0o666, "t1", backtick=backtick)
            )
            out = io.BytesIO()
            uu.decode(inp, out)
            self.assertEqual(out.getvalue(), plaintext)

    def test_truncatedinput(self):
        # Missing 'end' line must raise uu.Error with a specific message.
        inp = io.BytesIO(b"begin 644 t1\n" + encodedtext)
        out = io.BytesIO()
        try:
            uu.decode(inp, out)
            self.fail("No exception raised")
        except uu.Error as e:
            self.assertEqual(str(e), "Truncated input file")

    def test_missingbegin(self):
        # Empty input has no 'begin' line at all.
        inp = io.BytesIO(b"")
        out = io.BytesIO()
        try:
            uu.decode(inp, out)
            self.fail("No exception raised")
        except uu.Error as e:
            self.assertEqual(str(e), "No valid begin line found in input file")

    def test_garbage_padding(self):
        # Issue #22406
        encodedtext1 = (
            b"begin 644 file\n"
            # length 1; bits 001100 111111 111111 111111
            b"\x21\x2C\x5F\x5F\x5F\n"
            b"\x20\n"
            b"end\n"
        )
        encodedtext2 = (
            b"begin 644 file\n"
            # length 1; bits 001100 111111 111111 111111
            b"\x21\x2C\x5F\x5F\x5F\n"
            b"\x60\n"
            b"end\n"
        )
        plaintext = b"\x33"  # 00110011
        for encodedtext in encodedtext1, encodedtext2:
            with self.subTest("uu.decode()"):
                inp = io.BytesIO(encodedtext)
                out = io.BytesIO()
                uu.decode(inp, out, quiet=True)
                self.assertEqual(out.getvalue(), plaintext)
            with self.subTest("uu_codec"):
                import codecs
                decoded = codecs.decode(encodedtext, "uu_codec")
                self.assertEqual(decoded, plaintext)

    def test_newlines_escaped(self):
        # Test newlines are escaped with uu.encode
        inp = io.BytesIO(plaintext)
        out = io.BytesIO()
        filename = "test.txt\n\roverflow.txt"
        safefilename = b"test.txt\\n\\roverflow.txt"
        uu.encode(inp, out, filename)
        self.assertIn(safefilename, out.getvalue())
class UUStdIOTest(unittest.TestCase):
    """Exercise uu's "-" filename convention (read/write the std streams)."""

    def setUp(self):
        # Remember the real streams so tearDown can always restore them.
        self.stdin = sys.stdin
        self.stdout = sys.stdout

    def tearDown(self):
        sys.stdin = self.stdin
        sys.stdout = self.stdout

    def test_encode(self):
        """Encoding from "-" to "-" reads stdin and writes stdout."""
        sys.stdin = FakeIO(plaintext.decode("ascii"))
        sys.stdout = FakeIO()
        uu.encode("-", "-", "t1", 0o666)
        expected = encodedtextwrapped(0o666, "t1").decode("ascii")
        self.assertEqual(sys.stdout.getvalue(), expected)

    def test_decode(self):
        """Decoding from "-" to "-" reads stdin and writes stdout."""
        sys.stdin = FakeIO(encodedtextwrapped(0o666, "t1").decode("ascii"))
        sys.stdout = FakeIO()
        uu.decode("-", "-")
        captured = sys.stdout
        # Restore the real streams before asserting so a failure report
        # goes to the actual stdout rather than the FakeIO sink.
        sys.stdout = self.stdout
        sys.stdin = self.stdin
        self.assertEqual(captured.getvalue(), plaintext.decode("ascii"))
class UUFileTest(unittest.TestCase):
    """Encode/decode through real files on disk, by file object and by name."""

    def setUp(self):
        # uu.encode() supports only ASCII file names
        self.tmpin = os_helper.TESTFN_ASCII + "i"
        self.tmpout = os_helper.TESTFN_ASCII + "o"
        # Registered cleanups run even if a test body fails mid-way.
        self.addCleanup(os_helper.unlink, self.tmpin)
        self.addCleanup(os_helper.unlink, self.tmpout)

    def test_encode(self):
        # Encode via already-open file objects.
        with open(self.tmpin, 'wb') as fin:
            fin.write(plaintext)

        with open(self.tmpin, 'rb') as fin:
            with open(self.tmpout, 'wb') as fout:
                uu.encode(fin, fout, self.tmpin, mode=0o644)

        with open(self.tmpout, 'rb') as fout:
            s = fout.read()
        self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))

        # in_file and out_file as filenames
        uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0o644)
        with open(self.tmpout, 'rb') as fout:
            s = fout.read()
        self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))

    def test_decode(self):
        # The begin line names self.tmpout, so decode() creates that file.
        with open(self.tmpin, 'wb') as f:
            f.write(encodedtextwrapped(0o644, self.tmpout))

        with open(self.tmpin, 'rb') as f:
            uu.decode(f)

        with open(self.tmpout, 'rb') as f:
            s = f.read()
        self.assertEqual(s, plaintext)
        # XXX is there an xp way to verify the mode?

    def test_decode_filename(self):
        # Same as test_decode, but passing the input path instead of a file.
        with open(self.tmpin, 'wb') as f:
            f.write(encodedtextwrapped(0o644, self.tmpout))

        uu.decode(self.tmpin)

        with open(self.tmpout, 'rb') as f:
            s = f.read()
        self.assertEqual(s, plaintext)

    def test_decodetwice(self):
        # Verify that decode() will refuse to overwrite an existing file
        with open(self.tmpin, 'wb') as f:
            f.write(encodedtextwrapped(0o644, self.tmpout))
        with open(self.tmpin, 'rb') as f:
            uu.decode(f)

        with open(self.tmpin, 'rb') as f:
            self.assertRaises(uu.Error, uu.decode, f)

    def test_decode_mode(self):
        # Verify that decode() will set the given mode for the out_file
        expected_mode = 0o444
        with open(self.tmpin, 'wb') as f:
            f.write(encodedtextwrapped(expected_mode, self.tmpout))

        # make file writable again, so it can be removed (Windows only)
        self.addCleanup(os.chmod, self.tmpout, expected_mode | stat.S_IWRITE)

        with open(self.tmpin, 'rb') as f:
            uu.decode(f)

        self.assertEqual(
            stat.S_IMODE(os.stat(self.tmpout).st_mode),
            expected_mode
        )
if __name__ == "__main__":
    unittest.main()
|
brython-dev/brython
|
www/src/Lib/test/test_uu.py
|
Python
|
bsd-3-clause
| 8,294 | 0.000844 |
""" Attention Factory
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
from functools import partial
from .bottleneck_attn import BottleneckAttn
from .cbam import CbamModule, LightCbamModule
from .eca import EcaModule, CecaModule
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .halo_attn import HaloAttn
from .lambda_layer import LambdaLayer
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .selective_kernel import SelectiveKernel
from .split_attn import SplitAttn
from .squeeze_excite import SEModule, EffectiveSEModule
def get_attn(attn_type):
    """Resolve an attention spec into something instantiable (or pass it through).

    Accepts an already-built ``nn.Module`` (returned unchanged), ``None``/``False``
    (returns ``None``), ``True`` (the default SE module), a string shortcut name,
    or any other object, which is assumed to already be a module class/factory.
    Unknown string names trigger an assertion.
    """
    # An instantiated module is passed straight through.
    if isinstance(attn_type, torch.nn.Module):
        return attn_type
    if attn_type is None:
        return None
    # Legacy boolean form: True selects the default SE module.
    if isinstance(attn_type, bool):
        return SEModule if attn_type else None
    # Anything that is not a string is assumed to be a module class / callable.
    if not isinstance(attn_type, str):
        return attn_type

    name = attn_type.lower()

    # Lightweight attention modules (channel and/or coarse spatial).
    # Typically added to existing network architecture blocks in addition to existing convolutions.
    if name == 'se':
        return SEModule
    if name == 'ese':
        return EffectiveSEModule
    if name == 'eca':
        return EcaModule
    if name == 'ecam':
        return partial(EcaModule, use_mlp=True)
    if name == 'ceca':
        return CecaModule
    if name == 'ge':
        return GatherExcite
    if name == 'gc':
        return GlobalContext
    if name == 'gca':
        return partial(GlobalContext, fuse_add=True, fuse_scale=False)
    if name == 'cbam':
        return CbamModule
    if name == 'lcbam':
        return LightCbamModule

    # Attention / attention-like modules w/ significant params.
    # Typically replace some of the existing workhorse convs in a network architecture.
    # All of these accept a stride argument and can spatially downsample the input.
    if name == 'sk':
        return SelectiveKernel
    if name == 'splat':
        return SplitAttn

    # Self-attention / attention-like modules w/ significant compute and/or params.
    if name == 'lambda':
        return LambdaLayer
    if name == 'bottleneck':
        return BottleneckAttn
    if name == 'halo':
        return HaloAttn
    if name == 'nl':
        return NonLocalAttn
    if name == 'bat':
        return BatNonLocalAttn

    # Woops!
    assert False, "Invalid attn module (%s)" % name
def create_attn(attn_type, channels, **kwargs):
    """Instantiate the attention module selected by ``attn_type``, or return None."""
    attn_cls = get_attn(attn_type)
    if attn_cls is None:
        return None
    # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels
    return attn_cls(channels, **kwargs)
|
rwightman/pytorch-image-models
|
timm/models/layers/create_attn.py
|
Python
|
apache-2.0
| 3,526 | 0.002269 |
from django.conf.urls import patterns, include, url
from cover.views import CoverView
# Routes for the cover app: the app root serves the cover page view.
# NOTE(review): ``patterns()`` was deprecated in Django 1.8 and removed in
# 1.10; if this project upgrades, replace with a plain list of url() entries.
urlpatterns = patterns('cover.views',
    url(r'^$', CoverView.as_view(), name='cover'),
)
|
denever/discipline_terra
|
cover/urls.py
|
Python
|
gpl-2.0
| 198 | 0.005051 |
"""Implementations of commenting abstract base class searches."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class CommentSearch:
    """The search interface for governing comment searches."""

    # NOTE(review): ``__metaclass__`` is the Python 2 spelling; on Python 3 it
    # has no effect, so abstractness is not actually enforced there -- confirm
    # whether Python 2 support is still required before modernizing.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def search_among_comments(self, comment_ids):
        """Execute this search among the given list of comments.

        :param comment_ids: list of comments
        :type comment_ids: ``osid.id.IdList``
        :raise: ``NullArgument`` -- ``comment_ids`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_comment_results(self, comment_search_order):
        """Specify an ordering to the search results.

        :param comment_search_order: comment search order
        :type comment_search_order: ``osid.commenting.CommentSearchOrder``
        :raise: ``NullArgument`` -- ``comment_search_order`` is ``null``
        :raise: ``Unsupported`` -- ``comment_search_order`` is not of this service

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def get_comment_search_record(self, comment_search_record_type):
        """Gets the comment search record corresponding to the given comment search record ``Type``.

        This method is used to retrieve an object implementing the
        requested record.

        :param comment_search_record_type: a comment search record type
        :type comment_search_record_type: ``osid.type.Type``
        :return: the comment search record
        :rtype: ``osid.commenting.records.CommentSearchRecord``
        :raise: ``NullArgument`` -- ``comment_search_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(comment_search_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.commenting.records.CommentSearchRecord
class CommentSearchResults:
    """This interface provides a means to capture results of a search."""

    # NOTE(review): Python 2 metaclass spelling; ineffective on Python 3.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_comments(self):
        """Gets the comment list resulting from a search.

        :return: the comment list
        :rtype: ``osid.commenting.CommentList``
        :raise: ``IllegalState`` -- list has already been retrieved

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.commenting.CommentList

    # Property alias so implementations expose ``results.comments``.
    comments = property(fget=get_comments)

    @abc.abstractmethod
    def get_comment_query_inspector(self):
        """Gets the inspector for the query to examine the terns used in the search.

        :return: the query inspector
        :rtype: ``osid.commenting.CommentQueryInspector``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.commenting.CommentQueryInspector

    comment_query_inspector = property(fget=get_comment_query_inspector)

    @abc.abstractmethod
    def get_comment_search_results_record(self, comment_search_record_type):
        """Gets the comment search results record corresponding to the given comment search record ``Type``.

        This method is used to retrieve an object implementing the
        requested record.

        :param comment_search_record_type: a comment search record type
        :type comment_search_record_type: ``osid.type.Type``
        :return: the comment search results record
        :rtype: ``osid.commenting.records.CommentSearchResultsRecord``
        :raise: ``NullArgument`` -- ``comment_search_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(comment_search_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.commenting.records.CommentSearchResultsRecord
class BookSearch:
    """The search interface for governing book searches."""

    # NOTE(review): Python 2 metaclass spelling; ineffective on Python 3.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def search_among_books(self, book_ids):
        """Execute this search among the given list of books.

        :param book_ids: list of books
        :type book_ids: ``osid.id.IdList``
        :raise: ``NullArgument`` -- ``book_ids`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def order_book_results(self, book_search_order):
        """Specify an ordering to the search results.

        :param book_search_order: book search order
        :type book_search_order: ``osid.commenting.BookSearchOrder``
        :raise: ``NullArgument`` -- ``book_search_order`` is ``null``
        :raise: ``Unsupported`` -- ``book_search_order`` is not of this service

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def get_book_search_record(self, book_search_record_type):
        """Gets the book search record corresponding to the given book search record ``Type``.

        This method is used to retrieve an object implementing the
        requested record.

        :param book_search_record_type: a book search record type
        :type book_search_record_type: ``osid.type.Type``
        :return: the book search record
        :rtype: ``osid.commenting.records.BookSearchRecord``
        :raise: ``NullArgument`` -- ``book_search_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(book_search_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.commenting.records.BookSearchRecord
class BookSearchResults:
    """This interface provides a means to capture results of a search."""

    # NOTE(review): Python 2 metaclass spelling; ineffective on Python 3.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_books(self):
        """Gets the book list resulting from a search.

        :return: the book list
        :rtype: ``osid.commenting.BookList``
        :raise: ``IllegalState`` -- list has already been retrieved

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.commenting.BookList

    # Property alias so implementations expose ``results.books``.
    books = property(fget=get_books)

    @abc.abstractmethod
    def get_book_query_inspector(self):
        """Gets the inspector for the query to examine the terns used in the search.

        :return: the query inspector
        :rtype: ``osid.commenting.BookQueryInspector``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.commenting.BookQueryInspector

    book_query_inspector = property(fget=get_book_query_inspector)

    @abc.abstractmethod
    def get_book_search_results_record(self, book_search_record_type):
        """Gets the book search results record corresponding to the given book search record Type.

        This method is used to retrieve an object implementing the
        requested record.

        :param book_search_record_type: a book search record type
        :type book_search_record_type: ``osid.type.Type``
        :return: the book search results record
        :rtype: ``osid.commenting.records.BookSearchResultsRecord``
        :raise: ``NullArgument`` -- ``BookSearchRecordType`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(book_search_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.commenting.records.BookSearchResultsRecord
|
mitsei/dlkit
|
dlkit/abstract_osid/commenting/searches.py
|
Python
|
mit
| 8,401 | 0.001309 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Github to Elastic class helper
#
# Copyright (C) 2015 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
import json
import logging
import re
from datetime import datetime
from dateutil import parser
from .utils import get_time_diff_days
from grimoire.elk.enrich import Enrich, metadata
# Base URL stripped from item origins to derive the "owner/repo" slug.
GITHUB = 'https://github.com/'
class GitHubEnrich(Enrich):
    """Enrich raw GitHub issues/pull requests for Elasticsearch.

    Adds identity (SortingHat), geolocation (Google Maps API, cached in ES)
    and project metadata to items produced by the GitHub backend.
    """

    # Identity roles whose data is attached to each raw item.
    roles = ['assignee_data', 'user_data']

    def __init__(self, db_sortinghat=None, db_projects_map=None, json_projects_map=None,
                 db_user='', db_password='', db_host=''):
        super().__init__(db_sortinghat, db_projects_map, json_projects_map,
                         db_user, db_password, db_host)

        self.users = {}  # cache users
        self.location = {}  # cache users location
        self.location_not_found = []  # location not found in map api

    def set_elastic(self, elastic):
        self.elastic = elastic
        # Recover cache data from Elastic
        self.geolocations = self.geo_locations_from_es()

    def get_field_author(self):
        return "user_data"

    def get_fields_uuid(self):
        return ["assignee_uuid", "user_uuid"]

    def get_identities(self, item):
        """ Return the identities from an item """
        identities = []

        item = item['data']
        for identity in ['user', 'assignee']:
            if item[identity]:
                # In user_data we have the full user data
                user = self.get_sh_identity(item[identity + "_data"])
                if user:
                    identities.append(user)
        return identities

    def get_sh_identity(self, item, identity_field=None):
        """Build a SortingHat identity dict from a user dict or a raw item."""
        identity = {}

        user = item  # by default a specific user dict is expected
        # Check the type before membership, so non-dict inputs never reach
        # the 'data' lookup (original tested membership first).
        if isinstance(item, dict) and 'data' in item:
            user = item['data'][identity_field]

        if not user:
            return identity

        identity['username'] = user['login']
        identity['email'] = None
        identity['name'] = None
        if 'email' in user:
            identity['email'] = user['email']
        if 'name' in user:
            identity['name'] = user['name']
        return identity

    def get_geo_point(self, location):
        """Return {'lat', 'lon'} for a free-text location, caching results.

        Uses the in-memory cache first, remembers misses to avoid repeated
        API calls, and otherwise queries the Google Maps geocode API.
        """
        geo_point = geo_code = None

        if location is None:
            return geo_point

        if location in self.geolocations:
            geo_location = self.geolocations[location]
            geo_point = {
                "lat": geo_location['lat'],
                "lon": geo_location['lon']
            }
        elif location in self.location_not_found:
            # Don't call the API.
            pass
        else:
            url = 'https://maps.googleapis.com/maps/api/geocode/json'
            params = {'sensor': 'false', 'address': location}
            r = self.requests.get(url, params=params)
            try:
                logging.debug("Using Maps API to find %s" % (location))
                r_json = r.json()
                geo_code = r_json['results'][0]['geometry']['location']
            except Exception:
                # Narrowed from a bare ``except:`` so Ctrl-C still works;
                # malformed/empty responses are remembered as misses.
                if location not in self.location_not_found:
                    logging.debug("Can't find geocode for " + location)
                    self.location_not_found.append(location)

        if geo_code:
            geo_point = {
                "lat": geo_code['lat'],
                "lon": geo_code['lng']
            }
            self.geolocations[location] = geo_point

        return geo_point

    def get_github_cache(self, kind, _key):
        """ Get cache data for items of _type using _key as the cache dict key """
        cache = {}
        res_size = 100  # best size?
        _from = 0

        index_github = "github/" + kind

        url = self.elastic.url + "/" + index_github
        url += "/_search" + "?" + "size=%i" % res_size
        r = self.requests.get(url)
        type_items = r.json()

        if 'hits' not in type_items:
            logging.info("No github %s data in ES" % (kind))
        else:
            # Page through the scroll-less search using from/size.
            while len(type_items['hits']['hits']) > 0:
                for hit in type_items['hits']['hits']:
                    item = hit['_source']
                    cache[item[_key]] = item
                _from += res_size
                r = self.requests.get(url + "&from=%i" % _from)
                type_items = r.json()
                if 'hits' not in type_items:
                    break

        return cache

    def geo_locations_from_es(self):
        return self.get_github_cache("geolocations", "location")

    def geo_locations_to_es(self):
        """Bulk-upload the in-memory geolocation cache to Elasticsearch."""
        max_items = self.elastic.max_items_bulk
        current = 0
        bulk_json = ""

        url = self.elastic.url + "/github/geolocations/_bulk"

        logging.debug("Adding geoloc to %s (in %i packs)" % (url, max_items))

        for loc in self.geolocations:
            if current >= max_items:
                self.requests.put(url, data=bulk_json)
                bulk_json = ""
                current = 0

            geopoint = self.geolocations[loc]
            location = geopoint.copy()
            location["location"] = loc
            # First upload the raw issue data to ES
            data_json = json.dumps(location)
            # Don't include in URL non ascii codes
            safe_loc = str(loc.encode('ascii', 'ignore'), 'ascii')
            geo_id = str("%s-%s-%s" % (location["lat"], location["lon"],
                                       safe_loc))
            bulk_json += '{"index" : {"_id" : "%s" } }\n' % (geo_id)
            bulk_json += data_json + "\n"  # Bulk document
            current += 1

        self.requests.put(url, data=bulk_json)

        logging.debug("Adding geoloc to ES Done")

    def get_elastic_mappings(self):
        """ geopoints type is not created in dynamic mapping """
        mapping = """
            {
                "properties": {
                   "assignee_geolocation": {
                       "type": "geo_point"
                   },
                   "user_geolocation": {
                       "type": "geo_point"
                   },
                   "title_analyzed": {
                     "type": "string",
                     "index":"analyzed"
                     }
                }
            }
        """
        return {"items": mapping}

    def get_field_unique_id(self):
        return "ocean-unique-id"

    def get_project_repository(self, eitem):
        repo = eitem['origin']
        return repo

    @metadata
    def get_rich_item(self, item):
        """Build the enriched (rich) document for one raw issue/PR item."""
        rich_issue = {}

        # metadata fields to copy
        copy_fields = ["metadata__updated_on", "metadata__timestamp", "ocean-unique-id", "origin"]
        for f in copy_fields:
            if f in item:
                rich_issue[f] = item[f]
            else:
                rich_issue[f] = None
        # The real data
        issue = item['data']

        rich_issue['time_to_close_days'] = \
            get_time_diff_days(issue['created_at'], issue['closed_at'])

        if issue['state'] != 'closed':
            rich_issue['time_open_days'] = \
                get_time_diff_days(issue['created_at'], datetime.utcnow())
        else:
            rich_issue['time_open_days'] = rich_issue['time_to_close_days']

        rich_issue['user_login'] = issue['user']['login']
        user = issue['user_data']

        if user is not None:
            rich_issue['user_name'] = user['name']
            rich_issue['author_name'] = user['name']
            rich_issue['user_email'] = user['email']
            if rich_issue['user_email']:
                rich_issue["user_domain"] = self.get_email_domain(rich_issue['user_email'])
            rich_issue['user_org'] = user['company']
            rich_issue['user_location'] = user['location']
            rich_issue['user_geolocation'] = self.get_geo_point(user['location'])
        else:
            rich_issue['user_name'] = None
            rich_issue['user_email'] = None
            rich_issue["user_domain"] = None
            rich_issue['user_org'] = None
            rich_issue['user_location'] = None
            rich_issue['user_geolocation'] = None
            rich_issue['author_name'] = None

        assignee = None

        if issue['assignee'] is not None:
            assignee = issue['assignee_data']
            rich_issue['assignee_login'] = issue['assignee']['login']
            rich_issue['assignee_name'] = assignee['name']
            rich_issue['assignee_email'] = assignee['email']
            if rich_issue['assignee_email']:
                rich_issue["assignee_domain"] = self.get_email_domain(rich_issue['assignee_email'])
            rich_issue['assignee_org'] = assignee['company']
            rich_issue['assignee_location'] = assignee['location']
            rich_issue['assignee_geolocation'] = \
                self.get_geo_point(assignee['location'])
        else:
            rich_issue['assignee_name'] = None
            rich_issue['assignee_login'] = None
            rich_issue['assignee_email'] = None
            rich_issue["assignee_domain"] = None
            rich_issue['assignee_org'] = None
            rich_issue['assignee_location'] = None
            rich_issue['assignee_geolocation'] = None

        rich_issue['id'] = issue['id']
        rich_issue['id_in_repo'] = issue['html_url'].split("/")[-1]
        rich_issue['title'] = issue['title']
        rich_issue['title_analyzed'] = issue['title']
        rich_issue['state'] = issue['state']
        rich_issue['created_at'] = issue['created_at']
        rich_issue['updated_at'] = issue['updated_at']
        rich_issue['closed_at'] = issue['closed_at']
        rich_issue['url'] = issue['html_url']
        labels = ''
        if 'labels' in issue:
            for label in issue['labels']:
                labels += label['name'] + ";;"
        if labels != '':
            # BUG FIX: the original computed ``labels[:-2]`` and discarded the
            # result, leaving a dangling ";;" separator; assign it back.
            labels = labels[:-2]
        rich_issue['labels'] = labels

        rich_issue['repository'] = rich_issue['origin']

        rich_issue['pull_request'] = True
        rich_issue['item_type'] = 'pull request'
        # Items lacking both PR markers are plain issues.
        if 'head' not in issue and 'pull_request' not in issue:
            rich_issue['pull_request'] = False
            rich_issue['item_type'] = 'issue'

        rich_issue['github_repo'] = item['origin'].replace(GITHUB, '')
        rich_issue['github_repo'] = re.sub('.git$', '', rich_issue['github_repo'])
        rich_issue["url_id"] = rich_issue['github_repo'] + "/issues/" + rich_issue['id_in_repo']

        if self.prjs_map:
            rich_issue.update(self.get_item_project(rich_issue))

        if self.sortinghat:
            rich_issue.update(self.get_item_sh(item, self.roles))

        rich_issue.update(self.get_grimoire_fields(issue['created_at'], "issue"))

        return rich_issue

    def enrich_items(self, items):
        total = super(GitHubEnrich, self).enrich_items(items)

        logging.debug("Updating GitHub users geolocations in Elastic")
        self.geo_locations_to_es()  # Update geolocations in Elastic

        return total
class GitHubUser(object):
    """ Helper class to manage data from a Github user """

    users = {}  # cache with users from github

    def __init__(self, user):
        self.login = user['login']
        self.email = user['email']
        # 'company' may be missing from the payload; default to None so
        # _getOrg() never hits an AttributeError (original left it unset).
        self.company = user.get('company')
        self.orgs = user['orgs']
        self.org = self._getOrg()
        self.name = user['name']
        self.location = user['location']

    def _getOrg(self):
        """Return company plus the ";;"-separated list of org logins."""
        company = self.company

        if company is None:
            company = ''

        # NOTE(review): the company name and the first org login are fused
        # without a separator, as in the original -- confirm intent.
        if self.orgs:
            for org in self.orgs:
                company += org['login'] + ";;"
            # BUG FIX: strip the trailing separator only when orgs were
            # appended; the original sliced unconditionally and chopped the
            # last two characters off a plain company name.
            company = company[:-2]

        return company
|
sanacl/GrimoireELK
|
grimoire/elk/github.py
|
Python
|
gpl-3.0
| 12,563 | 0.002468 |
from django.core.management.base import BaseCommand
from candidates.models import OrganizationExtra
class Command(BaseCommand):
    # Management command: list every Party organization that has more than
    # one emblem image, printing the image count, slug, party name and each
    # image's source and URL.
    # NOTE(review): uses Python 2 print statements -- this module is
    # Python 2 only; confirm before running under Python 3.

    def handle(self, *args, **options):
        # select_related/prefetch_related avoid per-party queries for the
        # base organization and its images.
        for party_extra in OrganizationExtra.objects \
                .filter(base__classification='Party') \
                .select_related('base') \
                .prefetch_related('images'):
            images = list(party_extra.images.all())
            # Only parties with multiple emblems are interesting here.
            if len(images) < 2:
                continue
            print "====================================================="
            party = party_extra.base
            print len(images), party_extra.slug, party.name.encode('utf-8')
            for image in images:
                print '  --'
                print '    ' + image.source.encode('utf-8')
                print '    ' + image.image.url
|
datamade/yournextmp-popit
|
candidates/management/commands/candidates_parties_with_multiple_emblems.py
|
Python
|
agpl-3.0
| 834 | 0 |
'''
Created on Dec 23, 2013
@author: Chris
'''
import sys
import wx
from gooey.gui.lang import i18n
from gooey.gui.message_event import EVT_MSG
class MessagePump(object):
    """Stand-in for sys.stdout whose ``write`` must be overridden (or
    monkey-patched) before use; keeps a reference to the real stdout."""

    def __init__(self):
        self.stdout = sys.stdout

    # Overrides stdout's write method
    def write(self, text):
        raise NotImplementedError
class RuntimeDisplay(wx.Panel):
    """Read-only text panel that displays the wrapped program's output."""

    def __init__(self, parent, build_spec, **kwargs):
        wx.Panel.__init__(self, parent, **kwargs)
        self.build_spec = build_spec

        self._init_properties()
        self._init_components()
        self._do_layout()
        # self._HookStdout()

    def _init_properties(self):
        self.SetBackgroundColour('#F0F0F0')

    def _init_components(self):
        self.text = wx.StaticText(self, label=i18n._("status"))
        self.cmd_textbox = wx.TextCtrl(
            self, -1, "",
            style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH)
        if self.build_spec.get('monospace_display'):
            pointsize = self.cmd_textbox.GetFont().GetPointSize()
            # NOTE(review): wx.Font's third argument is the font *style*;
            # passing wx.FONTWEIGHT_NORMAL there looks like a typo for
            # wx.FONTSTYLE_NORMAL -- confirm against the wx API before
            # changing, as it has shipped this way.
            font = wx.Font(pointsize, wx.FONTFAMILY_MODERN,
                           wx.FONTWEIGHT_NORMAL, wx.FONTWEIGHT_BOLD, False)
            self.cmd_textbox.SetFont(font)

    def _do_layout(self):
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.AddSpacer(10)
        sizer.Add(self.text, 0, wx.LEFT, 30)
        sizer.AddSpacer(10)
        sizer.Add(self.cmd_textbox, 1, wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, 30)
        sizer.AddSpacer(20)
        self.SetSizer(sizer)

        self.Bind(EVT_MSG, self.OnMsg)

    def _HookStdout(self):
        # Currently unused (call commented out in __init__); would redirect
        # process stdout into this panel.
        _stdout = sys.stdout
        _stdout_write = _stdout.write
        sys.stdout = MessagePump()
        sys.stdout.write = self.WriteToDisplayBox

    def AppendText(self, txt):
        self.cmd_textbox.AppendText(txt)

    def WriteToDisplayBox(self, txt):
        # BUG FIX: the original used ``txt is not ''`` -- an identity test
        # against a literal, which is implementation-dependent (and a
        # SyntaxWarning on modern Python). Compare by value instead.
        if txt != '':
            self.AppendText(txt)

    def OnMsg(self, evt):
        pass
|
lrq3000/pyFileFixity
|
pyFileFixity/lib/gooey/gui/windows/runtime_display_panel.py
|
Python
|
mit
| 1,831 | 0.009285 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.